From 892fd259cbf6b1dc16116611b84f7f164a11fe2e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Oct 2021 13:50:42 +0100 Subject: [PATCH 001/268] KVM: arm64: Reorder vcpu flag definitions The vcpu arch flags are in an interesting, semi random order. As I have made the mistake of reusing a flag once, let's rework this in an order that I find a bit less confusing. Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2a5f7f38006f..3bfd30137ce2 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -417,14 +417,12 @@ struct kvm_vcpu_arch { #define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ #define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */ #define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */ +/* + * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be + * set together with an exception... + */ +#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */ #define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */ -#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ -#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ - -#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ - KVM_GUESTDBG_USE_SW_BP | \ - KVM_GUESTDBG_USE_HW | \ - KVM_GUESTDBG_SINGLESTEP) /* * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can * take the following values: @@ -442,11 +440,13 @@ struct kvm_vcpu_arch { #define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11) #define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11) -/* - * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be - * set together with an exception... - */ -#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */ +#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ +#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ + +#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ + KVM_GUESTDBG_USE_SW_BP | \ + KVM_GUESTDBG_USE_HW | \ + KVM_GUESTDBG_SINGLESTEP) #define vcpu_has_sve(vcpu) (system_supports_sve() && \ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) From 8383741ab2e773a992f1f0f8acdca5e7a4687c49 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Oct 2021 11:18:00 +0100 Subject: [PATCH 002/268] KVM: arm64: Get rid of host SVE tracking/saving The SVE host tracking in KVM is pretty involved. It relies on a set of flags tracking the ownership of the SVE register, as well as that of the EL0 access. It is also pretty scary: __hyp_sve_save_host() computes a thread_struct pointer and obtains a sve_state which gets directly accessed without further ado, even on nVHE. How can this even work? The answer to that is that it doesn't, and that this is mostly dead code. Closer examination shows that on executing a syscall, userspace loses its SVE state entirely. This is part of the ABI. Another thing to notice is that although the kernel provides helpers such as kernel_neon_begin()/end(), they only deal with the FP/NEON state, and not SVE. Given that you can only execute a guest as the result of a syscall, and that the kernel cannot use SVE by itself, it becomes pretty obvious that there is never any host SVE state to save, and that this code is only there to increase confusion. 
Get rid of the TIF_SVE tracking and host save infrastructure altogether. Reviewed-by: Mark Brown Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 - arch/arm64/kvm/fpsimd.c | 20 +++++------------- arch/arm64/kvm/hyp/include/hyp/switch.h | 27 +++---------------------- 3 files changed, 8 insertions(+), 40 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3bfd30137ce2..3ccfc3e3e436 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -411,7 +411,6 @@ struct kvm_vcpu_arch { #define KVM_ARM64_DEBUG_DIRTY (1 << 0) #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ -#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ #define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */ #define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 5621020b28de..2d15e1d6e214 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -66,22 +66,15 @@ error: * * Here, we just set the correct metadata to indicate that the FPSIMD * state in the cpu regs (if any) belongs to current on the host. - * - * TIF_SVE is backed up here, since it may get clobbered with guest state. - * This flag is restored by kvm_arch_vcpu_put_fp(vcpu). */ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) { BUG_ON(!current->mm); + BUG_ON(test_thread_flag(TIF_SVE)); - vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | - KVM_ARM64_HOST_SVE_IN_USE | - KVM_ARM64_HOST_SVE_ENABLED); + vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED; vcpu->arch.flags |= KVM_ARM64_FP_HOST; - if (test_thread_flag(TIF_SVE)) - vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE; - if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; } @@ -115,13 +108,11 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) { unsigned long flags; - bool host_has_sve = system_supports_sve(); - bool guest_has_sve = vcpu_has_sve(vcpu); local_irq_save(flags); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { - if (guest_has_sve) { + if (vcpu_has_sve(vcpu)) { __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR); /* Restore the VL that was saved when bound to the CPU */ @@ -131,7 +122,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) } fpsimd_save_and_flush_cpu_state(); - } else if (has_vhe() && host_has_sve) { + } else if (has_vhe() && system_supports_sve()) { /* * The FPSIMD/SVE state in the CPU has not been touched, and we * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been @@ -145,8 +136,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0); } - update_thread_flag(TIF_SVE, - vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE); + update_thread_flag(TIF_SVE, 0); local_irq_restore(flags); } diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 7a0af1d39303..e65c2956b881 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -143,16 +143,6 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault); } -static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu) -{ - struct thread_struct *thread; - - thread = 
container_of(vcpu->arch.host_fpsimd_state, struct thread_struct, - uw.fpsimd_state); - - __sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr); -} - static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) { sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2); @@ -169,21 +159,14 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) */ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) { - bool sve_guest, sve_host; + bool sve_guest; u8 esr_ec; u64 reg; if (!system_supports_fpsimd()) return false; - if (system_supports_sve()) { - sve_guest = vcpu_has_sve(vcpu); - sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE; - } else { - sve_guest = false; - sve_host = false; - } - + sve_guest = vcpu_has_sve(vcpu); esr_ec = kvm_vcpu_trap_get_class(vcpu); /* Don't handle SVE traps for non-SVE vcpus here: */ @@ -207,11 +190,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) isb(); if (vcpu->arch.flags & KVM_ARM64_FP_HOST) { - if (sve_host) - __hyp_sve_save_host(vcpu); - else - __fpsimd_save_state(vcpu->arch.host_fpsimd_state); - + __fpsimd_save_state(vcpu->arch.host_fpsimd_state); vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; } From e66425fc9ba33e9716d6e7c6bc78bb62f981d4df Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Nov 2021 15:59:23 +0000 Subject: [PATCH 003/268] KVM: arm64: Remove unused __sve_save_state Now that we don't have any users left for __sve_save_state, remove it altogether. Should we ever need to save the SVE state from the hypervisor again, we can always re-introduce it. Suggested-by: Zenghui Yu Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_hyp.h | 1 - arch/arm64/kvm/hyp/fpsimd.S | 6 ------ 2 files changed, 7 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 5afd14ab15b9..462882f356c7 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -90,7 +90,6 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -void __sve_save_state(void *sve_pffr, u32 *fpsr); void __sve_restore_state(void *sve_pffr, u32 *fpsr); #ifndef __KVM_NVHE_HYPERVISOR__ diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S index e950875e31ce..61e6f3ba7b7d 100644 --- a/arch/arm64/kvm/hyp/fpsimd.S +++ b/arch/arm64/kvm/hyp/fpsimd.S @@ -25,9 +25,3 @@ SYM_FUNC_START(__sve_restore_state) sve_load 0, x1, x2, 3 ret SYM_FUNC_END(__sve_restore_state) - -SYM_FUNC_START(__sve_save_state) - mov x2, #1 - sve_save 0, x1, x2, 3 - ret -SYM_FUNC_END(__sve_save_state) From af9a0e21d817f40595aa629de32d3bd96582abef Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Oct 2021 14:10:35 +0100 Subject: [PATCH 004/268] KVM: arm64: Introduce flag shadowing TIF_FOREIGN_FPSTATE We currently have to maintain a mapping the thread_info structure at EL2 in order to be able to check the TIF_FOREIGN_FPSTATE flag. In order to eventually get rid of this, start with a vcpu flag that shadows the thread flag on each entry into the hypervisor. 
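A toy model of the shadowing, in plain C with made-up names (no real threading, and not the kernel code): the host snapshots the thread flag immediately before each entry, and the hypervisor side only ever consults the snapshot, which is what the hunks below wire up.

#include <stdbool.h>
#include <stdio.h>

static bool tif_foreign_fpstate;		/* host-only: lives in thread_info */

struct toy_vcpu {
	bool fp_foreign_fpstate;		/* shadow, refreshed on every entry */
};

/* Rough equivalent of kvm_arch_vcpu_ctxflush_fp(): run just before entering hyp. */
static void ctxflush_fp(struct toy_vcpu *vcpu)
{
	vcpu->fp_foreign_fpstate = tif_foreign_fpstate;
}

/* The hypervisor never dereferences host thread_info, only the shadow. */
static void hyp_update_fp_enabled(const struct toy_vcpu *vcpu)
{
	printf("FP regs %s the host task's\n",
	       vcpu->fp_foreign_fpstate ? "are not" : "are");
}

int main(void)
{
	struct toy_vcpu vcpu = { 0 };

	ctxflush_fp(&vcpu);
	hyp_update_fp_enabled(&vcpu);

	tif_foreign_fpstate = true;		/* e.g. the task was scheduled out */
	ctxflush_fp(&vcpu);
	hyp_update_fp_enabled(&vcpu);
	return 0;
}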
Reviewed-by: Mark Brown Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/arm.c | 1 + arch/arm64/kvm/fpsimd.c | 8 ++++++++ arch/arm64/kvm/hyp/include/hyp/switch.h | 2 +- 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3ccfc3e3e436..9f1703ebae15 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -441,6 +441,7 @@ struct kvm_vcpu_arch { #define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ #define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ +#define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14) #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | \ @@ -736,6 +737,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e4727dc771bf..d1b93dc8d639 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -849,6 +849,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) } kvm_arm_setup_debug(vcpu); + kvm_arch_vcpu_ctxflush_fp(vcpu); /************************************************************** * Enter the guest diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 2d15e1d6e214..a18b9c1744d5 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -79,6 +79,14 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; } +void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu) +{ + if (test_thread_flag(TIF_FOREIGN_FPSTATE)) + vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE; + else + vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE; +} + /* * If the guest FPSIMD state was loaded, update the host's context * tracking data mark the CPU FPSIMD regs as dirty and belonging to vcpu diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index e65c2956b881..a243a2a82131 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -49,7 +49,7 @@ static inline bool update_fp_enabled(struct kvm_vcpu *vcpu) * trap the accesses. */ if (!system_supports_fpsimd() || - vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) + vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE) vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_FP_HOST); From bee14bca735a6f897a6ec3e42f3e5d2d8966e87e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Oct 2021 14:18:00 +0100 Subject: [PATCH 005/268] KVM: arm64: Stop mapping current thread_info at EL2 Now that we can track an equivalent of TIF_FOREIGN_FPSTATE, drop the mapping of current's thread_info at EL2. 
Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 2 -- arch/arm64/kvm/fpsimd.c | 12 +----------- arch/arm64/kvm/hyp/include/hyp/switch.h | 1 - arch/arm64/kvm/hyp/nvhe/switch.c | 1 - arch/arm64/kvm/hyp/vhe/switch.c | 1 - 5 files changed, 1 insertion(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 9f1703ebae15..dd5810ea37a9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -26,7 +26,6 @@ #include #include #include -#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -321,7 +320,6 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch external_debug_state; - struct thread_info *host_thread_info; /* hyp VA */ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ struct { diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index a18b9c1744d5..b5b59b9f2eff 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -7,7 +7,6 @@ */ #include #include -#include #include #include #include @@ -28,17 +27,9 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) { int ret; - struct thread_info *ti = ¤t->thread_info; struct user_fpsimd_state *fpsimd = ¤t->thread.uw.fpsimd_state; - /* - * Make sure the host task thread flags and fpsimd state are - * visible to hyp: - */ - ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP); - if (ret) - goto error; - + /* Make sure the host task fpsimd state is visible to hyp: */ ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP); if (ret) goto error; @@ -54,7 +45,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) goto error; } - vcpu->arch.host_thread_info = kern_hyp_va(ti); vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); error: return ret; diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index a243a2a82131..11e8580f2fdc 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -29,7 +29,6 @@ #include #include #include -#include struct kvm_exception_table_entry { int insn, fixup; diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c0e3fed26d93..329c706af39f 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 5a2cb5d9bc4b..1d162b9c78bf 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -24,7 +24,6 @@ #include #include #include -#include /* VHE specific context */ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); From 31aa126de88e1e9f562f708a9b0ec5917ba97dce Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 21 Oct 2021 15:33:22 +0100 Subject: [PATCH 006/268] arm64/fpsimd: Document the use of TIF_FOREIGN_FPSTATE by KVM The bit of documentation that talks about TIF_FOREIGN_FPSTATE does not mention the ungodly tricks that KVM plays with this flag. Try and document this for the posterity. Reviewed-by: Mark Brown Signed-off-by: Marc Zyngier --- arch/arm64/kernel/fpsimd.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index fa244c426f61..6fb361e8bed8 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -78,7 +78,11 @@ * indicate whether or not the userland FPSIMD state of the current task is * present in the registers. 
The flag is set unless the FPSIMD registers of this * CPU currently contain the most recent userland FPSIMD state of the current - * task. + * task. If the task is behaving as a VMM, then this is will be managed by + * KVM which will clear it to indicate that the vcpu FPSIMD state is currently + * loaded on the CPU, allowing the state to be saved if a FPSIMD-aware + * softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and + * flag the register state as invalid. * * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may * save the task's FPSIMD context back to task_struct from softirq context. From bff01a61af3c8b9756940c2fb0be2af570cdb5bf Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 14 Oct 2021 10:24:48 +0100 Subject: [PATCH 007/268] KVM: arm64: Move SVE state mapping at HYP to finalize-time We currently map the SVE state to HYP on detection of a PID change. Although this matches what we do for FPSIMD, this is pretty pointless for SVE, as the buffer is per-vcpu and has nothing to do with the thread that is being run. Move the mapping of the SVE state to finalize-time, which is where we allocate the state memory, and thus the most logical place to do this. Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/kvm/fpsimd.c | 11 ----------- arch/arm64/kvm/reset.c | 11 ++++++++++- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 5621020b28de..62c0d78da7be 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -43,17 +43,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) if (ret) goto error; - if (vcpu->arch.sve_state) { - void *sve_end; - - sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu); - - ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end, - PAGE_HYP); - if (ret) - goto error; - } - vcpu->arch.host_thread_info = kern_hyp_va(ti); vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); error: diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 426bd7fbc3fd..c7a0249df840 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -94,6 +94,8 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) { void *buf; unsigned int vl; + size_t reg_sz; + int ret; vl = vcpu->arch.sve_max_vl; @@ -106,10 +108,17 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) vl > SVE_VL_ARCH_MAX)) return -EIO; - buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT); + reg_sz = vcpu_sve_state_size(vcpu); + buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT); if (!buf) return -ENOMEM; + ret = create_hyp_mappings(buf, buf + reg_sz, PAGE_HYP); + if (ret) { + kfree(buf); + return ret; + } + vcpu->arch.sve_state = buf; vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED; return 0; From 052f064d42b7bac87cbcd140710f262c5f1c3c8b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 14 Oct 2021 11:30:42 +0100 Subject: [PATCH 008/268] KVM: arm64: Move kvm_arch_vcpu_run_pid_change() out of line Having kvm_arch_vcpu_run_pid_change() inline doesn't bring anything to the table. Move it next to kvm_vcpu_first_run_init(), which will be convenient for what is next to come. 
Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 7 +------ arch/arm64/kvm/arm.c | 5 +++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2a5f7f38006f..f464e1530985 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -749,12 +749,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); -#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ -static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) -{ - return kvm_arch_vcpu_run_map_fp(vcpu); -} - +#ifdef CONFIG_KVM void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); void kvm_clr_pmu_events(u32 clr); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e4727dc771bf..e939b3d06682 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -584,6 +584,11 @@ static void update_vmid(struct kvm_vmid *vmid) spin_unlock(&kvm_vmid_lock); } +int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) +{ + return kvm_arch_vcpu_run_map_fp(vcpu); +} + static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; From 1408e73d21feffe77680acd4da611295db0dfcd8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 14 Oct 2021 12:18:48 +0100 Subject: [PATCH 009/268] KVM: arm64: Restructure the point where has_run_once is advertised Restructure kvm_vcpu_first_run_init() to set the has_run_once flag after having completed all the "run once" activities. This includes moving the flip of the userspace irqchip static key to a point where nothing can fail. Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arm.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e939b3d06682..b30b05a2f025 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -600,8 +600,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) if (!kvm_arm_vcpu_is_finalized(vcpu)) return -EPERM; - vcpu->arch.has_run_once = true; - kvm_arm_vcpu_init_debug(vcpu); if (likely(irqchip_in_kernel(kvm))) { @@ -612,12 +610,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ret = kvm_vgic_map_resources(kvm); if (ret) return ret; - } else { - /* - * Tell the rest of the code that there are userspace irqchip - * VMs in the wild. - */ - static_branch_inc(&userspace_irqchip_in_use); } ret = kvm_timer_enable(vcpu); @@ -625,6 +617,18 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) return ret; ret = kvm_arm_pmu_v3_enable(vcpu); + if (ret) + return ret; + + if (!irqchip_in_kernel(kvm)) { + /* + * Tell the rest of the code that there are userspace irqchip + * VMs in the wild. + */ + static_branch_inc(&userspace_irqchip_in_use); + } + + vcpu->arch.has_run_once = true; /* * Initialize traps for protected VMs. From b5aa368abfbf4c0e041c792e3340955554eff97e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 14 Oct 2021 11:42:38 +0100 Subject: [PATCH 010/268] KVM: arm64: Merge kvm_arch_vcpu_run_pid_change() and kvm_vcpu_first_run_init() The kvm_arch_vcpu_run_pid_change() helper gets called on each PID change. The kvm_vcpu_first_run_init() helper gets run on the... first run(!) of a vcpu. 
As it turns out, the first run of a vcpu also triggers a PID change event (vcpu->pid is initially NULL). Use this property to merge these two helpers and get rid of another arm64-specific oddity. Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arm.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b30b05a2f025..f20a265972c7 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -584,22 +584,34 @@ static void update_vmid(struct kvm_vmid *vmid) spin_unlock(&kvm_vmid_lock); } -int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) { - return kvm_arch_vcpu_run_map_fp(vcpu); + return vcpu->arch.target >= 0; } -static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) +/* + * Handle both the initialisation that is being done when the vcpu is + * run for the first time, as well as the updates that must be + * performed each time we get a new thread dealing with this vcpu. + */ +int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - int ret = 0; + int ret; - if (likely(vcpu->arch.has_run_once)) - return 0; + if (!kvm_vcpu_initialized(vcpu)) + return -ENOEXEC; if (!kvm_arm_vcpu_is_finalized(vcpu)) return -EPERM; + ret = kvm_arch_vcpu_run_map_fp(vcpu); + if (ret) + return ret; + + if (likely(vcpu->arch.has_run_once)) + return 0; + kvm_arm_vcpu_init_debug(vcpu); if (likely(irqchip_in_kernel(kvm))) { @@ -688,11 +700,6 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu) smp_rmb(); } -static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.target >= 0; -} - static void check_vcpu_requests(struct kvm_vcpu *vcpu) { if (kvm_request_pending(vcpu)) { @@ -788,13 +795,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) struct kvm_run *run = vcpu->run; int ret; - if (unlikely(!kvm_vcpu_initialized(vcpu))) - return -ENOEXEC; - - ret = kvm_vcpu_first_run_init(vcpu); - if (ret) - return ret; - if (run->exit_reason == KVM_EXIT_MMIO) { ret = kvm_handle_mmio_return(vcpu); if (ret) From cc5705fb1bf119ebb693d594f0157e0dd418590e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 14 Oct 2021 12:13:06 +0100 Subject: [PATCH 011/268] KVM: arm64: Drop vcpu->arch.has_run_once for vcpu->pid With the transition to kvm_arch_vcpu_run_pid_change() to handle the "run once" activities, it becomes obvious that has_run_once is now an exact shadow of vcpu->pid. Replace vcpu->arch.has_run_once with a new vcpu_has_run_once() helper that directly checks for vcpu->pid, and get rid of the now unused field. 
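The equivalence is easy to model outside the kernel. The sketch below uses hypothetical types and no RCU (the real helper reads vcpu->pid with rcu_access_pointer(), as the hunk below shows); it only illustrates deriving the predicate from state that already encodes it instead of carrying a shadow boolean.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int tid; };

struct toy_vcpu {
	struct task *pid;	/* set when a thread actually runs the vcpu */
};

static bool toy_vcpu_has_run_once(const struct toy_vcpu *vcpu)
{
	return vcpu->pid != NULL;
}

int main(void)
{
	struct task thread = { .tid = 1234 };
	struct toy_vcpu vcpu = { .pid = NULL };

	printf("before first run: %d\n", toy_vcpu_has_run_once(&vcpu));	/* 0 */
	vcpu.pid = &thread;	/* what the first KVM_RUN effectively does */
	printf("after first run:  %d\n", toy_vcpu_has_run_once(&vcpu));	/* 1 */
	return 0;
}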
Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 5 ++--- arch/arm64/kvm/arm.c | 8 +++----- arch/arm64/kvm/vgic/vgic-init.c | 2 +- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f464e1530985..cbb5ff81919b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -367,9 +367,6 @@ struct kvm_vcpu_arch { int target; DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); - /* Detect first run of a vcpu */ - bool has_run_once; - /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ u64 vsesr_el2; @@ -606,6 +603,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); +#define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid) + #ifndef __KVM_NVHE_HYPERVISOR__ #define kvm_call_hyp_nvhe(f, ...) \ ({ \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index f20a265972c7..c79d8e5230bc 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -351,7 +351,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { - if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm))) + if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm))) static_branch_dec(&userspace_irqchip_in_use); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); @@ -609,7 +609,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) if (ret) return ret; - if (likely(vcpu->arch.has_run_once)) + if (likely(vcpu_has_run_once(vcpu))) return 0; kvm_arm_vcpu_init_debug(vcpu); @@ -640,8 +640,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) static_branch_inc(&userspace_irqchip_in_use); } - vcpu->arch.has_run_once = true; - /* * Initialize traps for protected VMs. * NOTE: Move to run in EL2 directly, rather than via a hypercall, once @@ -1132,7 +1130,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, * need to invalidate the I-cache though, as FWB does *not* * imply CTR_EL0.DIC. */ - if (vcpu->arch.has_run_once) { + if (vcpu_has_run_once(vcpu)) { if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) stage2_unmap_vm(vcpu->kvm); else diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 0a06d0648970..ce2b42c38e62 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -91,7 +91,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) return ret; kvm_for_each_vcpu(i, vcpu, kvm) { - if (vcpu->arch.has_run_once) + if (vcpu_has_run_once(vcpu)) goto out_unlock; } ret = 0; From 00e228b31596c6bf5f08b5ef76f80cb26e620f02 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 26 Nov 2021 11:36:43 +0000 Subject: [PATCH 012/268] KVM: arm64: Add minimal handling for the ARMv8.7 PMU When running a KVM guest hosted on an ARMv8.7 machine, the host kernel complains that it doesn't know about the architected number of events. Fix it by adding the PMUver code corresponding to PMUv3 for ARMv8.7. 
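Viewed on its own, the change is just one more case label. A standalone sketch of the decode, with local constants standing in for the ID_AA64DFR0_PMUVER_* definitions below:

#include <stdio.h>

#define PMUVER_8_1	0x4
#define PMUVER_8_4	0x5
#define PMUVER_8_5	0x6
#define PMUVER_8_7	0x7	/* the newly recognised encoding */

static unsigned int toy_pmu_event_mask(unsigned int pmuver)
{
	switch (pmuver) {
	case PMUVER_8_1:
	case PMUVER_8_4:
	case PMUVER_8_5:
	case PMUVER_8_7:
		return 0xffff;	/* GENMASK(15, 0): 16-bit event space since PMUv3 for v8.1 */
	default:
		/* The kernel helper handles more versions and warns here. */
		fprintf(stderr, "unhandled PMU version %#x\n", pmuver);
		return 0;
	}
}

int main(void)
{
	printf("ARMv8.7 event mask: %#x\n", toy_pmu_event_mask(PMUVER_8_7));
	return 0;
}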
Reviewed-by: Alexandru Elisei Tested-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211126115533.217903-1-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kvm/pmu-emul.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 16b3f1a1d468..615dd6278f8b 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -937,6 +937,7 @@ #define ID_AA64DFR0_PMUVER_8_1 0x4 #define ID_AA64DFR0_PMUVER_8_4 0x5 #define ID_AA64DFR0_PMUVER_8_5 0x6 +#define ID_AA64DFR0_PMUVER_8_7 0x7 #define ID_AA64DFR0_PMUVER_IMP_DEF 0xf #define ID_AA64DFR0_PMSVER_8_2 0x1 diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index a5e4bbf5e68f..ca92cc5c71c6 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -28,6 +28,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) case ID_AA64DFR0_PMUVER_8_1: case ID_AA64DFR0_PMUVER_8_4: case ID_AA64DFR0_PMUVER_8_5: + case ID_AA64DFR0_PMUVER_8_7: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver); From 636dcd0204599f94680f8f87b46cef7c66034ac0 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 22:35:18 +0100 Subject: [PATCH 013/268] KVM: arm64: Constify kvm_io_gic_ops The only usage of kvm_io_gic_ops is to make a comparison with its address and to pass its address to kvm_iodevice_init() which takes a pointer to const kvm_io_device_ops as input. Make it const to allow the compiler to put it in read-only memory. Signed-off-by: Rikard Falkeborn Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211204213518.83642-1-rikard.falkeborn@gmail.com --- arch/arm64/kvm/vgic/vgic-mmio.c | 2 +- arch/arm64/kvm/vgic/vgic-mmio.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index 48c6067fc5ec..7068da080799 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -1050,7 +1050,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, return 0; } -struct kvm_io_device_ops kvm_io_gic_ops = { +const struct kvm_io_device_ops kvm_io_gic_ops = { .read = dispatch_mmio_read, .write = dispatch_mmio_write, }; diff --git a/arch/arm64/kvm/vgic/vgic-mmio.h b/arch/arm64/kvm/vgic/vgic-mmio.h index fefcca2b14dc..3fa696f198a3 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.h +++ b/arch/arm64/kvm/vgic/vgic-mmio.h @@ -34,7 +34,7 @@ struct vgic_register_region { }; }; -extern struct kvm_io_device_ops kvm_io_gic_ops; +extern const struct kvm_io_device_ops kvm_io_gic_ops; #define VGIC_ACCESS_8bit 1 #define VGIC_ACCESS_32bit 2 From 7e04f05984dd03edad7daaa4fa97958b7133c62a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 2 Dec 2021 17:10:46 +0000 Subject: [PATCH 014/268] arm64: Add missing include of asm/cpufeature.h to asm/mmu.h asm/mmu.h refers to cpus_have_const_cap() in the definition of arm64_kernel_unmapped_at_el0() so include asm/cpufeature.h directly rather than force all users of the header to do it themselves. 
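A minimal reproduction of the failure mode, with made-up header names rather than the real asm/ ones (three small files shown in one listing):

/* feature.h */
#ifndef FEATURE_H
#define FEATURE_H
static inline int have_feature(void) { return 1; }
#endif

/*
 * mmu-like.h: its inline helper calls have_feature(), so it must pull in
 * feature.h itself.  Without that include it only compiles for users that
 * happened to include feature.h first, which is the situation asm/mmu.h
 * was in.
 */
#ifndef MMU_LIKE_H
#define MMU_LIKE_H
#include "feature.h"
static inline int unmapped_at_el0(void) { return !have_feature(); }
#endif

/* user.c: with the dependency included above, this is now self-sufficient. */
#include "mmu-like.h"
int main(void) { return unmapped_at_el0(); }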
Signed-off-by: Will Deacon Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211202171048.26924-2-will@kernel.org --- arch/arm64/include/asm/mmu.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index e9c30859f80c..48f8466a4be9 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -15,6 +15,7 @@ #ifndef __ASSEMBLY__ #include +#include typedef struct { atomic64_t id; From ed4ed15d571065eb66ea718d7f6050553586417d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 2 Dec 2021 17:10:47 +0000 Subject: [PATCH 015/268] KVM: arm64: Generate hyp_constants.h for the host In order to avoid exposing hypervisor (EL2) data structures directly to the host, generate hyp_constants.h to provide constants such as structure sizes to the host without dragging in the definitions themselves. Signed-off-by: Will Deacon Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211202171048.26924-3-will@kernel.org --- arch/arm64/kvm/.gitignore | 2 ++ arch/arm64/kvm/Makefile | 16 ++++++++++++++++ arch/arm64/kvm/hyp/hyp-constants.c | 10 ++++++++++ 3 files changed, 28 insertions(+) create mode 100644 arch/arm64/kvm/.gitignore create mode 100644 arch/arm64/kvm/hyp/hyp-constants.c diff --git a/arch/arm64/kvm/.gitignore b/arch/arm64/kvm/.gitignore new file mode 100644 index 000000000000..6182aefb8302 --- /dev/null +++ b/arch/arm64/kvm/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +hyp_constants.h diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 989bb5dad2c8..0b561752f8d8 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -25,3 +25,19 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \ vgic/vgic-its.o vgic/vgic-debug.o kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o + +always-y := hyp_constants.h hyp-constants.s + +define rule_gen_hyp_constants + $(call filechk,offsets,__HYP_CONSTANTS_H__) +endef + +CFLAGS_hyp-constants.o = -I $(srctree)/$(src)/hyp/include +$(obj)/hyp-constants.s: $(src)/hyp/hyp-constants.c FORCE + $(call if_changed_dep,cc_s_c) + +$(obj)/hyp_constants.h: $(obj)/hyp-constants.s FORCE + $(call if_changed_rule,gen_hyp_constants) + +obj-kvm := $(addprefix $(obj)/, $(kvm-y)) +$(obj-kvm): $(obj)/hyp_constants.h diff --git a/arch/arm64/kvm/hyp/hyp-constants.c b/arch/arm64/kvm/hyp/hyp-constants.c new file mode 100644 index 000000000000..b3742a6691e8 --- /dev/null +++ b/arch/arm64/kvm/hyp/hyp-constants.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +int main(void) +{ + DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page)); + return 0; +} From 9429f4b0412d05243237c7695c59d0a7b1174492 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 2 Dec 2021 17:10:48 +0000 Subject: [PATCH 016/268] KVM: arm64: Move host EL1 code out of hyp/ directory kvm/hyp/reserved_mem.c contains host code executing at EL1 and is not linked into the hypervisor object. Move the file into kvm/pkvm.c and rework the headers so that the definitions shared between the host and the hypervisor live in asm/kvm_pkvm.h. 
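The reason the relocated code can still size the hyp vmemmap is the header generated by the previous patch: kvm_hyp_reserve() now uses STRUCT_HYP_PAGE_SIZE rather than sizeof(struct hyp_page). The generation is the usual asm-offsets trick; a sketch of it is below, with a stand-in structure (the DEFINE() helper is the one commonly found in include/linux/kbuild.h, the rest is illustrative). Such a file is only ever compiled to assembly, never linked or executed: the .ascii directives embed "->NAME value" markers that the filechk_offsets rule turns into #define lines.

/* hyp-constants-sketch.c: build with $(CC) -S; the output is post-processed. */

/* From include/linux/kbuild.h: record a compile-time constant in the .s file. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

/* Stand-in only; the real definition stays private to the EL2 object. */
struct hyp_page {
	unsigned short refcount;
	unsigned short order;
};

int main(void)
{
	DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page));
	return 0;
}

The host ends up knowing the structure's size and nothing else about it, which is all the DIV_ROUND_UP() in kvm_hyp_reserve() needs.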
Signed-off-by: Will Deacon Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211202171048.26924-4-will@kernel.org --- arch/arm64/include/asm/kvm_pkvm.h | 71 +++++++++++++++++++ arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/hyp/Makefile | 2 +- arch/arm64/kvm/hyp/include/nvhe/mm.h | 57 --------------- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 1 + arch/arm64/kvm/hyp/nvhe/mm.c | 1 + arch/arm64/kvm/hyp/nvhe/setup.c | 1 + arch/arm64/kvm/{hyp/reserved_mem.c => pkvm.c} | 8 +-- 8 files changed, 80 insertions(+), 63 deletions(-) create mode 100644 arch/arm64/include/asm/kvm_pkvm.h rename arch/arm64/kvm/{hyp/reserved_mem.c => pkvm.c} (94%) diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h new file mode 100644 index 000000000000..9f4ad2a8df59 --- /dev/null +++ b/arch/arm64/include/asm/kvm_pkvm.h @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 - Google LLC + * Author: Quentin Perret + */ +#ifndef __ARM64_KVM_PKVM_H__ +#define __ARM64_KVM_PKVM_H__ + +#include +#include + +#define HYP_MEMBLOCK_REGIONS 128 + +extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; +extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); + +static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) +{ + unsigned long total = 0, i; + + /* Provision the worst case scenario */ + for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) { + nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); + total += nr_pages; + } + + return total; +} + +static inline unsigned long __hyp_pgtable_total_pages(void) +{ + unsigned long res = 0, i; + + /* Cover all of memory with page-granularity */ + for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) { + struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i]; + res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT); + } + + return res; +} + +static inline unsigned long hyp_s1_pgtable_pages(void) +{ + unsigned long res; + + res = __hyp_pgtable_total_pages(); + + /* Allow 1 GiB for private mappings */ + res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); + + return res; +} + +static inline unsigned long host_s2_pgtable_pages(void) +{ + unsigned long res; + + /* + * Include an extra 16 pages to safely upper-bound the worst case of + * concatenated pgds. 
+ */ + res = __hyp_pgtable_total_pages() + 16; + + /* Allow 1 GiB for MMIO mappings */ + res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); + + return res; +} + +#endif /* __ARM64_KVM_PKVM_H__ */ diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 0b561752f8d8..8ca8cf6f5619 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -15,7 +15,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \ arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \ inject_fault.o va_layout.o handle_exit.o \ guest.o debug.o reset.o sys_regs.o \ - vgic-sys-reg-v3.o fpsimd.o pmu.o \ + vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \ arch_timer.o trng.o\ vgic/vgic.o vgic/vgic-init.o \ vgic/vgic-irqfd.o vgic/vgic-v2.o \ diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index b726332eec49..687598e41b21 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \ -DDISABLE_BRANCH_PROFILING \ $(DISABLE_STACKLEAK_PLUGIN) -obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o +obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h index c9a8f535212e..ef6a58a04235 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -10,9 +10,6 @@ #include #include -#define HYP_MEMBLOCK_REGIONS 128 -extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; -extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); extern struct kvm_pgtable pkvm_pgtable; extern hyp_spinlock_t pkvm_pgd_lock; extern struct hyp_pool hpool; @@ -39,58 +36,4 @@ static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size, *end = ALIGN(*end, PAGE_SIZE); } -static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) -{ - unsigned long total = 0, i; - - /* Provision the worst case scenario */ - for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) { - nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); - total += nr_pages; - } - - return total; -} - -static inline unsigned long __hyp_pgtable_total_pages(void) -{ - unsigned long res = 0, i; - - /* Cover all of memory with page-granularity */ - for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) { - struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i]; - res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT); - } - - return res; -} - -static inline unsigned long hyp_s1_pgtable_pages(void) -{ - unsigned long res; - - res = __hyp_pgtable_total_pages(); - - /* Allow 1 GiB for private mappings */ - res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); - - return res; -} - -static inline unsigned long host_s2_pgtable_pages(void) -{ - unsigned long res; - - /* - * Include an extra 16 pages to safely upper-bound the worst case of - * concatenated pgds. 
- */ - res = __hyp_pgtable_total_pages() + 16; - - /* Allow 1 GiB for MMIO mappings */ - res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); - - return res; -} - #endif /* __KVM_HYP_MM_H */ diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index c1a90dd022b8..92262e89672d 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c index 2fabeceb889a..9e0ff5a700dd 100644 --- a/arch/arm64/kvm/hyp/nvhe/mm.c +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 578f71798c2e..51e68a040d8a 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/pkvm.c similarity index 94% rename from arch/arm64/kvm/hyp/reserved_mem.c rename to arch/arm64/kvm/pkvm.c index 578670e3f608..ebecb7c045f4 100644 --- a/arch/arm64/kvm/hyp/reserved_mem.c +++ b/arch/arm64/kvm/pkvm.c @@ -8,10 +8,9 @@ #include #include -#include +#include -#include -#include +#include "hyp_constants.h" static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory); static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr); @@ -82,7 +81,8 @@ void __init kvm_hyp_reserve(void) do { prev = nr_pages; nr_pages = hyp_mem_pages + prev; - nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE); + nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE, + PAGE_SIZE); nr_pages += __hyp_pgtable_max_pages(nr_pages); } while (nr_pages != prev); hyp_mem_pages += nr_pages; From f0e6e6fa41b3d2aa1dcb61dd4ed6d7be004bb5a8 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 18 Oct 2021 17:14:07 +0200 Subject: [PATCH 017/268] KVM: Drop stale kvm_is_transparent_hugepage() declaration kvm_is_transparent_hugepage() was removed in commit 205d76ff0684 ("KVM: Remove kvm_is_transparent_hugepage() and PageTransCompoundMap()") but its declaration in include/linux/kvm_host.h persisted. Drop it. Fixes: 205d76ff0684 (""KVM: Remove kvm_is_transparent_hugepage() and PageTransCompoundMap()") Signed-off-by: Vitaly Kuznetsov Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211018151407.2107363-1-vkuznets@redhat.com --- include/linux/kvm_host.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c310648cc8f1..6d138adc78af 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1174,7 +1174,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); bool kvm_is_reserved_pfn(kvm_pfn_t pfn); bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); -bool kvm_is_transparent_hugepage(kvm_pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; From 49ae248b61aefa0eff84dca8e81bd9306cdaa6c9 Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Thu, 18 Nov 2021 11:25:22 +0100 Subject: [PATCH 018/268] KVM: s390: Fix names of skey constants in api documentation They are defined in include/uapi/linux/kvm.h as KVM_S390_GET_SKEYS_NONE and KVM_S390_SKEYS_MAX, but the api documetation talks of KVM_S390_GET_KEYS_NONE and KVM_S390_SKEYS_ALLOC_MAX respectively. 
Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Janosch Frank Message-Id: <20211118102522.569660-1-scgl@linux.ibm.com> Signed-off-by: Janosch Frank --- Documentation/virt/kvm/api.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index aeeb071c7688..b86c7edae888 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -3701,7 +3701,7 @@ KVM with the currently defined set of flags. :Architectures: s390 :Type: vm ioctl :Parameters: struct kvm_s390_skeys -:Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage +:Returns: 0 on success, KVM_S390_GET_SKEYS_NONE if guest is not using storage keys, negative value on error This ioctl is used to get guest storage key values on the s390 @@ -3720,7 +3720,7 @@ you want to get. The count field is the number of consecutive frames (starting from start_gfn) whose storage keys to get. The count field must be at least 1 and the maximum -allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range +allowed value is defined as KVM_S390_SKEYS_MAX. Values outside this range will cause the ioctl to return -EINVAL. The skeydata_addr field is the address to a buffer large enough to hold count @@ -3744,7 +3744,7 @@ you want to set. The count field is the number of consecutive frames (starting from start_gfn) whose storage keys to get. The count field must be at least 1 and the maximum -allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range +allowed value is defined as KVM_S390_SKEYS_MAX. Values outside this range will cause the ioctl to return -EINVAL. The skeydata_addr field is the address to a buffer containing count bytes of From dc1ce45575b3401102568dd60ba8894849d1d64b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 16 Nov 2021 05:11:10 -0500 Subject: [PATCH 019/268] KVM: MMU: update comment on the number of page role combinations Fix the number of bits in the role, and simplify the explanation of why several bits or combinations of bits are redundant. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 860ed500580c..d9f09535384b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -291,19 +291,25 @@ struct kvm_kernel_irq_routing_entry; * the number of unique SPs that can theoretically be created is 2^n, where n * is the number of bits that are used to compute the role. * - * But, even though there are 18 bits in the mask below, not all combinations - * of modes and flags are possible. The maximum number of possible upper-level - * shadow pages for a single gfn is in the neighborhood of 2^13. + * But, even though there are 19 bits in the mask below, not all combinations + * of modes and flags are possible: * - * - invalid shadow pages are not accounted. - * - level is effectively limited to four combinations, not 16 as the number - * bits would imply, as 4k SPs are not tracked (allowed to go unsync). - * - level is effectively unused for non-PAE paging because there is exactly - * one upper level (see 4k SP exception above). - * - quadrant is used only for non-PAE paging and is exclusive with - * gpte_is_8_bytes. - * - execonly and ad_disabled are used only for nested EPT, which makes it - * exclusive with quadrant. 
+ * - invalid shadow pages are not accounted, so the bits are effectively 18 + * + * - quadrant will only be used if gpte_is_8_bytes=0 (non-PAE paging); + * execonly and ad_disabled are only used for nested EPT which has + * gpte_is_8_bytes=1. Therefore, 2 bits are always unused. + * + * - the 4 bits of level are effectively limited to the values 2/3/4/5, + * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE + * paging has exactly one upper level, making level completely redundant + * when gpte_is_8_bytes=0. + * + * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if + * cr0_wp=0, therefore these three bits only give rise to 5 possibilities. + * + * Therefore, the maximum number of possible upper-level shadow pages for a + * single gfn is a bit less than 2^13. */ union kvm_mmu_page_role { u32 word; From 27592ae8dbe41033261b6fdf27d78998aabd2665 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:03:57 +0000 Subject: [PATCH 020/268] KVM: Move wiping of the kvm->vcpus array to common code All architectures have similar loops iterating over the vcpus, freeing one vcpu at a time, and eventually wiping the reference off the vcpus array. They are also inconsistently taking the kvm->lock mutex when wiping the references from the array. Make this code common, which will simplify further changes. The locking is dropped altogether, as this should only be called when there is no further references on the kvm structure. Reviewed-by: Claudio Imbrenda Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-2-maz@kernel.org> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arm.c | 10 +--------- arch/mips/kvm/mips.c | 21 +-------------------- arch/powerpc/kvm/powerpc.c | 10 +--------- arch/riscv/kvm/vm.c | 10 +--------- arch/s390/kvm/kvm-s390.c | 18 +----------------- arch/x86/kvm/x86.c | 9 +-------- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 17 +++++++++++++++-- 8 files changed, 22 insertions(+), 75 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e4727dc771bf..362b10cb992c 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -175,19 +175,11 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) */ void kvm_arch_destroy_vm(struct kvm *kvm) { - int i; - bitmap_free(kvm->arch.pmu_filter); kvm_vgic_destroy(kvm); - for (i = 0; i < KVM_MAX_VCPUS; ++i) { - if (kvm->vcpus[i]) { - kvm_vcpu_destroy(kvm->vcpus[i]); - kvm->vcpus[i] = NULL; - } - } - atomic_set(&kvm->online_vcpus, 0); + kvm_destroy_vcpus(kvm); } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index aa20d074d388..6aa5e3771d05 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -171,25 +171,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return 0; } -void kvm_mips_free_vcpus(struct kvm *kvm) -{ - unsigned int i; - struct kvm_vcpu *vcpu; - - kvm_for_each_vcpu(i, vcpu, kvm) { - kvm_vcpu_destroy(vcpu); - } - - mutex_lock(&kvm->lock); - - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) - kvm->vcpus[i] = NULL; - - atomic_set(&kvm->online_vcpus, 0); - - mutex_unlock(&kvm->lock); -} - static void kvm_mips_free_gpa_pt(struct kvm *kvm) { /* It should always be safe to remove after flushing the whole range */ @@ -199,7 +180,7 @@ static void kvm_mips_free_gpa_pt(struct kvm *kvm) void kvm_arch_destroy_vm(struct kvm *kvm) { - kvm_mips_free_vcpus(kvm); + kvm_destroy_vcpus(kvm); kvm_mips_free_gpa_pt(kvm); } diff --git 
a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index a72920f4f221..98f5d90ebf5a 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -463,9 +463,6 @@ err_out: void kvm_arch_destroy_vm(struct kvm *kvm) { - unsigned int i; - struct kvm_vcpu *vcpu; - #ifdef CONFIG_KVM_XICS /* * We call kick_all_cpus_sync() to ensure that all @@ -476,14 +473,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kick_all_cpus_sync(); #endif - kvm_for_each_vcpu(i, vcpu, kvm) - kvm_vcpu_destroy(vcpu); + kvm_destroy_vcpus(kvm); mutex_lock(&kvm->lock); - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) - kvm->vcpus[i] = NULL; - - atomic_set(&kvm->online_vcpus, 0); kvmppc_core_destroy_vm(kvm); diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index fb18af34a4b5..7619691d8953 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -46,15 +46,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) void kvm_arch_destroy_vm(struct kvm *kvm) { - int i; - - for (i = 0; i < KVM_MAX_VCPUS; ++i) { - if (kvm->vcpus[i]) { - kvm_vcpu_destroy(kvm->vcpus[i]); - kvm->vcpus[i] = NULL; - } - } - atomic_set(&kvm->online_vcpus, 0); + kvm_destroy_vcpus(kvm); } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 14a18ba5ff2c..6eeb59af5d74 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2821,27 +2821,11 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) free_page((unsigned long)(vcpu->arch.sie_block)); } -static void kvm_free_vcpus(struct kvm *kvm) -{ - unsigned int i; - struct kvm_vcpu *vcpu; - - kvm_for_each_vcpu(i, vcpu, kvm) - kvm_vcpu_destroy(vcpu); - - mutex_lock(&kvm->lock); - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) - kvm->vcpus[i] = NULL; - - atomic_set(&kvm->online_vcpus, 0); - mutex_unlock(&kvm->lock); -} - void kvm_arch_destroy_vm(struct kvm *kvm) { u16 rc, rrc; - kvm_free_vcpus(kvm); + kvm_destroy_vcpus(kvm); sca_dispose(kvm); kvm_s390_gisa_destroy(kvm); /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e0aa4dd53c7f..0e6d11a726cd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11423,15 +11423,8 @@ static void kvm_free_vcpus(struct kvm *kvm) kvm_clear_async_pf_completion_queue(vcpu); kvm_unload_vcpu_mmu(vcpu); } - kvm_for_each_vcpu(i, vcpu, kvm) - kvm_vcpu_destroy(vcpu); - mutex_lock(&kvm->lock); - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) - kvm->vcpus[i] = NULL; - - atomic_set(&kvm->online_vcpus, 0); - mutex_unlock(&kvm->lock); + kvm_destroy_vcpus(kvm); } void kvm_arch_sync_events(struct kvm *kvm) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c310648cc8f1..e2f9f8f67c58 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -733,7 +733,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) if (WARN_ON_ONCE(!memslot->npages)) { \ } else -void kvm_vcpu_destroy(struct kvm_vcpu *vcpu); +void kvm_destroy_vcpus(struct kvm *kvm); void vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 72c4e6b39389..0a504c7988dc 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -435,7 +435,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) vcpu->last_used_slot = 0; } -void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) +static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_dirty_ring_free(&vcpu->dirty_ring); kvm_arch_vcpu_destroy(vcpu); @@ -450,7 +450,20 @@ 
void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->run); kmem_cache_free(kvm_vcpu_cache, vcpu); } -EXPORT_SYMBOL_GPL(kvm_vcpu_destroy); + +void kvm_destroy_vcpus(struct kvm *kvm) +{ + unsigned int i; + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { + kvm_vcpu_destroy(vcpu); + kvm->vcpus[i] = NULL; + } + + atomic_set(&kvm->online_vcpus, 0); +} +EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) From 75a9869f314d83ac70ebaa0b93773300681ab077 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:03:58 +0000 Subject: [PATCH 021/268] KVM: mips: Use kvm_get_vcpu() instead of open-coded access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we are about to change the way vcpus are allocated, mandate the use of kvm_get_vcpu() instead of open-coding the access. Reviewed-by: Philippe Mathieu-Daudé Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-3-maz@kernel.org> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/loongson_ipi.c | 4 ++-- arch/mips/kvm/mips.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c index 3681fc8fba38..5d53f32d837c 100644 --- a/arch/mips/kvm/loongson_ipi.c +++ b/arch/mips/kvm/loongson_ipi.c @@ -120,7 +120,7 @@ static int loongson_vipi_write(struct loongson_kvm_ipi *ipi, s->status |= data; irq.cpu = id; irq.irq = 6; - kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq); + kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq); break; case CORE0_CLEAR_OFF: @@ -128,7 +128,7 @@ static int loongson_vipi_write(struct loongson_kvm_ipi *ipi, if (!s->status) { irq.cpu = id; irq.irq = -6; - kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq); + kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq); } break; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 6aa5e3771d05..043204cd585f 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -479,7 +479,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, if (irq->cpu == -1) dvcpu = vcpu; else - dvcpu = vcpu->kvm->vcpus[irq->cpu]; + dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu); if (intr == 2 || intr == 3 || intr == 4 || intr == 6) { kvm_mips_callbacks->queue_io_int(dvcpu, irq); From 113d10bca23cdd522da26969b96db8711b182642 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:03:59 +0000 Subject: [PATCH 022/268] KVM: s390: Use kvm_get_vcpu() instead of open-coded access As we are about to change the way vcpus are allocated, mandate the use of kvm_get_vcpu() instead of open-coding the access. 
Reviewed-by: Claudio Imbrenda Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-4-maz@kernel.org> Signed-off-by: Paolo Bonzini --- arch/s390/kvm/kvm-s390.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 6eeb59af5d74..7a0f5abaa484 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4574,7 +4574,7 @@ int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) } for (i = 0; i < online_vcpus; i++) { - if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) + if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) started_vcpus++; } @@ -4636,9 +4636,11 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) __disable_ibs_on_vcpu(vcpu); for (i = 0; i < online_vcpus; i++) { - if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { + struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); + + if (!is_vcpu_stopped(tmp)) { started_vcpus++; - started_vcpu = vcpu->kvm->vcpus[i]; + started_vcpu = tmp; } } From c5b077549136584618a66258f09d8d4b41e7409c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:04:01 +0000 Subject: [PATCH 023/268] KVM: Convert the kvm->vcpus array to a xarray At least on arm64 and x86, the vcpus array is pretty huge (up to 1024 entries on x86) and is mostly empty in the majority of cases (running 1k vcpu VMs is not that common). This means that we end up with a 4kB block of unused memory in the middle of the kvm structure. Instead of wasting away this memory, let's switch to an xarray, which gives us almost the same flexibility as a normal array, but with reduced memory usage for smaller VMs. Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-6-maz@kernel.org> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 5 +++-- virt/kvm/kvm_main.c | 15 +++++++++------ 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e2f9f8f67c58..2201dc07126a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -552,7 +553,7 @@ struct kvm { struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; - struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + struct xarray vcpu_array; /* Used to wait for completion of MMU notifiers. */ spinlock_t mn_invalidate_lock; @@ -701,7 +702,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.
*/ smp_rmb(); - return kvm->vcpus[i]; + return xa_load(&kvm->vcpu_array, i); } #define kvm_for_each_vcpu(idx, vcpup, kvm) \ diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 0a504c7988dc..594f90307b20 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -458,7 +458,7 @@ void kvm_destroy_vcpus(struct kvm *kvm) kvm_for_each_vcpu(i, vcpu, kvm) { kvm_vcpu_destroy(vcpu); - kvm->vcpus[i] = NULL; + xa_erase(&kvm->vcpu_array, i); } atomic_set(&kvm->online_vcpus, 0); @@ -1063,6 +1063,7 @@ static struct kvm *kvm_create_vm(unsigned long type) mutex_init(&kvm->slots_arch_lock); spin_lock_init(&kvm->mn_invalidate_lock); rcuwait_init(&kvm->mn_memslots_update_rcuwait); + xa_init(&kvm->vcpu_array); INIT_LIST_HEAD(&kvm->devices); @@ -3598,7 +3599,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) } vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); - BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); + r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT); + BUG_ON(r == -EBUSY); + if (r) + goto unlock_vcpu_destroy; /* Fill the stats id string for the vcpu */ snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d", @@ -3608,15 +3612,14 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { + xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx); kvm_put_kvm_no_destroy(kvm); goto unlock_vcpu_destroy; } - kvm->vcpus[vcpu->vcpu_idx] = vcpu; - /* - * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus - * before kvm->online_vcpu's incremented value. + * Pairs with smp_rmb() in kvm_get_vcpu. Store the vcpu + * pointer before kvm->online_vcpu's incremented value. */ smp_wmb(); atomic_inc(&kvm->online_vcpus); From 46808a4cb89708c2e5b264eb9d1035762581921b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:04:02 +0000 Subject: [PATCH 024/268] KVM: Use 'unsigned long' as kvm_for_each_vcpu()'s index Everywhere we use kvm_for_each_vcpu(), we use an int as the vcpu index. Unfortunately, we're about to rework the iterator, which requires this to be upgraded to an unsigned long. Let's bite the bullet and repaint all of it in one go.
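The index has to grow to an unsigned long because the xarray iterators index entries with an unsigned long, so any counter shared with them must match that type. A short sketch of the shape such a loop takes — illustrative only, not the actual kvm_for_each_vcpu() definition:

#include <linux/xarray.h>

static void visit_all(struct xarray *xa)
{
	unsigned long index;	/* xa_for_each() indexes with an unsigned long */
	void *entry;

	xa_for_each(xa, index, entry) {
		/* entry is the non-NULL pointer stored at 'index' */
	}
}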
Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-7-maz@kernel.org> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arch_timer.c | 8 ++++---- arch/arm64/kvm/arm.c | 6 +++--- arch/arm64/kvm/pmu-emul.c | 2 +- arch/arm64/kvm/psci.c | 6 +++--- arch/arm64/kvm/reset.c | 2 +- arch/arm64/kvm/vgic/vgic-init.c | 10 ++++++---- arch/arm64/kvm/vgic/vgic-kvm-device.c | 2 +- arch/arm64/kvm/vgic/vgic-mmio-v2.c | 3 +-- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 7 ++++--- arch/arm64/kvm/vgic/vgic-v3.c | 4 ++-- arch/arm64/kvm/vgic/vgic-v4.c | 5 +++-- arch/arm64/kvm/vgic/vgic.c | 2 +- arch/powerpc/kvm/book3s_32_mmu.c | 2 +- arch/powerpc/kvm/book3s_64_mmu.c | 2 +- arch/powerpc/kvm/book3s_hv.c | 8 ++++---- arch/powerpc/kvm/book3s_pr.c | 2 +- arch/powerpc/kvm/book3s_xics.c | 6 +++--- arch/powerpc/kvm/book3s_xics.h | 2 +- arch/powerpc/kvm/book3s_xive.c | 15 +++++++++------ arch/powerpc/kvm/book3s_xive.h | 4 ++-- arch/powerpc/kvm/book3s_xive_native.c | 8 ++++---- arch/powerpc/kvm/e500_emulate.c | 2 +- arch/riscv/kvm/vcpu_sbi.c | 2 +- arch/riscv/kvm/vmid.c | 2 +- arch/s390/kvm/interrupt.c | 2 +- arch/s390/kvm/kvm-s390.c | 21 +++++++++++---------- arch/s390/kvm/kvm-s390.h | 4 ++-- arch/x86/kvm/hyperv.c | 7 ++++--- arch/x86/kvm/i8254.c | 2 +- arch/x86/kvm/i8259.c | 5 +++-- arch/x86/kvm/ioapic.c | 4 ++-- arch/x86/kvm/irq_comm.c | 7 ++++--- arch/x86/kvm/kvm_onhyperv.c | 3 ++- arch/x86/kvm/lapic.c | 6 +++--- arch/x86/kvm/svm/avic.c | 2 +- arch/x86/kvm/svm/sev.c | 9 +++++---- arch/x86/kvm/x86.c | 23 ++++++++++++----------- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 13 +++++++------ 39 files changed, 118 insertions(+), 104 deletions(-) diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 3df67c127489..d6f4114f1d11 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -750,7 +750,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) /* Make the updates of cntvoff for all vtimer contexts atomic */ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) { - int i; + unsigned long i; struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tmp; @@ -1189,8 +1189,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) { - int vtimer_irq, ptimer_irq; - int i, ret; + int vtimer_irq, ptimer_irq, ret; + unsigned long i; vtimer_irq = vcpu_vtimer(vcpu)->irq.irq; ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu)); @@ -1297,7 +1297,7 @@ void kvm_timer_init_vhe(void) static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { vcpu_vtimer(vcpu)->irq.irq = vtimer_irq; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 362b10cb992c..b6e65c6eb1d3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -631,7 +631,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm) void kvm_arm_halt_guest(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) @@ -641,7 +641,7 @@ void kvm_arm_halt_guest(struct kvm *kvm) void kvm_arm_resume_guest(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { @@ -2027,7 +2027,7 @@ static int finalize_hyp_mode(void) struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; mpidr &= MPIDR_HWID_BITMASK; kvm_for_each_vcpu(i, vcpu, kvm) { diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c 
index a5e4bbf5e68f..0404357705a8 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -900,7 +900,7 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu) */ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 74c47d420253..ed675fce8fb7 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -121,8 +121,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) { - int i, matching_cpus = 0; - unsigned long mpidr; + int matching_cpus = 0; + unsigned long i, mpidr; unsigned long target_affinity; unsigned long target_affinity_mask; unsigned long lowest_affinity_level; @@ -164,7 +164,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) { - int i; + unsigned long i; struct kvm_vcpu *tmp; /* diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 426bd7fbc3fd..97de30a79770 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -170,7 +170,7 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu) { struct kvm_vcpu *tmp; bool is32bit; - int i; + unsigned long i; is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT); if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 0a06d0648970..a7382bda9676 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -70,8 +70,9 @@ void kvm_vgic_early_init(struct kvm *kvm) */ int kvm_vgic_create(struct kvm *kvm, u32 type) { - int i, ret; struct kvm_vcpu *vcpu; + unsigned long i; + int ret; if (irqchip_in_kernel(kvm)) return -EEXIST; @@ -255,7 +256,8 @@ int vgic_init(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int ret = 0, i, idx; + int ret = 0, i; + unsigned long idx; if (vgic_initialized(kvm)) return 0; @@ -308,7 +310,7 @@ int vgic_init(struct kvm *kvm) goto out; } - kvm_for_each_vcpu(i, vcpu, kvm) + kvm_for_each_vcpu(idx, vcpu, kvm) kvm_vgic_vcpu_enable(vcpu); ret = kvm_vgic_setup_default_irq_routing(kvm); @@ -370,7 +372,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) static void __kvm_vgic_destroy(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; vgic_debug_destroy(kvm); diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 0d000d2fe8d2..c6d52a1fd9c8 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -325,7 +325,7 @@ void unlock_all_vcpus(struct kvm *kvm) bool lock_all_vcpus(struct kvm *kvm) { struct kvm_vcpu *tmp_vcpu; - int c; + unsigned long c; /* * Any time a vcpu is run, vcpu_load is called which tries to grab the diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c index 5f9014ae595b..12e4c223e6b8 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c @@ -113,9 +113,8 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, int intid = val & 0xf; int targets = (val >> 16) & 0xff; int mode = (val >> 24) & 0x03; - int c; struct kvm_vcpu *vcpu; - unsigned long flags; + unsigned long flags, c; switch (mode) { case 0x0: /* as specified by targets */ diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c 
b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index bf7ec4a78497..82906cb3f713 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -754,7 +754,8 @@ static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) static int vgic_register_all_redist_iodevs(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int c, ret = 0; + unsigned long c; + int ret = 0; kvm_for_each_vcpu(c, vcpu, kvm) { ret = vgic_register_redist_iodev(vcpu); @@ -995,10 +996,10 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) struct kvm_vcpu *c_vcpu; u16 target_cpus; u64 mpidr; - int sgi, c; + int sgi; int vcpu_id = vcpu->vcpu_id; bool broadcast; - unsigned long flags; + unsigned long c, flags; sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 04f62c4b07fb..5fedaee15e72 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -542,13 +542,13 @@ int vgic_v3_map_resources(struct kvm *kvm) struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; int ret = 0; - int c; + unsigned long c; kvm_for_each_vcpu(c, vcpu, kvm) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) { - kvm_debug("vcpu %d redistributor base not set\n", c); + kvm_debug("vcpu %ld redistributor base not set\n", c); return -ENXIO; } } diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 772dd15a22c7..ad06ba6c9b00 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -189,7 +189,7 @@ void vgic_v4_configure_vsgis(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_arm_halt_guest(kvm); @@ -235,7 +235,8 @@ int vgic_v4_init(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int i, nr_vcpus, ret; + int nr_vcpus, ret; + unsigned long i; if (!kvm_vgic_global_state.has_gicv4) return 0; /* Nothing to see here... move along. 
*/ diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 5dad4996cfb2..9b98876a8a93 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -990,7 +990,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) void vgic_kick_vcpus(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int c; + unsigned long c; /* * We've injected an interrupt, time to find out who deserves diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 3fbd570f9c1e..0215f32932a9 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -337,7 +337,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) { - int i; + unsigned long i; struct kvm_vcpu *v; /* flush this VA on all cpus */ diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index feee40cb2ba1..61290282fd9e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -530,7 +530,7 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, bool large) { u64 mask = 0xFFFFFFFFFULL; - long i; + unsigned long i; struct kvm_vcpu *v; dprintk("KVM MMU: tlbie(0x%lx)\n", va); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 7b74fc0a986b..32873c6985f9 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1993,7 +1993,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, */ if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.vcore != vc) @@ -4786,8 +4786,8 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int i, r; - unsigned long n; + int r; + unsigned long n, i; unsigned long *buf, *p; struct kvm_vcpu *vcpu; @@ -5861,7 +5861,7 @@ static int kvmhv_svm_off(struct kvm *kvm) int mmu_was_ready; int srcu_idx; int ret = 0; - int i; + unsigned long i; if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) return ret; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 6bc9425acb32..bb0612c49b92 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -428,7 +428,7 @@ static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) /************* MMU Notifiers *************/ static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { - long i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index ebd5d920de8c..9cc466006e8b 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -942,8 +942,8 @@ static int xics_debug_show(struct seq_file *m, void *private) struct kvmppc_xics *xics = m->private; struct kvm *kvm = xics->kvm; struct kvm_vcpu *vcpu; - int icsid, i; - unsigned long flags; + int icsid; + unsigned long flags, i; unsigned long t_rm_kick_vcpu, t_rm_check_resend; unsigned long t_rm_notify_eoi; unsigned long t_reject, t_check_resend; @@ -1340,7 +1340,7 @@ static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) static void kvmppc_xics_release(struct kvm_device *dev) { struct kvmppc_xics *xics = dev->private; - int i; + unsigned long i; struct kvm *kvm = xics->kvm; struct kvm_vcpu *vcpu; diff --git a/arch/powerpc/kvm/book3s_xics.h 
b/arch/powerpc/kvm/book3s_xics.h index 6231f76bdd66..8e4c79e2fcd8 100644 --- a/arch/powerpc/kvm/book3s_xics.h +++ b/arch/powerpc/kvm/book3s_xics.h @@ -116,7 +116,7 @@ static inline struct kvmppc_icp *kvmppc_xics_find_server(struct kvm *kvm, u32 nr) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num) diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 225008882958..e216c068075d 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -368,7 +368,8 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio) { struct kvmppc_xive *xive = kvm->arch.xive; struct kvm_vcpu *vcpu; - int i, rc; + unsigned long i; + int rc; lockdep_assert_held(&xive->lock); @@ -439,7 +440,8 @@ static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio) { struct kvm_vcpu *vcpu; - int i, rc; + unsigned long i; + int rc; /* Locate target server */ vcpu = kvmppc_xive_find_server(kvm, *server); @@ -1519,7 +1521,8 @@ static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q) static void xive_pre_save_scan(struct kvmppc_xive *xive) { struct kvm_vcpu *vcpu = NULL; - int i, j; + unsigned long i; + int j; /* * See comment in xive_get_source() about how this @@ -1700,7 +1703,7 @@ static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; @@ -2037,7 +2040,7 @@ static void kvmppc_xive_release(struct kvm_device *dev) struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; pr_devel("Releasing xive device\n"); @@ -2291,7 +2294,7 @@ static int xive_debug_show(struct seq_file *m, void *private) u64 t_vm_h_cppr = 0; u64 t_vm_h_eoi = 0; u64 t_vm_h_ipi = 0; - unsigned int i; + unsigned long i; if (!kvm) return 0; diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index e6a9651c6f1e..09d0657596c3 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -199,7 +199,7 @@ struct kvmppc_xive_vcpu { static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num) @@ -240,7 +240,7 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server) static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id) diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c index 99db9ac49901..561a5bfe0468 100644 --- a/arch/powerpc/kvm/book3s_xive_native.c +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -807,7 +807,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - unsigned int i; + unsigned long i; pr_devel("%s\n", __func__); @@ -916,7 +916,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive) { struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - unsigned int i; + unsigned long i; pr_devel("%s\n", __func__); @@ -1017,7 +1017,7 @@ static void 
kvmppc_xive_native_release(struct kvm_device *dev) struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; pr_devel("Releasing xive native device\n"); @@ -1214,7 +1214,7 @@ static int xive_native_debug_show(struct seq_file *m, void *private) struct kvmppc_xive *xive = m->private; struct kvm *kvm = xive->kvm; struct kvm_vcpu *vcpu; - unsigned int i; + unsigned long i; if (!kvm) return 0; diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 64eb833e9f02..051102d50c31 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) ulong param = vcpu->arch.regs.gpr[rb]; int prio = dbell2prio(rb); int pir = param & PPC_DBELL_PIR_MASK; - int i; + unsigned long i; struct kvm_vcpu *cvcpu; if (prio < 0) diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 3b0e703d22cf..d0d2bcab2f7b 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -60,7 +60,7 @@ int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run) static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, struct kvm_run *run, u32 type) { - int i; + unsigned long i; struct kvm_vcpu *tmp; kvm_for_each_vcpu(i, tmp, vcpu->kvm) diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c index 2c6253b293bc..807228f8f409 100644 --- a/arch/riscv/kvm/vmid.c +++ b/arch/riscv/kvm/vmid.c @@ -65,7 +65,7 @@ bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid) void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu) { - int i; + unsigned long i; struct kvm_vcpu *v; struct cpumask hmask; struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c3bd993fdd0c..1aa094810f6d 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2659,7 +2659,7 @@ static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr) static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { int r = 0; - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; switch (attr->group) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 7a0f5abaa484..fd5f4ec1b4b9 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -295,7 +295,7 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, { struct kvm *kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; unsigned long long *delta = v; list_for_each_entry(kvm, &vm_list, vm_list) { @@ -682,7 +682,7 @@ out: static void icpt_operexc_on_all_vcpus(struct kvm *kvm) { - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { @@ -936,7 +936,7 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_s390_vcpu_block_all(kvm); @@ -1021,7 +1021,7 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) { - int cx; + unsigned long cx; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(cx, vcpu, kvm) @@ -2206,7 +2206,7 @@ static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp) struct kvm_vcpu *vcpu; u16 rc, rrc; int ret = 0; - int i; + unsigned long i; /* * We ignore failures and try to destroy as many CPUs as possible. 
@@ -2230,7 +2230,8 @@ static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp) static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) { - int i, r = 0; + unsigned long i; + int r = 0; u16 dummy; struct kvm_vcpu *vcpu; @@ -2929,7 +2930,7 @@ static int sca_switch_to_extended(struct kvm *kvm) struct bsca_block *old_sca = kvm->arch.sca; struct esca_block *new_sca; struct kvm_vcpu *vcpu; - unsigned int vcpu_idx; + unsigned long vcpu_idx; u32 scaol, scaoh; if (kvm->arch.use_esca) @@ -3411,7 +3412,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, struct kvm *kvm = gmap->private; struct kvm_vcpu *vcpu; unsigned long prefix; - int i; + unsigned long i; if (gmap_is_shadow(gmap)) return; @@ -3904,7 +3905,7 @@ void kvm_s390_set_tod_clock(struct kvm *kvm, { struct kvm_vcpu *vcpu; union tod_clock clk; - int i; + unsigned long i; mutex_lock(&kvm->lock); preempt_disable(); @@ -4536,7 +4537,7 @@ static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) static void __disable_ibs_on_all_vcpus(struct kvm *kvm) { - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c07a050d757d..b887fe7a7064 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -357,7 +357,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); static inline void kvm_s390_vcpu_block_all(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; WARN_ON(!mutex_is_locked(&kvm->lock)); @@ -367,7 +367,7 @@ static inline void kvm_s390_vcpu_block_all(struct kvm *kvm) static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 5e19e6e4c2ce..7179fa645eda 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -164,7 +164,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; if (vpidx >= KVM_MAX_VCPUS) return NULL; @@ -1716,7 +1716,8 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask( { struct kvm_hv *hv = to_kvm_hv(kvm); struct kvm_vcpu *vcpu; - int i, bank, sbank = 0; + int bank, sbank = 0; + unsigned long i; memset(vp_bitmap, 0, KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap)); @@ -1863,7 +1864,7 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, .vector = vector }; struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu_bitmap && !test_bit(i, vcpu_bitmap)) diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 5a69cce4d72d..0b65a764ed3a 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -242,7 +242,7 @@ static void pit_do_work(struct kthread_work *work) struct kvm_pit *pit = container_of(work, struct kvm_pit, expired); struct kvm *kvm = pit->kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; struct kvm_kpit_state *ps = &pit->pit_state; if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0)) diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 0b80263d46d8..814064d06016 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -50,7 +50,7 @@ static void pic_unlock(struct kvm_pic *s) { bool wakeup = s->wakeup_needed; struct kvm_vcpu *vcpu; - int i; + unsigned long i; s->wakeup_needed = false; @@ -270,7 +270,8 @@ int kvm_pic_read_irq(struct 
kvm *kvm) static void kvm_pic_reset(struct kvm_kpic_state *s) { - int irq, i; + int irq; + unsigned long i; struct kvm_vcpu *vcpu; u8 edge_irr = s->irr & ~s->elcr; bool found = false; diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 816a82515dcd..decfa36b7891 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -149,7 +149,7 @@ void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; if (RTC_GSI >= IOAPIC_NUM_PINS) return; @@ -184,7 +184,7 @@ static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic) static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index d5b72a08e566..39ad02d6dc63 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -45,9 +45,9 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, struct dest_map *dest_map) { - int i, r = -1; + int r = -1; struct kvm_vcpu *vcpu, *lowest = NULL; - unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]; + unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]; unsigned int dest_vcpus = 0; if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map)) @@ -320,7 +320,8 @@ int kvm_set_routing_entry(struct kvm *kvm, bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_vcpu **dest_vcpu) { - int i, r = 0; + int r = 0; + unsigned long i; struct kvm_vcpu *vcpu; if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu)) diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c index c7db2df50a7a..b469f45e3fe4 100644 --- a/arch/x86/kvm/kvm_onhyperv.c +++ b/arch/x86/kvm/kvm_onhyperv.c @@ -33,7 +33,8 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm, { struct kvm_arch *kvm_arch = &kvm->arch; struct kvm_vcpu *vcpu; - int ret = 0, i, nr_unique_valid_roots; + int ret = 0, nr_unique_valid_roots; + unsigned long i; hpa_t root; spin_lock(&kvm_arch->hv_root_tdp_lock); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index f206fc35deff..451e80306b51 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -185,7 +185,7 @@ void kvm_recalculate_apic_map(struct kvm *kvm) { struct kvm_apic_map *new, *old = NULL; struct kvm_vcpu *vcpu; - int i; + unsigned long i; u32 max_id = 255; /* enough space for any xAPIC ID */ /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. 
*/ @@ -1172,8 +1172,8 @@ void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_lapic *src = NULL; struct kvm_apic_map *map; struct kvm_vcpu *vcpu; - unsigned long bitmap; - int i, vcpu_idx; + unsigned long bitmap, i; + int vcpu_idx; bool ret; rcu_read_lock(); diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 8f9af7b7dbbe..b7200595cbd4 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -293,7 +293,7 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source, u32 icrl, u32 icrh) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { bool m = kvm_apic_match_dest(vcpu, source, diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7656a2c5662a..322553322202 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -636,7 +636,8 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_vcpu *vcpu; - int i, ret; + unsigned long i; + int ret; if (!sev_es_guest(kvm)) return -ENOTTY; @@ -1593,7 +1594,7 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) static int sev_lock_vcpus_for_migration(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i, j; + unsigned long i, j; kvm_for_each_vcpu(i, vcpu, kvm) { if (mutex_lock_killable(&vcpu->mutex)) @@ -1615,7 +1616,7 @@ out_unlock: static void sev_unlock_vcpus_for_migration(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { mutex_unlock(&vcpu->mutex); @@ -1642,7 +1643,7 @@ static void sev_migrate_from(struct kvm_sev_info *dst, static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) { - int i; + unsigned long i; struct kvm_vcpu *dst_vcpu, *src_vcpu; struct vcpu_svm *dst_svm, *src_svm; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0e6d11a726cd..96bcf2035bdc 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2816,7 +2816,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm) { struct kvm_arch *ka = &kvm->arch; struct kvm_vcpu *vcpu; - int i; + unsigned long i; write_seqcount_end(&ka->pvclock_sc); raw_spin_unlock_irq(&ka->tsc_write_lock); @@ -3065,7 +3065,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) static void kvmclock_update_fn(struct work_struct *work) { - int i; + unsigned long i; struct delayed_work *dwork = to_delayed_work(work); struct kvm_arch *ka = container_of(dwork, struct kvm_arch, kvmclock_update_work); @@ -5692,7 +5692,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) * VM-Exit. 
*/ struct kvm_vcpu *vcpu; - int i; + unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) kvm_vcpu_kick(vcpu); @@ -5961,7 +5961,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) static int kvm_arch_suspend_notifier(struct kvm *kvm) { struct kvm_vcpu *vcpu; - int i, ret = 0; + unsigned long i; + int ret = 0; mutex_lock(&kvm->lock); kvm_for_each_vcpu(i, vcpu, kvm) { @@ -8388,7 +8389,8 @@ static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) { struct kvm *kvm; struct kvm_vcpu *vcpu; - int i, send_ipi = 0; + int send_ipi = 0; + unsigned long i; /* * We allow guests to temporarily run on slowing clocks, @@ -8561,9 +8563,8 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = { static void pvclock_gtod_update_fn(struct work_struct *work) { struct kvm *kvm; - struct kvm_vcpu *vcpu; - int i; + unsigned long i; mutex_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) @@ -10672,7 +10673,7 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm) { bool inhibit = false; struct kvm_vcpu *vcpu; - int i; + unsigned long i; down_write(&kvm->arch.apicv_update_lock); @@ -11160,7 +11161,7 @@ int kvm_arch_hardware_enable(void) { struct kvm *kvm; struct kvm_vcpu *vcpu; - int i; + unsigned long i; int ret; u64 local_tsc; u64 max_tsc = 0; @@ -11413,7 +11414,7 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) static void kvm_free_vcpus(struct kvm *kvm) { - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; /* @@ -11659,7 +11660,7 @@ out_free: void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) { struct kvm_vcpu *vcpu; - int i; + unsigned long i; /* * memslots->generation has been incremented. diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2201dc07126a..7da6086262c6 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -714,7 +714,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { struct kvm_vcpu *vcpu = NULL; - int i; + unsigned long i; if (id < 0) return NULL; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 594f90307b20..1c68384a7c4b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -305,8 +305,9 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, { struct kvm_vcpu *vcpu; struct cpumask *cpus; + unsigned long i; bool called; - int i, me; + int me; me = get_cpu(); @@ -453,7 +454,7 @@ static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_destroy_vcpus(struct kvm *kvm) { - unsigned int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { @@ -3389,10 +3390,10 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) struct kvm *kvm = me->kvm; struct kvm_vcpu *vcpu; int last_boosted_vcpu = me->kvm->last_boosted_vcpu; + unsigned long i; int yielded = 0; int try = 3; int pass; - int i; kvm_vcpu_set_in_spin_loop(me, true); /* @@ -4201,7 +4202,7 @@ static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; int cleared = 0; @@ -5120,7 +5121,7 @@ static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; *val = 0; @@ -5133,7 +5134,7 @@ static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) static int 
kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) { - int i; + unsigned long i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) From 214bd3a6f46981b7867946e1b4f628a06bcf2091 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 16 Nov 2021 16:04:03 +0000 Subject: [PATCH 025/268] KVM: Convert kvm_for_each_vcpu() to using xa_for_each_range() Now that the vcpu array is backed by an xarray, use the optimised iterator that matches the underlying data structure. Suggested-by: Sean Christopherson Signed-off-by: Marc Zyngier Message-Id: <20211116160403.4074052-8-maz@kernel.org> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7da6086262c6..66548287ed42 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -705,11 +705,9 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) return xa_load(&kvm->vcpu_array, i); } -#define kvm_for_each_vcpu(idx, vcpup, kvm) \ - for (idx = 0; \ - idx < atomic_read(&kvm->online_vcpus) && \ - (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ - idx++) +#define kvm_for_each_vcpu(idx, vcpup, kvm) \ + xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \ + (atomic_read(&kvm->online_vcpus) - 1)) static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) { From afa319a54a8c760ba59683cd3c4318635049a664 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:07 +0100 Subject: [PATCH 026/268] KVM: Require total number of memslot pages to fit in an unsigned long Explicitly disallow creating more memslot pages than can fit in an unsigned long, as KVM doesn't correctly handle a total number of memslot pages that doesn't fit in an unsigned long, and remedying that would be a waste of time. For a 64-bit kernel, this is a nop as memslots are not allowed to overlap in the gfn address space. With a 32-bit kernel, userspace can at most address 3GB of virtual memory, whereas wrapping the total number of pages would require 4TB+ of guest physical memory. Even with x86's second address space for SMM, userspace would need to alias all of guest memory more than one _thousand_ times. And on older x86 hardware with MAXPHYADDR < 43, the guest couldn't actually access any of those aliases even if userspace lied about guest.MAXPHYADDR. On s390 and arm64, this is a nop as they don't support 32-bit hosts. On x86, practically speaking this is simply acknowledging reality as the existing kvm_mmu_calculate_default_mmu_pages() assumes the total number of pages fits in an "unsigned long". On PPC, this is likely a nop as every flavor of PPC KVM assumes gfns (and gpas!) fit in unsigned long. arch/powerpc/kvm/book3s_32_mmu_host.c goes a step further and fails the build if CONFIG_PTE_64BIT=y, which presumably means that it doesn't support 64-bit physical addresses. On MIPS, this is also likely a nop as the core MMU helpers assume gpas fit in unsigned long, e.g. see kvm_mips_##name##_pte. And finally, RISC-V is a "don't care" as it doesn't exist in any release, i.e. there is no established ABI to break. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S.
Szmigiero Message-Id: <1c2c91baf8e78acccd4dad38da591002e61c013c.1638817638.git.maciej.szmigiero@oracle.com> --- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 66548287ed42..e38705359af5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -552,6 +552,7 @@ struct kvm { */ struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ + unsigned long nr_memslot_pages; struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct xarray vcpu_array; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1c68384a7c4b..538fd57ea339 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1638,6 +1638,15 @@ static int kvm_set_memslot(struct kvm *kvm, update_memslots(slots, new, change); slots = install_new_memslots(kvm, as_id, slots); + /* + * Update the total number of memslot pages before calling the arch + * hook so that architectures can consume the result directly. + */ + if (change == KVM_MR_DELETE) + kvm->nr_memslot_pages -= old.npages; + else if (change == KVM_MR_CREATE) + kvm->nr_memslot_pages += new->npages; + kvm_arch_commit_memory_region(kvm, mem, &old, new, change); /* Free the old memslot's metadata. Note, this is the full copy!!! */ @@ -1668,6 +1677,9 @@ static int kvm_delete_memslot(struct kvm *kvm, if (!old->npages) return -EINVAL; + if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) + return -EIO; + memset(&new, 0, sizeof(new)); new.id = old->id; /* @@ -1751,6 +1763,13 @@ int __kvm_set_memory_region(struct kvm *kvm, if (!old.npages) { change = KVM_MR_CREATE; new.dirty_bitmap = NULL; + + /* + * To simplify KVM internals, the total number of pages across + * all memslots must fit in an unsigned long. + */ + if ((kvm->nr_memslot_pages + new.npages) < kvm->nr_memslot_pages) + return -EINVAL; } else { /* Modify an existing slot. */ if ((new.userspace_addr != old.userspace_addr) || (new.npages != old.npages) || From 47ea7d900b1cc66ec7a35a8b173ed16b01f9781b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:08 +0100 Subject: [PATCH 027/268] KVM: Open code kvm_delete_memslot() into its only caller Fold kvm_delete_memslot() into __kvm_set_memory_region() to free up the "kvm_delete_memslot()" name for use in a future helper. The delete logic isn't so complex/long that it truly needs a helper, and it will be simplified a wee bit further in upcoming commits. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <2887631c31a82947faa488ab72f55f8c68b7c194.1638817638.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 42 +++++++++++++++++------------------------- 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 538fd57ea339..af2730858ebd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1668,29 +1668,6 @@ out_slots: return r; } -static int kvm_delete_memslot(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot *old, int as_id) -{ - struct kvm_memory_slot new; - - if (!old->npages) - return -EINVAL; - - if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) - return -EIO; - - memset(&new, 0, sizeof(new)); - new.id = old->id; - /* - * This is only for debugging purpose; it should never be referenced - * for a removed memslot. 
- */ - new.as_id = as_id; - - return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE); -} - /* * Allocate some memory and give it an address in the guest physical address * space. @@ -1747,8 +1724,23 @@ int __kvm_set_memory_region(struct kvm *kvm, old.id = id; } - if (!mem->memory_size) - return kvm_delete_memslot(kvm, mem, &old, as_id); + if (!mem->memory_size) { + if (!old.npages) + return -EINVAL; + + if (WARN_ON_ONCE(kvm->nr_memslot_pages < old.npages)) + return -EIO; + + memset(&new, 0, sizeof(new)); + new.id = id; + /* + * This is only for debugging purpose; it should never be + * referenced for a removed memslot. + */ + new.as_id = as_id; + + return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE); + } new.as_id = as_id; new.id = id; From 4e4d30cb9b8740e178731406aa28b96f12c6edbd Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:09 +0100 Subject: [PATCH 028/268] KVM: Resync only arch fields when slots_arch_lock gets reacquired There is no need to copy the whole memslot data after releasing slots_arch_lock for a moment to install temporary memslots copy in kvm_set_memslot() since this lock only protects the arch field of each memslot. Just resync this particular field after reacquiring slots_arch_lock. Note, this also eliminates the need to manually clear the INVALID flag when restoring memslots; the "setting" of the INVALID flag was an unwanted side effect of copying the entire memslots. Since kvm_copy_memslots() has just one caller remaining now open-code it instead. Signed-off-by: Maciej S. Szmigiero [sean: tweak shortlog, note INVALID flag in changelog, revert comment] Reviewed-by: Sean Christopherson Message-Id: --- virt/kvm/kvm_main.c | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index af2730858ebd..615d69bcde2c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1515,12 +1515,6 @@ static size_t kvm_memslots_size(int slots) (sizeof(struct kvm_memory_slot) * slots); } -static void kvm_copy_memslots(struct kvm_memslots *to, - struct kvm_memslots *from) -{ - memcpy(to, from, kvm_memslots_size(from->used_slots)); -} - /* * Note, at a minimum, the current number of used slots must be allocated, even * when deleting a memslot, as we need a complete duplicate of the memslots for @@ -1539,11 +1533,22 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); if (likely(slots)) - kvm_copy_memslots(slots, old); + memcpy(slots, old, kvm_memslots_size(old->used_slots)); return slots; } +static void kvm_copy_memslots_arch(struct kvm_memslots *to, + struct kvm_memslots *from) +{ + int i; + + WARN_ON_ONCE(to->used_slots != from->used_slots); + + for (i = 0; i < from->used_slots; i++) + to->memslots[i].arch = from->memslots[i].arch; +} + static int kvm_set_memslot(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *new, int as_id, @@ -1584,9 +1589,10 @@ static int kvm_set_memslot(struct kvm *kvm, slot->flags |= KVM_MEMSLOT_INVALID; /* - * We can re-use the memory from the old memslots. - * It will be overwritten with a copy of the new memslots - * after reacquiring the slots_arch_lock below. + * We can re-use the old memslots, the only difference from the + * newly installed memslots is the invalid flag, which will get + * dropped by update_memslots anyway. 
We'll also revert to the + * old memslots if preparing the new memory region fails. */ slots = install_new_memslots(kvm, as_id, slots); @@ -1603,12 +1609,14 @@ static int kvm_set_memslot(struct kvm *kvm, mutex_lock(&kvm->slots_arch_lock); /* - * The arch-specific fields of the memslots could have changed - * between releasing the slots_arch_lock in - * install_new_memslots and here, so get a fresh copy of the - * slots. + * The arch-specific fields of the now-active memslots could + * have been modified between releasing slots_arch_lock in + * install_new_memslots and re-acquiring slots_arch_lock above. + * Copy them to the inactive memslots. Arch code is required + * to retrieve memslots *after* acquiring slots_arch_lock, thus + * the active memslots are guaranteed to be fresh. */ - kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id)); + kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, as_id)); } /* @@ -1657,13 +1665,10 @@ static int kvm_set_memslot(struct kvm *kvm, return 0; out_slots: - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { - slot = id_to_memslot(slots, new->id); - slot->flags &= ~KVM_MEMSLOT_INVALID; + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) slots = install_new_memslots(kvm, as_id, slots); - } else { + else mutex_unlock(&kvm->slots_arch_lock); - } kvfree(slots); return r; } From ce5f0215620c11a5829da7f30bebf3adeeef3345 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:10 +0100 Subject: [PATCH 029/268] KVM: Use "new" memslot's address space ID instead of dedicated param Now that the address space ID is stored in every slot, including fake slots used for deletion, use the slot's as_id instead of passing in the redundant information as a param to kvm_set_memslot(). This will greatly simplify future memslot work by avoiding passing a large number of variables around purely to honor @as_id. Drop a comment in the DELETE path about new->as_id being provided purely for debug, as that's now a lie. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <03189577be214ab8530a4b3a3ee3ed1c2f9e5815.1638817639.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 615d69bcde2c..a7a1c872fe6d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1551,7 +1551,7 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to, static int kvm_set_memslot(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot *new, int as_id, + struct kvm_memory_slot *new, enum kvm_mr_change change) { struct kvm_memory_slot *slot, old; @@ -1574,7 +1574,7 @@ static int kvm_set_memslot(struct kvm *kvm, */ mutex_lock(&kvm->slots_arch_lock); - slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); + slots = kvm_dup_memslots(__kvm_memslots(kvm, new->as_id), change); if (!slots) { mutex_unlock(&kvm->slots_arch_lock); return -ENOMEM; @@ -1594,7 +1594,7 @@ static int kvm_set_memslot(struct kvm *kvm, * dropped by update_memslots anyway. We'll also revert to the * old memslots if preparing the new memory region fails. */ - slots = install_new_memslots(kvm, as_id, slots); + slots = install_new_memslots(kvm, new->as_id, slots); /* From this point no new shadow pages pointing to a deleted, * or moved, memslot will be created. 
@@ -1616,7 +1616,7 @@ static int kvm_set_memslot(struct kvm *kvm, * to retrieve memslots *after* acquiring slots_arch_lock, thus * the active memslots are guaranteed to be fresh. */ - kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, as_id)); + kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, new->as_id)); } /* @@ -1633,7 +1633,7 @@ static int kvm_set_memslot(struct kvm *kvm, WARN_ON_ONCE(change != KVM_MR_CREATE); memset(&old, 0, sizeof(old)); old.id = new->id; - old.as_id = as_id; + old.as_id = new->as_id; } /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */ @@ -1644,7 +1644,7 @@ static int kvm_set_memslot(struct kvm *kvm, goto out_slots; update_memslots(slots, new, change); - slots = install_new_memslots(kvm, as_id, slots); + slots = install_new_memslots(kvm, new->as_id, slots); /* * Update the total number of memslot pages before calling the arch @@ -1666,7 +1666,7 @@ static int kvm_set_memslot(struct kvm *kvm, out_slots: if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) - slots = install_new_memslots(kvm, as_id, slots); + slots = install_new_memslots(kvm, new->as_id, slots); else mutex_unlock(&kvm->slots_arch_lock); kvfree(slots); @@ -1738,13 +1738,9 @@ int __kvm_set_memory_region(struct kvm *kvm, memset(&new, 0, sizeof(new)); new.id = id; - /* - * This is only for debugging purpose; it should never be - * referenced for a removed memslot. - */ new.as_id = as_id; - return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE); + return kvm_set_memslot(kvm, mem, &new, KVM_MR_DELETE); } new.as_id = as_id; @@ -1807,7 +1803,7 @@ int __kvm_set_memory_region(struct kvm *kvm, bitmap_set(new.dirty_bitmap, 0, new.npages); } - r = kvm_set_memslot(kvm, mem, &new, as_id, change); + r = kvm_set_memslot(kvm, mem, &new, change); if (r) goto out_bitmap; From 537a17b3149300987456e8949ccb991e604047d6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:11 +0100 Subject: [PATCH 030/268] KVM: Let/force architectures to deal with arch specific memslot data Pass the "old" slot to kvm_arch_prepare_memory_region() and force arch code to handle propagating arch specific data from "new" to "old" when necessary. This is a baby step towards dynamically allocating "new" from the get go, and is a (very) minor performance boost on x86 due to not unnecessarily copying arch data. For PPC HV, copy the rmap in the !CREATE and !DELETE paths, i.e. for MOVE and FLAGS_ONLY. This is functionally a nop as the previous behavior would overwrite the pointer for CREATE, and eventually discard/ignore it for DELETE. For x86, copy the arch data only for FLAGS_ONLY changes. Unlike PPC HV, x86 needs to reallocate arch data in the MOVE case as the size of x86's allocations depend on the alignment of the memslot's gfn. Opportunistically tweak kvm_arch_prepare_memory_region()'s param order to match the "commit" prototype. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero [mss: add missing RISCV kvm_arch_prepare_memory_region() change] Signed-off-by: Maciej S. 
Szmigiero Message-Id: <67dea5f11bbcfd71e3da5986f11e87f5dd4013f9.1638817639.git.maciej.szmigiero@oracle.com> --- arch/arm64/kvm/mmu.c | 7 ++++--- arch/mips/kvm/mips.c | 3 ++- arch/powerpc/include/asm/kvm_ppc.h | 10 ++++++---- arch/powerpc/kvm/book3s.c | 12 ++++++------ arch/powerpc/kvm/book3s_hv.c | 17 ++++++++++------- arch/powerpc/kvm/book3s_pr.c | 9 +++++---- arch/powerpc/kvm/booke.c | 5 +++-- arch/powerpc/kvm/powerpc.c | 5 +++-- arch/riscv/kvm/mmu.c | 7 ++++--- arch/s390/kvm/kvm-s390.c | 3 ++- arch/x86/kvm/x86.c | 15 +++++++++++---- include/linux/kvm_host.h | 3 ++- virt/kvm/kvm_main.c | 5 +---- 13 files changed, 59 insertions(+), 42 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 326cdfec74a1..5d474360bf6c 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1486,8 +1486,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { hva_t hva = mem->userspace_addr; @@ -1502,7 +1503,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * Prevent userspace from creating a memory region outside of the IPA * space addressable by the KVM guest IPA space. */ - if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) + if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) return -EFAULT; mmap_read_lock(current->mm); @@ -1536,7 +1537,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (vma->vm_flags & VM_PFNMAP) { /* IO region dirty page logging not allowed */ - if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { + if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { ret = -EINVAL; break; } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 043204cd585f..b2ce10784eb0 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -214,8 +214,9 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { return 0; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 671fbd1a765e..b01760dd1374 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -200,12 +200,13 @@ extern void kvmppc_core_destroy_vm(struct kvm *kvm); extern void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change); extern void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, @@ -274,12 +275,13 @@ struct kvmppc_ops { int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log); void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot); int (*prepare_memory_region)(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct 
kvm_memory_slot *new, enum kvm_mr_change change); void (*commit_memory_region)(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index b785f6772391..8250e8308674 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -847,17 +847,17 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) } int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - const struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { - return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem, - change); + return kvm->arch.kvm_ops->prepare_memory_region(kvm, mem, old, new, change); } void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 32873c6985f9..d7594d49d288 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4854,17 +4854,20 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot) } static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, - struct kvm_memory_slot *slot, - const struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { unsigned long npages = mem->memory_size >> PAGE_SHIFT; if (change == KVM_MR_CREATE) { - slot->arch.rmap = vzalloc(array_size(npages, - sizeof(*slot->arch.rmap))); - if (!slot->arch.rmap) + new->arch.rmap = vzalloc(array_size(npages, + sizeof(*new->arch.rmap))); + if (!new->arch.rmap) return -ENOMEM; + } else if (change != KVM_MR_DELETE) { + new->arch.rmap = old->arch.rmap; } return 0; @@ -4872,7 +4875,7 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index bb0612c49b92..ffb559cf25f4 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1899,16 +1899,17 @@ static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, } static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, - struct kvm_memory_slot *memslot, - const struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { return 0; } static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { diff --git a/arch/powerpc/kvm/booke.c 
b/arch/powerpc/kvm/booke.c index 8c15c90dd3a9..93c2ac2bee09 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1821,8 +1821,9 @@ void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) } int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { return 0; @@ -1830,7 +1831,7 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, void kvmppc_core_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 98f5d90ebf5a..e875874cf836 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -698,11 +698,12 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { - return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change); + return kvmppc_core_prepare_memory_region(kvm, mem, old, new, change); } void kvm_arch_commit_memory_region(struct kvm *kvm, diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index fc058ff5f4b6..50380f525345 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -477,8 +477,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { hva_t hva = mem->userspace_addr; @@ -494,7 +495,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * Prevent userspace from creating a memory region outside of the GPA * space addressable by the KVM guest GPA space. */ - if ((memslot->base_gfn + memslot->npages) >= + if ((new->base_gfn + new->npages) >= (stage2_gpa_size >> PAGE_SHIFT)) return -EFAULT; @@ -541,7 +542,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, pa += vm_start - vma->vm_start; /* IO region dirty page logging not allowed */ - if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { + if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { ret = -EINVAL; goto out; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index fd5f4ec1b4b9..3beefadda0db 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5007,8 +5007,9 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) { /* A few sanity checks. 
We can have memory slots which have to be diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 96bcf2035bdc..287ff4e43a13 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11674,13 +11674,20 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) } int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - const struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) { if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) - return kvm_alloc_memslot_metadata(kvm, memslot, + return kvm_alloc_memslot_metadata(kvm, new, mem->memory_size >> PAGE_SHIFT); + + if (change == KVM_MR_FLAGS_ONLY) + memcpy(&new->arch, &old->arch, sizeof(old->arch)); + else if (WARN_ON_ONCE(change != KVM_MR_DELETE)) + return -EIO; + return 0; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e38705359af5..cb7311dc6f32 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -833,8 +833,9 @@ int __kvm_set_memory_region(struct kvm *kvm, void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change); void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a7a1c872fe6d..46060cc542ef 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1636,10 +1636,7 @@ static int kvm_set_memslot(struct kvm *kvm, old.as_id = new->as_id; } - /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */ - memcpy(&new->arch, &old.arch, sizeof(old.arch)); - - r = kvm_arch_prepare_memory_region(kvm, new, mem, change); + r = kvm_arch_prepare_memory_region(kvm, mem, &old, new, change); if (r) goto out_slots; From 509c594ca2dc8828b7b5d7b33192384741567cdf Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:12 +0100 Subject: [PATCH 031/268] KVM: arm64: Use "new" memslot instead of userspace memory region Get the slot ID, hva, etc... from the "new" memslot instead of the userspace memory region when preparing/committing a memory region. This will allow a future commit to drop @mem from the prepare/commit hooks once all architectures convert to using "new". Opportunistically wait to get the hva begin+end until after filtering out the DELETE case in anticipation of a future commit passing NULL for @new when deleting a memslot. Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: --- arch/arm64/kvm/mmu.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 5d474360bf6c..dd95350ea15d 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1473,14 +1473,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, * allocated dirty_bitmap[], dirty pages will be tracked while the * memory slot is write protected. 
*/ - if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { + if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) { /* * If we're with initial-all-set, we don't need to write * protect any pages because they're all reported as dirty. * Huge pages and normal pages will be write protect gradually. */ if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) { - kvm_mmu_wp_memory_region(kvm, mem->slot); + kvm_mmu_wp_memory_region(kvm, new->id); } } } @@ -1491,8 +1491,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) { - hva_t hva = mem->userspace_addr; - hva_t reg_end = hva + mem->memory_size; + hva_t hva, reg_end; int ret = 0; if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && @@ -1506,6 +1505,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) return -EFAULT; + hva = new->userspace_addr; + reg_end = hva + (new->npages << PAGE_SHIFT); + mmap_read_lock(current->mm); /* * A memory region could potentially cover multiple VMAs, and any holes From 3b1816177bfe2ce245a0f1194970644a79951570 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:13 +0100 Subject: [PATCH 032/268] KVM: MIPS: Drop pr_debug from memslot commit to avoid using "mem" Remove an old (circa 2012) kvm_debug from kvm_arch_commit_memory_region() to print basic information when committing a memslot change. The primary motivation for removing the kvm_debug is to avoid using @mem, the user memory region, so that said param can be removed. Alternatively, the debug message could be converted to use @new, but that would require synthesizing select state to play nice with the DELETED case, which will pass NULL for @new in the future. And there's no argument to be had for dumping generic information in an arch callback, i.e. if there's a good reason for the debug message, then it belongs in common KVM code where all architectures can benefit. Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: <446929a668f6e1346751571b71db41e94e976cdf.1638817639.git.maciej.szmigiero@oracle.com> --- arch/mips/kvm/mips.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index b2ce10784eb0..bda717301db8 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -230,10 +230,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, { int needs_flush; - kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", - __func__, kvm, mem->slot, mem->guest_phys_addr, - mem->memory_size, mem->userspace_addr); - /* * If dirty page logging is enabled, write protect all pages in the slot * ready for dirty logging. From eaaaed137eccb9e8f3a88f6297e214f53885196f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:14 +0100 Subject: [PATCH 033/268] KVM: PPC: Avoid referencing userspace memory region in memslot updates For PPC HV, get the number of pages directly from the new memslot instead of computing the same from the userspace memory region, and explicitly check for !DELETE instead of inferring the same when toggling mmio_update. The motivation for these changes is to avoid referencing the @mem param so that it can be dropped in a future commit. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. 
Szmigiero Message-Id: <1e97fb5198be25f98ef82e63a8d770c682264cc9.1638817639.git.maciej.szmigiero@oracle.com> --- arch/powerpc/include/asm/kvm_ppc.h | 4 ---- arch/powerpc/kvm/book3s.c | 6 ++---- arch/powerpc/kvm/book3s_hv.c | 12 +++--------- arch/powerpc/kvm/book3s_pr.c | 2 -- arch/powerpc/kvm/booke.c | 2 -- arch/powerpc/kvm/powerpc.c | 4 ++-- 6 files changed, 7 insertions(+), 23 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index b01760dd1374..935c58dc38c4 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -200,12 +200,10 @@ extern void kvmppc_core_destroy_vm(struct kvm *kvm); extern void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change); extern void kvmppc_core_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); @@ -275,12 +273,10 @@ struct kvmppc_ops { int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log); void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot); int (*prepare_memory_region)(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change); void (*commit_memory_region)(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 8250e8308674..6d525285dbe8 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -847,21 +847,19 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) } int kvmppc_core_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { - return kvm->arch.kvm_ops->prepare_memory_region(kvm, mem, old, new, change); + return kvm->arch.kvm_ops->prepare_memory_region(kvm, old, new, change); } void kvmppc_core_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); + kvm->arch.kvm_ops->commit_memory_region(kvm, old, new, change); } bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index d7594d49d288..2b59ecc5f8c6 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4854,15 +4854,12 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot) } static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { - unsigned long npages = mem->memory_size >> PAGE_SHIFT; - if (change == KVM_MR_CREATE) { - new->arch.rmap = vzalloc(array_size(npages, + new->arch.rmap = vzalloc(array_size(new->npages, sizeof(*new->arch.rmap))); if (!new->arch.rmap) return -ENOMEM; @@ -4874,20 +4871,17 @@ static int 
kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, } static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - unsigned long npages = mem->memory_size >> PAGE_SHIFT; - /* - * If we are making a new memslot, it might make + * If we are creating or modifying a memslot, it might make * some address that was previously cached as emulated * MMIO be no longer emulated MMIO, so invalidate * all the caches of emulated MMIO translations. */ - if (npages) + if (change != KVM_MR_DELETE) atomic64_inc(&kvm->arch.mmio_update); /* diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index ffb559cf25f4..30426e8c8cf6 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1899,7 +1899,6 @@ static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, } static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -1908,7 +1907,6 @@ static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, } static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 93c2ac2bee09..53b4c9597c30 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1821,7 +1821,6 @@ void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) } int kvmppc_core_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -1830,7 +1829,6 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, } void kvmppc_core_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index e875874cf836..575140ecb23c 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -703,7 +703,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) { - return kvmppc_core_prepare_memory_region(kvm, mem, old, new, change); + return kvmppc_core_prepare_memory_region(kvm, old, new, change); } void kvm_arch_commit_memory_region(struct kvm *kvm, @@ -712,7 +712,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - kvmppc_core_commit_memory_region(kvm, mem, old, new, change); + kvmppc_core_commit_memory_region(kvm, old, new, change); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, From cf5b486922dc8667234bcaaadda4586b3062a1c0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:15 +0100 Subject: [PATCH 034/268] KVM: s390: Use "new" memslot instead of userspace memory region Get the gfn, size, and hva from the new memslot instead of the userspace memory region when preparing/committing memory region changes. This will allow a future commit to drop the @mem param. 
Note, this has a subtle functional change as KVM would previously reject DELETE if userspace provided a garbage userspace_addr or guest_phys_addr, whereas KVM zeros those fields in the "new" memslot when deleting an existing memslot. Arguably the old behavior is more correct, but there's zero benefit in requiring userspace to provide sane values for hva and gfn. Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: <917ed131c06a4c7b35dd7fb7ed7955be899ad8cc.1638817639.git.maciej.szmigiero@oracle.com> --- arch/s390/kvm/kvm-s390.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 3beefadda0db..959a568a97be 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5012,18 +5012,20 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) { + gpa_t size = new->npages * PAGE_SIZE; + /* A few sanity checks. We can have memory slots which have to be located/ended at a segment boundary (1MB). The memory in userland is ok to be fragmented into various different vmas. It is okay to mmap() and munmap() stuff in this slot after doing this call at any time */ - if (mem->userspace_addr & 0xffffful) + if (new->userspace_addr & 0xffffful) return -EINVAL; - if (mem->memory_size & 0xffffful) + if (size & 0xffffful) return -EINVAL; - if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) + if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) return -EINVAL; /* When we are protected, we should not change the memory slots */ @@ -5052,8 +5054,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, break; fallthrough; case KVM_MR_CREATE: - rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, - mem->guest_phys_addr, mem->memory_size); + rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, + new->base_gfn * PAGE_SIZE, + new->npages * PAGE_SIZE); break; case KVM_MR_FLAGS_ONLY: break; From 9d7d18ee3f48903f7b9bbf6305d690078c67271b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:16 +0100 Subject: [PATCH 035/268] KVM: x86: Use "new" memslot instead of userspace memory region Get the number of pages directly from the new memslot instead of computing the same from the userspace memory region when allocating memslot metadata. This will allow a future patch to drop @mem. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. 
Szmigiero Message-Id: --- arch/x86/kvm/x86.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 287ff4e43a13..2a7567adb799 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11593,9 +11593,9 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages) } static int kvm_alloc_memslot_metadata(struct kvm *kvm, - struct kvm_memory_slot *slot, - unsigned long npages) + struct kvm_memory_slot *slot) { + unsigned long npages = slot->npages; int i, r; /* @@ -11680,8 +11680,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, enum kvm_mr_change change) { if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) - return kvm_alloc_memslot_metadata(kvm, new, - mem->memory_size >> PAGE_SHIFT); + return kvm_alloc_memslot_metadata(kvm, new); if (change == KVM_MR_FLAGS_ONLY) memcpy(&new->arch, &old->arch, sizeof(old->arch)); From d01495d4cffb327200d4522db6eb3fabfdc9e2f5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:17 +0100 Subject: [PATCH 036/268] KVM: RISC-V: Use "new" memslot instead of userspace memory region Get the slot ID, hva, etc... from the "new" memslot instead of the userspace memory region when preparing/committing a memory region. This will allow a future commit to drop @mem from the prepare/commit hooks once all architectures convert to using "new". Opportunistically wait to get the various "new" values until after filtering out the DELETE case in anticipation of a future commit passing NULL for @new when deleting a memslot. Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: <543608ab88a1190e73a958efffafc98d2652c067.1638817640.git.maciej.szmigiero@oracle.com> --- arch/riscv/kvm/mmu.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index 50380f525345..573ade138204 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -472,8 +472,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, * allocated dirty_bitmap[], dirty pages will be tracked while * the memory slot is write protected. 
*/ - if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) - stage2_wp_memory_region(kvm, mem->slot); + if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) + stage2_wp_memory_region(kvm, new->id); } int kvm_arch_prepare_memory_region(struct kvm *kvm, @@ -482,9 +482,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) { - hva_t hva = mem->userspace_addr; - hva_t reg_end = hva + mem->memory_size; - bool writable = !(mem->flags & KVM_MEM_READONLY); + hva_t hva, reg_end, size; + gpa_t base_gpa; + bool writable; int ret = 0; if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && @@ -499,6 +499,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, (stage2_gpa_size >> PAGE_SHIFT)) return -EFAULT; + hva = new->userspace_addr; + size = new->npages << PAGE_SHIFT; + reg_end = hva + size; + base_gpa = new->base_gfn << PAGE_SHIFT; + writable = !(new->flags & KVM_MEM_READONLY); + mmap_read_lock(current->mm); /* @@ -534,8 +540,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, vm_end = min(reg_end, vma->vm_end); if (vma->vm_flags & VM_PFNMAP) { - gpa_t gpa = mem->guest_phys_addr + - (vm_start - mem->userspace_addr); + gpa_t gpa = base_gpa + (vm_start - hva); phys_addr_t pa; pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; @@ -560,8 +565,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, spin_lock(&kvm->mmu_lock); if (ret) - stage2_unmap_range(kvm, mem->guest_phys_addr, - mem->memory_size, false); + stage2_unmap_range(kvm, base_gpa, size, false); spin_unlock(&kvm->mmu_lock); out: From 6a99c6e3f52a6f0d4c6ebcfa7359c718a19ffbe6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:18 +0100 Subject: [PATCH 037/268] KVM: Stop passing kvm_userspace_memory_region to arch memslot hooks Drop the @mem param from kvm_arch_{prepare,commit}_memory_region() now that its use has been removed in all architectures. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. 
Szmigiero Message-Id: --- arch/arm64/kvm/mmu.c | 2 -- arch/mips/kvm/mips.c | 2 -- arch/powerpc/kvm/powerpc.c | 2 -- arch/riscv/kvm/mmu.c | 2 -- arch/s390/kvm/kvm-s390.c | 2 -- arch/x86/kvm/x86.c | 2 -- include/linux/kvm_host.h | 2 -- virt/kvm/kvm_main.c | 9 ++++----- 8 files changed, 4 insertions(+), 19 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index dd95350ea15d..9b2d881ccf49 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1463,7 +1463,6 @@ out: } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -1486,7 +1485,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index bda717301db8..e59cb6246f76 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -214,7 +214,6 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -223,7 +222,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 575140ecb23c..f1233500f4dc 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -698,7 +698,6 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -707,7 +706,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index 573ade138204..7d884b15cf5e 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -462,7 +462,6 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -477,7 +476,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 959a568a97be..5dddd7817905 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5007,7 +5007,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -5035,7 
+5034,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2a7567adb799..f862c514c2c0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11674,7 +11674,6 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) } int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -11778,7 +11777,6 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, } void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cb7311dc6f32..da0d4f21a150 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -833,12 +833,10 @@ int __kvm_set_memory_region(struct kvm *kvm, void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change); void kvm_arch_commit_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 46060cc542ef..373079a03710 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1550,7 +1550,6 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to, } static int kvm_set_memslot(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem, struct kvm_memory_slot *new, enum kvm_mr_change change) { @@ -1636,7 +1635,7 @@ static int kvm_set_memslot(struct kvm *kvm, old.as_id = new->as_id; } - r = kvm_arch_prepare_memory_region(kvm, mem, &old, new, change); + r = kvm_arch_prepare_memory_region(kvm, &old, new, change); if (r) goto out_slots; @@ -1652,7 +1651,7 @@ static int kvm_set_memslot(struct kvm *kvm, else if (change == KVM_MR_CREATE) kvm->nr_memslot_pages += new->npages; - kvm_arch_commit_memory_region(kvm, mem, &old, new, change); + kvm_arch_commit_memory_region(kvm, &old, new, change); /* Free the old memslot's metadata. Note, this is the full copy!!! */ if (change == KVM_MR_DELETE) @@ -1737,7 +1736,7 @@ int __kvm_set_memory_region(struct kvm *kvm, new.id = id; new.as_id = as_id; - return kvm_set_memslot(kvm, mem, &new, KVM_MR_DELETE); + return kvm_set_memslot(kvm, &new, KVM_MR_DELETE); } new.as_id = as_id; @@ -1800,7 +1799,7 @@ int __kvm_set_memory_region(struct kvm *kvm, bitmap_set(new.dirty_bitmap, 0, new.npages); } - r = kvm_set_memslot(kvm, mem, &new, change); + r = kvm_set_memslot(kvm, &new, change); if (r) goto out_bitmap; From 07921665a651918350bc6653d4ca8a516a867b4b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:19 +0100 Subject: [PATCH 038/268] KVM: Use prepare/commit hooks to handle generic memslot metadata updates Handle the generic memslot metadata, a.k.a. dirty bitmap, updates at the same time that arch handles its own metadata updates, i.e. at memslot prepare and commit. 
This will simplify converting @new to a dynamically allocated object, and more closely aligns common KVM with architecture code. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <2ddd5446e3706fe3c1e52e3df279f04c458be830.1638817640.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 109 +++++++++++++++++++++++++++----------------- 1 file changed, 66 insertions(+), 43 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 373079a03710..ec5567e8442b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1549,6 +1549,69 @@ static void kvm_copy_memslots_arch(struct kvm_memslots *to, to->memslots[i].arch = from->memslots[i].arch; } +static int kvm_prepare_memory_region(struct kvm *kvm, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + int r; + + /* + * If dirty logging is disabled, nullify the bitmap; the old bitmap + * will be freed on "commit". If logging is enabled in both old and + * new, reuse the existing bitmap. If logging is enabled only in the + * new and KVM isn't using a ring buffer, allocate and initialize a + * new bitmap. + */ + if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) + new->dirty_bitmap = NULL; + else if (old->dirty_bitmap) + new->dirty_bitmap = old->dirty_bitmap; + else if (!kvm->dirty_ring_size) { + r = kvm_alloc_dirty_bitmap(new); + if (r) + return r; + + if (kvm_dirty_log_manual_protect_and_init_set(kvm)) + bitmap_set(new->dirty_bitmap, 0, new->npages); + } + + r = kvm_arch_prepare_memory_region(kvm, old, new, change); + + /* Free the bitmap on failure if it was allocated above. */ + if (r && new->dirty_bitmap && !old->dirty_bitmap) + kvm_destroy_dirty_bitmap(new); + + return r; +} + +static void kvm_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + /* + * Update the total number of memslot pages before calling the arch + * hook so that architectures can consume the result directly. + */ + if (change == KVM_MR_DELETE) + kvm->nr_memslot_pages -= old->npages; + else if (change == KVM_MR_CREATE) + kvm->nr_memslot_pages += new->npages; + + kvm_arch_commit_memory_region(kvm, old, new, change); + + /* + * Free the old memslot's metadata. On DELETE, free the whole thing, + * otherwise free the dirty bitmap as needed (the below effectively + * checks both the flags and whether a ring buffer is being used). + */ + if (change == KVM_MR_DELETE) + kvm_free_memslot(kvm, old); + else if (old->dirty_bitmap && !new->dirty_bitmap) + kvm_destroy_dirty_bitmap(old); +} + static int kvm_set_memslot(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) @@ -1635,27 +1698,14 @@ static int kvm_set_memslot(struct kvm *kvm, old.as_id = new->as_id; } - r = kvm_arch_prepare_memory_region(kvm, &old, new, change); + r = kvm_prepare_memory_region(kvm, &old, new, change); if (r) goto out_slots; update_memslots(slots, new, change); slots = install_new_memslots(kvm, new->as_id, slots); - /* - * Update the total number of memslot pages before calling the arch - * hook so that architectures can consume the result directly. - */ - if (change == KVM_MR_DELETE) - kvm->nr_memslot_pages -= old.npages; - else if (change == KVM_MR_CREATE) - kvm->nr_memslot_pages += new->npages; - - kvm_arch_commit_memory_region(kvm, &old, new, change); - - /* Free the old memslot's metadata. Note, this is the full copy!!! 
*/ - if (change == KVM_MR_DELETE) - kvm_free_memslot(kvm, &old); + kvm_commit_memory_region(kvm, &old, new, change); kvfree(slots); return 0; @@ -1751,7 +1801,6 @@ int __kvm_set_memory_region(struct kvm *kvm, if (!old.npages) { change = KVM_MR_CREATE; - new.dirty_bitmap = NULL; /* * To simplify KVM internals, the total number of pages across @@ -1771,9 +1820,6 @@ int __kvm_set_memory_region(struct kvm *kvm, change = KVM_MR_FLAGS_ONLY; else /* Nothing to change. */ return 0; - - /* Copy dirty_bitmap from the current memslot. */ - new.dirty_bitmap = old.dirty_bitmap; } if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { @@ -1787,30 +1833,7 @@ int __kvm_set_memory_region(struct kvm *kvm, } } - /* Allocate/free page dirty bitmap as needed */ - if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) - new.dirty_bitmap = NULL; - else if (!new.dirty_bitmap && !kvm->dirty_ring_size) { - r = kvm_alloc_dirty_bitmap(&new); - if (r) - return r; - - if (kvm_dirty_log_manual_protect_and_init_set(kvm)) - bitmap_set(new.dirty_bitmap, 0, new.npages); - } - - r = kvm_set_memslot(kvm, &new, change); - if (r) - goto out_bitmap; - - if (old.dirty_bitmap && !new.dirty_bitmap) - kvm_destroy_dirty_bitmap(&old); - return 0; - -out_bitmap: - if (new.dirty_bitmap && !old.dirty_bitmap) - kvm_destroy_dirty_bitmap(&new); - return r; + return kvm_set_memslot(kvm, &new, change); } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); From 77aedf26fe5d2795cd6aa1a75a8dd62dbac503e6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:20 +0100 Subject: [PATCH 039/268] KVM: x86: Don't assume old/new memslots are non-NULL at memslot commit Play nice with a NULL @old or @new when handling memslot updates so that common KVM can pass NULL for one or the other in CREATE and DELETE cases instead of having to synthesize a dummy memslot. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: <2eb7788adbdc2bc9a9c5f86844dd8ee5c8428732.1638817640.git.maciej.szmigiero@oracle.com> --- arch/x86/kvm/x86.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f862c514c2c0..aaf89c001c5c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11709,13 +11709,15 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - bool log_dirty_pages = new->flags & KVM_MEM_LOG_DIRTY_PAGES; + u32 old_flags = old ? old->flags : 0; + u32 new_flags = new ? new->flags : 0; + bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; /* * Update CPU dirty logging if dirty logging is being toggled. This * applies to all operations. */ - if ((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES) + if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages); /* @@ -11733,7 +11735,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, * MOVE/DELETE: The old mappings will already have been cleaned up by * kvm_arch_flush_shadow_memslot(). */ - if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) + if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY)) return; /* @@ -11741,7 +11743,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty * logging isn't being toggled on or off. 
*/ - if (WARN_ON_ONCE(!((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES))) + if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES))) return; if (!log_dirty_pages) { From ec5c86976674d2f5c0f389903d956eda1dc54a78 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:21 +0100 Subject: [PATCH 040/268] KVM: s390: Skip gfn/size sanity checks on memslot DELETE or FLAGS_ONLY Sanity check the hva, gfn, and size of a userspace memory region only if any of those properties can change, i.e. skip the checks for DELETE and FLAGS_ONLY. KVM doesn't allow moving the hva or changing the size, a gfn change shows up as a MOVE even if flags are being modified, and the checks are pointless for the DELETE case as userspace_addr and gfn_base are zeroed by common KVM. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: <05430738437ac2c9c7371ac4e11f4a533e1677da.1638817640.git.maciej.szmigiero@oracle.com> --- arch/s390/kvm/kvm-s390.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5dddd7817905..251059ff81fc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5011,7 +5011,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) { - gpa_t size = new->npages * PAGE_SIZE; + gpa_t size; + + /* When we are protected, we should not change the memory slots */ + if (kvm_s390_pv_get_handle(kvm)) + return -EINVAL; + + if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY) + return 0; /* A few sanity checks. We can have memory slots which have to be located/ended at a segment boundary (1MB). The memory in userland is @@ -5021,15 +5028,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (new->userspace_addr & 0xffffful) return -EINVAL; + size = new->npages * PAGE_SIZE; if (size & 0xffffful) return -EINVAL; if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) return -EINVAL; - /* When we are protected, we should not change the memory slots */ - if (kvm_s390_pv_get_handle(kvm)) - return -EINVAL; return 0; } From 7cd08553ab103a7ebca79035eb35b73418b2f475 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:22 +0100 Subject: [PATCH 041/268] KVM: Don't make a full copy of the old memslot in __kvm_set_memory_region() Stop making a full copy of the old memslot in __kvm_set_memory_region() now that metadata updates are handled by kvm_set_memslot(), i.e. now that the old memslot's dirty bitmap doesn't need to be referenced after the memslot and its pointer is modified/invalidated by kvm_set_memslot(). No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. 
Szmigiero Message-Id: <5dce0946b41bba8c83f6e3424c6955c56bcc9f86.1638817640.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 35 +++++++++++++---------------------- 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ec5567e8442b..8ccb1ac82d38 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1730,8 +1730,8 @@ out_slots: int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem) { - struct kvm_memory_slot old, new; - struct kvm_memory_slot *tmp; + struct kvm_memory_slot *old, *tmp; + struct kvm_memory_slot new; enum kvm_mr_change change; int as_id, id; int r; @@ -1761,25 +1761,16 @@ int __kvm_set_memory_region(struct kvm *kvm, return -EINVAL; /* - * Make a full copy of the old memslot, the pointer will become stale - * when the memslots are re-sorted by update_memslots(), and the old - * memslot needs to be referenced after calling update_memslots(), e.g. - * to free its resources and for arch specific behavior. + * Note, the old memslot (and the pointer itself!) may be invalidated + * and/or destroyed by kvm_set_memslot(). */ - tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id); - if (tmp) { - old = *tmp; - tmp = NULL; - } else { - memset(&old, 0, sizeof(old)); - old.id = id; - } + old = id_to_memslot(__kvm_memslots(kvm, as_id), id); if (!mem->memory_size) { - if (!old.npages) + if (!old || !old->npages) return -EINVAL; - if (WARN_ON_ONCE(kvm->nr_memslot_pages < old.npages)) + if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) return -EIO; memset(&new, 0, sizeof(new)); @@ -1799,7 +1790,7 @@ int __kvm_set_memory_region(struct kvm *kvm, if (new.npages > KVM_MEM_MAX_NR_PAGES) return -EINVAL; - if (!old.npages) { + if (!old || !old->npages) { change = KVM_MR_CREATE; /* @@ -1809,14 +1800,14 @@ int __kvm_set_memory_region(struct kvm *kvm, if ((kvm->nr_memslot_pages + new.npages) < kvm->nr_memslot_pages) return -EINVAL; } else { /* Modify an existing slot. */ - if ((new.userspace_addr != old.userspace_addr) || - (new.npages != old.npages) || - ((new.flags ^ old.flags) & KVM_MEM_READONLY)) + if ((new.userspace_addr != old->userspace_addr) || + (new.npages != old->npages) || + ((new.flags ^ old->flags) & KVM_MEM_READONLY)) return -EINVAL; - if (new.base_gfn != old.base_gfn) + if (new.base_gfn != old->base_gfn) change = KVM_MR_MOVE; - else if (new.flags != old.flags) + else if (new.flags != old->flags) change = KVM_MR_FLAGS_ONLY; else /* Nothing to change. */ return 0; From e0c2b6338ac8ca30d438157dc45396c3c1148563 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:23 +0100 Subject: [PATCH 042/268] KVM: x86: Don't call kvm_mmu_change_mmu_pages() if the count hasn't changed There is no point in calling kvm_mmu_change_mmu_pages() for memslot operations that don't change the total page count, so do it just for KVM_MR_CREATE and KVM_MR_DELETE. Signed-off-by: Maciej S. 
Szmigiero Reviewed-by: Sean Christopherson Message-Id: <9e56b7616a11f5654e4ab486b3237366b7ba9f2a.1638817640.git.maciej.szmigiero@oracle.com> --- arch/x86/kvm/x86.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index aaf89c001c5c..29a030a3c797 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11783,7 +11783,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_memory_slot *new, enum kvm_mr_change change) { - if (!kvm->arch.n_requested_mmu_pages) + if (!kvm->arch.n_requested_mmu_pages && + (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) kvm_mmu_change_mmu_pages(kvm, kvm_mmu_calculate_default_mmu_pages(kvm)); From f5756029eef501bcd39ecd844968e4fb3055c1bd Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:24 +0100 Subject: [PATCH 043/268] KVM: x86: Use nr_memslot_pages to avoid traversing the memslots array There is no point in recalculating from scratch the total number of pages in all memslots each time a memslot is created or deleted. Use KVM's cached nr_memslot_pages to compute the default max number of MMU pages. Note that even with nr_memslot_pages capped at ULONG_MAX we can't safely multiply it by KVM_PERMILLE_MMU_PAGES (20) since this operation can possibly overflow an unsigned long variable. Write this "* 20 / 1000" operation as "/ 50" instead to avoid such overflow. Signed-off-by: Maciej S. Szmigiero [sean: use common KVM field and rework changelog accordingly] Reviewed-by: Sean Christopherson Message-Id: --- arch/x86/include/asm/kvm_host.h | 3 +-- arch/x86/kvm/mmu/mmu.c | 24 ------------------------ arch/x86/kvm/x86.c | 10 +++++++--- 3 files changed, 8 insertions(+), 29 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d9f09535384b..552ed45f133b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -135,7 +135,7 @@ #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) -#define KVM_PERMILLE_MMU_PAGES 20 +#define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50 #define KVM_MIN_ALLOC_MMU_PAGES 64UL #define KVM_MMU_HASH_SHIFT 12 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) @@ -1592,7 +1592,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, const struct kvm_memory_slot *memslot); void kvm_mmu_zap_all(struct kvm *kvm); void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); -unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm); void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages); int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index e2e1d012df22..e41cf095f2d1 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -6150,30 +6150,6 @@ out: return ret; } -/* - * Calculate mmu pages needed for kvm. 
- */ -unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) -{ - unsigned long nr_mmu_pages; - unsigned long nr_pages = 0; - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - int i; - - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - slots = __kvm_memslots(kvm, i); - - kvm_for_each_memslot(memslot, slots) - nr_pages += memslot->npages; - } - - nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; - nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); - - return nr_mmu_pages; -} - void kvm_mmu_destroy(struct kvm_vcpu *vcpu) { kvm_mmu_unload(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 29a030a3c797..73e0e40c94b6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11784,9 +11784,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, enum kvm_mr_change change) { if (!kvm->arch.n_requested_mmu_pages && - (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) - kvm_mmu_change_mmu_pages(kvm, - kvm_mmu_calculate_default_mmu_pages(kvm)); + (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) { + unsigned long nr_mmu_pages; + + nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO; + nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES); + kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); + } kvm_mmu_slot_apply_flags(kvm, old, new, change); From c928bfc2632fa3dd6a3bd4504ac6d8e42302287a Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:25 +0100 Subject: [PATCH 044/268] KVM: Integrate gfn_to_memslot_approx() into search_memslots() s390 arch has gfn_to_memslot_approx() which is almost identical to search_memslots(), differing only in that in case the gfn falls in a hole one of the memslots bordering the hole is returned. Add this lookup mode as an option to search_memslots() so we don't have two almost identical functions for looking up a memslot by its gfn. Signed-off-by: Maciej S. Szmigiero [sean: tweaked helper names to keep gfn_to_memslot_approx() in s390] Reviewed-by: Sean Christopherson Message-Id: <171cd89b52c718dbe180ecd909b4437a64a7e2ec.1638817640.git.maciej.szmigiero@oracle.com> --- arch/s390/kvm/kvm-s390.c | 45 +++++++--------------------------------- include/linux/kvm_host.h | 35 ++++++++++++++++++++++++------- virt/kvm/kvm_main.c | 2 +- 3 files changed, 36 insertions(+), 46 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 251059ff81fc..631be750af08 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1943,41 +1943,6 @@ out: /* for consistency */ #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX) -/* - * Similar to gfn_to_memslot, but returns the index of a memslot also when the - * address falls in a hole. In that case the index of one of the memslots - * bordering the hole is returned. 
- */ -static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn) -{ - int start = 0, end = slots->used_slots; - int slot = atomic_read(&slots->last_used_slot); - struct kvm_memory_slot *memslots = slots->memslots; - - if (gfn >= memslots[slot].base_gfn && - gfn < memslots[slot].base_gfn + memslots[slot].npages) - return slot; - - while (start < end) { - slot = start + (end - start) / 2; - - if (gfn >= memslots[slot].base_gfn) - end = slot; - else - start = slot + 1; - } - - if (start >= slots->used_slots) - return slots->used_slots - 1; - - if (gfn >= memslots[start].base_gfn && - gfn < memslots[start].base_gfn + memslots[start].npages) { - atomic_set(&slots->last_used_slot, start); - } - - return start; -} - static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, u8 *res, unsigned long bufsize) { @@ -2001,11 +1966,17 @@ static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, return 0; } +static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots, + gfn_t gfn) +{ + return ____gfn_to_memslot(slots, gfn, true); +} + static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots, unsigned long cur_gfn) { - int slotidx = gfn_to_memslot_approx(slots, cur_gfn); - struct kvm_memory_slot *ms = slots->memslots + slotidx; + struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn); + int slotidx = ms - slots->memslots; unsigned long ofs = cur_gfn - ms->base_gfn; if (ms->base_gfn + ms->npages <= cur_gfn) { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index da0d4f21a150..2f80ce84fbcf 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1233,10 +1233,14 @@ try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) * Returns a pointer to the memslot that contains gfn and records the index of * the slot in index. Otherwise returns NULL. * + * With "approx" set returns the memslot also when the address falls + * in a hole. In that case one of the memslots bordering the hole is + * returned. + * * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! */ static inline struct kvm_memory_slot * -search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) +search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index, bool approx) { int start = 0, end = slots->used_slots; struct kvm_memory_slot *memslots = slots->memslots; @@ -1254,22 +1258,26 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) start = slot + 1; } + if (approx && start >= slots->used_slots) { + *index = slots->used_slots - 1; + return &memslots[slots->used_slots - 1]; + } + slot = try_get_memslot(slots, start, gfn); if (slot) { *index = start; return slot; } + if (approx) { + *index = start; + return &memslots[start]; + } return NULL; } -/* - * __gfn_to_memslot() and its descendants are here because it is called from - * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot() - * itself isn't here as an inline because that would bloat other code too much. 
- */ static inline struct kvm_memory_slot * -__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) +____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx) { struct kvm_memory_slot *slot; int slot_index = atomic_read(&slots->last_used_slot); @@ -1278,7 +1286,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) if (slot) return slot; - slot = search_memslots(slots, gfn, &slot_index); + slot = search_memslots(slots, gfn, &slot_index, approx); if (slot) { atomic_set(&slots->last_used_slot, slot_index); return slot; @@ -1287,6 +1295,17 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) return NULL; } +/* + * __gfn_to_memslot() and its descendants are here to allow arch code to inline + * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline + * because that would bloat other code too much. + */ +static inline struct kvm_memory_slot * +__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) +{ + return ____gfn_to_memslot(slots, gfn, false); +} + static inline unsigned long __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 8ccb1ac82d38..6ca076ae64a2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2141,7 +2141,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn * search_memslots() instead of __gfn_to_memslot() to avoid * thrashing the VM-wide last_used_index in kvm_memslots. */ - slot = search_memslots(slots, gfn, &slot_index); + slot = search_memslots(slots, gfn, &slot_index, false); if (slot) { vcpu->last_used_slot = slot_index; return slot; From 1e8617d37fc36407f9fce9c08ef8d254613c00de Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:26 +0100 Subject: [PATCH 045/268] KVM: Move WARN on invalid memslot index to update_memslots() Since kvm_memslot_move_forward() can theoretically return a negative memslot index even when kvm_memslot_move_backward() returned a positive one (and so did not WARN) let's just move the warning to the common code. Signed-off-by: Maciej S. Szmigiero Reviewed-by: Claudio Imbrenda Reviewed-by: Sean Christopherson Message-Id: --- virt/kvm/kvm_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6ca076ae64a2..a60d09beef61 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1322,8 +1322,7 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, struct kvm_memory_slot *mslots = slots->memslots; int i; - if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) || - WARN_ON_ONCE(!slots->used_slots)) + if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots) return -1; /* @@ -1427,6 +1426,9 @@ static void update_memslots(struct kvm_memslots *slots, i = kvm_memslot_move_backward(slots, memslot); i = kvm_memslot_move_forward(slots, memslot, i); + if (WARN_ON_ONCE(i < 0)) + return; + /* * Copy the memslot to its new position in memslots and update * its index accordingly. From 26b8345abc75a7404716864710930407b7d873f9 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:27 +0100 Subject: [PATCH 046/268] KVM: Resolve memslot ID via a hash table instead of via a static array Memslot ID to the corresponding memslot mappings are currently kept as indices in static id_to_index array. The size of this array depends on the maximum allowed memslot count (regardless of the number of memslots actually in use). 
This has become especially problematic recently, when memslot count cap was removed, so the maximum count is now full 32k memslots - the maximum allowed by the current KVM API. Keeping these IDs in a hash table (instead of an array) avoids this problem. Resolving a memslot ID to the actual memslot (instead of its index) will also enable transitioning away from an array-based implementation of the whole memslots structure in a later commit. Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: <117fb2c04320e6cd6cf34f205a72eadb0aa8d5f9.1638817640.git.maciej.szmigiero@oracle.com> --- include/linux/kvm_host.h | 25 +++++++---- virt/kvm/kvm_main.c | 95 +++++++++++++++++++++++++++++++--------- 2 files changed, 91 insertions(+), 29 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2f80ce84fbcf..79db70a8323e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -426,6 +427,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) struct kvm_memory_slot { + struct hlist_node id_node; gfn_t base_gfn; unsigned long npages; unsigned long *dirty_bitmap; @@ -527,8 +529,15 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) */ struct kvm_memslots { u64 generation; - /* The mapping table from slot id to the index in memslots[]. */ - short id_to_index[KVM_MEM_SLOTS_NUM]; + /* + * The mapping table from slot id to the index in memslots[]. + * + * 7-bit bucket count matches the size of the old id to index array for + * 512 slots, while giving good performance with this slot count. + * Higher bucket counts bring only small performance improvements but + * always result in higher memory usage (even for lower memslot counts). + */ + DECLARE_HASHTABLE(id_hash, 7); atomic_t last_used_slot; int used_slots; struct kvm_memory_slot memslots[]; @@ -796,16 +805,14 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) static inline struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) { - int index = slots->id_to_index[id]; struct kvm_memory_slot *slot; - if (index < 0) - return NULL; + hash_for_each_possible(slots->id_hash, slot, id_node, id) { + if (slot->id == id) + return slot; + } - slot = &slots->memslots[index]; - - WARN_ON(slot->id != id); - return slot; + return NULL; } /* diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a60d09beef61..dbff2ac9a8e3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -867,15 +867,13 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm) static struct kvm_memslots *kvm_alloc_memslots(void) { - int i; struct kvm_memslots *slots; slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT); if (!slots) return NULL; - for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) - slots->id_to_index[i] = -1; + hash_init(slots->id_hash); return slots; } @@ -1274,17 +1272,48 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) return 0; } +static void kvm_replace_memslot(struct kvm_memslots *slots, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new) +{ + /* + * Remove the old memslot from the hash list, copying the node data + * would corrupt the list. + */ + if (old) { + hash_del(&old->id_node); + + if (!new) + return; + + /* Copy the source *data*, not the pointer, to the destination. */ + *new = *old; + } + + /* (Re)Add the new memslot. 
*/ + hash_add(slots->id_hash, &new->id_node, new->id); +} + +static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src) +{ + struct kvm_memory_slot *mslots = slots->memslots; + + kvm_replace_memslot(slots, &mslots[src], &mslots[dst]); +} + /* * Delete a memslot by decrementing the number of used slots and shifting all * other entries in the array forward one spot. + * @memslot is a detached dummy struct with just .id and .as_id filled. */ static inline void kvm_memslot_delete(struct kvm_memslots *slots, struct kvm_memory_slot *memslot) { struct kvm_memory_slot *mslots = slots->memslots; + struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id); int i; - if (WARN_ON(slots->id_to_index[memslot->id] == -1)) + if (WARN_ON(!oldslot)) return; slots->used_slots--; @@ -1292,12 +1321,17 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots, if (atomic_read(&slots->last_used_slot) >= slots->used_slots) atomic_set(&slots->last_used_slot, 0); - for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) { - mslots[i] = mslots[i + 1]; - slots->id_to_index[mslots[i].id] = i; - } + /* + * Remove the to-be-deleted memslot from the list _before_ shifting + * the trailing memslots forward, its data will be overwritten. + * Defer the (somewhat pointless) copying of the memslot until after + * the last slot has been shifted to avoid overwriting said last slot. + */ + kvm_replace_memslot(slots, oldslot, NULL); + + for (i = oldslot - mslots; i < slots->used_slots; i++) + kvm_shift_memslot(slots, i, i + 1); mslots[i] = *memslot; - slots->id_to_index[memslot->id] = -1; } /* @@ -1315,30 +1349,39 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) * itself is not preserved in the array, i.e. not swapped at this time, only * its new index into the array is tracked. Returns the changed memslot's * current index into the memslots array. + * The memslot at the returned index will not be in @slots->id_hash by then. + * @memslot is a detached struct with desired final data of the changed slot. */ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, struct kvm_memory_slot *memslot) { struct kvm_memory_slot *mslots = slots->memslots; + struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id); int i; - if (slots->id_to_index[memslot->id] == -1 || !slots->used_slots) + if (!oldslot || !slots->used_slots) return -1; + /* + * Delete the slot from the hash table before sorting the remaining + * slots, the slot's data may be overwritten when copying slots as part + * of the sorting proccess. update_memslots() will unconditionally + * rewrite the entire slot and re-add it to the hash table. + */ + kvm_replace_memslot(slots, oldslot, NULL); + /* * Move the target memslot backward in the array by shifting existing * memslots with a higher GFN (than the target memslot) towards the * front of the array. */ - for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) { + for (i = oldslot - mslots; i < slots->used_slots - 1; i++) { if (memslot->base_gfn > mslots[i + 1].base_gfn) break; WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); - /* Shift the next memslot forward one and update its index. */ - mslots[i] = mslots[i + 1]; - slots->id_to_index[mslots[i].id] = i; + kvm_shift_memslot(slots, i, i + 1); } return i; } @@ -1349,6 +1392,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, * is not preserved in the array, i.e. 
not swapped at this time, only its new * index into the array is tracked. Returns the changed memslot's final index * into the memslots array. + * The memslot at the returned index will not be in @slots->id_hash by then. + * @memslot is a detached struct with desired final data of the new or + * changed slot. + * Assumes that the memslot at @start index is not in @slots->id_hash. */ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, struct kvm_memory_slot *memslot, @@ -1363,9 +1410,7 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); - /* Shift the next memslot back one and update its index. */ - mslots[i] = mslots[i - 1]; - slots->id_to_index[mslots[i].id] = i; + kvm_shift_memslot(slots, i, i - 1); } return i; } @@ -1410,6 +1455,9 @@ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, * most likely to be referenced, sorting it to the front of the array was * advantageous. The current binary search starts from the middle of the array * and uses an LRU pointer to improve performance for all memslots and GFNs. + * + * @memslot is a detached struct, not a part of the current or new memslot + * array. */ static void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *memslot, @@ -1434,7 +1482,7 @@ static void update_memslots(struct kvm_memslots *slots, * its index accordingly. */ slots->memslots[i] = *memslot; - slots->id_to_index[memslot->id] = i; + kvm_replace_memslot(slots, NULL, &slots->memslots[i]); } } @@ -1527,6 +1575,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, { struct kvm_memslots *slots; size_t new_size; + struct kvm_memory_slot *memslot; if (change == KVM_MR_CREATE) new_size = kvm_memslots_size(old->used_slots + 1); @@ -1534,8 +1583,14 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, new_size = kvm_memslots_size(old->used_slots); slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); - if (likely(slots)) - memcpy(slots, old, kvm_memslots_size(old->used_slots)); + if (unlikely(!slots)) + return NULL; + + memcpy(slots, old, kvm_memslots_size(old->used_slots)); + + hash_init(slots->id_hash); + kvm_for_each_memslot(memslot, slots) + hash_add(slots->id_hash, &memslot->id_node, memslot->id); return slots; } From ed922739c9199bf515a3e7fec3e319ce1edeef2a Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:28 +0100 Subject: [PATCH 047/268] KVM: Use interval tree to do fast hva lookup in memslots The current memslots implementation only allows quick binary search by gfn, quick lookup by hva is not possible - the implementation has to do a linear scan of the whole memslots array, even though the operation being performed might apply just to a single memslot. This significantly hurts performance of per-hva operations with higher memslot counts. Since hva ranges can overlap between memslots an interval tree is needed for tracking them. [sean: handle interval tree updates in kvm_replace_memslot()] Signed-off-by: Maciej S. 
Szmigiero Message-Id: --- arch/arm64/kvm/Kconfig | 1 + arch/mips/kvm/Kconfig | 1 + arch/powerpc/kvm/Kconfig | 1 + arch/s390/kvm/Kconfig | 1 + arch/x86/kvm/Kconfig | 1 + include/linux/kvm_host.h | 3 +++ virt/kvm/kvm_main.c | 53 +++++++++++++++++++++++++++++----------- 7 files changed, 47 insertions(+), 14 deletions(-) diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 8ffcbe29395e..f1f8fc069a97 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -39,6 +39,7 @@ menuconfig KVM select HAVE_KVM_IRQ_BYPASS select HAVE_KVM_VCPU_RUN_PID_CHANGE select SCHED_INFO + select INTERVAL_TREE help Support hosting virtualized guest machines. diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index a77297480f56..91d197bee9c0 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -27,6 +27,7 @@ config KVM select KVM_MMIO select MMU_NOTIFIER select SRCU + select INTERVAL_TREE help Support for hosting Guest kernels. diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index ff581d70f20c..e4c24f524ba8 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -26,6 +26,7 @@ config KVM select KVM_VFIO select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS + select INTERVAL_TREE config KVM_BOOK3S_HANDLER bool diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 67a8e770e369..2e84d3922f7c 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -33,6 +33,7 @@ config KVM select HAVE_KVM_NO_POLL select SRCU select KVM_VFIO + select INTERVAL_TREE help Support hosting paravirtualized guest machines using the SIE virtualization capability on the mainframe. This should work diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 619186138176..7618bef0a4a9 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -43,6 +43,7 @@ config KVM select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_VFIO select SRCU + select INTERVAL_TREE select HAVE_KVM_PM_NOTIFIER if PM help Support hosting fully virtualized guest machines using hardware diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 79db70a8323e..9552ad6d6652 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -428,6 +429,7 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) struct kvm_memory_slot { struct hlist_node id_node; + struct interval_tree_node hva_node; gfn_t base_gfn; unsigned long npages; unsigned long *dirty_bitmap; @@ -529,6 +531,7 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) */ struct kvm_memslots { u64 generation; + struct rb_root_cached hva_tree; /* * The mapping table from slot id to the index in memslots[]. 
* diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index dbff2ac9a8e3..6ba7468bdbe3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -512,6 +512,12 @@ static void kvm_null_fn(void) } #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn) +/* Iterate over each memslot intersecting [start, last] (inclusive) range */ +#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \ + for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \ + node; \ + node = interval_tree_iter_next(node, start, last)) \ + static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, const struct kvm_hva_range *range) { @@ -521,6 +527,9 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, struct kvm_memslots *slots; int i, idx; + if (WARN_ON_ONCE(range->end <= range->start)) + return 0; + /* A null handler is allowed if and only if on_lock() is provided. */ if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) && IS_KVM_NULL_FN(range->handler))) @@ -529,15 +538,17 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, idx = srcu_read_lock(&kvm->srcu); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + struct interval_tree_node *node; + slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(slot, slots) { + kvm_for_each_memslot_in_hva_range(node, slots, + range->start, range->end - 1) { unsigned long hva_start, hva_end; + slot = container_of(node, struct kvm_memory_slot, hva_node); hva_start = max(range->start, slot->userspace_addr); hva_end = min(range->end, slot->userspace_addr + (slot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; /* * To optimize for the likely case where the address @@ -873,6 +884,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void) if (!slots) return NULL; + slots->hva_tree = RB_ROOT_CACHED; hash_init(slots->id_hash); return slots; @@ -1277,21 +1289,28 @@ static void kvm_replace_memslot(struct kvm_memslots *slots, struct kvm_memory_slot *new) { /* - * Remove the old memslot from the hash list, copying the node data - * would corrupt the list. + * Remove the old memslot from the hash list and interval tree, copying + * the node data would corrupt the structures. */ if (old) { hash_del(&old->id_node); + interval_tree_remove(&old->hva_node, &slots->hva_tree); if (!new) return; /* Copy the source *data*, not the pointer, to the destination. */ *new = *old; + } else { + /* If @old is NULL, initialize @new's hva range. */ + new->hva_node.start = new->userspace_addr; + new->hva_node.last = new->userspace_addr + + (new->npages << PAGE_SHIFT) - 1; } /* (Re)Add the new memslot. */ hash_add(slots->id_hash, &new->id_node, new->id); + interval_tree_insert(&new->hva_node, &slots->hva_tree); } static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src) @@ -1322,7 +1341,7 @@ static inline void kvm_memslot_delete(struct kvm_memslots *slots, atomic_set(&slots->last_used_slot, 0); /* - * Remove the to-be-deleted memslot from the list _before_ shifting + * Remove the to-be-deleted memslot from the list/tree _before_ shifting * the trailing memslots forward, its data will be overwritten. * Defer the (somewhat pointless) copying of the memslot until after * the last slot has been shifted to avoid overwriting said last slot. @@ -1349,7 +1368,8 @@ static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) * itself is not preserved in the array, i.e. not swapped at this time, only * its new index into the array is tracked. Returns the changed memslot's * current index into the memslots array. 
- * The memslot at the returned index will not be in @slots->id_hash by then. + * The memslot at the returned index will not be in @slots->hva_tree or + * @slots->id_hash by then. * @memslot is a detached struct with desired final data of the changed slot. */ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, @@ -1363,10 +1383,10 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, return -1; /* - * Delete the slot from the hash table before sorting the remaining - * slots, the slot's data may be overwritten when copying slots as part - * of the sorting proccess. update_memslots() will unconditionally - * rewrite the entire slot and re-add it to the hash table. + * Delete the slot from the hash table and interval tree before sorting + * the remaining slots, the slot's data may be overwritten when copying + * slots as part of the sorting proccess. update_memslots() will + * unconditionally rewrite and re-add the entire slot. */ kvm_replace_memslot(slots, oldslot, NULL); @@ -1392,10 +1412,12 @@ static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, * is not preserved in the array, i.e. not swapped at this time, only its new * index into the array is tracked. Returns the changed memslot's final index * into the memslots array. - * The memslot at the returned index will not be in @slots->id_hash by then. + * The memslot at the returned index will not be in @slots->hva_tree or + * @slots->id_hash by then. * @memslot is a detached struct with desired final data of the new or * changed slot. - * Assumes that the memslot at @start index is not in @slots->id_hash. + * Assumes that the memslot at @start index is not in @slots->hva_tree or + * @slots->id_hash. */ static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, struct kvm_memory_slot *memslot, @@ -1588,9 +1610,12 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, memcpy(slots, old, kvm_memslots_size(old->used_slots)); + slots->hva_tree = RB_ROOT_CACHED; hash_init(slots->id_hash); - kvm_for_each_memslot(memslot, slots) + kvm_for_each_memslot(memslot, slots) { + interval_tree_insert(&memslot->hva_node, &slots->hva_tree); hash_add(slots->id_hash, &memslot->id_node, memslot->id); + } return slots; } From 6a656832aa75784d02dccd8d37fc5c0896064c2e Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:29 +0100 Subject: [PATCH 048/268] KVM: s390: Introduce kvm_s390_get_gfn_end() And use it where s390 code would just access the memslot with the highest gfn directly. No functional change intended. Signed-off-by: Maciej S. 
Szmigiero Reviewed-by: Claudio Imbrenda Message-Id: <42496041d6af1c23b1cbba2636b344ca8d5fc3af.1638817641.git.maciej.szmigiero@oracle.com> --- arch/s390/kvm/kvm-s390.c | 2 +- arch/s390/kvm/kvm-s390.h | 12 ++++++++++++ arch/s390/kvm/pv.c | 4 +--- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 631be750af08..5044b2a2c0cc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2014,7 +2014,7 @@ static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, if (!ms) return 0; next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1); - mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages; + mem_end = kvm_s390_get_gfn_end(slots); while (args->count < bufsize) { hva = gfn_to_hva(kvm, cur_gfn); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index b887fe7a7064..cc309cc37e96 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -217,6 +217,18 @@ static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm) kvm->arch.user_cpu_state_ctrl = 1; } +/* get the end gfn of the last (highest gfn) memslot */ +static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots) +{ + struct kvm_memory_slot *ms; + + if (WARN_ON(!slots->used_slots)) + return 0; + + ms = slots->memslots; + return ms->base_gfn + ms->npages; +} + /* implemented in pv.c */ int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc); int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc); diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c index 00d272d134c2..7f7c0d6af2ce 100644 --- a/arch/s390/kvm/pv.c +++ b/arch/s390/kvm/pv.c @@ -116,7 +116,6 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm) unsigned long base = uv_info.guest_base_stor_len; unsigned long virt = uv_info.guest_virt_var_stor_len; unsigned long npages = 0, vlen = 0; - struct kvm_memory_slot *memslot; kvm->arch.pv.stor_var = NULL; kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base)); @@ -130,8 +129,7 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm) * Slots are sorted by GFN */ mutex_lock(&kvm->slots_lock); - memslot = kvm_memslots(kvm)->memslots; - npages = memslot->base_gfn + memslot->npages; + npages = kvm_s390_get_gfn_end(kvm_memslots(kvm)); mutex_unlock(&kvm->slots_lock); kvm->arch.pv.guest_len = npages * PAGE_SIZE; From a54d806688fe1e482350ce759a8a0fc9ebf814b0 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:30 +0100 Subject: [PATCH 049/268] KVM: Keep memslots in tree-based structures instead of array-based ones The current memslot code uses a (reverse gfn-ordered) memslot array for keeping track of them. Because the memslot array that is currently in use cannot be modified every memslot management operation (create, delete, move, change flags) has to make a copy of the whole array so it has a scratch copy to work on. Strictly speaking, however, it is only necessary to make copy of the memslot that is being modified, copying all the memslots currently present is just a limitation of the array-based memslot implementation. Two memslot sets, however, are still needed so the VM continues to run on the currently active set while the requested operation is being performed on the second, currently inactive one. In order to have two memslot sets, but only one copy of actual memslots it is necessary to split out the memslot data from the memslot sets. 
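A reduced, self-contained illustration of that split (plain C with singly linked lists instead of the hash table and rbtrees used below; the struct names are made up for the example): each slot object embeds one link node per set, and each set selects its node through its own node_idx, so both sets can reference the very same slot objects without copying them.

#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; };

struct slot {
        struct node link[2];    /* one embedded node per memslot set */
        int id;
};

struct set {
        struct node head;       /* NULL-terminated singly linked list */
        int node_idx;           /* which embedded node this set uses */
};

static void set_add(struct set *s, struct slot *m)
{
        m->link[s->node_idx].next = s->head.next;
        s->head.next = &m->link[s->node_idx];
}

static void set_dump(const struct set *s)
{
        const struct node *n;

        for (n = s->head.next; n; n = n->next) {
                /* container_of() by hand: step back from link[node_idx] to the slot. */
                const char *base = (const char *)n - offsetof(struct slot, link)
                                   - s->node_idx * sizeof(struct node);
                const struct slot *m = (const struct slot *)base;

                printf("set %d sees slot %d\n", s->node_idx, m->id);
        }
}

int main(void)
{
        struct set active = { .node_idx = 0 }, inactive = { .node_idx = 1 };
        struct slot a = { .id = 1 }, b = { .id = 2 };

        /* Both sets reference the same two slot objects; no copies are made. */
        set_add(&active, &a);
        set_add(&active, &b);
        set_add(&inactive, &a);
        set_add(&inactive, &b);

        set_dump(&active);
        set_dump(&inactive);
        return 0;
}

The structures in the hunks below apply the same idea, just via container_of() on id_node[2]/hva_node[2]/gfn_node[2] with the node_idx stored in each struct kvm_memslots.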
The memslots themselves should be also kept independent of each other so they can be individually added or deleted. These two memslot sets should normally point to the same set of memslots. They can, however, be desynchronized when performing a memslot management operation by replacing the memslot to be modified by its copy. After the operation is complete, both memslot sets once again point to the same, common set of memslot data. This commit implements the aforementioned idea. For tracking of gfns an ordinary rbtree is used since memslots cannot overlap in the guest address space and so this data structure is sufficient for ensuring that lookups are done quickly. The "last used slot" mini-caches (both per-slot set one and per-vCPU one), that keep track of the last found-by-gfn memslot, are still present in the new code. Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Message-Id: <17c0cf3663b760a0d3753d4ac08c0753e941b811.1638817641.git.maciej.szmigiero@oracle.com> --- arch/arm64/kvm/mmu.c | 8 +- arch/powerpc/kvm/book3s_64_mmu_hv.c | 4 +- arch/powerpc/kvm/book3s_hv.c | 3 +- arch/powerpc/kvm/book3s_hv_nested.c | 4 +- arch/powerpc/kvm/book3s_hv_uvmem.c | 14 +- arch/s390/kvm/kvm-s390.c | 24 +- arch/s390/kvm/kvm-s390.h | 6 +- arch/x86/kvm/debugfs.c | 6 +- arch/x86/kvm/mmu/mmu.c | 8 +- include/linux/kvm_host.h | 141 +++-- virt/kvm/kvm_main.c | 815 ++++++++++++++-------------- 11 files changed, 529 insertions(+), 504 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 9b2d881ccf49..e65acf35cee3 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -210,13 +210,13 @@ static void stage2_flush_vm(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int idx; + int idx, bkt; idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) + kvm_for_each_memslot(memslot, bkt, slots) stage2_flush_memslot(kvm, memslot); spin_unlock(&kvm->mmu_lock); @@ -595,14 +595,14 @@ void stage2_unmap_vm(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int idx; + int idx, bkt; idx = srcu_read_lock(&kvm->srcu); mmap_read_lock(current->mm); spin_lock(&kvm->mmu_lock); slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) + kvm_for_each_memslot(memslot, bkt, slots) stage2_unmap_memslot(kvm, memslot); spin_unlock(&kvm->mmu_lock); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index c63e263312a4..213232914367 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -734,11 +734,11 @@ void kvmppc_rmap_reset(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int srcu_idx; + int srcu_idx, bkt; srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { /* Mutual exclusion with kvm_unmap_hva_range etc. 
*/ spin_lock(&kvm->mmu_lock); /* diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 2b59ecc5f8c6..51e1c29a6fa0 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5880,11 +5880,12 @@ static int kvmhv_svm_off(struct kvm *kvm) for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { struct kvm_memory_slot *memslot; struct kvm_memslots *slots = __kvm_memslots(kvm, i); + int bkt; if (!slots) continue; - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { kvmppc_uvmem_drop_pages(memslot, kvm, true); uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); } diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index ed8a2c9f5629..9435e482d514 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -749,7 +749,7 @@ void kvmhv_release_all_nested(struct kvm *kvm) struct kvm_nested_guest *gp; struct kvm_nested_guest *freelist = NULL; struct kvm_memory_slot *memslot; - int srcu_idx; + int srcu_idx, bkt; spin_lock(&kvm->mmu_lock); for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { @@ -770,7 +770,7 @@ void kvmhv_release_all_nested(struct kvm *kvm) } srcu_idx = srcu_read_lock(&kvm->srcu); - kvm_for_each_memslot(memslot, kvm_memslots(kvm)) + kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) kvmhv_free_memslot_nest_rmap(memslot); srcu_read_unlock(&kvm->srcu, srcu_idx); } diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 28c436df9935..e414ca44839f 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -459,7 +459,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) struct kvm_memslots *slots; struct kvm_memory_slot *memslot, *m; int ret = H_SUCCESS; - int srcu_idx; + int srcu_idx, bkt; kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; @@ -478,7 +478,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) /* register the memslot */ slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { ret = __kvmppc_uvmem_memslot_create(kvm, memslot); if (ret) break; @@ -486,7 +486,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) if (ret) { slots = kvm_memslots(kvm); - kvm_for_each_memslot(m, slots) { + kvm_for_each_memslot(m, bkt, slots) { if (m == memslot) break; __kvmppc_uvmem_memslot_delete(kvm, memslot); @@ -647,7 +647,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot, unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm) { - int srcu_idx; + int srcu_idx, bkt; struct kvm_memory_slot *memslot; /* @@ -662,7 +662,7 @@ unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm) srcu_idx = srcu_read_lock(&kvm->srcu); - kvm_for_each_memslot(memslot, kvm_memslots(kvm)) + kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) kvmppc_uvmem_drop_pages(memslot, kvm, false); srcu_read_unlock(&kvm->srcu, srcu_idx); @@ -821,7 +821,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; - int srcu_idx; + int srcu_idx, bkt; long ret = H_SUCCESS; if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) @@ -830,7 +830,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) /* migrate any unmoved normal pfn to device pfns*/ srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { ret = kvmppc_uv_migrate_mem_slot(kvm, memslot); if (ret) { /* diff --git 
a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5044b2a2c0cc..b943a589ee41 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1037,13 +1037,13 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) struct kvm_memory_slot *ms; struct kvm_memslots *slots; unsigned long ram_pages = 0; - int slotnr; + int bkt; /* migration mode already enabled */ if (kvm->arch.migration_mode) return 0; slots = kvm_memslots(kvm); - if (!slots || !slots->used_slots) + if (!slots || kvm_memslots_empty(slots)) return -EINVAL; if (!kvm->arch.use_cmma) { @@ -1051,8 +1051,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) return 0; } /* mark all the pages in active slots as dirty */ - for (slotnr = 0; slotnr < slots->used_slots; slotnr++) { - ms = slots->memslots + slotnr; + kvm_for_each_memslot(ms, bkt, slots) { if (!ms->dirty_bitmap) return -EINVAL; /* @@ -1976,22 +1975,21 @@ static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots, unsigned long cur_gfn) { struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn); - int slotidx = ms - slots->memslots; unsigned long ofs = cur_gfn - ms->base_gfn; + struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; if (ms->base_gfn + ms->npages <= cur_gfn) { - slotidx--; + mnode = rb_next(mnode); /* If we are above the highest slot, wrap around */ - if (slotidx < 0) - slotidx = slots->used_slots - 1; + if (!mnode) + mnode = rb_first(&slots->gfn_tree); - ms = slots->memslots + slotidx; + ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); ofs = 0; } ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); - while ((slotidx > 0) && (ofs >= ms->npages)) { - slotidx--; - ms = slots->memslots + slotidx; + while (ofs >= ms->npages && (mnode = rb_next(mnode))) { + ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0); } return ms->base_gfn + ofs; @@ -2004,7 +2002,7 @@ static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, struct kvm_memslots *slots = kvm_memslots(kvm); struct kvm_memory_slot *ms; - if (unlikely(!slots->used_slots)) + if (unlikely(kvm_memslots_empty(slots))) return 0; cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index cc309cc37e96..60f0effcce99 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -220,12 +220,14 @@ static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm) /* get the end gfn of the last (highest gfn) memslot */ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots) { + struct rb_node *node; struct kvm_memory_slot *ms; - if (WARN_ON(!slots->used_slots)) + if (WARN_ON(kvm_memslots_empty(slots))) return 0; - ms = slots->memslots; + node = rb_last(&slots->gfn_tree); + ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]); return ms->base_gfn + ms->npages; } diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c index 54a83a744538..543a8c04025c 100644 --- a/arch/x86/kvm/debugfs.c +++ b/arch/x86/kvm/debugfs.c @@ -107,9 +107,10 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v) write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + int bkt; + slots = __kvm_memslots(kvm, i); - for (j = 0; j < slots->used_slots; j++) { - slot = &slots->memslots[j]; + kvm_for_each_memslot(slot, bkt, slots) for (k = 0; k < KVM_NR_PAGE_SIZES; k++) { rmap = slot->arch.rmap[k]; 
lpage_size = kvm_mmu_slot_lpages(slot, k + 1); @@ -121,7 +122,6 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v) cur[index]++; } } - } } write_unlock(&kvm->mmu_lock); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index e41cf095f2d1..c61430994d19 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3409,7 +3409,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm) { struct kvm_memslots *slots; struct kvm_memory_slot *slot; - int r = 0, i; + int r = 0, i, bkt; /* * Check if this is the first shadow root being allocated before @@ -3434,7 +3434,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm) for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(slot, slots) { + kvm_for_each_memslot(slot, bkt, slots) { /* * Both of these functions are no-ops if the target is * already allocated, so unconditionally calling both @@ -5730,14 +5730,14 @@ static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) struct kvm_memslots *slots; bool flush = false; gfn_t start, end; - int i; + int i, bkt; if (!kvm_memslots_have_rmaps(kvm)) return flush; for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(memslot, slots) { + kvm_for_each_memslot(memslot, bkt, slots) { start = max(gfn_start, memslot->base_gfn); end = min(gfn_end, memslot->base_gfn + memslot->npages); if (start >= end) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9552ad6d6652..9eda8a63feae 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -358,11 +359,13 @@ struct kvm_vcpu { struct kvm_dirty_ring dirty_ring; /* - * The index of the most recently used memslot by this vCPU. It's ok - * if this becomes stale due to memslot changes since we always check - * it is a valid slot. + * The most recently used memslot by this vCPU and the slots generation + * for which it is valid. + * No wraparound protection is needed since generations won't overflow in + * thousands of years, even assuming 1M memslot operations per second. */ - int last_used_slot; + struct kvm_memory_slot *last_used_slot; + u64 last_used_slot_gen; }; /* must be called with irqs disabled */ @@ -427,9 +430,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) */ #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) +/* + * Since at idle each memslot belongs to two memslot sets it has to contain + * two embedded nodes for each data structure that it forms a part of. + * + * Two memslot sets (one active and one inactive) are necessary so the VM + * continues to run on one memslot set while the other is being modified. + * + * These two memslot sets normally point to the same set of memslots. + * They can, however, be desynchronized when performing a memslot management + * operation by replacing the memslot to be modified by its copy. + * After the operation is complete, both memslot sets once again point to + * the same, common set of memslot data. + * + * The memslots themselves are independent of each other so they can be + * individually added or deleted. 
+ */ struct kvm_memory_slot { - struct hlist_node id_node; - struct interval_tree_node hva_node; + struct hlist_node id_node[2]; + struct interval_tree_node hva_node[2]; + struct rb_node gfn_node[2]; gfn_t base_gfn; unsigned long npages; unsigned long *dirty_bitmap; @@ -524,16 +544,13 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) } #endif -/* - * Note: - * memslots are not sorted by id anymore, please use id_to_memslot() - * to get the memslot by its id. - */ struct kvm_memslots { u64 generation; + atomic_long_t last_used_slot; struct rb_root_cached hva_tree; + struct rb_root gfn_tree; /* - * The mapping table from slot id to the index in memslots[]. + * The mapping table from slot id to memslot. * * 7-bit bucket count matches the size of the old id to index array for * 512 slots, while giving good performance with this slot count. @@ -541,9 +558,7 @@ struct kvm_memslots { * always result in higher memory usage (even for lower memslot counts). */ DECLARE_HASHTABLE(id_hash, 7); - atomic_t last_used_slot; - int used_slots; - struct kvm_memory_slot memslots[]; + int node_idx; }; struct kvm { @@ -565,6 +580,9 @@ struct kvm { struct mutex slots_arch_lock; struct mm_struct *mm; /* userspace tied to this vm */ unsigned long nr_memslot_pages; + /* The two memslot sets - active and inactive (per address space) */ + struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2]; + /* The current active memslot set for each address space */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct xarray vcpu_array; @@ -739,11 +757,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) return NULL; } -#define kvm_for_each_memslot(memslot, slots) \ - for (memslot = &slots->memslots[0]; \ - memslot < slots->memslots + slots->used_slots; memslot++) \ - if (WARN_ON_ONCE(!memslot->npages)) { \ - } else +static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) +{ + return vcpu->vcpu_idx; +} void kvm_destroy_vcpus(struct kvm *kvm); @@ -805,12 +822,23 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) return __kvm_memslots(vcpu->kvm, as_id); } +static inline bool kvm_memslots_empty(struct kvm_memslots *slots) +{ + return RB_EMPTY_ROOT(&slots->gfn_tree); +} + +#define kvm_for_each_memslot(memslot, bkt, slots) \ + hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \ + if (WARN_ON_ONCE(!memslot->npages)) { \ + } else + static inline struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) { struct kvm_memory_slot *slot; + int idx = slots->node_idx; - hash_for_each_possible(slots->id_hash, slot, id_node, id) { + hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) { if (slot->id == id) return slot; } @@ -1214,25 +1242,15 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* - * Returns a pointer to the memslot at slot_index if it contains gfn. + * Returns a pointer to the memslot if it contains gfn. * Otherwise returns NULL. */ static inline struct kvm_memory_slot * -try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) +try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { - struct kvm_memory_slot *slot; - - if (slot_index < 0 || slot_index >= slots->used_slots) + if (!slot) return NULL; - /* - * slot_index can come from vcpu->last_used_slot which is not kept - * in sync with userspace-controllable memslot deletion. 
So use nospec - * to prevent the CPU from speculating past the end of memslots[]. - */ - slot_index = array_index_nospec(slot_index, slots->used_slots); - slot = &slots->memslots[slot_index]; - if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) return slot; else @@ -1240,65 +1258,46 @@ try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) } /* - * Returns a pointer to the memslot that contains gfn and records the index of - * the slot in index. Otherwise returns NULL. + * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL. * * With "approx" set returns the memslot also when the address falls * in a hole. In that case one of the memslots bordering the hole is * returned. - * - * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! */ static inline struct kvm_memory_slot * -search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index, bool approx) +search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx) { - int start = 0, end = slots->used_slots; - struct kvm_memory_slot *memslots = slots->memslots; struct kvm_memory_slot *slot; + struct rb_node *node; + int idx = slots->node_idx; - if (unlikely(!slots->used_slots)) - return NULL; - - while (start < end) { - int slot = start + (end - start) / 2; - - if (gfn >= memslots[slot].base_gfn) - end = slot; - else - start = slot + 1; + slot = NULL; + for (node = slots->gfn_tree.rb_node; node; ) { + slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]); + if (gfn >= slot->base_gfn) { + if (gfn < slot->base_gfn + slot->npages) + return slot; + node = node->rb_right; + } else + node = node->rb_left; } - if (approx && start >= slots->used_slots) { - *index = slots->used_slots - 1; - return &memslots[slots->used_slots - 1]; - } - - slot = try_get_memslot(slots, start, gfn); - if (slot) { - *index = start; - return slot; - } - if (approx) { - *index = start; - return &memslots[start]; - } - - return NULL; + return approx ? 
slot : NULL; } static inline struct kvm_memory_slot * ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx) { struct kvm_memory_slot *slot; - int slot_index = atomic_read(&slots->last_used_slot); - slot = try_get_memslot(slots, slot_index, gfn); + slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot); + slot = try_get_memslot(slot, gfn); if (slot) return slot; - slot = search_memslots(slots, gfn, &slot_index, approx); + slot = search_memslots(slots, gfn, approx); if (slot) { - atomic_set(&slots->last_used_slot, slot_index); + atomic_long_set(&slots->last_used_slot, (unsigned long)slot); return slot; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6ba7468bdbe3..a87df97e0b14 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -433,7 +433,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) vcpu->preempted = false; vcpu->ready = false; preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); - vcpu->last_used_slot = 0; + vcpu->last_used_slot = NULL; } static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) @@ -545,7 +545,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, range->start, range->end - 1) { unsigned long hva_start, hva_end; - slot = container_of(node, struct kvm_memory_slot, hva_node); + slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]); hva_start = max(range->start, slot->userspace_addr); hva_end = min(range->end, slot->userspace_addr + (slot->npages << PAGE_SHIFT)); @@ -876,20 +876,6 @@ static void kvm_destroy_pm_notifier(struct kvm *kvm) } #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ -static struct kvm_memslots *kvm_alloc_memslots(void) -{ - struct kvm_memslots *slots; - - slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT); - if (!slots) - return NULL; - - slots->hva_tree = RB_ROOT_CACHED; - hash_init(slots->id_hash); - - return slots; -} - static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) { if (!memslot->dirty_bitmap) @@ -899,27 +885,33 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) memslot->dirty_bitmap = NULL; } +/* This does not remove the slot from struct kvm_memslots data structures */ static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_destroy_dirty_bitmap(slot); kvm_arch_free_memslot(kvm, slot); - slot->flags = 0; - slot->npages = 0; + kfree(slot); } static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) { + struct hlist_node *idnode; struct kvm_memory_slot *memslot; + int bkt; - if (!slots) + /* + * The same memslot objects live in both active and inactive sets, + * arbitrarily free using index '1' so the second invocation of this + * function isn't operating over a structure with dangling pointers + * (even though this function isn't actually touching them). 
+ */ + if (!slots->node_idx) return; - kvm_for_each_memslot(memslot, slots) + hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1]) kvm_free_memslot(kvm, memslot); - - kvfree(slots); } static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) @@ -1058,8 +1050,9 @@ int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) static struct kvm *kvm_create_vm(unsigned long type) { struct kvm *kvm = kvm_arch_alloc_vm(); + struct kvm_memslots *slots; int r = -ENOMEM; - int i; + int i, j; if (!kvm) return ERR_PTR(-ENOMEM); @@ -1087,13 +1080,20 @@ static struct kvm *kvm_create_vm(unsigned long type) refcount_set(&kvm->users_count, 1); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - struct kvm_memslots *slots = kvm_alloc_memslots(); + for (j = 0; j < 2; j++) { + slots = &kvm->__memslots[i][j]; - if (!slots) - goto out_err_no_arch_destroy_vm; - /* Generations must be different for each address space. */ - slots->generation = i; - rcu_assign_pointer(kvm->memslots[i], slots); + atomic_long_set(&slots->last_used_slot, (unsigned long)NULL); + slots->hva_tree = RB_ROOT_CACHED; + slots->gfn_tree = RB_ROOT; + hash_init(slots->id_hash); + slots->node_idx = j; + + /* Generations must be different for each address space. */ + slots->generation = i; + } + + rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]); } for (i = 0; i < KVM_NR_BUSES; i++) { @@ -1147,8 +1147,6 @@ out_err_no_arch_destroy_vm: WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); for (i = 0; i < KVM_NR_BUSES; i++) kfree(kvm_get_bus(kvm, i)); - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) - kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); cleanup_srcu_struct(&kvm->irq_srcu); out_err_no_irq_srcu: cleanup_srcu_struct(&kvm->srcu); @@ -1213,8 +1211,10 @@ static void kvm_destroy_vm(struct kvm *kvm) #endif kvm_arch_destroy_vm(kvm); kvm_destroy_devices(kvm); - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) - kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { + kvm_free_memslots(kvm, &kvm->__memslots[i][0]); + kvm_free_memslots(kvm, &kvm->__memslots[i][1]); + } cleanup_srcu_struct(&kvm->irq_srcu); cleanup_srcu_struct(&kvm->srcu); kvm_arch_free_vm(kvm); @@ -1284,227 +1284,136 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) return 0; } -static void kvm_replace_memslot(struct kvm_memslots *slots, +static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id) +{ + struct kvm_memslots *active = __kvm_memslots(kvm, as_id); + int node_idx_inactive = active->node_idx ^ 1; + + return &kvm->__memslots[as_id][node_idx_inactive]; +} + +/* + * Helper to get the address space ID when one of memslot pointers may be NULL. + * This also serves as a sanity that at least one of the pointers is non-NULL, + * and that their address space IDs don't diverge. 
+ */ +static int kvm_memslots_get_as_id(struct kvm_memory_slot *a, + struct kvm_memory_slot *b) +{ + if (WARN_ON_ONCE(!a && !b)) + return 0; + + if (!a) + return b->as_id; + if (!b) + return a->as_id; + + WARN_ON_ONCE(a->as_id != b->as_id); + return a->as_id; +} + +static void kvm_insert_gfn_node(struct kvm_memslots *slots, + struct kvm_memory_slot *slot) +{ + struct rb_root *gfn_tree = &slots->gfn_tree; + struct rb_node **node, *parent; + int idx = slots->node_idx; + + parent = NULL; + for (node = &gfn_tree->rb_node; *node; ) { + struct kvm_memory_slot *tmp; + + tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]); + parent = *node; + if (slot->base_gfn < tmp->base_gfn) + node = &(*node)->rb_left; + else if (slot->base_gfn > tmp->base_gfn) + node = &(*node)->rb_right; + else + BUG(); + } + + rb_link_node(&slot->gfn_node[idx], parent, node); + rb_insert_color(&slot->gfn_node[idx], gfn_tree); +} + +static void kvm_erase_gfn_node(struct kvm_memslots *slots, + struct kvm_memory_slot *slot) +{ + rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree); +} + +static void kvm_replace_gfn_node(struct kvm_memslots *slots, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new) +{ + int idx = slots->node_idx; + + WARN_ON_ONCE(old->base_gfn != new->base_gfn); + + rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx], + &slots->gfn_tree); +} + +/* + * Replace @old with @new in the inactive memslots. + * + * With NULL @old this simply adds @new. + * With NULL @new this simply removes @old. + * + * If @new is non-NULL its hva_node[slots_idx] range has to be set + * appropriately. + */ +static void kvm_replace_memslot(struct kvm *kvm, struct kvm_memory_slot *old, struct kvm_memory_slot *new) { - /* - * Remove the old memslot from the hash list and interval tree, copying - * the node data would corrupt the structures. - */ + int as_id = kvm_memslots_get_as_id(old, new); + struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); + int idx = slots->node_idx; + if (old) { - hash_del(&old->id_node); - interval_tree_remove(&old->hva_node, &slots->hva_tree); + hash_del(&old->id_node[idx]); + interval_tree_remove(&old->hva_node[idx], &slots->hva_tree); - if (!new) + if ((long)old == atomic_long_read(&slots->last_used_slot)) + atomic_long_set(&slots->last_used_slot, (long)new); + + if (!new) { + kvm_erase_gfn_node(slots, old); return; + } + } - /* Copy the source *data*, not the pointer, to the destination. */ - *new = *old; + /* + * Initialize @new's hva range. Do this even when replacing an @old + * slot, kvm_copy_memslot() deliberately does not touch node data. + */ + new->hva_node[idx].start = new->userspace_addr; + new->hva_node[idx].last = new->userspace_addr + + (new->npages << PAGE_SHIFT) - 1; + + /* + * (Re)Add the new memslot. There is no O(1) interval_tree_replace(), + * hva_node needs to be swapped with remove+insert even though hva can't + * change when replacing an existing slot. + */ + hash_add(slots->id_hash, &new->id_node[idx], new->id); + interval_tree_insert(&new->hva_node[idx], &slots->hva_tree); + + /* + * If the memslot gfn is unchanged, rb_replace_node() can be used to + * switch the node in the gfn tree instead of removing the old and + * inserting the new as two separate operations. Replacement is a + * single O(1) operation versus two O(log(n)) operations for + * remove+insert. + */ + if (old && old->base_gfn == new->base_gfn) { + kvm_replace_gfn_node(slots, old, new); } else { - /* If @old is NULL, initialize @new's hva range. 
*/ - new->hva_node.start = new->userspace_addr; - new->hva_node.last = new->userspace_addr + - (new->npages << PAGE_SHIFT) - 1; - } - - /* (Re)Add the new memslot. */ - hash_add(slots->id_hash, &new->id_node, new->id); - interval_tree_insert(&new->hva_node, &slots->hva_tree); -} - -static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src) -{ - struct kvm_memory_slot *mslots = slots->memslots; - - kvm_replace_memslot(slots, &mslots[src], &mslots[dst]); -} - -/* - * Delete a memslot by decrementing the number of used slots and shifting all - * other entries in the array forward one spot. - * @memslot is a detached dummy struct with just .id and .as_id filled. - */ -static inline void kvm_memslot_delete(struct kvm_memslots *slots, - struct kvm_memory_slot *memslot) -{ - struct kvm_memory_slot *mslots = slots->memslots; - struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id); - int i; - - if (WARN_ON(!oldslot)) - return; - - slots->used_slots--; - - if (atomic_read(&slots->last_used_slot) >= slots->used_slots) - atomic_set(&slots->last_used_slot, 0); - - /* - * Remove the to-be-deleted memslot from the list/tree _before_ shifting - * the trailing memslots forward, its data will be overwritten. - * Defer the (somewhat pointless) copying of the memslot until after - * the last slot has been shifted to avoid overwriting said last slot. - */ - kvm_replace_memslot(slots, oldslot, NULL); - - for (i = oldslot - mslots; i < slots->used_slots; i++) - kvm_shift_memslot(slots, i, i + 1); - mslots[i] = *memslot; -} - -/* - * "Insert" a new memslot by incrementing the number of used slots. Returns - * the new slot's initial index into the memslots array. - */ -static inline int kvm_memslot_insert_back(struct kvm_memslots *slots) -{ - return slots->used_slots++; -} - -/* - * Move a changed memslot backwards in the array by shifting existing slots - * with a higher GFN toward the front of the array. Note, the changed memslot - * itself is not preserved in the array, i.e. not swapped at this time, only - * its new index into the array is tracked. Returns the changed memslot's - * current index into the memslots array. - * The memslot at the returned index will not be in @slots->hva_tree or - * @slots->id_hash by then. - * @memslot is a detached struct with desired final data of the changed slot. - */ -static inline int kvm_memslot_move_backward(struct kvm_memslots *slots, - struct kvm_memory_slot *memslot) -{ - struct kvm_memory_slot *mslots = slots->memslots; - struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id); - int i; - - if (!oldslot || !slots->used_slots) - return -1; - - /* - * Delete the slot from the hash table and interval tree before sorting - * the remaining slots, the slot's data may be overwritten when copying - * slots as part of the sorting proccess. update_memslots() will - * unconditionally rewrite and re-add the entire slot. - */ - kvm_replace_memslot(slots, oldslot, NULL); - - /* - * Move the target memslot backward in the array by shifting existing - * memslots with a higher GFN (than the target memslot) towards the - * front of the array. - */ - for (i = oldslot - mslots; i < slots->used_slots - 1; i++) { - if (memslot->base_gfn > mslots[i + 1].base_gfn) - break; - - WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); - - kvm_shift_memslot(slots, i, i + 1); - } - return i; -} - -/* - * Move a changed memslot forwards in the array by shifting existing slots with - * a lower GFN toward the back of the array. 
Note, the changed memslot itself - * is not preserved in the array, i.e. not swapped at this time, only its new - * index into the array is tracked. Returns the changed memslot's final index - * into the memslots array. - * The memslot at the returned index will not be in @slots->hva_tree or - * @slots->id_hash by then. - * @memslot is a detached struct with desired final data of the new or - * changed slot. - * Assumes that the memslot at @start index is not in @slots->hva_tree or - * @slots->id_hash. - */ -static inline int kvm_memslot_move_forward(struct kvm_memslots *slots, - struct kvm_memory_slot *memslot, - int start) -{ - struct kvm_memory_slot *mslots = slots->memslots; - int i; - - for (i = start; i > 0; i--) { - if (memslot->base_gfn < mslots[i - 1].base_gfn) - break; - - WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); - - kvm_shift_memslot(slots, i, i - 1); - } - return i; -} - -/* - * Re-sort memslots based on their GFN to account for an added, deleted, or - * moved memslot. Sorting memslots by GFN allows using a binary search during - * memslot lookup. - * - * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry - * at memslots[0] has the highest GFN. - * - * The sorting algorithm takes advantage of having initially sorted memslots - * and knowing the position of the changed memslot. Sorting is also optimized - * by not swapping the updated memslot and instead only shifting other memslots - * and tracking the new index for the update memslot. Only once its final - * index is known is the updated memslot copied into its position in the array. - * - * - When deleting a memslot, the deleted memslot simply needs to be moved to - * the end of the array. - * - * - When creating a memslot, the algorithm "inserts" the new memslot at the - * end of the array and then it forward to its correct location. - * - * - When moving a memslot, the algorithm first moves the updated memslot - * backward to handle the scenario where the memslot's GFN was changed to a - * lower value. update_memslots() then falls through and runs the same flow - * as creating a memslot to move the memslot forward to handle the scenario - * where its GFN was changed to a higher value. - * - * Note, slots are sorted from highest->lowest instead of lowest->highest for - * historical reasons. Originally, invalid memslots where denoted by having - * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots - * to the end of the array. The current algorithm uses dedicated logic to - * delete a memslot and thus does not rely on invalid memslots having GFN=0. - * - * The other historical motiviation for highest->lowest was to improve the - * performance of memslot lookup. KVM originally used a linear search starting - * at memslots[0]. On x86, the largest memslot usually has one of the highest, - * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a - * single memslot above the 4gb boundary. As the largest memslot is also the - * most likely to be referenced, sorting it to the front of the array was - * advantageous. The current binary search starts from the middle of the array - * and uses an LRU pointer to improve performance for all memslots and GFNs. - * - * @memslot is a detached struct, not a part of the current or new memslot - * array. 
- */ -static void update_memslots(struct kvm_memslots *slots, - struct kvm_memory_slot *memslot, - enum kvm_mr_change change) -{ - int i; - - if (change == KVM_MR_DELETE) { - kvm_memslot_delete(slots, memslot); - } else { - if (change == KVM_MR_CREATE) - i = kvm_memslot_insert_back(slots); - else - i = kvm_memslot_move_backward(slots, memslot); - i = kvm_memslot_move_forward(slots, memslot, i); - - if (WARN_ON_ONCE(i < 0)) - return; - - /* - * Copy the memslot to its new position in memslots and update - * its index accordingly. - */ - slots->memslots[i] = *memslot; - kvm_replace_memslot(slots, NULL, &slots->memslots[i]); + if (old) + kvm_erase_gfn_node(slots, old); + kvm_insert_gfn_node(slots, new); } } @@ -1522,11 +1431,12 @@ static int check_memory_region_flags(const struct kvm_userspace_memory_region *m return 0; } -static struct kvm_memslots *install_new_memslots(struct kvm *kvm, - int as_id, struct kvm_memslots *slots) +static void kvm_swap_active_memslots(struct kvm *kvm, int as_id) { - struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); - u64 gen = old_memslots->generation; + struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id); + + /* Grab the generation from the activate memslots. */ + u64 gen = __kvm_memslots(kvm, as_id)->generation; WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; @@ -1577,58 +1487,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm, kvm_arch_memslots_updated(kvm, gen); slots->generation = gen; - - return old_memslots; -} - -static size_t kvm_memslots_size(int slots) -{ - return sizeof(struct kvm_memslots) + - (sizeof(struct kvm_memory_slot) * slots); -} - -/* - * Note, at a minimum, the current number of used slots must be allocated, even - * when deleting a memslot, as we need a complete duplicate of the memslots for - * use when invalidating a memslot prior to deleting/moving the memslot. - */ -static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, - enum kvm_mr_change change) -{ - struct kvm_memslots *slots; - size_t new_size; - struct kvm_memory_slot *memslot; - - if (change == KVM_MR_CREATE) - new_size = kvm_memslots_size(old->used_slots + 1); - else - new_size = kvm_memslots_size(old->used_slots); - - slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT); - if (unlikely(!slots)) - return NULL; - - memcpy(slots, old, kvm_memslots_size(old->used_slots)); - - slots->hva_tree = RB_ROOT_CACHED; - hash_init(slots->id_hash); - kvm_for_each_memslot(memslot, slots) { - interval_tree_insert(&memslot->hva_node, &slots->hva_tree); - hash_add(slots->id_hash, &memslot->id_node, memslot->id); - } - - return slots; -} - -static void kvm_copy_memslots_arch(struct kvm_memslots *to, - struct kvm_memslots *from) -{ - int i; - - WARN_ON_ONCE(to->used_slots != from->used_slots); - - for (i = 0; i < from->used_slots; i++) - to->memslots[i].arch = from->memslots[i].arch; } static int kvm_prepare_memory_region(struct kvm *kvm, @@ -1683,31 +1541,214 @@ static void kvm_commit_memory_region(struct kvm *kvm, kvm_arch_commit_memory_region(kvm, old, new, change); - /* - * Free the old memslot's metadata. On DELETE, free the whole thing, - * otherwise free the dirty bitmap as needed (the below effectively - * checks both the flags and whether a ring buffer is being used). - */ - if (change == KVM_MR_DELETE) + switch (change) { + case KVM_MR_CREATE: + /* Nothing more to do. */ + break; + case KVM_MR_DELETE: + /* Free the old memslot and all its metadata. 
*/ kvm_free_memslot(kvm, old); - else if (old->dirty_bitmap && !new->dirty_bitmap) - kvm_destroy_dirty_bitmap(old); + break; + case KVM_MR_MOVE: + case KVM_MR_FLAGS_ONLY: + /* + * Free the dirty bitmap as needed; the below check encompasses + * both the flags and whether a ring buffer is being used) + */ + if (old->dirty_bitmap && !new->dirty_bitmap) + kvm_destroy_dirty_bitmap(old); + + /* + * The final quirk. Free the detached, old slot, but only its + * memory, not any metadata. Metadata, including arch specific + * data, may be reused by @new. + */ + kfree(old); + break; + default: + BUG(); + } +} + +/* + * Activate @new, which must be installed in the inactive slots by the caller, + * by swapping the active slots and then propagating @new to @old once @old is + * unreachable and can be safely modified. + * + * With NULL @old this simply adds @new to @active (while swapping the sets). + * With NULL @new this simply removes @old from @active and frees it + * (while also swapping the sets). + */ +static void kvm_activate_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new) +{ + int as_id = kvm_memslots_get_as_id(old, new); + + kvm_swap_active_memslots(kvm, as_id); + + /* Propagate the new memslot to the now inactive memslots. */ + kvm_replace_memslot(kvm, old, new); +} + +static void kvm_copy_memslot(struct kvm_memory_slot *dest, + const struct kvm_memory_slot *src) +{ + dest->base_gfn = src->base_gfn; + dest->npages = src->npages; + dest->dirty_bitmap = src->dirty_bitmap; + dest->arch = src->arch; + dest->userspace_addr = src->userspace_addr; + dest->flags = src->flags; + dest->id = src->id; + dest->as_id = src->as_id; +} + +static void kvm_invalidate_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *working_slot) +{ + /* + * Mark the current slot INVALID. As with all memslot modifications, + * this must be done on an unreachable slot to avoid modifying the + * current slot in the active tree. + */ + kvm_copy_memslot(working_slot, old); + working_slot->flags |= KVM_MEMSLOT_INVALID; + kvm_replace_memslot(kvm, old, working_slot); + + /* + * Activate the slot that is now marked INVALID, but don't propagate + * the slot to the now inactive slots. The slot is either going to be + * deleted or recreated as a new slot. + */ + kvm_swap_active_memslots(kvm, old->as_id); + + /* + * From this point no new shadow pages pointing to a deleted, or moved, + * memslot will be created. Validation of sp->gfn happens in: + * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) + * - kvm_is_visible_gfn (mmu_check_root) + */ + kvm_arch_flush_shadow_memslot(kvm, working_slot); + + /* Was released by kvm_swap_active_memslots, reacquire. */ + mutex_lock(&kvm->slots_arch_lock); + + /* + * Copy the arch-specific field of the newly-installed slot back to the + * old slot as the arch data could have changed between releasing + * slots_arch_lock in install_new_memslots() and re-acquiring the lock + * above. Writers are required to retrieve memslots *after* acquiring + * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. + */ + old->arch = working_slot->arch; +} + +static void kvm_create_memslot(struct kvm *kvm, + const struct kvm_memory_slot *new, + struct kvm_memory_slot *working) +{ + /* + * Add the new memslot to the inactive set as a copy of the + * new memslot data provided by userspace. 
+ */ + kvm_copy_memslot(working, new); + kvm_replace_memslot(kvm, NULL, working); + kvm_activate_memslot(kvm, NULL, working); +} + +static void kvm_delete_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *invalid_slot) +{ + /* + * Remove the old memslot (in the inactive memslots) by passing NULL as + * the "new" slot. + */ + kvm_replace_memslot(kvm, old, NULL); + + /* And do the same for the invalid version in the active slot. */ + kvm_activate_memslot(kvm, invalid_slot, NULL); + + /* Free the invalid slot, the caller will clean up the old slot. */ + kfree(invalid_slot); +} + +static struct kvm_memory_slot *kvm_move_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + struct kvm_memory_slot *invalid_slot) +{ + struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, old->as_id); + + /* + * The memslot's gfn is changing, remove it from the inactive tree, it + * will be re-added with its updated gfn. Because its range is + * changing, an in-place replace is not possible. + */ + kvm_erase_gfn_node(slots, old); + + /* + * The old slot is now fully disconnected, reuse its memory for the + * persistent copy of "new". + */ + kvm_copy_memslot(old, new); + + /* Re-add to the gfn tree with the updated gfn */ + kvm_insert_gfn_node(slots, old); + + /* Replace the current INVALID slot with the updated memslot. */ + kvm_activate_memslot(kvm, invalid_slot, old); + + /* + * Clear the INVALID flag so that the invalid_slot is now a perfect + * copy of the old slot. Return it for cleanup in the caller. + */ + WARN_ON_ONCE(!(invalid_slot->flags & KVM_MEMSLOT_INVALID)); + invalid_slot->flags &= ~KVM_MEMSLOT_INVALID; + return invalid_slot; +} + +static void kvm_update_flags_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + struct kvm_memory_slot *working_slot) +{ + /* + * Similar to the MOVE case, but the slot doesn't need to be zapped as + * an intermediate step. Instead, the old memslot is simply replaced + * with a new, updated copy in both memslot sets. + */ + kvm_copy_memslot(working_slot, new); + kvm_replace_memslot(kvm, old, working_slot); + kvm_activate_memslot(kvm, old, working_slot); } static int kvm_set_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, struct kvm_memory_slot *new, enum kvm_mr_change change) { - struct kvm_memory_slot *slot, old; - struct kvm_memslots *slots; + struct kvm_memory_slot *working; int r; /* - * Released in install_new_memslots. + * Modifications are done on an unreachable slot. Any changes are then + * (eventually) propagated to both the active and inactive slots. This + * allocation would ideally be on-demand (in helpers), but is done here + * to avoid having to handle failure after kvm_prepare_memory_region(). + */ + working = kzalloc(sizeof(*working), GFP_KERNEL_ACCOUNT); + if (!working) + return -ENOMEM; + + /* + * Released in kvm_swap_active_memslots. * * Must be held from before the current memslots are copied until * after the new memslots are installed with rcu_assign_pointer, - * then released before the synchronize srcu in install_new_memslots. + * then released before the synchronize srcu in kvm_swap_active_memslots. 
* * When modifying memslots outside of the slots_lock, must be held * before reading the pointer to the current memslots until after all @@ -1718,87 +1759,60 @@ static int kvm_set_memslot(struct kvm *kvm, */ mutex_lock(&kvm->slots_arch_lock); - slots = kvm_dup_memslots(__kvm_memslots(kvm, new->as_id), change); - if (!slots) { - mutex_unlock(&kvm->slots_arch_lock); - return -ENOMEM; - } + /* + * Invalidate the old slot if it's being deleted or moved. This is + * done prior to actually deleting/moving the memslot to allow vCPUs to + * continue running by ensuring there are no mappings or shadow pages + * for the memslot when it is deleted/moved. Without pre-invalidation + * (and without a lock), a window would exist between effecting the + * delete/move and committing the changes in arch code where KVM or a + * guest could access a non-existent memslot. + */ + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) + kvm_invalidate_memslot(kvm, old, working); - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { + r = kvm_prepare_memory_region(kvm, old, new, change); + if (r) { /* - * Note, the INVALID flag needs to be in the appropriate entry - * in the freshly allocated memslots, not in @old or @new. + * For DELETE/MOVE, revert the above INVALID change. No + * modifications required since the original slot was preserved + * in the inactive slots. Changing the active memslots also + * release slots_arch_lock. */ - slot = id_to_memslot(slots, new->id); - slot->flags |= KVM_MEMSLOT_INVALID; - - /* - * We can re-use the old memslots, the only difference from the - * newly installed memslots is the invalid flag, which will get - * dropped by update_memslots anyway. We'll also revert to the - * old memslots if preparing the new memory region fails. - */ - slots = install_new_memslots(kvm, new->as_id, slots); - - /* From this point no new shadow pages pointing to a deleted, - * or moved, memslot will be created. - * - * validation of sp->gfn happens in: - * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) - * - kvm_is_visible_gfn (mmu_check_root) - */ - kvm_arch_flush_shadow_memslot(kvm, slot); - - /* Released in install_new_memslots. */ - mutex_lock(&kvm->slots_arch_lock); - - /* - * The arch-specific fields of the now-active memslots could - * have been modified between releasing slots_arch_lock in - * install_new_memslots and re-acquiring slots_arch_lock above. - * Copy them to the inactive memslots. Arch code is required - * to retrieve memslots *after* acquiring slots_arch_lock, thus - * the active memslots are guaranteed to be fresh. - */ - kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, new->as_id)); + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) + kvm_activate_memslot(kvm, working, old); + else + mutex_unlock(&kvm->slots_arch_lock); + kfree(working); + return r; } /* - * Make a full copy of the old memslot, the pointer will become stale - * when the memslots are re-sorted by update_memslots(), and the old - * memslot needs to be referenced after calling update_memslots(), e.g. - * to free its resources and for arch specific behavior. This needs to - * happen *after* (re)acquiring slots_arch_lock. + * For DELETE and MOVE, the working slot is now active as the INVALID + * version of the old slot. MOVE is particularly special as it reuses + * the old slot and returns a copy of the old slot (in working_slot). + * For CREATE, there is no old slot. For DELETE and FLAGS_ONLY, the + * old slot is detached but otherwise preserved. 
*/ - slot = id_to_memslot(slots, new->id); - if (slot) { - old = *slot; - } else { - WARN_ON_ONCE(change != KVM_MR_CREATE); - memset(&old, 0, sizeof(old)); - old.id = new->id; - old.as_id = new->as_id; - } - - r = kvm_prepare_memory_region(kvm, &old, new, change); - if (r) - goto out_slots; - - update_memslots(slots, new, change); - slots = install_new_memslots(kvm, new->as_id, slots); - - kvm_commit_memory_region(kvm, &old, new, change); - - kvfree(slots); - return 0; - -out_slots: - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) - slots = install_new_memslots(kvm, new->as_id, slots); + if (change == KVM_MR_CREATE) + kvm_create_memslot(kvm, new, working); + else if (change == KVM_MR_DELETE) + kvm_delete_memslot(kvm, old, working); + else if (change == KVM_MR_MOVE) + old = kvm_move_memslot(kvm, old, new, working); + else if (change == KVM_MR_FLAGS_ONLY) + kvm_update_flags_memslot(kvm, old, new, working); else - mutex_unlock(&kvm->slots_arch_lock); - kvfree(slots); - return r; + BUG(); + + /* + * No need to refresh new->arch, changes after dropping slots_arch_lock + * will directly hit the final, active memsot. Architectures are + * responsible for knowing that new->arch may be stale. + */ + kvm_commit_memory_region(kvm, old, new, change); + + return 0; } /* @@ -1859,7 +1873,7 @@ int __kvm_set_memory_region(struct kvm *kvm, new.id = id; new.as_id = as_id; - return kvm_set_memslot(kvm, &new, KVM_MR_DELETE); + return kvm_set_memslot(kvm, old, &new, KVM_MR_DELETE); } new.as_id = as_id; @@ -1896,8 +1910,10 @@ int __kvm_set_memory_region(struct kvm *kvm, } if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { + int bkt; + /* Check for overlaps */ - kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) { + kvm_for_each_memslot(tmp, bkt, __kvm_memslots(kvm, as_id)) { if (tmp->id == id) continue; if (!((new.base_gfn + new.npages <= tmp->base_gfn) || @@ -1906,7 +1922,7 @@ int __kvm_set_memory_region(struct kvm *kvm, } } - return kvm_set_memslot(kvm, &new, change); + return kvm_set_memslot(kvm, old, &new, change); } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); @@ -2211,21 +2227,30 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot); struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); + u64 gen = slots->generation; struct kvm_memory_slot *slot; - int slot_index; - slot = try_get_memslot(slots, vcpu->last_used_slot, gfn); + /* + * This also protects against using a memslot from a different address space, + * since different address spaces have different generation numbers. + */ + if (unlikely(gen != vcpu->last_used_slot_gen)) { + vcpu->last_used_slot = NULL; + vcpu->last_used_slot_gen = gen; + } + + slot = try_get_memslot(vcpu->last_used_slot, gfn); if (slot) return slot; /* * Fall back to searching all memslots. We purposely use * search_memslots() instead of __gfn_to_memslot() to avoid - * thrashing the VM-wide last_used_index in kvm_memslots. + * thrashing the VM-wide last_used_slot in kvm_memslots. */ - slot = search_memslots(slots, gfn, &slot_index, false); + slot = search_memslots(slots, gfn, false); if (slot) { - vcpu->last_used_slot = slot_index; + vcpu->last_used_slot = slot; return slot; } From bcb63dcde829945487bad4917b614c28aaa59141 Mon Sep 17 00:00:00 2001 From: "Maciej S. 
Szmigiero" Date: Mon, 6 Dec 2021 20:54:31 +0100 Subject: [PATCH 050/268] KVM: Call kvm_arch_flush_shadow_memslot() on the old slot in kvm_invalidate_memslot() kvm_invalidate_memslot() calls kvm_arch_flush_shadow_memslot() on the active, but KVM_MEMSLOT_INVALID slot. Do it on the inactive (but valid) old slot instead since arch code really should not get passed such invalid slot. Note that this means that the "arch" field of the slot provided to kvm_arch_flush_shadow_memslot() may have stale data since this function is called with slots_arch_lock released. Suggested-by: Sean Christopherson Signed-off-by: Maciej S. Szmigiero Reviewed-by: Sean Christopherson Message-Id: <813595ecc193d6ae39a87709899d4251523b05f8.1638817641.git.maciej.szmigiero@oracle.com> --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a87df97e0b14..130eaf1c5711 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1630,7 +1630,7 @@ static void kvm_invalidate_memslot(struct kvm *kvm, * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) * - kvm_is_visible_gfn (mmu_check_root) */ - kvm_arch_flush_shadow_memslot(kvm, working_slot); + kvm_arch_flush_shadow_memslot(kvm, old); /* Was released by kvm_swap_active_memslots, reacquire. */ mutex_lock(&kvm->slots_arch_lock); From f4209439b522432d140d33393d4a3f12e695527b Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:32 +0100 Subject: [PATCH 051/268] KVM: Optimize gfn lookup in kvm_zap_gfn_range() Introduce a memslots gfn upper bound operation and use it to optimize kvm_zap_gfn_range(). This way this handler can do a quick lookup for intersecting gfns and won't have to do a linear scan of the whole memslot set. Signed-off-by: Maciej S. Szmigiero Message-Id: --- arch/x86/kvm/mmu/mmu.c | 12 +++-- include/linux/kvm_host.h | 94 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c61430994d19..b83ae4804176 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5728,19 +5728,22 @@ static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) { const struct kvm_memory_slot *memslot; struct kvm_memslots *slots; + struct kvm_memslot_iter iter; bool flush = false; gfn_t start, end; - int i, bkt; + int i; if (!kvm_memslots_have_rmaps(kvm)) return flush; for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); - kvm_for_each_memslot(memslot, bkt, slots) { + + kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) { + memslot = iter.slot; start = max(gfn_start, memslot->base_gfn); end = min(gfn_end, memslot->base_gfn + memslot->npages); - if (start >= end) + if (WARN_ON_ONCE(start >= end)) continue; flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp, @@ -5761,6 +5764,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) bool flush; int i; + if (WARN_ON_ONCE(gfn_end <= gfn_start)) + return; + write_lock(&kvm->mmu_lock); kvm_inc_notifier_count(kvm, gfn_start, gfn_end); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9eda8a63feae..3bc98497e796 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -846,6 +846,100 @@ struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) return NULL; } +/* Iterator used for walking memslots that overlap a gfn range. 
*/ +struct kvm_memslot_iter { + struct kvm_memslots *slots; + struct rb_node *node; + struct kvm_memory_slot *slot; +}; + +static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter) +{ + iter->node = rb_next(iter->node); + if (!iter->node) + return; + + iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]); +} + +static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter, + struct kvm_memslots *slots, + gfn_t start) +{ + int idx = slots->node_idx; + struct rb_node *tmp; + struct kvm_memory_slot *slot; + + iter->slots = slots; + + /* + * Find the so called "upper bound" of a key - the first node that has + * its key strictly greater than the searched one (the start gfn in our case). + */ + iter->node = NULL; + for (tmp = slots->gfn_tree.rb_node; tmp; ) { + slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]); + if (start < slot->base_gfn) { + iter->node = tmp; + tmp = tmp->rb_left; + } else { + tmp = tmp->rb_right; + } + } + + /* + * Find the slot with the lowest gfn that can possibly intersect with + * the range, so we'll ideally have slot start <= range start + */ + if (iter->node) { + /* + * A NULL previous node means that the very first slot + * already has a higher start gfn. + * In this case slot start > range start. + */ + tmp = rb_prev(iter->node); + if (tmp) + iter->node = tmp; + } else { + /* a NULL node below means no slots */ + iter->node = rb_last(&slots->gfn_tree); + } + + if (iter->node) { + iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]); + + /* + * It is possible in the slot start < range start case that the + * found slot ends before or at range start (slot end <= range start) + * and so it does not overlap the requested range. + * + * In such non-overlapping case the next slot (if it exists) will + * already have slot start > range start, otherwise the logic above + * would have found it instead of the current slot. + */ + if (iter->slot->base_gfn + iter->slot->npages <= start) + kvm_memslot_iter_next(iter); + } +} + +static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end) +{ + if (!iter->node) + return false; + + /* + * If this slot starts beyond or at the end of the range so does + * every next one + */ + return iter->slot->base_gfn < end; +} + +/* Iterate over each memslot at least partially intersecting [start, end) range */ +#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \ + for (kvm_memslot_iter_start(iter, slots, start); \ + kvm_memslot_iter_is_valid(iter, end); \ + kvm_memslot_iter_next(iter)) + /* * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: * - create a new memory slot From 44401a204734ce837e0b36c8418af4fad6a21f95 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Mon, 6 Dec 2021 20:54:33 +0100 Subject: [PATCH 052/268] KVM: Optimize overlapping memslots check Do a quick lookup for possibly overlapping gfns when creating or moving a memslot instead of performing a linear scan of the whole memslot set. Signed-off-by: Maciej S. 
Szmigiero [sean: tweaked params to avoid churn in future cleanup] Reviewed-by: Sean Christopherson Message-Id: --- virt/kvm/kvm_main.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 130eaf1c5711..d27568b3b984 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1815,6 +1815,19 @@ static int kvm_set_memslot(struct kvm *kvm, return 0; } +static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, + gfn_t start, gfn_t end) +{ + struct kvm_memslot_iter iter; + + kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) { + if (iter.slot->id != id) + return true; + } + + return false; +} + /* * Allocate some memory and give it an address in the guest physical address * space. @@ -1826,8 +1839,9 @@ static int kvm_set_memslot(struct kvm *kvm, int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem) { - struct kvm_memory_slot *old, *tmp; + struct kvm_memory_slot *old; struct kvm_memory_slot new; + struct kvm_memslots *slots; enum kvm_mr_change change; int as_id, id; int r; @@ -1856,11 +1870,13 @@ int __kvm_set_memory_region(struct kvm *kvm, if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) return -EINVAL; + slots = __kvm_memslots(kvm, as_id); + /* * Note, the old memslot (and the pointer itself!) may be invalidated * and/or destroyed by kvm_set_memslot(). */ - old = id_to_memslot(__kvm_memslots(kvm, as_id), id); + old = id_to_memslot(slots, id); if (!mem->memory_size) { if (!old || !old->npages) @@ -1909,18 +1925,10 @@ int __kvm_set_memory_region(struct kvm *kvm, return 0; } - if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { - int bkt; - - /* Check for overlaps */ - kvm_for_each_memslot(tmp, bkt, __kvm_memslots(kvm, as_id)) { - if (tmp->id == id) - continue; - if (!((new.base_gfn + new.npages <= tmp->base_gfn) || - (new.base_gfn >= tmp->base_gfn + tmp->npages))) - return -EEXIST; - } - } + if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && + kvm_check_memslot_overlap(slots, id, new.base_gfn, + new.base_gfn + new.npages)) + return -EEXIST; return kvm_set_memslot(kvm, old, &new, change); } From 0f9bdef3d933ba10d577b446c703a901fa5fdc30 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:34 +0100 Subject: [PATCH 053/268] KVM: Wait 'til the bitter end to initialize the "new" memslot Initialize the "new" memslot in the !DELETE path only after the various sanity checks have passed. This will allow a future commit to allocate @new dynamically without having to copy a memslot, and without having to deal with freeing @new in error paths and in the "nothing to change" path that's hiding in the sanity checks. No functional change intended. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. 
Szmigiero Message-Id: --- virt/kvm/kvm_main.c | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index d27568b3b984..71815e75e41c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1843,6 +1843,8 @@ int __kvm_set_memory_region(struct kvm *kvm, struct kvm_memory_slot new; struct kvm_memslots *slots; enum kvm_mr_change change; + unsigned long npages; + gfn_t base_gfn; int as_id, id; int r; @@ -1869,6 +1871,8 @@ int __kvm_set_memory_region(struct kvm *kvm, return -EINVAL; if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) return -EINVAL; + if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES) + return -EINVAL; slots = __kvm_memslots(kvm, as_id); @@ -1892,15 +1896,8 @@ int __kvm_set_memory_region(struct kvm *kvm, return kvm_set_memslot(kvm, old, &new, KVM_MR_DELETE); } - new.as_id = as_id; - new.id = id; - new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; - new.npages = mem->memory_size >> PAGE_SHIFT; - new.flags = mem->flags; - new.userspace_addr = mem->userspace_addr; - - if (new.npages > KVM_MEM_MAX_NR_PAGES) - return -EINVAL; + base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); + npages = (mem->memory_size >> PAGE_SHIFT); if (!old || !old->npages) { change = KVM_MR_CREATE; @@ -1909,27 +1906,33 @@ int __kvm_set_memory_region(struct kvm *kvm, * To simplify KVM internals, the total number of pages across * all memslots must fit in an unsigned long. */ - if ((kvm->nr_memslot_pages + new.npages) < kvm->nr_memslot_pages) + if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages) return -EINVAL; } else { /* Modify an existing slot. */ - if ((new.userspace_addr != old->userspace_addr) || - (new.npages != old->npages) || - ((new.flags ^ old->flags) & KVM_MEM_READONLY)) + if ((mem->userspace_addr != old->userspace_addr) || + (npages != old->npages) || + ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) return -EINVAL; - if (new.base_gfn != old->base_gfn) + if (base_gfn != old->base_gfn) change = KVM_MR_MOVE; - else if (new.flags != old->flags) + else if (mem->flags != old->flags) change = KVM_MR_FLAGS_ONLY; else /* Nothing to change. */ return 0; } if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) && - kvm_check_memslot_overlap(slots, id, new.base_gfn, - new.base_gfn + new.npages)) + kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) return -EEXIST; + new.as_id = as_id; + new.id = id; + new.base_gfn = base_gfn; + new.npages = npages; + new.flags = mem->flags; + new.userspace_addr = mem->userspace_addr; + return kvm_set_memslot(kvm, old, &new, change); } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); From 244893fa2859d656e2caf88683211604eb9afd37 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 6 Dec 2021 20:54:35 +0100 Subject: [PATCH 054/268] KVM: Dynamically allocate "new" memslots from the get-go Allocate the "new" memslot for !DELETE memslot updates straight away instead of filling an intermediate on-stack object and forcing kvm_set_memslot() to juggle the allocation and do weird things like reuse the old memslot object in MOVE. In the MOVE case, this results in an "extra" memslot allocation due to allocating both the "new" slot and the "invalid" slot, but that's a temporary and not-huge allocation, and MOVE is a relatively rare memslot operation. Regarding MOVE, drop the open-coded management of the gfn tree with a call to kvm_replace_memslot(), which already handles the case where new->base_gfn != old->base_gfn. 
This is made possible by virtue of not having to copy the "new" memslot data after erasing the old memslot from the gfn tree. Using kvm_replace_memslot(), and more specifically not reusing the old memslot, means the MOVE case now does hva tree and hash list updates, but that's a small price to pay for simplifying the code and making MOVE align with all the other flavors of updates. The "extra" updates are firmly in the noise from a performance perspective, e.g. the "move (in)active area" selfttests show a (very, very) slight improvement. Signed-off-by: Sean Christopherson Reviewed-by: Maciej S. Szmigiero Signed-off-by: Maciej S. Szmigiero Message-Id: --- virt/kvm/kvm_main.c | 178 +++++++++++++++++++------------------------- 1 file changed, 77 insertions(+), 101 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 71815e75e41c..e588dc4f9b7d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1503,23 +1503,25 @@ static int kvm_prepare_memory_region(struct kvm *kvm, * new and KVM isn't using a ring buffer, allocate and initialize a * new bitmap. */ - if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) - new->dirty_bitmap = NULL; - else if (old->dirty_bitmap) - new->dirty_bitmap = old->dirty_bitmap; - else if (!kvm->dirty_ring_size) { - r = kvm_alloc_dirty_bitmap(new); - if (r) - return r; + if (change != KVM_MR_DELETE) { + if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) + new->dirty_bitmap = NULL; + else if (old && old->dirty_bitmap) + new->dirty_bitmap = old->dirty_bitmap; + else if (!kvm->dirty_ring_size) { + r = kvm_alloc_dirty_bitmap(new); + if (r) + return r; - if (kvm_dirty_log_manual_protect_and_init_set(kvm)) - bitmap_set(new->dirty_bitmap, 0, new->npages); + if (kvm_dirty_log_manual_protect_and_init_set(kvm)) + bitmap_set(new->dirty_bitmap, 0, new->npages); + } } r = kvm_arch_prepare_memory_region(kvm, old, new, change); /* Free the bitmap on failure if it was allocated above. */ - if (r && new->dirty_bitmap && !old->dirty_bitmap) + if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap) kvm_destroy_dirty_bitmap(new); return r; @@ -1606,16 +1608,16 @@ static void kvm_copy_memslot(struct kvm_memory_slot *dest, static void kvm_invalidate_memslot(struct kvm *kvm, struct kvm_memory_slot *old, - struct kvm_memory_slot *working_slot) + struct kvm_memory_slot *invalid_slot) { /* * Mark the current slot INVALID. As with all memslot modifications, * this must be done on an unreachable slot to avoid modifying the * current slot in the active tree. */ - kvm_copy_memslot(working_slot, old); - working_slot->flags |= KVM_MEMSLOT_INVALID; - kvm_replace_memslot(kvm, old, working_slot); + kvm_copy_memslot(invalid_slot, old); + invalid_slot->flags |= KVM_MEMSLOT_INVALID; + kvm_replace_memslot(kvm, old, invalid_slot); /* * Activate the slot that is now marked INVALID, but don't propagate @@ -1642,20 +1644,15 @@ static void kvm_invalidate_memslot(struct kvm *kvm, * above. Writers are required to retrieve memslots *after* acquiring * slots_arch_lock, thus the active slot's data is guaranteed to be fresh. */ - old->arch = working_slot->arch; + old->arch = invalid_slot->arch; } static void kvm_create_memslot(struct kvm *kvm, - const struct kvm_memory_slot *new, - struct kvm_memory_slot *working) + struct kvm_memory_slot *new) { - /* - * Add the new memslot to the inactive set as a copy of the - * new memslot data provided by userspace. 
- */ - kvm_copy_memslot(working, new); - kvm_replace_memslot(kvm, NULL, working); - kvm_activate_memslot(kvm, NULL, working); + /* Add the new memslot to the inactive set and activate. */ + kvm_replace_memslot(kvm, NULL, new); + kvm_activate_memslot(kvm, NULL, new); } static void kvm_delete_memslot(struct kvm *kvm, @@ -1664,65 +1661,36 @@ static void kvm_delete_memslot(struct kvm *kvm, { /* * Remove the old memslot (in the inactive memslots) by passing NULL as - * the "new" slot. + * the "new" slot, and for the invalid version in the active slots. */ kvm_replace_memslot(kvm, old, NULL); - - /* And do the same for the invalid version in the active slot. */ kvm_activate_memslot(kvm, invalid_slot, NULL); - - /* Free the invalid slot, the caller will clean up the old slot. */ - kfree(invalid_slot); } -static struct kvm_memory_slot *kvm_move_memslot(struct kvm *kvm, - struct kvm_memory_slot *old, - const struct kvm_memory_slot *new, - struct kvm_memory_slot *invalid_slot) +static void kvm_move_memslot(struct kvm *kvm, + struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + struct kvm_memory_slot *invalid_slot) { - struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, old->as_id); - /* - * The memslot's gfn is changing, remove it from the inactive tree, it - * will be re-added with its updated gfn. Because its range is - * changing, an in-place replace is not possible. + * Replace the old memslot in the inactive slots, and then swap slots + * and replace the current INVALID with the new as well. */ - kvm_erase_gfn_node(slots, old); - - /* - * The old slot is now fully disconnected, reuse its memory for the - * persistent copy of "new". - */ - kvm_copy_memslot(old, new); - - /* Re-add to the gfn tree with the updated gfn */ - kvm_insert_gfn_node(slots, old); - - /* Replace the current INVALID slot with the updated memslot. */ - kvm_activate_memslot(kvm, invalid_slot, old); - - /* - * Clear the INVALID flag so that the invalid_slot is now a perfect - * copy of the old slot. Return it for cleanup in the caller. - */ - WARN_ON_ONCE(!(invalid_slot->flags & KVM_MEMSLOT_INVALID)); - invalid_slot->flags &= ~KVM_MEMSLOT_INVALID; - return invalid_slot; + kvm_replace_memslot(kvm, old, new); + kvm_activate_memslot(kvm, invalid_slot, new); } static void kvm_update_flags_memslot(struct kvm *kvm, struct kvm_memory_slot *old, - const struct kvm_memory_slot *new, - struct kvm_memory_slot *working_slot) + struct kvm_memory_slot *new) { /* * Similar to the MOVE case, but the slot doesn't need to be zapped as * an intermediate step. Instead, the old memslot is simply replaced * with a new, updated copy in both memslot sets. */ - kvm_copy_memslot(working_slot, new); - kvm_replace_memslot(kvm, old, working_slot); - kvm_activate_memslot(kvm, old, working_slot); + kvm_replace_memslot(kvm, old, new); + kvm_activate_memslot(kvm, old, new); } static int kvm_set_memslot(struct kvm *kvm, @@ -1730,19 +1698,9 @@ static int kvm_set_memslot(struct kvm *kvm, struct kvm_memory_slot *new, enum kvm_mr_change change) { - struct kvm_memory_slot *working; + struct kvm_memory_slot *invalid_slot; int r; - /* - * Modifications are done on an unreachable slot. Any changes are then - * (eventually) propagated to both the active and inactive slots. This - * allocation would ideally be on-demand (in helpers), but is done here - * to avoid having to handle failure after kvm_prepare_memory_region(). 
- */ - working = kzalloc(sizeof(*working), GFP_KERNEL_ACCOUNT); - if (!working) - return -ENOMEM; - /* * Released in kvm_swap_active_memslots. * @@ -1767,9 +1725,19 @@ static int kvm_set_memslot(struct kvm *kvm, * (and without a lock), a window would exist between effecting the * delete/move and committing the changes in arch code where KVM or a * guest could access a non-existent memslot. + * + * Modifications are done on a temporary, unreachable slot. The old + * slot needs to be preserved in case a later step fails and the + * invalidation needs to be reverted. */ - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) - kvm_invalidate_memslot(kvm, old, working); + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { + invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT); + if (!invalid_slot) { + mutex_unlock(&kvm->slots_arch_lock); + return -ENOMEM; + } + kvm_invalidate_memslot(kvm, old, invalid_slot); + } r = kvm_prepare_memory_region(kvm, old, new, change); if (r) { @@ -1779,11 +1747,12 @@ static int kvm_set_memslot(struct kvm *kvm, * in the inactive slots. Changing the active memslots also * release slots_arch_lock. */ - if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) - kvm_activate_memslot(kvm, working, old); - else + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { + kvm_activate_memslot(kvm, invalid_slot, old); + kfree(invalid_slot); + } else { mutex_unlock(&kvm->slots_arch_lock); - kfree(working); + } return r; } @@ -1795,16 +1764,20 @@ static int kvm_set_memslot(struct kvm *kvm, * old slot is detached but otherwise preserved. */ if (change == KVM_MR_CREATE) - kvm_create_memslot(kvm, new, working); + kvm_create_memslot(kvm, new); else if (change == KVM_MR_DELETE) - kvm_delete_memslot(kvm, old, working); + kvm_delete_memslot(kvm, old, invalid_slot); else if (change == KVM_MR_MOVE) - old = kvm_move_memslot(kvm, old, new, working); + kvm_move_memslot(kvm, old, new, invalid_slot); else if (change == KVM_MR_FLAGS_ONLY) - kvm_update_flags_memslot(kvm, old, new, working); + kvm_update_flags_memslot(kvm, old, new); else BUG(); + /* Free the temporary INVALID slot used for DELETE and MOVE. */ + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) + kfree(invalid_slot); + /* * No need to refresh new->arch, changes after dropping slots_arch_lock * will directly hit the final, active memsot. Architectures are @@ -1839,8 +1812,7 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem) { - struct kvm_memory_slot *old; - struct kvm_memory_slot new; + struct kvm_memory_slot *old, *new; struct kvm_memslots *slots; enum kvm_mr_change change; unsigned long npages; @@ -1889,11 +1861,7 @@ int __kvm_set_memory_region(struct kvm *kvm, if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages)) return -EIO; - memset(&new, 0, sizeof(new)); - new.id = id; - new.as_id = as_id; - - return kvm_set_memslot(kvm, old, &new, KVM_MR_DELETE); + return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE); } base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); @@ -1926,14 +1894,22 @@ int __kvm_set_memory_region(struct kvm *kvm, kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) return -EEXIST; - new.as_id = as_id; - new.id = id; - new.base_gfn = base_gfn; - new.npages = npages; - new.flags = mem->flags; - new.userspace_addr = mem->userspace_addr; + /* Allocate a slot that will persist in the memslot. 
*/ + new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT); + if (!new) + return -ENOMEM; - return kvm_set_memslot(kvm, old, &new, change); + new->as_id = as_id; + new->id = id; + new->base_gfn = base_gfn; + new->npages = npages; + new->flags = mem->flags; + new->userspace_addr = mem->userspace_addr; + + r = kvm_set_memslot(kvm, old, new, change); + if (r) + kfree(new); + return r; } EXPORT_SYMBOL_GPL(__kvm_set_memory_region); From 907afa48e9d0f24713a34135428d981e4239a3be Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 3 Nov 2021 10:05:21 -0400 Subject: [PATCH 055/268] KVM: nSVM: move nested_vmcb_check_cr3_cr4 logic in nested_vmcb_valid_sregs Inline nested_vmcb_check_cr3_cr4 as it is not called by anyone else. Doing so simplifies next patches. Signed-off-by: Emanuele Giuseppe Esposito Reviewed-by: Maxim Levitsky Message-Id: <20211103140527.752797-2-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 35 +++++++++++++---------------------- 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index f8b7bc04b3e7..946c06a25d37 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -275,27 +275,6 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, return true; } -static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu, - struct vmcb_save_area *save) -{ - /* - * These checks are also performed by KVM_SET_SREGS, - * except that EFER.LMA is not checked by SVM against - * CR0.PG && EFER.LME. - */ - if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) { - if (CC(!(save->cr4 & X86_CR4_PAE)) || - CC(!(save->cr0 & X86_CR0_PE)) || - CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3))) - return false; - } - - if (CC(!kvm_is_valid_cr4(vcpu, save->cr4))) - return false; - - return true; -} - /* Common checks that apply to both L1 and L2 state. */ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu, struct vmcb_save_area *save) @@ -317,7 +296,19 @@ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu, if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7))) return false; - if (!nested_vmcb_check_cr3_cr4(vcpu, save)) + /* + * These checks are also performed by KVM_SET_SREGS, + * except that EFER.LMA is not checked by SVM against + * CR0.PG && EFER.LME. + */ + if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) { + if (CC(!(save->cr4 & X86_CR4_PAE)) || + CC(!(save->cr0 & X86_CR0_PE)) || + CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3))) + return false; + } + + if (CC(!kvm_is_valid_cr4(vcpu, save->cr4))) return false; if (CC(!kvm_valid_efer(vcpu, save->efer))) From f2740a8d851a57068c9f3624c6dc8edcf91754b2 Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 3 Nov 2021 10:05:22 -0400 Subject: [PATCH 056/268] KVM: nSVM: introduce svm->nested.save to cache save area before checks This is useful in the next patch, to keep a saved copy of vmcb12 registers and pass it around more easily. Instead of blindly copying everything, we just copy EFER, CR0, CR3, CR4, DR6 and DR7 which are needed by the VMRUN checks. If more fields will need to be checked, it will be quite obvious to see that they must be added in struct vmcb_save_area_cached and in nested_copy_vmcb_save_to_cache(). __nested_copy_vmcb_save_to_cache() takes a vmcb_save_area_cached parameter, which is useful in order to save the state to a local variable. 
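For illustration, the intended usage pattern is roughly the following; the local-variable form only shows up later in the series, in the KVM_SET_NESTED_STATE path, so treat this as a sketch rather than the exact hunks:

	struct vmcb_save_area_cached save_cached;

	/* VMRUN: snapshot vmcb12's save area into the vcpu-wide cache. */
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

	/* Ioctl paths: snapshot a caller-provided save area into a local. */
	__nested_copy_vmcb_save_to_cache(&save_cached, save);

Either way, the point is that later checks and loads consume the snapshot, never the guest-writable vmcb12 copy.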
Signed-off-by: Emanuele Giuseppe Esposito Message-Id: <20211103140527.752797-3-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 23 +++++++++++++++++++++++ arch/x86/kvm/svm/svm.c | 1 + arch/x86/kvm/svm/svm.h | 17 +++++++++++++++++ 3 files changed, 41 insertions(+) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 946c06a25d37..ceafe40ec0f9 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -328,6 +328,28 @@ void nested_load_control_from_vmcb12(struct vcpu_svm *svm, svm->nested.ctl.iopm_base_pa &= ~0x0fffULL; } +static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to, + struct vmcb_save_area *from) +{ + /* + * Copy only fields that are validated, as we need them + * to avoid TOC/TOU races. + */ + to->efer = from->efer; + to->cr0 = from->cr0; + to->cr3 = from->cr3; + to->cr4 = from->cr4; + + to->dr6 = from->dr6; + to->dr7 = from->dr7; +} + +void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, + struct vmcb_save_area *save) +{ + __nested_copy_vmcb_save_to_cache(&svm->nested.save, save); +} + /* * Synchronize fields that are written by the processor, so that * they can be copied back into the vmcb12. @@ -670,6 +692,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) return -EINVAL; nested_load_control_from_vmcb12(svm, &vmcb12->control); + nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) || !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) { diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d0f68d11ec70..20d19162a7f8 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4435,6 +4435,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) vmcb12 = map.hva; nested_load_control_from_vmcb12(svm, &vmcb12->control); + nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); unmap_save: diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 1c7306c370fa..4c7365582a5e 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -105,6 +105,15 @@ struct kvm_vmcb_info { uint64_t asid_generation; }; +struct vmcb_save_area_cached { + u64 efer; + u64 cr4; + u64 cr3; + u64 cr0; + u64 dr7; + u64 dr6; +}; + struct svm_nested_state { struct kvm_vmcb_info vmcb02; u64 hsave_msr; @@ -122,6 +131,12 @@ struct svm_nested_state { /* cache for control fields of the guest */ struct vmcb_control_area ctl; + /* + * Note: this struct is not kept up-to-date while L2 runs; it is only + * valid within nested_svm_vmrun. 
+ */ + struct vmcb_save_area_cached save; + bool initialized; }; @@ -496,6 +511,8 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu); void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier); void nested_load_control_from_vmcb12(struct vcpu_svm *svm, struct vmcb_control_area *control); +void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, + struct vmcb_save_area *save); void nested_sync_control_from_vmcb02(struct vcpu_svm *svm); void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm); void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb); From 7907160dbf1a1063b19201c79566069ec0da054c Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 3 Nov 2021 10:05:23 -0400 Subject: [PATCH 057/268] KVM: nSVM: rename nested_load_control_from_vmcb12 in nested_copy_vmcb_control_to_cache Following the same naming convention of the previous patch, rename nested_load_control_from_vmcb12. In addition, inline copy_vmcb_control_area as it is only called by this function. __nested_copy_vmcb_control_to_cache() works with vmcb_control_area parameters and it will be useful in next patches, when we use local variables instead of svm cached state. Signed-off-by: Emanuele Giuseppe Esposito Message-Id: <20211103140527.752797-4-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 80 +++++++++++++++++++-------------------- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/svm/svm.h | 4 +- 3 files changed, 43 insertions(+), 43 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index ceafe40ec0f9..d6c7030e4ac0 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -163,37 +163,6 @@ void recalc_intercepts(struct vcpu_svm *svm) vmcb_set_intercept(c, INTERCEPT_VMSAVE); } -static void copy_vmcb_control_area(struct vmcb_control_area *dst, - struct vmcb_control_area *from) -{ - unsigned int i; - - for (i = 0; i < MAX_INTERCEPT; i++) - dst->intercepts[i] = from->intercepts[i]; - - dst->iopm_base_pa = from->iopm_base_pa; - dst->msrpm_base_pa = from->msrpm_base_pa; - dst->tsc_offset = from->tsc_offset; - /* asid not copied, it is handled manually for svm->vmcb. */ - dst->tlb_ctl = from->tlb_ctl; - dst->int_ctl = from->int_ctl; - dst->int_vector = from->int_vector; - dst->int_state = from->int_state; - dst->exit_code = from->exit_code; - dst->exit_code_hi = from->exit_code_hi; - dst->exit_info_1 = from->exit_info_1; - dst->exit_info_2 = from->exit_info_2; - dst->exit_int_info = from->exit_int_info; - dst->exit_int_info_err = from->exit_int_info_err; - dst->nested_ctl = from->nested_ctl; - dst->event_inj = from->event_inj; - dst->event_inj_err = from->event_inj_err; - dst->nested_cr3 = from->nested_cr3; - dst->virt_ext = from->virt_ext; - dst->pause_filter_count = from->pause_filter_count; - dst->pause_filter_thresh = from->pause_filter_thresh; -} - static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) { /* @@ -317,15 +286,46 @@ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu, return true; } -void nested_load_control_from_vmcb12(struct vcpu_svm *svm, - struct vmcb_control_area *control) +static +void __nested_copy_vmcb_control_to_cache(struct vmcb_control_area *to, + struct vmcb_control_area *from) { - copy_vmcb_control_area(&svm->nested.ctl, control); + unsigned int i; - /* Copy it here because nested_svm_check_controls will check it. 
*/ - svm->nested.ctl.asid = control->asid; - svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL; - svm->nested.ctl.iopm_base_pa &= ~0x0fffULL; + for (i = 0; i < MAX_INTERCEPT; i++) + to->intercepts[i] = from->intercepts[i]; + + to->iopm_base_pa = from->iopm_base_pa; + to->msrpm_base_pa = from->msrpm_base_pa; + to->tsc_offset = from->tsc_offset; + to->tlb_ctl = from->tlb_ctl; + to->int_ctl = from->int_ctl; + to->int_vector = from->int_vector; + to->int_state = from->int_state; + to->exit_code = from->exit_code; + to->exit_code_hi = from->exit_code_hi; + to->exit_info_1 = from->exit_info_1; + to->exit_info_2 = from->exit_info_2; + to->exit_int_info = from->exit_int_info; + to->exit_int_info_err = from->exit_int_info_err; + to->nested_ctl = from->nested_ctl; + to->event_inj = from->event_inj; + to->event_inj_err = from->event_inj_err; + to->nested_cr3 = from->nested_cr3; + to->virt_ext = from->virt_ext; + to->pause_filter_count = from->pause_filter_count; + to->pause_filter_thresh = from->pause_filter_thresh; + + /* Copy asid here because nested_vmcb_check_controls will check it. */ + to->asid = from->asid; + to->msrpm_base_pa &= ~0x0fffULL; + to->iopm_base_pa &= ~0x0fffULL; +} + +void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, + struct vmcb_control_area *control) +{ + __nested_copy_vmcb_control_to_cache(&svm->nested.ctl, control); } static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to, @@ -691,7 +691,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) if (WARN_ON_ONCE(!svm->nested.initialized)) return -EINVAL; - nested_load_control_from_vmcb12(svm, &vmcb12->control); + nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) || @@ -1436,7 +1436,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save); - nested_load_control_from_vmcb12(svm, ctl); + nested_copy_vmcb_control_to_cache(svm, ctl); svm_switch_vmcb(svm, &svm->nested.vmcb02); nested_vmcb02_prepare_control(svm); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 20d19162a7f8..745351a2293c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4434,7 +4434,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) */ vmcb12 = map.hva; - nested_load_control_from_vmcb12(svm, &vmcb12->control); + nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 4c7365582a5e..3f44ec79597b 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -509,8 +509,8 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, int nested_svm_exit_special(struct vcpu_svm *svm); void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu); void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier); -void nested_load_control_from_vmcb12(struct vcpu_svm *svm, - struct vmcb_control_area *control); +void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, + struct vmcb_control_area *control); void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, struct vmcb_save_area *save); void nested_sync_control_from_vmcb02(struct vcpu_svm *svm); From b7a3d8b6f433d293e9033b9d63651b6d17bf5654 Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 3 Nov 2021 10:05:24 -0400 
Subject: [PATCH 058/268] KVM: nSVM: use vmcb_save_area_cached in nested_vmcb_valid_sregs() Now that struct vmcb_save_area_cached contains the required vmcb fields values (done in nested_load_save_from_vmcb12()), check them to see if they are correct in nested_vmcb_valid_sregs(). While at it, rename nested_vmcb_valid_sregs in nested_vmcb_check_save. __nested_vmcb_check_save takes the additional @save parameter, so it is helpful when we want to check a non-svm save state, like in svm_set_nested_state. The reason for that is that save is the L1 state, not L2, so we check it without moving it to svm->nested.save. Signed-off-by: Emanuele Giuseppe Esposito Message-Id: <20211103140527.752797-5-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index d6c7030e4ac0..545d0ad19de4 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -245,8 +245,8 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, } /* Common checks that apply to both L1 and L2 state. */ -static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu, - struct vmcb_save_area *save) +static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu, + struct vmcb_save_area_cached *save) { /* * FIXME: these should be done after copying the fields, @@ -286,6 +286,14 @@ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu, return true; } +static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + struct vmcb_save_area_cached *save = &svm->nested.save; + + return __nested_vmcb_check_save(vcpu, save); +} + static void __nested_copy_vmcb_control_to_cache(struct vmcb_control_area *to, struct vmcb_control_area *from) @@ -694,7 +702,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); - if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) || + if (!nested_vmcb_check_save(vcpu) || !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) { vmcb12->control.exit_code = SVM_EXIT_ERR; vmcb12->control.exit_code_hi = 0; @@ -1330,6 +1338,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, &user_kvm_nested_state->data.svm[0]; struct vmcb_control_area *ctl; struct vmcb_save_area *save; + struct vmcb_save_area_cached save_cached; unsigned long cr0; int ret; @@ -1397,10 +1406,11 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, * Validate host state saved from before VMRUN (see * nested_svm_check_permissions). */ + __nested_copy_vmcb_save_to_cache(&save_cached, save); if (!(save->cr0 & X86_CR0_PG) || !(save->cr0 & X86_CR0_PE) || (save->rflags & X86_EFLAGS_VM) || - !nested_vmcb_valid_sregs(vcpu, save)) + !__nested_vmcb_check_save(vcpu, &save_cached)) goto out_free; /* From 355d0473b1a11d7cf526fbd43c3908224e08a909 Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 3 Nov 2021 10:05:25 -0400 Subject: [PATCH 059/268] KVM: nSVM: use svm->nested.save to load vmcb12 registers and avoid TOC/TOU races Use the already checked svm->nested.save cached fields (EFER, CR0, CR4, ...) instead of vmcb12's in nested_vmcb02_prepare_save(). This prevents from creating TOC/TOU races, since the guest could modify the vmcb12 fields. This also avoids the need of force-setting EFER_SVME in nested_vmcb02_prepare_save. 
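To make the double fetch concrete, the EFER load in nested_vmcb02_prepare_save() changes shape roughly as follows (simplified from the hunks below):

	/* Before: re-reads guest memory, so the loaded value may differ from the checked one. */
	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

	/* After: consumes the cached value that already passed the save-area checks. */
	svm_set_efer(&svm->vcpu, svm->nested.save.efer);

Since svm->nested.save.efer was validated with EFER_SVME set, there is nothing left to force.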
Signed-off-by: Emanuele Giuseppe Esposito Reviewed-by: Maxim Levitsky Message-Id: <20211103140527.752797-6-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 545d0ad19de4..aad09d560ec0 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -248,13 +248,6 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu, struct vmcb_save_area_cached *save) { - /* - * FIXME: these should be done after copying the fields, - * to avoid TOC/TOU races. For these save area checks - * the possible damage is limited since kvm_set_cr0 and - * kvm_set_cr4 handle failure; EFER_SVME is an exception - * so it is force-set later in nested_prepare_vmcb_save. - */ if (CC(!(save->efer & EFER_SVME))) return false; @@ -511,15 +504,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED); - /* - * Force-set EFER_SVME even though it is checked earlier on the - * VMCB12, because the guest can flip the bit between the check - * and now. Clearing EFER_SVME would call svm_free_nested. - */ - svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME); + svm_set_efer(&svm->vcpu, svm->nested.save.efer); - svm_set_cr0(&svm->vcpu, vmcb12->save.cr0); - svm_set_cr4(&svm->vcpu, vmcb12->save.cr4); + svm_set_cr0(&svm->vcpu, svm->nested.save.cr0); + svm_set_cr4(&svm->vcpu, svm->nested.save.cr4); svm->vcpu.arch.cr2 = vmcb12->save.cr2; @@ -534,8 +522,8 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12 /* These bits will be set properly on the first execution when new_vmc12 is true */ if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) { - svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1; - svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW; + svm->vmcb->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1; + svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW; vmcb_mark_dirty(svm->vmcb, VMCB_DR); } } @@ -649,7 +637,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, nested_vmcb02_prepare_control(svm); nested_vmcb02_prepare_save(svm, vmcb12); - ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, + ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3, nested_npt_enabled(svm), from_vmrun); if (ret) return ret; From bd95926c2b2b9b66013a36b6558aa426147ed11f Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 11 Nov 2021 09:14:08 -0500 Subject: [PATCH 060/268] KVM: nSVM: split out __nested_vmcb_check_controls Remove the struct vmcb_control_area parameter from nested_vmcb_check_controls, for consistency with the functions that operate on the save area. This way, VMRUN uses the version without underscores for both areas, while KVM_SET_NESTED_STATE uses the version with underscores. 
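Schematically, the two entry points end up calling the checks as below (names as used elsewhere in this section; shown only to spell out the convention, not as verbatim code):

	/* nested_svm_vmrun(): check the copies cached in svm->nested. */
	if (!nested_vmcb_check_save(vcpu) ||
	    !nested_vmcb_check_controls(vcpu))
		/* reflect SVM_EXIT_ERR back into vmcb12 */;

	/* svm_set_nested_state(): check the caller-provided areas. */
	if (!__nested_vmcb_check_controls(vcpu, ctl))
		goto out_free;
	...
	if (!__nested_vmcb_check_save(vcpu, &save_cached))
		goto out_free;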
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index aad09d560ec0..565d9d401f43 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -219,8 +219,8 @@ static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl) } } -static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu, - struct vmcb_control_area *control) +static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu, + struct vmcb_control_area *control) { if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN))) return false; @@ -287,6 +287,14 @@ static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu) return __nested_vmcb_check_save(vcpu, save); } +static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + struct vmcb_control_area *ctl = &svm->nested.ctl; + + return __nested_vmcb_check_controls(vcpu, ctl); +} + static void __nested_copy_vmcb_control_to_cache(struct vmcb_control_area *to, struct vmcb_control_area *from) @@ -691,7 +699,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); if (!nested_vmcb_check_save(vcpu) || - !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) { + !nested_vmcb_check_controls(vcpu)) { vmcb12->control.exit_code = SVM_EXIT_ERR; vmcb12->control.exit_code_hi = 0; vmcb12->control.exit_info_1 = 0; @@ -1379,7 +1387,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, goto out_free; ret = -EINVAL; - if (!nested_vmcb_check_controls(vcpu, ctl)) + if (!__nested_vmcb_check_controls(vcpu, ctl)) goto out_free; /* From 8fc78909c05d1691c0d087cb1b9a4858762c747d Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 3 Nov 2021 10:05:26 -0400 Subject: [PATCH 061/268] KVM: nSVM: introduce struct vmcb_ctrl_area_cached This structure will replace vmcb_control_area in svm_nested_state, providing only the fields that are actually used by the nested state. This avoids having and copying around uninitialized fields. The cost of this, however, is that all functions (in this case vmcb_is_intercept) expect the old structure, so they need to be duplicated. In addition, in svm_get_nested_state() user space expects a vmcb_control_area struct, so we need to copy back all fields in a temporary structure before copying it to userspace. 
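The boundary translation described above can be sketched independently of the SVM structures (the layouts below are invented for illustration): keep a lean internal cache of only the consumed fields, and build a zeroed ABI-layout copy at the point where state is returned to userspace.

#include <string.h>

struct abi_control {			/* fixed userspace ABI layout */
	unsigned int  intercepts[16];
	unsigned long long tsc_offset;
	unsigned int  asid;
	unsigned char unused[200];	/* fields the kernel never consumes */
};

struct ctrl_cache {			/* internal cache: used fields only */
	unsigned int  intercepts[16];
	unsigned long long tsc_offset;
	unsigned int  asid;
};

/* Fill a zeroed ABI-layout structure from the cache right before it is
 * handed back to userspace, so no uninitialized or stale bytes leak. */
static void cache_to_abi(struct abi_control *dst, const struct ctrl_cache *src)
{
	memset(dst, 0, sizeof(*dst));
	memcpy(dst->intercepts, src->intercepts, sizeof(dst->intercepts));
	dst->tsc_offset = src->tsc_offset;
	dst->asid       = src->asid;
}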
Signed-off-by: Emanuele Giuseppe Esposito Reviewed-by: Maxim Levitsky Message-Id: <20211103140527.752797-7-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 83 +++++++++++++++++++++++++++++++-------- arch/x86/kvm/svm/svm.c | 4 +- arch/x86/kvm/svm/svm.h | 39 ++++++++++++++++-- 3 files changed, 103 insertions(+), 23 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 565d9d401f43..598843cfe6c4 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -58,8 +58,9 @@ static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_excep struct vcpu_svm *svm = to_svm(vcpu); WARN_ON(!is_guest_mode(vcpu)); - if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) && - !svm->nested.nested_run_pending) { + if (vmcb12_is_intercept(&svm->nested.ctl, + INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) && + !svm->nested.nested_run_pending) { svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR; svm->vmcb->control.exit_code_hi = 0; svm->vmcb->control.exit_info_1 = fault->error_code; @@ -121,7 +122,8 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) void recalc_intercepts(struct vcpu_svm *svm) { - struct vmcb_control_area *c, *h, *g; + struct vmcb_control_area *c, *h; + struct vmcb_ctrl_area_cached *g; unsigned int i; vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); @@ -172,7 +174,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) */ int i; - if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) + if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) return true; for (i = 0; i < MSRPM_OFFSETS; i++) { @@ -220,9 +222,9 @@ static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl) } static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu, - struct vmcb_control_area *control) + struct vmcb_ctrl_area_cached *control) { - if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN))) + if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN))) return false; if (CC(control->asid == 0)) @@ -290,13 +292,13 @@ static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu) static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - struct vmcb_control_area *ctl = &svm->nested.ctl; + struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl; return __nested_vmcb_check_controls(vcpu, ctl); } static -void __nested_copy_vmcb_control_to_cache(struct vmcb_control_area *to, +void __nested_copy_vmcb_control_to_cache(struct vmcb_ctrl_area_cached *to, struct vmcb_control_area *from) { unsigned int i; @@ -1006,7 +1008,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) u32 offset, msr, value; int write, mask; - if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) + if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) return NESTED_EXIT_HOST; msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; @@ -1033,7 +1035,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm) u8 start_bit; u64 gpa; - if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))) + if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))) return NESTED_EXIT_HOST; port = svm->vmcb->control.exit_info_1 >> 16; @@ -1064,12 +1066,12 @@ static int nested_svm_intercept(struct vcpu_svm *svm) vmexit = nested_svm_intercept_ioio(svm); break; case SVM_EXIT_READ_CR0 ... 
SVM_EXIT_WRITE_CR8: { - if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) + if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) vmexit = NESTED_EXIT_DONE; break; } case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: { - if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) + if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) vmexit = NESTED_EXIT_DONE; break; } @@ -1087,7 +1089,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm) break; } default: { - if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) + if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) vmexit = NESTED_EXIT_DONE; } } @@ -1165,7 +1167,7 @@ static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm) static inline bool nested_exit_on_init(struct vcpu_svm *svm) { - return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); + return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); } static int svm_check_nested_events(struct kvm_vcpu *vcpu) @@ -1269,11 +1271,47 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu) svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); } +/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */ +static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst, + struct vmcb_ctrl_area_cached *from) +{ + unsigned int i; + + memset(dst, 0, sizeof(struct vmcb_control_area)); + + for (i = 0; i < MAX_INTERCEPT; i++) + dst->intercepts[i] = from->intercepts[i]; + + dst->iopm_base_pa = from->iopm_base_pa; + dst->msrpm_base_pa = from->msrpm_base_pa; + dst->tsc_offset = from->tsc_offset; + dst->asid = from->asid; + dst->tlb_ctl = from->tlb_ctl; + dst->int_ctl = from->int_ctl; + dst->int_vector = from->int_vector; + dst->int_state = from->int_state; + dst->exit_code = from->exit_code; + dst->exit_code_hi = from->exit_code_hi; + dst->exit_info_1 = from->exit_info_1; + dst->exit_info_2 = from->exit_info_2; + dst->exit_int_info = from->exit_int_info; + dst->exit_int_info_err = from->exit_int_info_err; + dst->nested_ctl = from->nested_ctl; + dst->event_inj = from->event_inj; + dst->event_inj_err = from->event_inj_err; + dst->nested_cr3 = from->nested_cr3; + dst->virt_ext = from->virt_ext; + dst->pause_filter_count = from->pause_filter_count; + dst->pause_filter_thresh = from->pause_filter_thresh; +} + static int svm_get_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, u32 user_data_size) { struct vcpu_svm *svm; + struct vmcb_control_area *ctl; + unsigned long r; struct kvm_nested_state kvm_state = { .flags = 0, .format = KVM_STATE_NESTED_FORMAT_SVM, @@ -1315,9 +1353,18 @@ static int svm_get_nested_state(struct kvm_vcpu *vcpu, */ if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE)) return -EFAULT; - if (copy_to_user(&user_vmcb->control, &svm->nested.ctl, - sizeof(user_vmcb->control))) + + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); + if (!ctl) + return -ENOMEM; + + nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl); + r = copy_to_user(&user_vmcb->control, ctl, + sizeof(user_vmcb->control)); + kfree(ctl); + if (r) return -EFAULT; + if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save, sizeof(user_vmcb->save))) return -EFAULT; @@ -1335,6 +1382,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, struct vmcb_control_area *ctl; struct vmcb_save_area *save; struct vmcb_save_area_cached save_cached; + struct vmcb_ctrl_area_cached ctl_cached; unsigned long cr0; int ret; @@ -1387,7 +1435,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, goto out_free; ret = -EINVAL; - if 
(!__nested_vmcb_check_controls(vcpu, ctl)) + __nested_copy_vmcb_control_to_cache(&ctl_cached, ctl); + if (!__nested_vmcb_check_controls(vcpu, &ctl_cached)) goto out_free; /* diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 745351a2293c..907ba85609a2 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2508,7 +2508,7 @@ static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, bool ret = false; if (!is_guest_mode(vcpu) || - (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) + (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) return false; cr0 &= ~SVM_CR0_SELECTIVE_MASK; @@ -4215,7 +4215,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, info->intercept == x86_intercept_clts) break; - if (!(vmcb_is_intercept(&svm->nested.ctl, + if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))) break; diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 3f44ec79597b..929bd60d754d 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -114,6 +114,31 @@ struct vmcb_save_area_cached { u64 dr6; }; +struct vmcb_ctrl_area_cached { + u32 intercepts[MAX_INTERCEPT]; + u16 pause_filter_thresh; + u16 pause_filter_count; + u64 iopm_base_pa; + u64 msrpm_base_pa; + u64 tsc_offset; + u32 asid; + u8 tlb_ctl; + u32 int_ctl; + u32 int_vector; + u32 int_state; + u32 exit_code; + u32 exit_code_hi; + u64 exit_info_1; + u64 exit_info_2; + u32 exit_int_info; + u32 exit_int_info_err; + u64 nested_ctl; + u32 event_inj; + u32 event_inj_err; + u64 nested_cr3; + u64 virt_ext; +}; + struct svm_nested_state { struct kvm_vmcb_info vmcb02; u64 hsave_msr; @@ -129,7 +154,7 @@ struct svm_nested_state { bool nested_run_pending; /* cache for control fields of the guest */ - struct vmcb_control_area ctl; + struct vmcb_ctrl_area_cached ctl; /* * Note: this struct is not kept up-to-date while L2 runs; it is only @@ -318,6 +343,12 @@ static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit) return test_bit(bit, (unsigned long *)&control->intercepts); } +static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit) +{ + WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT); + return test_bit(bit, (unsigned long *)&control->intercepts); +} + static inline void set_dr_intercepts(struct vcpu_svm *svm) { struct vmcb *vmcb = svm->vmcb01.ptr; @@ -470,17 +501,17 @@ static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu) static inline bool nested_exit_on_smi(struct vcpu_svm *svm) { - return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI); + return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI); } static inline bool nested_exit_on_intr(struct vcpu_svm *svm) { - return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR); + return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR); } static inline bool nested_exit_on_nmi(struct vcpu_svm *svm) { - return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI); + return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI); } int enter_svm_guest_mode(struct kvm_vcpu *vcpu, From ce92ef7604ffe74da84f559f6eba8c6053250451 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 17 Nov 2021 18:08:42 -0800 Subject: [PATCH 062/268] KVM: x86/mmu: Use shadow page role to detect PML-unfriendly pages for L2 Rework make_spte() to query the shadow page's role, specifically whether or not it's a guest_mode page, a.k.a. a page for L2, when determining if the SPTE is compatible with PML. 
This eliminates a dependency on @vcpu, with a future goal of being able to create SPTEs without a specific vCPU. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu_internal.h | 7 +++---- arch/x86/kvm/mmu/spte.c | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 52c6527b1a06..5897ce4cdf10 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -104,7 +104,7 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp) return kvm_mmu_role_as_id(sp->role); } -static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) +static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp) { /* * When using the EPT page-modification log, the GPAs in the CPU dirty @@ -112,10 +112,9 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) * on write protection to record dirty pages, which bypasses PML, since * writes now result in a vmexit. Note, the check on CPU dirty logging * being enabled is mandatory as the bits used to denote WP-only SPTEs - * are reserved for NPT w/ PAE (32-bit KVM). + * are reserved for PAE paging (32-bit KVM). */ - return vcpu->arch.mmu == &vcpu->arch.guest_mmu && - kvm_x86_ops.cpu_dirty_log_size; + return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode; } int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 0c76c45fdb68..84e64dbdd89e 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -101,7 +101,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, if (sp->role.ad_disabled) spte |= SPTE_TDP_AD_DISABLED_MASK; - else if (kvm_vcpu_ad_need_write_protect(vcpu)) + else if (kvm_mmu_page_ad_need_write_protect(sp)) spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK; /* From 9d395a0a7aca75caa72a8ab11a6efc9909c5a918 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Mon, 15 Nov 2021 15:45:53 -0800 Subject: [PATCH 063/268] KVM: x86/mmu: Remove need for a vcpu from kvm_slot_page_track_is_active kvm_slot_page_track_is_active only uses its vCPU argument to get a pointer to the assoicated struct kvm, so just pass in the struct KVM to remove the need for a vCPU pointer. No functional change intended. 
Signed-off-by: Ben Gardon Message-Id: <20211115234603.2908381-6-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 2 +- arch/x86/kvm/mmu/mmu.c | 4 ++-- arch/x86/kvm/mmu/page_track.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index 9d4a3b1b25b9..e99a30a4d38b 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -63,7 +63,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm, void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); -bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu, +bool kvm_slot_page_track_is_active(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b83ae4804176..2ea6e5e1fde9 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2591,7 +2591,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, * track machinery is used to write-protect upper-level shadow pages, * i.e. this guards the role.level == 4K assertion below! */ - if (kvm_slot_page_track_is_active(vcpu, slot, gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, KVM_PAGE_TRACK_WRITE)) return -EPERM; /* @@ -3888,7 +3888,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, * guest is writing the page which is write tracked which can * not be fixed by page fault handler. */ - if (kvm_slot_page_track_is_active(vcpu, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE)) return true; return false; diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index cc4eb5b7fb76..35c221d5f6ce 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -173,7 +173,7 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); /* * check if the corresponding access on the specified guest page is tracked. */ -bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu, +bool kvm_slot_page_track_is_active(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode) { @@ -186,7 +186,7 @@ bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu, return false; if (mode == KVM_PAGE_TRACK_WRITE && - !kvm_page_track_write_tracking_enabled(vcpu->kvm)) + !kvm_page_track_write_tracking_enabled(kvm)) return false; index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); From 4d78d0b39ad03e7357452a669938653a379cfebd Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Mon, 15 Nov 2021 15:45:54 -0800 Subject: [PATCH 064/268] KVM: x86/mmu: Remove need for a vcpu from mmu_try_to_unsync_pages The vCPU argument to mmu_try_to_unsync_pages is now only used to get a pointer to the associated struct kvm, so pass in the kvm pointer from the beginning to remove the need for a vCPU when calling the function. 
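Both this patch and the previous one follow the same narrowing pattern, sketched below with simplified stand-in types: a helper that only needs the VM takes a struct kvm pointer directly, and vCPU-holding callers simply pass vcpu->kvm.

struct kvm { unsigned long mmu_unsync; };
struct kvm_vcpu { struct kvm *kvm; };

static void unsync_page(struct kvm *kvm)	/* was: (struct kvm_vcpu *vcpu) */
{
	++kvm->mmu_unsync;
}

static void vcpu_caller(struct kvm_vcpu *vcpu)
{
	unsync_page(vcpu->kvm);			/* vCPU-holding callers still work */
}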
Signed-off-by: Ben Gardon Message-Id: <20211115234603.2908381-7-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 16 ++++++++-------- arch/x86/kvm/mmu/mmu_internal.h | 2 +- arch/x86/kvm/mmu/spte.c | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 2ea6e5e1fde9..29bcf26b0cb3 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2565,10 +2565,10 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) return r; } -static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) { trace_kvm_mmu_unsync_page(sp); - ++vcpu->kvm->stat.mmu_unsync; + ++kvm->stat.mmu_unsync; sp->unsync = 1; kvm_mmu_mark_parents_unsync(sp); @@ -2580,7 +2580,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must * be write-protected. */ -int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, +int mmu_try_to_unsync_pages(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch) { struct kvm_mmu_page *sp; @@ -2591,7 +2591,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, * track machinery is used to write-protect upper-level shadow pages, * i.e. this guards the role.level == 4K assertion below! */ - if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, KVM_PAGE_TRACK_WRITE)) + if (kvm_slot_page_track_is_active(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE)) return -EPERM; /* @@ -2600,7 +2600,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, * that case, KVM must complete emulation of the guest TLB flush before * allowing shadow pages to become unsync (writable by the guest). 
*/ - for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { + for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { if (!can_unsync) return -EPERM; @@ -2619,7 +2619,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, */ if (!locked) { locked = true; - spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock); + spin_lock(&kvm->arch.mmu_unsync_pages_lock); /* * Recheck after taking the spinlock, a different vCPU @@ -2634,10 +2634,10 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, } WARN_ON(sp->role.level != PG_LEVEL_4K); - kvm_unsync_page(vcpu, sp); + kvm_unsync_page(kvm, sp); } if (locked) - spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock); + spin_unlock(&kvm->arch.mmu_unsync_pages_lock); /* * We need to ensure that the marking of unsync pages is visible diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 5897ce4cdf10..787b8c553b9e 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -117,7 +117,7 @@ static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp) return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode; } -int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, +int mmu_try_to_unsync_pages(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch); void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 84e64dbdd89e..8d3fe4311bc1 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -161,7 +161,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, * e.g. it's write-tracked (upper-level SPs) or has one or more * shadow pages and unsync'ing pages is not allowed. */ - if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) { + if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) { pgprintk("%s: found shadow page for %llx, marking ro\n", __func__, gfn); wrprot = true; From 8283e36abfff507c64fe8289ac30ea7ab59648aa Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Mon, 15 Nov 2021 15:45:58 -0800 Subject: [PATCH 065/268] KVM: x86/mmu: Propagate memslot const qualifier In preparation for implementing in-place hugepage promotion, various functions will need to be called from zap_collapsible_spte_range, which has the const qualifier on its memslot argument. Propagate the const qualifier to the various functions which will be needed. This just serves to simplify the following patch. No functional change intended. 
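A tiny sketch of why the qualifier has to be propagated through the whole chain (types below are stand-ins): a caller that itself only holds a const pointer, as zap_collapsible_spte_range does for its memslot, cannot call a helper that takes a non-const one, so every helper on the path must be made const-clean.

#include <stdbool.h>

struct memslot { unsigned int flags; };

/* Leaf helper now accepts const ... */
static bool slot_is_readonly(const struct memslot *slot)
{
	return slot->flags & 0x1;
}

/* ... which is what lets a caller holding only a const pointer compile. */
static bool slot_is_writable(const struct memslot *slot)
{
	return !slot_is_readonly(slot);
}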
Signed-off-by: Ben Gardon Message-Id: <20211115234603.2908381-11-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 4 ++-- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/mmu_internal.h | 2 +- arch/x86/kvm/mmu/page_track.c | 4 ++-- arch/x86/kvm/mmu/spte.c | 2 +- arch/x86/kvm/mmu/spte.h | 2 +- include/linux/kvm_host.h | 10 +++++----- virt/kvm/kvm_main.c | 12 ++++++------ 8 files changed, 19 insertions(+), 19 deletions(-) diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index e99a30a4d38b..eb186bc57f6a 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -64,8 +64,8 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode); bool kvm_slot_page_track_is_active(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode); + const struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode); void kvm_page_track_register_notifier(struct kvm *kvm, diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 29bcf26b0cb3..c28cf7eeb79d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2580,7 +2580,7 @@ static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must * be write-protected. */ -int mmu_try_to_unsync_pages(struct kvm *kvm, struct kvm_memory_slot *slot, +int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch) { struct kvm_mmu_page *sp; diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 787b8c553b9e..da6166b5c377 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -117,7 +117,7 @@ static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp) return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode; } -int mmu_try_to_unsync_pages(struct kvm *kvm, struct kvm_memory_slot *slot, +int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch); void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 35c221d5f6ce..68eb1fb548b6 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -174,8 +174,8 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page); * check if the corresponding access on the specified guest page is tracked. 
*/ bool kvm_slot_page_track_is_active(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, - enum kvm_page_track_mode mode) + const struct kvm_memory_slot *slot, + gfn_t gfn, enum kvm_page_track_mode mode) { int index; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 8d3fe4311bc1..8a7b03207762 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -90,7 +90,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) } bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - struct kvm_memory_slot *slot, + const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index cc432f9a966b..a4af2a42695c 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -330,7 +330,7 @@ static inline u64 get_mmio_spte_generation(u64 spte) } bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - struct kvm_memory_slot *slot, + const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3bc98497e796..3eb7695aaa73 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -460,7 +460,7 @@ struct kvm_memory_slot { u16 as_id; }; -static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot) +static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot) { return slot->flags & KVM_MEM_LOG_DIRTY_PAGES; } @@ -994,9 +994,9 @@ void kvm_set_page_accessed(struct page *page); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); -kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); -kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); -kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, +kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn); +kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn); +kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable, hva_t *hva); @@ -1073,7 +1073,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); -void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn); +void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e588dc4f9b7d..f93b60165fd7 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2285,12 +2285,12 @@ out: return size; } -static bool memslot_is_readonly(struct kvm_memory_slot *slot) +static bool memslot_is_readonly(const struct kvm_memory_slot *slot) { return slot->flags & KVM_MEM_READONLY; } -static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, +static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages, bool write) { if 
(!slot || slot->flags & KVM_MEMSLOT_INVALID) @@ -2585,7 +2585,7 @@ exit: return pfn; } -kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, +kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool *async, bool write_fault, bool *writable, hva_t *hva) { @@ -2625,13 +2625,13 @@ kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, } EXPORT_SYMBOL_GPL(gfn_to_pfn_prot); -kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) +kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL); } EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot); -kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) +kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); } @@ -3150,7 +3150,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) EXPORT_SYMBOL_GPL(kvm_clear_guest); void mark_page_dirty_in_slot(struct kvm *kvm, - struct kvm_memory_slot *memslot, + const struct kvm_memory_slot *memslot, gfn_t gfn) { if (memslot && kvm_slot_dirty_track_enabled(memslot)) { From fb43496c8362b8b379b4348b581f8f88f47cd1f8 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Mon, 15 Nov 2021 15:45:59 -0800 Subject: [PATCH 066/268] KVM: x86/MMU: Simplify flow of vmx_get_mt_mask Remove the gotos from vmx_get_mt_mask. It's easier to build the whole memory type at once, than it is to combine separate cacheability and ipat fields. No functional change intended. Signed-off-by: Ben Gardon Message-Id: <20211115234603.2908381-12-bgardon@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9453743ce0c4..01eab6d6ec22 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6947,7 +6947,6 @@ static int __init vmx_check_processor_compat(void) static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { u8 cache; - u64 ipat = 0; /* We wanted to honor guest CD/MTRR/PAT, but doing so could result in * memory aliases with conflicting memory types and sometimes MCEs. @@ -6967,30 +6966,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) * EPT memory type is used to emulate guest CD/MTRR. 
*/ - if (is_mmio) { - cache = MTRR_TYPE_UNCACHABLE; - goto exit; - } + if (is_mmio) + return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; - if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { - ipat = VMX_EPT_IPAT_BIT; - cache = MTRR_TYPE_WRBACK; - goto exit; - } + if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) + return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; if (kvm_read_cr0(vcpu) & X86_CR0_CD) { - ipat = VMX_EPT_IPAT_BIT; if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) cache = MTRR_TYPE_WRBACK; else cache = MTRR_TYPE_UNCACHABLE; - goto exit; + + return (cache << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; } - cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); - -exit: - return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; + return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT; } static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl) From aefdc2ed445eb470bdba108bd6a19fb232d3bada Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 20 Oct 2021 06:38:05 -0400 Subject: [PATCH 067/268] KVM: Avoid atomic operations when kicking the running vCPU If we do have the vcpu mutex, as is the case if kvm_running_vcpu is set to the target vcpu of the kick, changes to vcpu->mode do not need atomic operations; cmpxchg is only needed _outside_ the mutex to ensure that the IN_GUEST_MODE->EXITING_GUEST_MODE change does not race with the vcpu thread going OUTSIDE_GUEST_MODE. Use this to optimize the case of a vCPU sending an interrupt to itself. Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f93b60165fd7..e9990c4c6e40 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3396,6 +3396,19 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) if (kvm_vcpu_wake_up(vcpu)) return; + me = get_cpu(); + /* + * The only state change done outside the vcpu mutex is IN_GUEST_MODE + * to EXITING_GUEST_MODE. Therefore the moderately expensive "should + * kick" check does not need atomic operations if kvm_vcpu_kick is used + * within the vCPU thread itself. + */ + if (vcpu == __this_cpu_read(kvm_running_vcpu)) { + if (vcpu->mode == IN_GUEST_MODE) + WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE); + goto out; + } + /* * Note, the vCPU could get migrated to a different pCPU at any point * after kvm_arch_vcpu_should_kick(), which could result in sending an @@ -3403,12 +3416,12 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the * vCPU also requires it to leave IN_GUEST_MODE. 
*/ - me = get_cpu(); if (kvm_arch_vcpu_should_kick(vcpu)) { cpu = READ_ONCE(vcpu->cpu); if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) smp_send_reschedule(cpu); } +out: put_cpu(); } EXPORT_SYMBOL_GPL(kvm_vcpu_kick); From 98a26b69d8c3b7b2bd51350b1a1218c518d32123 Mon Sep 17 00:00:00 2001 From: Vihas Mak Date: Sun, 14 Nov 2021 22:13:12 +0530 Subject: [PATCH 068/268] KVM: x86: change TLB flush indicator to bool change 0 to false and 1 to true to fix following cocci warnings: arch/x86/kvm/mmu/mmu.c:1485:9-10: WARNING: return of 0/1 in function 'kvm_set_pte_rmapp' with return type bool arch/x86/kvm/mmu/mmu.c:1636:10-11: WARNING: return of 0/1 in function 'kvm_test_age_rmapp' with return type bool Signed-off-by: Vihas Mak Cc: Sean Christopherson Cc: Vitaly Kuznetsov Cc: Wanpeng Li Cc: Jim Mattson Cc: Joerg Roedel Message-Id: <20211114164312.GA28736@makvihas> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c28cf7eeb79d..ede63912b1ac 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1454,7 +1454,7 @@ static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, { u64 *sptep; struct rmap_iterator iter; - int need_flush = 0; + bool need_flush = false; u64 new_spte; kvm_pfn_t new_pfn; @@ -1466,7 +1466,7 @@ restart: rmap_printk("spte %p %llx gfn %llx (%d)\n", sptep, *sptep, gfn, level); - need_flush = 1; + need_flush = true; if (pte_write(pte)) { pte_list_remove(kvm, rmap_head, sptep); @@ -1482,7 +1482,7 @@ restart: if (need_flush && kvm_available_flush_tlb_with_range()) { kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); - return 0; + return false; } return need_flush; @@ -1623,8 +1623,8 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, for_each_rmap_spte(rmap_head, &iter, sptep) if (is_accessed_spte(*sptep)) - return 1; - return 0; + return true; + return false; } #define RMAP_RECYCLE_THRESHOLD 1000 From 1831fa44df743a7cdffdf1c12c799bf6f3c12b8c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 16 Nov 2021 09:32:47 -0500 Subject: [PATCH 069/268] KVM: VMX: Don't unblock vCPU w/ Posted IRQ if IRQs are disabled in guest Don't configure the wakeup handler when a vCPU is blocking with IRQs disabled, in which case any IRQ, posted or otherwise, should not be recognized and thus should not wake the vCPU. Fixes: bf9f6ac8d749 ("KVM: Update Posted-Interrupts Descriptor when vCPU is blocked") Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 1c94783b5a54..41f946e2123e 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -147,7 +147,8 @@ int pi_pre_block(struct kvm_vcpu *vcpu) struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - if (!vmx_can_use_vtd_pi(vcpu->kvm)) + if (!vmx_can_use_vtd_pi(vcpu->kvm) || + vmx_interrupt_blocked(vcpu)) return 0; WARN_ON(irqs_disabled()); From 91b01895071770ed0c256869d0f94d69a2fb8ecf Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:55 -0700 Subject: [PATCH 070/268] KVM: SVM: Ensure target pCPU is read once when signalling AVIC doorbell Ensure vcpu->cpu is read once when signalling the AVIC doorbell. 
If the compiler rereads the field and the vCPU is migrated between the check and writing the doorbell, KVM would signal the wrong physical CPU. Functionally, signalling the wrong CPU in this case is not an issue as task migration means the vCPU has exited and will pick up any pending interrupts on the next VMRUN. Add the READ_ONCE() purely to clean up the code. Opportunistically add a comment explaining the task migration behavior, and rename cpuid=>cpu to avoid conflating the CPU number with KVM's more common usage of CPUID. Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/avic.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index b7200595cbd4..0e5b49294086 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -675,10 +675,18 @@ int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) smp_mb__after_atomic(); if (avic_vcpu_is_running(vcpu)) { - int cpuid = vcpu->cpu; + int cpu = READ_ONCE(vcpu->cpu); - if (cpuid != get_cpu()) - wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid)); + /* + * Note, the vCPU could get migrated to a different pCPU at any + * point, which could result in signalling the wrong/previous + * pCPU. But if that happens the vCPU is guaranteed to do a + * VMRUN (after being migrated) and thus will process pending + * interrupts, i.e. a doorbell is not needed (and the spurious + * one is harmless). + */ + if (cpu != get_cpu()) + wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu)); put_cpu(); } else kvm_vcpu_wake_up(vcpu); From 6f390916c4fb359507d9ac4bf1b28a4f8abee5c0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:56 -0700 Subject: [PATCH 071/268] KVM: s390: Ensure kvm_arch_no_poll() is read once when blocking vCPU Wrap s390's halt_poll_max_steal with READ_ONCE and snapshot the result of kvm_arch_no_poll() in kvm_vcpu_block() to avoid a mostly-theoretical, largely benign bug on s390 where the result of kvm_arch_no_poll() could change due to userspace modifying halt_poll_max_steal while the vCPU is blocking. The bug is largely benign as it will either cause KVM to skip updating halt-polling times (no_poll toggles false=>true) or to update halt-polling times with a slightly flawed block_ns. Note, READ_ONCE is unnecessary in the current code, add it in case the arch hook is ever inlined, and to provide a hint that userspace can change the param at will. 
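Both this fix and the previous one rely on the same snapshot idiom. A standalone sketch, using a simplified stand-in for the kernel's READ_ONCE() (the real macro is more involved), looks roughly like this:

/* Simplified stand-in for the kernel macro, for illustration only. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct vcpu { int cpu; };

static void send_doorbell(int cpu) { (void)cpu; /* e.g. write the doorbell MSR */ }

static void kick_vcpu(struct vcpu *v, int this_cpu)
{
	int cpu = READ_ONCE(v->cpu);	/* single load: check and use see the same value */

	if (cpu != this_cpu)
		send_doorbell(cpu);
}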
Fixes: 8b905d28ee17 ("KVM: s390: provide kvm_arch_no_poll function") Reviewed-by: Christian Borntraeger Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/s390/kvm/kvm-s390.c | 2 +- virt/kvm/kvm_main.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b943a589ee41..9653e4075b11 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3403,7 +3403,7 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) { /* do not poll with more than halt_poll_max_steal percent of steal time */ if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >= - halt_poll_max_steal) { + READ_ONCE(halt_poll_max_steal)) { vcpu->stat.halt_no_poll_steal++; return true; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e9990c4c6e40..a26b069a6929 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3284,6 +3284,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) */ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { + bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); ktime_t start, cur, poll_end; bool waited = false; u64 block_ns; @@ -3291,7 +3292,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) kvm_arch_vcpu_blocking(vcpu); start = cur = poll_end = ktime_get(); - if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) { + if (vcpu->halt_poll_ns && halt_poll_allowed) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); ++vcpu->stat.generic.halt_attempted_poll; @@ -3346,7 +3347,7 @@ out: update_halt_poll_stats( vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); - if (!kvm_arch_no_poll(vcpu)) { + if (halt_poll_allowed) { if (!vcpu_valid_wakeup(vcpu)) { shrink_halt_poll_ns(vcpu); } else if (vcpu->kvm->max_halt_poll_ns) { From 510958e997217e39a16b47afb5a44dfa39013964 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:57 -0700 Subject: [PATCH 072/268] KVM: Force PPC to define its own rcuwait object Do not define/reference kvm_vcpu.wait if __KVM_HAVE_ARCH_WQP is true, and instead force the architecture (PPC) to define its own rcuwait object. Allowing common KVM to directly access vcpu->wait without a guard makes it all too easy to introduce potential bugs, e.g. kvm_vcpu_block(), kvm_vcpu_on_spin(), and async_pf_execute() all operate on vcpu->wait, not the result of kvm_arch_vcpu_get_wait(), and so may do the wrong thing for PPC. Due to PPC's shenanigans with respect to callbacks and waits (it switches to the virtual core's wait object at KVM_RUN!?!?), it's not clear whether or not this fixes any bugs. 
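The guard introduced here can be sketched with simplified stand-in types and a hypothetical HAVE_ARCH_WAIT switch (standing in for __KVM_HAVE_ARCH_WQP): the common structure defines the wait object only when the architecture does not provide its own, and common code reaches it exclusively through an accessor.

struct rcuwait { int unused; };

struct vcpu {
#ifndef HAVE_ARCH_WAIT			/* hypothetical stand-in for __KVM_HAVE_ARCH_WQP */
	struct rcuwait wait;
#endif
	struct { struct rcuwait *waitp; } arch;
};

static struct rcuwait *vcpu_get_wait(struct vcpu *v)
{
#ifdef HAVE_ARCH_WAIT
	return v->arch.waitp;		/* the arch owns and initializes its own object */
#else
	return &v->wait;
#endif
}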
Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kvm/powerpc.c | 3 ++- include/linux/kvm_host.h | 2 ++ virt/kvm/async_pf.c | 2 +- virt/kvm/kvm_main.c | 9 ++++++--- 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index e4d23193eba7..6ec97eff9563 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -749,6 +749,7 @@ struct kvm_vcpu_arch { u8 irq_pending; /* Used by XIVE to signal pending guest irqs */ u32 last_inst; + struct rcuwait wait; struct rcuwait *waitp; struct kvmppc_vcore *vcore; int ret; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index f1233500f4dc..7de9ddbc6af1 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -753,7 +753,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) goto out_vcpu_uninit; - vcpu->arch.waitp = &vcpu->wait; + rcuwait_init(&vcpu->arch.wait); + vcpu->arch.waitp = &vcpu->arch.wait; kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id); return 0; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3eb7695aaa73..afacbfb2e482 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -314,7 +314,9 @@ struct kvm_vcpu { struct mutex mutex; struct kvm_run *run; +#ifndef __KVM_HAVE_ARCH_WQP struct rcuwait wait; +#endif struct pid __rcu *pid; int sigset_active; sigset_t sigset; diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index dd777688d14a..ccb35c22785e 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work) trace_kvm_async_pf_completed(addr, cr2_or_gpa); - rcuwait_wake_up(&vcpu->wait); + rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); mmput(mm); kvm_put_kvm(vcpu->kvm); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a26b069a6929..11db44f4110e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -422,7 +422,9 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) vcpu->kvm = kvm; vcpu->vcpu_id = id; vcpu->pid = NULL; +#ifndef __KVM_HAVE_ARCH_WQP rcuwait_init(&vcpu->wait); +#endif kvm_async_pf_vcpu_init(vcpu); vcpu->pre_pcpu = -1; @@ -3284,6 +3286,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) */ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { + struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); ktime_t start, cur, poll_end; bool waited = false; @@ -3322,7 +3325,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) } - prepare_to_rcuwait(&vcpu->wait); + prepare_to_rcuwait(wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); @@ -3332,7 +3335,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) waited = true; schedule(); } - finish_rcuwait(&vcpu->wait); + finish_rcuwait(wait); cur = ktime_get(); if (waited) { vcpu->stat.generic.halt_wait_ns += @@ -3544,7 +3547,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) continue; if (vcpu == me) continue; - if (rcuwait_active(&vcpu->wait) && + if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)) && !vcpu_dy_runnable(vcpu)) continue; if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && From 8df6a61c04038fa481a717fc86af38304aa600a3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:58 -0700 Subject: [PATCH 073/268] KVM: Update halt-polling 
stats if and only if halt-polling was attempted Don't update halt-polling stats if halt-polling wasn't attempted. This is a nop as @poll_ns is guaranteed to be '0' (poll_end == start); in a future patch (to move the histogram stats into the helper), it will avoid to avoid a discrepancy in what is considered a "successful" halt-poll. No functional change intended. Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 11db44f4110e..1a15043ceecb 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3288,6 +3288,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); + bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; ktime_t start, cur, poll_end; bool waited = false; u64 block_ns; @@ -3295,7 +3296,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) kvm_arch_vcpu_blocking(vcpu); start = cur = poll_end = ktime_get(); - if (vcpu->halt_poll_ns && halt_poll_allowed) { + if (do_halt_poll) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); ++vcpu->stat.generic.halt_attempted_poll; @@ -3347,8 +3348,9 @@ out: kvm_arch_vcpu_unblocking(vcpu); block_ns = ktime_to_ns(cur) - ktime_to_ns(start); - update_halt_poll_stats( - vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); + if (do_halt_poll) + update_halt_poll_stats( + vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); if (halt_poll_allowed) { if (!vcpu_valid_wakeup(vcpu)) { From 29e72893cec3b0268e19e7857d10bf79843f94dc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:11:59 -0700 Subject: [PATCH 074/268] KVM: Refactor and document halt-polling stats update helper Add a comment to document that halt-polling is considered successful even if the polling loop itself didn't detect a wake event, i.e. if a wake event was detect in the final kvm_vcpu_check_block(). Invert the param to update helper so that the helper is a dumb function that is "told" whether or not polling was successful, as opposed to determining success based on blocking behavior. Opportunistically tweak the params to the update helper to reduce the line length for the call site so that it fits on a single line, and so that the prototype conforms to the more traditional kernel style. No functional change intended. 
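The inversion amounts to the small refactor sketched below (names are illustrative): the caller, which already knows whether a wake event was found, tells the helper, rather than the helper guessing from blocking behaviour.

#include <stdbool.h>

struct poll_stats { unsigned long long success_ns, fail_ns; };

static void update_poll_stats(struct poll_stats *s, unsigned long long poll_ns,
			      bool success)
{
	if (success)
		s->success_ns += poll_ns;
	else
		s->fail_ns += poll_ns;
}

A caller then reports the outcome it already knows, e.g. update_poll_stats(&stats, end - start, !waited);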
Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1a15043ceecb..a7f9c313d642 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3272,13 +3272,15 @@ out: return ret; } -static inline void -update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited) +static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, + ktime_t end, bool success) { - if (waited) - vcpu->stat.generic.halt_poll_fail_ns += poll_ns; - else + u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); + + if (success) vcpu->stat.generic.halt_poll_success_ns += poll_ns; + else + vcpu->stat.generic.halt_poll_fail_ns += poll_ns; } /* @@ -3348,9 +3350,13 @@ out: kvm_arch_vcpu_unblocking(vcpu); block_ns = ktime_to_ns(cur) - ktime_to_ns(start); + /* + * Note, halt-polling is considered successful so long as the vCPU was + * never actually scheduled out, i.e. even if the wake event arrived + * after of the halt-polling loop itself, but before the full wait. + */ if (do_halt_poll) - update_halt_poll_stats( - vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited); + update_halt_poll_stats(vcpu, start, poll_end, !waited); if (halt_poll_allowed) { if (!vcpu_valid_wakeup(vcpu)) { From 30c9434717fd27e634a157dcdee286703b1f4891 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:00 -0700 Subject: [PATCH 075/268] KVM: Reconcile discrepancies in halt-polling stats Move the halt-polling "success" and histogram stats update into the dedicated helper to fix a discrepancy where the success/fail "time" stats consider polling successful so long as the wait is avoided, but the main "success" and histogram stats consider polling successful if and only if a wake event was detected by the halt-polling loop. Move halt_attempted_poll to the helper as well so that all the stats are updated in a single location. While it's a bit odd to update the stat well after the fact, practically speaking there's no meaningful advantage to updating before polling. Note, there is a functional change in addition to the success vs. fail change. The histogram updates previously called ktime_get() instead of using "cur". But that change is desirable as it means all the stats are now updated with the same polling time, and avoids the extra ktime_get(), which isn't expensive but isn't free either. 
Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-8-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a7f9c313d642..44158a4794d8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3275,12 +3275,23 @@ out: static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, ktime_t end, bool success) { + struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic; u64 poll_ns = ktime_to_ns(ktime_sub(end, start)); - if (success) - vcpu->stat.generic.halt_poll_success_ns += poll_ns; - else - vcpu->stat.generic.halt_poll_fail_ns += poll_ns; + ++vcpu->stat.generic.halt_attempted_poll; + + if (success) { + ++vcpu->stat.generic.halt_successful_poll; + + if (!vcpu_valid_wakeup(vcpu)) + ++vcpu->stat.generic.halt_poll_invalid; + + stats->halt_poll_success_ns += poll_ns; + KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns); + } else { + stats->halt_poll_fail_ns += poll_ns; + KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns); + } } /* @@ -3301,30 +3312,16 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) if (do_halt_poll) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); - ++vcpu->stat.generic.halt_attempted_poll; do { /* * This sets KVM_REQ_UNHALT if an interrupt * arrives. */ - if (kvm_vcpu_check_block(vcpu) < 0) { - ++vcpu->stat.generic.halt_successful_poll; - if (!vcpu_valid_wakeup(vcpu)) - ++vcpu->stat.generic.halt_poll_invalid; - - KVM_STATS_LOG_HIST_UPDATE( - vcpu->stat.generic.halt_poll_success_hist, - ktime_to_ns(ktime_get()) - - ktime_to_ns(start)); + if (kvm_vcpu_check_block(vcpu) < 0) goto out; - } cpu_relax(); poll_end = cur = ktime_get(); } while (kvm_vcpu_can_poll(cur, stop)); - - KVM_STATS_LOG_HIST_UPDATE( - vcpu->stat.generic.halt_poll_fail_hist, - ktime_to_ns(ktime_get()) - ktime_to_ns(start)); } From 75c89e5272fba7f8c8c4e4d7dc23be1b82e4b2d8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:01 -0700 Subject: [PATCH 076/268] KVM: s390: Clear valid_wakeup in kvm_s390_handle_wait(), not in arch hook Move the clearing of valid_wakeup from kvm_arch_vcpu_block_finish() so that a future patch can drop said arch hook. Unlike the other blocking- related arch hooks, vcpu_blocking/unblocking(), vcpu_block_finish() needs to be called even if the KVM doesn't actually block the vCPU. This will allow future patches to differentiate between truly blocking the vCPU and emulating a halt condition without introducing a contradiction. Alternatively, the hook could be renamed to kvm_arch_vcpu_halt_finish(), but there's literally one call site in s390, and future cleanup can also be done to handle valid_wakeup fully within kvm_s390_handle_wait() and allow generic KVM to drop vcpu_valid_wakeup(). No functional change intended. 
Reviewed-by: Christian Borntraeger Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-9-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/s390/kvm/interrupt.c | 1 + arch/s390/kvm/kvm-s390.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 1aa094810f6d..3c8246fa208b 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1336,6 +1336,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) no_timer: srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); + vcpu->valid_wakeup = false; __unset_cpu_idle(vcpu); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 9653e4075b11..8f577c76fcd3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5049,7 +5049,7 @@ static inline unsigned long nonhyp_mask(int i) void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) { - vcpu->valid_wakeup = false; + } static int __init kvm_s390_init(void) From 6109c5a6ab7f38ed8e1beb06a90aa83884c18700 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:03 -0700 Subject: [PATCH 077/268] KVM: arm64: Move vGIC v4 handling for WFI out arch callback hook Move the put and reload of the vGIC out of the block/unblock callbacks and into a dedicated WFI helper. Functionally, this is nearly a nop as the block hook is called at the very beginning of kvm_vcpu_block(), and the only code in kvm_vcpu_block() after the unblock hook is to update the halt-polling controls, i.e. can only affect the next WFI. Back when the arch (un)blocking hooks were added by commits 3217f7c25bca ("KVM: Add kvm_arch_vcpu_{un}blocking callbacks) and d35268da6687 ("arm/arm64: KVM: arch_timer: Only schedule soft timer on vcpu_block"), the hooks were invoked only when KVM was about to "block", i.e. schedule out the vCPU. The use case at the time was to schedule a timer in the host based on the earliest timer in the guest in order to wake the blocking vCPU when the emulated guest timer fired. Commit accb99bcd0ca ("KVM: arm/arm64: Simplify bg_timer programming") reworked the timer logic to be even more precise, by waiting until the vCPU was actually scheduled out, and so move the timer logic from the (un)blocking hooks to vcpu_load/put. In the meantime, the hooks gained usage for enabling vGIC v4 doorbells in commit df9ba95993b9 ("KVM: arm/arm64: GICv4: Use the doorbell interrupt as an unblocking source"), and added related logic for the VMCR in commit 5eeaf10eec39 ("KVM: arm/arm64: Sync ICH_VMCR_EL2 back when about to block"). Finally, commit 07ab0f8d9a12 ("KVM: Call kvm_arch_vcpu_blocking early into the blocking sequence") hoisted the (un)blocking hooks so that they wrapped KVM's halt-polling logic in addition to the core "block" logic. In other words, the original need for arch hooks to take action _only_ in the block path is long since gone. 
Cc: Oliver Upton Cc: Marc Zyngier Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-11-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/kvm_emulate.h | 2 ++ arch/arm64/kvm/arm.c | 52 +++++++++++++++++++--------- arch/arm64/kvm/handle_exit.c | 3 +- 3 files changed, 38 insertions(+), 19 deletions(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index f4871e47b2d0..28acc65ccb17 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -41,6 +41,8 @@ void kvm_inject_vabt(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); +void kvm_vcpu_wfi(struct kvm_vcpu *vcpu); + static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) { return !(vcpu->arch.hcr_el2 & HCR_RW); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b6e65c6eb1d3..ced54a3a3db0 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -360,27 +360,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) { - /* - * If we're about to block (most likely because we've just hit a - * WFI), we need to sync back the state of the GIC CPU interface - * so that we have the latest PMR and group enables. This ensures - * that kvm_arch_vcpu_runnable has up-to-date data to decide - * whether we have pending interrupts. - * - * For the same reason, we want to tell GICv4 that we need - * doorbells to be signalled, should an interrupt become pending. - */ - preempt_disable(); - kvm_vgic_vmcr_sync(vcpu); - vgic_v4_put(vcpu, true); - preempt_enable(); + } void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) { - preempt_disable(); - vgic_v4_load(vcpu); - preempt_enable(); + } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -671,6 +656,39 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu) smp_rmb(); } +/** + * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior + * @vcpu: The VCPU pointer + * + * Suspend execution of a vCPU until a valid wake event is detected, i.e. until + * the vCPU is runnable. The vCPU may or may not be scheduled out, depending + * on when a wake event arrives, e.g. there may already be a pending wake event. + */ +void kvm_vcpu_wfi(struct kvm_vcpu *vcpu) +{ + /* + * Sync back the state of the GIC CPU interface so that we have + * the latest PMR and group enables. This ensures that + * kvm_arch_vcpu_runnable has up-to-date data to decide whether + * we have pending interrupts, e.g. when determining if the + * vCPU should block. + * + * For the same reason, we want to tell GICv4 that we need + * doorbells to be signalled, should an interrupt become pending. 
+ */ + preempt_disable(); + kvm_vgic_vmcr_sync(vcpu); + vgic_v4_put(vcpu, true); + preempt_enable(); + + kvm_vcpu_block(vcpu); + kvm_clear_request(KVM_REQ_UNHALT, vcpu); + + preempt_disable(); + vgic_v4_load(vcpu); + preempt_enable(); +} + static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) { return vcpu->arch.target >= 0; diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 275a27368a04..4794563a506b 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -95,8 +95,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu) } else { trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false); vcpu->stat.wfi_exit_stat++; - kvm_vcpu_block(vcpu); - kvm_clear_request(KVM_REQ_UNHALT, vcpu); + kvm_vcpu_wfi(vcpu); } kvm_incr_pc(vcpu); From f6c60d081e2ccb4655fa90625b630c860a99d036 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:04 -0700 Subject: [PATCH 078/268] KVM: Don't block+unblock when halt-polling is successful Invoke the arch hooks for block+unblock if and only if KVM actually attempts to block the vCPU. The only non-nop implementation is on x86, specifically SVM's AVIC, and there is no need to put the AVIC prior to halt-polling; KVM x86's kvm_vcpu_has_events() will scour the full vIRR to find pending IRQs regardless of whether the AVIC is loaded/"running". The primary motivation is to allow future cleanup to split out "block" from "halt", but this is also likely a small performance boost on x86 SVM when halt-polling is successful. Adjust the post-block path to update "cur" after unblocking, i.e. include AVIC load time in halt_wait_ns and halt_wait_hist, so that the behavior is consistent. Moving just the pre-block arch hook would result in only the AVIC put latency being included in the halt_wait stats. There is no obvious evidence that one way or the other is correct, so just ensure KVM is consistent. Note, x86 has two separate paths for handling APICv with respect to vCPU blocking. VMX uses hooks in x86's vcpu_block(), while SVM uses the arch hooks in kvm_vcpu_block(). Prior to this path, the two paths were more or less functionally identical. That is very much not the case after this patch, as the hooks used by VMX _must_ fire before halt-polling. x86's entire mess will be cleaned up in future patches. 
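With the hooks moved, the ordering in kvm_vcpu_block() is roughly the following (a condensed sketch of the hunks below; stats and bookkeeping omitted):

  start = cur = poll_end = ktime_get();
  if (do_halt_poll) {
          /* stop = now + vcpu->halt_poll_ns */
          do {
                  /* A successful poll jumps to 'out' without invoking the hooks. */
                  if (kvm_vcpu_check_block(vcpu) < 0)
                          goto out;
                  cpu_relax();
                  poll_end = cur = ktime_get();
          } while (kvm_vcpu_can_poll(cur, stop));
  }

  kvm_arch_vcpu_blocking(vcpu);      /* only non-nop on x86 SVM (AVIC) */
  /* prepare_to_rcuwait()/schedule() loop */
  kvm_arch_vcpu_unblocking(vcpu);

  cur = ktime_get();                 /* halt_wait stats now include the unblock hook */
  out:
  block_ns = ktime_to_ns(cur) - ktime_to_ns(start);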
Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-12-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 44158a4794d8..cc68d21a8e58 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3306,8 +3306,6 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) bool waited = false; u64 block_ns; - kvm_arch_vcpu_blocking(vcpu); - start = cur = poll_end = ktime_get(); if (do_halt_poll) { ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); @@ -3324,6 +3322,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) } while (kvm_vcpu_can_poll(cur, stop)); } + kvm_arch_vcpu_blocking(vcpu); prepare_to_rcuwait(wait); for (;;) { @@ -3336,6 +3335,9 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) schedule(); } finish_rcuwait(wait); + + kvm_arch_vcpu_unblocking(vcpu); + cur = ktime_get(); if (waited) { vcpu->stat.generic.halt_wait_ns += @@ -3344,7 +3346,6 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) ktime_to_ns(cur) - ktime_to_ns(poll_end)); } out: - kvm_arch_vcpu_unblocking(vcpu); block_ns = ktime_to_ns(cur) - ktime_to_ns(start); /* From 1460179dcd76a4ae4121e2da29b586f41715dd1d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:05 -0700 Subject: [PATCH 079/268] KVM: x86: Tweak halt emulation helper names to free up kvm_vcpu_halt() Rename a variety of HLT-related helpers to free up the function name "kvm_vcpu_halt" for future use in generic KVM code, e.g. to differentiate between "block" and "halt". No functional change intended. Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-13-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 4 ++-- arch/x86/kvm/x86.c | 13 +++++++------ 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 552ed45f133b..be1ad66cf39a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1696,7 +1696,7 @@ int kvm_emulate_monitor(struct kvm_vcpu *vcpu); int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in); int kvm_emulate_cpuid(struct kvm_vcpu *vcpu); int kvm_emulate_halt(struct kvm_vcpu *vcpu); -int kvm_vcpu_halt(struct kvm_vcpu *vcpu); +int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu); int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu); int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 9c941535f78c..e885f557fcbe 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3603,7 +3603,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) && (vmcs12->guest_rflags & X86_EFLAGS_IF))) { vmx->nested.nested_run_pending = 0; - return kvm_vcpu_halt(vcpu); + return kvm_emulate_halt_noskip(vcpu); } break; case GUEST_ACTIVITY_WAIT_SIPI: diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 01eab6d6ec22..b4defe000db8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4697,7 +4697,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, if (kvm_emulate_instruction(vcpu, 0)) { if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; - return kvm_vcpu_halt(vcpu); + return kvm_emulate_halt_noskip(vcpu); } return 1; } @@ -5368,7 +5368,7 @@ 
static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; - return kvm_vcpu_halt(vcpu); + return kvm_emulate_halt_noskip(vcpu); } /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 73e0e40c94b6..43cabc747318 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8725,7 +8725,7 @@ void kvm_arch_exit(void) #endif } -static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason) +static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) { ++vcpu->stat.halt_exits; if (lapic_in_kernel(vcpu)) { @@ -8737,11 +8737,11 @@ static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason) } } -int kvm_vcpu_halt(struct kvm_vcpu *vcpu) +int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) { - return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); + return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); } -EXPORT_SYMBOL_GPL(kvm_vcpu_halt); +EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); int kvm_emulate_halt(struct kvm_vcpu *vcpu) { @@ -8750,7 +8750,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu) * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered * KVM_EXIT_DEBUG here. */ - return kvm_vcpu_halt(vcpu) && ret; + return kvm_emulate_halt_noskip(vcpu) && ret; } EXPORT_SYMBOL_GPL(kvm_emulate_halt); @@ -8758,7 +8758,8 @@ int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) { int ret = kvm_skip_emulated_instruction(vcpu); - return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret; + return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, + KVM_EXIT_AP_RESET_HOLD) && ret; } EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); From 005467e06b16261ffdd7130ff0b4f0ebd627599a Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:02 -0700 Subject: [PATCH 080/268] KVM: Drop obsolete kvm_arch_vcpu_block_finish() Drop kvm_arch_vcpu_block_finish() now that all arch implementations are nops. No functional change intended. 
Acked-by: Christian Borntraeger Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-10-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/kvm_host.h | 1 - arch/mips/include/asm/kvm_host.h | 1 - arch/powerpc/include/asm/kvm_host.h | 1 - arch/riscv/include/asm/kvm_host.h | 1 - arch/s390/include/asm/kvm_host.h | 2 -- arch/s390/kvm/kvm-s390.c | 5 ----- arch/x86/include/asm/kvm_host.h | 2 -- virt/kvm/kvm_main.c | 1 - 8 files changed, 14 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2a5f7f38006f..0e75277be5d5 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -717,7 +717,6 @@ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} void kvm_arm_init_debug(void); void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 696f6b009377..72b90d45a46e 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -897,7 +897,6 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB int kvm_arch_flush_remote_tlb(struct kvm *kvm); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 6ec97eff9563..6e92b2c7a938 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -865,6 +865,5 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_exit(void) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} #endif /* __POWERPC_KVM_HOST_H__ */ diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 2639b9ee48f9..907fafea787e 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -208,7 +208,6 @@ struct kvm_vcpu_arch { static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} #define KVM_ARCH_WANT_MMU_NOTIFIER diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index a604d51acfc8..a22c9266ea05 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -1010,6 +1010,4 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} -void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu); - #endif diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8f577c76fcd3..dd099d352753 100644 --- 
a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5047,11 +5047,6 @@ static inline unsigned long nonhyp_mask(int i) return 0x0000ffffffffffffUL >> (nonhyp_fai << 4); } -void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) -{ - -} - static int __init kvm_s390_init(void) { int i; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index be1ad66cf39a..c07b30877de0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1931,8 +1931,6 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) static_call_cond(kvm_x86_vcpu_unblocking)(vcpu); } -static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} - static inline int kvm_cpu_get_apicid(int mps_cpu) { #ifdef CONFIG_X86_LOCAL_APIC diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index cc68d21a8e58..53c58606e1e2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3376,7 +3376,6 @@ out: } trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); - kvm_arch_vcpu_block_finish(vcpu); } EXPORT_SYMBOL_GPL(kvm_vcpu_block); From 91b99ea7065786d0bff1c9281b002455dbaeb08b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:06 -0700 Subject: [PATCH 081/268] KVM: Rename kvm_vcpu_block() => kvm_vcpu_halt() Rename kvm_vcpu_block() to kvm_vcpu_halt() in preparation for splitting the actual "block" sequences into a separate helper (to be named kvm_vcpu_block()). x86 will use the standalone block-only path to handle non-halt cases where the vCPU is not runnable. Rename block_ns to halt_ns to match the new function name. No functional change intended. Reviewed-by: David Matlack Reviewed-by: Christian Borntraeger Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-14-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arch_timer.c | 2 +- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/handle_exit.c | 2 +- arch/arm64/kvm/psci.c | 2 +- arch/mips/kvm/emulate.c | 2 +- arch/powerpc/kvm/book3s_pr.c | 2 +- arch/powerpc/kvm/book3s_pr_papr.c | 2 +- arch/powerpc/kvm/booke.c | 2 +- arch/powerpc/kvm/powerpc.c | 2 +- arch/riscv/kvm/vcpu_exit.c | 2 +- arch/s390/kvm/interrupt.c | 2 +- arch/x86/kvm/x86.c | 11 +++++++++-- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 20 +++++++++----------- 14 files changed, 30 insertions(+), 25 deletions(-) diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index d6f4114f1d11..3aeaa79ad4a2 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -467,7 +467,7 @@ out: } /* - * Schedule the background timer before calling kvm_vcpu_block, so that this + * Schedule the background timer before calling kvm_vcpu_halt, so that this * thread is removed from its waitqueue and made runnable when there's a timer * interrupt to handle. */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ced54a3a3db0..77ecc11d67ae 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -681,7 +681,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu) vgic_v4_put(vcpu, true); preempt_enable(); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); preempt_disable(); diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 4794563a506b..6d0baf71aa67 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -82,7 +82,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu) * * WFE: Yield the CPU and come back to this vcpu when the scheduler * decides to. 
- * WFI: Simply call kvm_vcpu_block(), which will halt execution of + * WFI: Simply call kvm_vcpu_halt(), which will halt execution of * world-switches and schedule other host processes until there is an * incoming IRQ or FIQ to the VM. */ diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index ed675fce8fb7..ad6c9ef32928 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -46,7 +46,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu) * specification (ARM DEN 0022A). This means all suspend states * for KVM will preserve the register state. */ - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); return PSCI_RET_SUCCESS; diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 22e745e49b0a..b494d8d39290 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -952,7 +952,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) if (!vcpu->arch.pending_exceptions) { kvm_vz_lose_htimer(vcpu); vcpu->arch.wait = 1; - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); /* * We we are runnable, then definitely go off to user space to diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 30426e8c8cf6..34a801c3604a 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -492,7 +492,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) if (msr & MSR_POW) { if (!vcpu->arch.pending_exceptions) { - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); vcpu->stat.generic.halt_wakeup++; diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index ac14239f3424..1f10e7dfcdd0 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -376,7 +376,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) return kvmppc_h_pr_stuff_tce(vcpu); case H_CEDE: kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); vcpu->stat.generic.halt_wakeup++; return EMULATE_DONE; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53b4c9597c30..06c5830a93f9 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -718,7 +718,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) if (vcpu->arch.shared->msr & MSR_WE) { local_irq_enable(); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); hard_irq_disable(); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 7de9ddbc6af1..2ad0ccd202d5 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -236,7 +236,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) break; case EV_HCALL_TOKEN(EV_IDLE): r = EV_SUCCESS; - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); break; default: diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c index 7f2d742ae4c6..571f319e995a 100644 --- a/arch/riscv/kvm/vcpu_exit.c +++ b/arch/riscv/kvm/vcpu_exit.c @@ -146,7 +146,7 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu, vcpu->stat.wfi_exit_stat++; if (!kvm_arch_vcpu_runnable(vcpu)) { srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_clear_request(KVM_REQ_UNHALT, vcpu); } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 3c8246fa208b..dbabd6f2404e 100644 --- 
a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1335,7 +1335,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime); no_timer: srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); vcpu->valid_wakeup = false; __unset_cpu_idle(vcpu); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 43cabc747318..e3dd76f251e9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8727,6 +8727,13 @@ void kvm_arch_exit(void) static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) { + /* + * The vCPU has halted, e.g. executed HLT. Update the run state if the + * local APIC is in-kernel, the run loop will detect the non-runnable + * state and halt the vCPU. Exit to userspace if the local APIC is + * managed by userspace, in which case userspace is responsible for + * handling wake events. + */ ++vcpu->stat.halt_exits; if (lapic_in_kernel(vcpu)) { vcpu->arch.mp_state = state; @@ -9999,7 +10006,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) if (!kvm_arch_vcpu_runnable(vcpu) && (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_x86_ops.post_block) @@ -10196,7 +10203,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) r = -EINTR; goto out; } - kvm_vcpu_block(vcpu); + kvm_vcpu_halt(vcpu); if (kvm_apic_accept_events(vcpu) < 0) { r = 0; goto out; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index afacbfb2e482..ea3c22d55d56 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1102,7 +1102,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); -void kvm_vcpu_block(struct kvm_vcpu *vcpu); +void kvm_vcpu_halt(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 53c58606e1e2..0d301c95fa1a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3294,17 +3294,14 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, } } -/* - * The vCPU has executed a HLT instruction with in-kernel mode enabled. - */ -void kvm_vcpu_block(struct kvm_vcpu *vcpu) +void kvm_vcpu_halt(struct kvm_vcpu *vcpu) { struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; ktime_t start, cur, poll_end; bool waited = false; - u64 block_ns; + u64 halt_ns; start = cur = poll_end = ktime_get(); if (do_halt_poll) { @@ -3346,7 +3343,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) ktime_to_ns(cur) - ktime_to_ns(poll_end)); } out: - block_ns = ktime_to_ns(cur) - ktime_to_ns(start); + /* The total time the vCPU was "halted", including polling time. 
*/ + halt_ns = ktime_to_ns(cur) - ktime_to_ns(start); /* * Note, halt-polling is considered successful so long as the vCPU was @@ -3360,24 +3358,24 @@ out: if (!vcpu_valid_wakeup(vcpu)) { shrink_halt_poll_ns(vcpu); } else if (vcpu->kvm->max_halt_poll_ns) { - if (block_ns <= vcpu->halt_poll_ns) + if (halt_ns <= vcpu->halt_poll_ns) ; /* we had a long block, shrink polling */ else if (vcpu->halt_poll_ns && - block_ns > vcpu->kvm->max_halt_poll_ns) + halt_ns > vcpu->kvm->max_halt_poll_ns) shrink_halt_poll_ns(vcpu); /* we had a short halt and our poll time is too small */ else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && - block_ns < vcpu->kvm->max_halt_poll_ns) + halt_ns < vcpu->kvm->max_halt_poll_ns) grow_halt_poll_ns(vcpu); } else { vcpu->halt_poll_ns = 0; } } - trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu)); + trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu)); } -EXPORT_SYMBOL_GPL(kvm_vcpu_block); +EXPORT_SYMBOL_GPL(kvm_vcpu_halt); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) { From fac4268894394213127e43856f41d10f29131e69 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:07 -0700 Subject: [PATCH 082/268] KVM: Split out a kvm_vcpu_block() helper from kvm_vcpu_halt() Factor out the "block" part of kvm_vcpu_halt() so that x86 can emulate non-halt wait/sleep/block conditions that should not be subjected to halt-polling. No functional change intended. Reviewed-by: Christian Borntraeger Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-15-seanjc@google.com> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 52 +++++++++++++++++++++++++++------------- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ea3c22d55d56..bd13c5b5bd1d 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1103,6 +1103,7 @@ void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); void kvm_vcpu_halt(struct kvm_vcpu *vcpu); +bool kvm_vcpu_block(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 0d301c95fa1a..370b95ad5f03 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3272,6 +3272,35 @@ out: return ret; } +/* + * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is + * pending. This is mostly used when halting a vCPU, but may also be used + * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI. + */ +bool kvm_vcpu_block(struct kvm_vcpu *vcpu) +{ + struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); + bool waited = false; + + kvm_arch_vcpu_blocking(vcpu); + + prepare_to_rcuwait(wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + if (kvm_vcpu_check_block(vcpu) < 0) + break; + + waited = true; + schedule(); + } + finish_rcuwait(wait); + + kvm_arch_vcpu_unblocking(vcpu); + + return waited; +} + static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, ktime_t end, bool success) { @@ -3294,9 +3323,14 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, } } +/* + * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... 
If halt + * polling is enabled, busy wait for a short time before blocking to avoid the + * expensive block+unblock sequence if a wake event arrives soon after the vCPU + * is halted. + */ void kvm_vcpu_halt(struct kvm_vcpu *vcpu) { - struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool halt_poll_allowed = !kvm_arch_no_poll(vcpu); bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns; ktime_t start, cur, poll_end; @@ -3319,21 +3353,7 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu) } while (kvm_vcpu_can_poll(cur, stop)); } - kvm_arch_vcpu_blocking(vcpu); - - prepare_to_rcuwait(wait); - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - - if (kvm_vcpu_check_block(vcpu) < 0) - break; - - waited = true; - schedule(); - } - finish_rcuwait(wait); - - kvm_arch_vcpu_unblocking(vcpu); + waited = kvm_vcpu_block(vcpu); cur = ktime_get(); if (waited) { From c3858335c711569b82a234a560dc19247e8f3fcc Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Fri, 8 Oct 2021 19:12:08 -0700 Subject: [PATCH 083/268] KVM: stats: Add stat to detect if vcpu is currently blocking Add a "blocking" stat that userspace can use to detect the case where a vCPU is not being run because of an vCPU/guest action, e.g. HLT or WFS on x86, WFI on arm64, etc... Current guest/host/halt stats don't show this well, e.g. if a guest halts for a long period of time then the vCPU could could appear pathologically blocked due to a host condition, when in reality the vCPU has been put into a not-runnable state by the guest. Originally-by: Cannon Matthews Suggested-by: Sean Christopherson Reviewed-by: David Matlack Signed-off-by: Jing Zhang [sean: renamed stat to "blocking", massaged changelog] Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-16-seanjc@google.com> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 3 ++- include/linux/kvm_types.h | 1 + virt/kvm/kvm_main.c | 4 ++++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index bd13c5b5bd1d..dc7740cafea7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1587,7 +1587,8 @@ struct _kvm_stats_desc { STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \ HALT_POLL_HIST_COUNT), \ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ - HALT_POLL_HIST_COUNT) + HALT_POLL_HIST_COUNT), \ + STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking) extern struct dentry *kvm_debugfs_dir; diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 234eab059839..888ef12862c9 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -87,6 +87,7 @@ struct kvm_vcpu_stat_generic { u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT]; u64 halt_wait_hist[HALT_POLL_HIST_COUNT]; + u64 blocking; }; #define KVM_STATS_NAME_SIZE 48 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 370b95ad5f03..2630db6e8cb5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3282,6 +3282,8 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu) struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); bool waited = false; + vcpu->stat.generic.blocking = 1; + kvm_arch_vcpu_blocking(vcpu); prepare_to_rcuwait(wait); @@ -3298,6 +3300,8 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu) kvm_arch_vcpu_unblocking(vcpu); + vcpu->stat.generic.blocking = 0; + return waited; } From 109a98260b533722d1190dcfa18447dd39fee5ff Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:09 -0700 Subject: [PATCH 084/268] KVM: 
Don't redo ktime_get() when calculating halt-polling stop/deadline Calculate the halt-polling "stop" time using "start" instead of redoing ktime_get(). In practice, the numbers involved are in the noise (e.g., in the happy case where hardware correctly predicts do_halt_poll and there are no interrupts, "start" is probably only a few cycles old) and either approach is perfectly ok. But it's more precise to count any extra latency toward the halt-polling time. Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-17-seanjc@google.com> Signed-off-by: Paolo Bonzini --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2630db6e8cb5..97bde32082d0 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3343,7 +3343,7 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu) start = cur = poll_end = ktime_get(); if (do_halt_poll) { - ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); + ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns); do { /* From c91d44971459073537874fcdd2f445e94cfb4f07 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:10 -0700 Subject: [PATCH 085/268] KVM: x86: Directly block (instead of "halting") UNINITIALIZED vCPUs Go directly to kvm_vcpu_block() when handling the case where userspace attempts to run an UNINITIALIZED vCPU. The vCPU is not halted, nor is it likely that halt-polling will be successful in this case. Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-18-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e3dd76f251e9..9d628ec38414 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10203,7 +10203,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) r = -EINTR; goto out; } - kvm_vcpu_halt(vcpu); + kvm_vcpu_block(vcpu); if (kvm_apic_accept_events(vcpu) < 0) { r = 0; goto out; From cdafece4b964a27b2d3d76bf5725b49415bbaaea Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:11 -0700 Subject: [PATCH 086/268] KVM: x86: Invoke kvm_vcpu_block() directly for non-HALTED wait states Call kvm_vcpu_block() directly for all wait states except HALTED so that kvm_vcpu_halt() is no longer a misnomer on x86. Functionally, this means KVM will never attempt halt-polling or adjust vcpu->halt_poll_ns for INIT_RECEIVED (a.k.a. Wait-For-SIPI (WFS)) or AP_RESET_HOLD; UNINITIALIZED is handled in kvm_arch_vcpu_ioctl_run(), and x86 doesn't use any other "wait" states. As mentioned above, the motivation of this is purely so that "halt" isn't overloaded on x86, e.g. in KVM's stats. Skipping halt-polling for WFS (and RESET_HOLD) has no meaningful effect on guest performance as there are typically single-digit numbers of INIT-SIPI sequences per AP vCPU, per boot, versus thousands of HLTs just to boot to console. 
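The dispatch in vcpu_block() then boils down to (condensed from the hunk below):

  if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
          kvm_vcpu_halt(vcpu);     /* halt-polling + block */
  else
          kvm_vcpu_block(vcpu);    /* WFS, AP_RESET_HOLD: block only, no polling */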
Reviewed-by: David Matlack Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-19-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9d628ec38414..50450ebe709f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10006,7 +10006,10 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) if (!kvm_arch_vcpu_runnable(vcpu) && (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - kvm_vcpu_halt(vcpu); + if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) + kvm_vcpu_halt(vcpu); + else + kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); if (kvm_x86_ops.post_block) From d92a5d1c6c757f659ffb9c2c2e65fcf3d571c14e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:12 -0700 Subject: [PATCH 087/268] KVM: Add helpers to wake/query blocking vCPU Add helpers to wake and query a blocking vCPU. In addition to providing nice names, the helpers reduce the probability of KVM neglecting to use kvm_arch_vcpu_get_wait(). No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-20-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arch_timer.c | 3 +-- arch/arm64/kvm/arm.c | 2 +- arch/x86/kvm/lapic.c | 2 +- include/linux/kvm_host.h | 14 ++++++++++++++ virt/kvm/async_pf.c | 2 +- virt/kvm/kvm_main.c | 8 ++------ 6 files changed, 20 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 3aeaa79ad4a2..6e542e2eae32 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -649,7 +649,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map; - struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); if (unlikely(!timer->enabled)) return; @@ -672,7 +671,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) if (map.emul_ptimer) soft_timer_cancel(&map.emul_ptimer->hrtimer); - if (rcuwait_active(wait)) + if (kvm_vcpu_is_blocking(vcpu)) kvm_timer_blocking(vcpu); /* diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 77ecc11d67ae..14106a7c75b5 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -631,7 +631,7 @@ void kvm_arm_resume_guest(struct kvm *kvm) kvm_for_each_vcpu(i, vcpu, kvm) { vcpu->arch.pause = false; - rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); + __kvm_vcpu_wake_up(vcpu); } } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 451e80306b51..bbac8477b3ec 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1931,7 +1931,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) /* If the preempt notifier has already run, it also called apic_timer_expired */ if (!apic->lapic_timer.hv_timer_in_use) goto out; - WARN_ON(rcuwait_active(&vcpu->wait)); + WARN_ON(kvm_vcpu_is_blocking(vcpu)); apic_timer_expired(apic, false); cancel_hv_timer(apic); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index dc7740cafea7..f8ed799e8674 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1286,6 +1286,20 @@ static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) #endif } +/* + * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns + * true if the vCPU was blocking and was awakened, false otherwise. 
+ */ +static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) +{ + return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); +} + +static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu) +{ + return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)); +} + #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED /* * returns true if the virtual interrupt controller is initialized and diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index ccb35c22785e..9bfe1d6f6529 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work) trace_kvm_async_pf_completed(addr, cr2_or_gpa); - rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu)); + __kvm_vcpu_wake_up(vcpu); mmput(mm); kvm_put_kvm(vcpu->kvm); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 97bde32082d0..f3acff708bf5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3403,10 +3403,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt); bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu) { - struct rcuwait *waitp; - - waitp = kvm_arch_vcpu_get_wait(vcpu); - if (rcuwait_wake_up(waitp)) { + if (__kvm_vcpu_wake_up(vcpu)) { WRITE_ONCE(vcpu->ready, true); ++vcpu->stat.generic.halt_wakeup; return true; @@ -3574,8 +3571,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) continue; if (vcpu == me) continue; - if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)) && - !vcpu_dy_runnable(vcpu)) + if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu)) continue; if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode && !kvm_arch_dy_has_pending_interrupt(vcpu) && From 057aa61bc992f2d27218b6558b0115d5623f1a7b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:13 -0700 Subject: [PATCH 088/268] KVM: VMX: Skip Posted Interrupt updates if APICv is hard disabled Explicitly skip posted interrupt updates if APICv is disabled in all of KVM, or if the guest doesn't have an in-kernel APIC. The PI descriptor is kept up-to-date if APICv is inhibited, e.g. so that re-enabling APICv doesn't require a bunch of updates, but neither the module param nor the APIC type can be changed on-the-fly. Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-21-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 41f946e2123e..ee4ddc6e268b 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -29,11 +29,14 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) unsigned int dest; /* - * In case of hot-plug or hot-unplug, we may have to undo - * vmx_vcpu_pi_put even if there is no assigned device. And we - * always keep PI.NDST up to date for simplicity: it makes the - * code easier, and CPU migration is not a fast path. + * To simplify hot-plug and dynamic toggling of APICv, keep PI.NDST and + * PI.SN up-to-date even if there is no assigned device or if APICv is + * deactivated due to a dynamic inhibit bit, e.g. for Hyper-V's SyncIC. */ + if (!enable_apicv || !lapic_in_kernel(vcpu)) + return; + + /* Nothing to do if PI.SN and PI.NDST both have the desired value. 
*/ if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) return; From c95717218add161f3e93bd454743506ed5bdd8e1 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:15 -0700 Subject: [PATCH 089/268] KVM: VMX: Drop unnecessary PI logic to handle impossible conditions Drop sanity checks on the validity of the previous pCPU when handling vCPU block/unlock for posted interrupts. The intention behind the sanity checks is to avoid memory corruption in case of a race or incorrect locking, but the code has been stable for a few years now and the checks get in the way of eliminating kvm_vcpu.pre_cpu. Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-23-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index ee4ddc6e268b..f15d4a7450a5 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -123,12 +123,10 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) } while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control); - if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - list_del(&vcpu->blocked_vcpu_list); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - vcpu->pre_pcpu = -1; - } + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + list_del(&vcpu->blocked_vcpu_list); + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + vcpu->pre_pcpu = -1; } /* @@ -156,14 +154,12 @@ int pi_pre_block(struct kvm_vcpu *vcpu) WARN_ON(irqs_disabled()); local_irq_disable(); - if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { - vcpu->pre_pcpu = vcpu->cpu; - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - list_add_tail(&vcpu->blocked_vcpu_list, - &per_cpu(blocked_vcpu_on_cpu, - vcpu->pre_pcpu)); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - } + + vcpu->pre_pcpu = vcpu->cpu; + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + list_add_tail(&vcpu->blocked_vcpu_list, + &per_cpu(blocked_vcpu_on_cpu, vcpu->pre_pcpu)); + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); do { old.control = new.control = pi_desc->control; From 74ba5bc872d3fb173b94fe9a1b8f6eaa807fc4ad Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:16 -0700 Subject: [PATCH 090/268] KVM: VMX: Use boolean returns for Posted Interrupt "test" helpers Return bools instead of ints for the posted interrupt "test" helpers. The bit position of the flag being test does not matter to the callers, and is in fact lost by virtue of test_bit() itself returning a bool. Returning ints is potentially dangerous, e.g. "pi_test_on(pi_desc) == 1" is safe-ish because ON is bit 0 and thus any sane implementation of pi_test_on() will work, but for SN (bit 1), checking "== 1" would rely on pi_test_on() to return 0 or 1, a.k.a. bools, as opposed to 0 or 2 (the positive bit position). 
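To make the hazard concrete, here is a stand-alone toy version of the pitfall (not KVM code; the helper name and values are made up for illustration): a "test" helper that returns the raw masked value works for bit 0, but an "== 1" comparison silently fails for bit 1.

  #include <stdio.h>

  #define POSTED_INTR_ON  0
  #define POSTED_INTR_SN  1

  /* Hypothetical helper that returns the raw masked value instead of a bool. */
  static int pi_test_sn_raw(unsigned long control)
  {
          return control & (1UL << POSTED_INTR_SN);
  }

  int main(void)
  {
          unsigned long control = 1UL << POSTED_INTR_SN;   /* SN is set */

          if (pi_test_sn_raw(control) == 1)
                  printf("SN set\n");               /* never printed: value is 2 */
          if (pi_test_sn_raw(control))
                  printf("SN set (bool check)\n");  /* printed, as intended */
          return 0;
  }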
Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-24-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 4 ++-- arch/x86/kvm/vmx/posted_intr.h | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index f15d4a7450a5..8abd5d891bde 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -189,7 +189,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu) new.control) != old.control); /* We should not block the vCPU if an interrupt is posted for it. */ - if (pi_test_on(pi_desc) == 1) + if (pi_test_on(pi_desc)) __pi_post_block(vcpu); local_irq_enable(); @@ -220,7 +220,7 @@ void pi_wakeup_handler(void) blocked_vcpu_list) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - if (pi_test_on(pi_desc) == 1) + if (pi_test_on(pi_desc)) kvm_vcpu_kick(vcpu); } spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h index 7f7b2326caf5..36ae035f14aa 100644 --- a/arch/x86/kvm/vmx/posted_intr.h +++ b/arch/x86/kvm/vmx/posted_intr.h @@ -40,7 +40,7 @@ static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc) (unsigned long *)&pi_desc->control); } -static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) +static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) { return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } @@ -74,13 +74,13 @@ static inline void pi_clear_sn(struct pi_desc *pi_desc) (unsigned long *)&pi_desc->control); } -static inline int pi_test_on(struct pi_desc *pi_desc) +static inline bool pi_test_on(struct pi_desc *pi_desc) { return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } -static inline int pi_test_sn(struct pi_desc *pi_desc) +static inline bool pi_test_sn(struct pi_desc *pi_desc) { return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); From 29802380b6793eabcac648e1c097c7bd6333f3d4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:17 -0700 Subject: [PATCH 091/268] KVM: VMX: Drop pointless PI.NDST update when blocking Don't update Posted Interrupt's NDST, a.k.a. the target pCPU, in the pre-block path, as NDST is guaranteed to be up-to-date. The comment about the vCPU being preempted during the update is simply wrong, as the update path runs with IRQs disabled (from before snapshotting vcpu->cpu, until after the update completes). Since commit 8b306e2f3c41 ("KVM: VMX: avoid double list add with VT-d posted interrupts", 2017-09-27) The vCPU can get preempted _before_ the update starts, but not during. And if the vCPU is preempted before, vmx_vcpu_pi_load() is responsible for updating NDST when the vCPU is scheduled back in. In that case, the check against the wakeup vector in vmx_vcpu_pi_load() cannot be true as that would require the notification vector to have been set to the wakeup vector _before_ blocking. Opportunistically switch to using vcpu->cpu for the list/lock lookups, which do not need pre_pcpu since the same commit. 
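Condensed, pi_pre_block() at this point in the series looks roughly like this, which is why NDST cannot go stale while the descriptor is rewritten:

  local_irq_disable();                  /* no preemption/migration from here on */

  vcpu->pre_pcpu = vcpu->cpu;           /* snapshot the current pCPU */
  /* add the vCPU to this pCPU's blocked list under the per-CPU spinlock */

  do {
          old.control = new.control = pi_desc->control;
          /* NDST already points at vcpu->cpu; only the notification vector changes. */
          new.nv = POSTED_INTR_WAKEUP_VECTOR;
  } while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control);

  if (pi_test_on(pi_desc))
          __pi_post_block(vcpu);
  local_irq_enable();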
Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-25-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 8abd5d891bde..bb93e9637494 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -135,7 +135,6 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) * - Store the vCPU to the wakeup list, so when interrupts happen * we can find the right vCPU to wake up. * - Change the Posted-interrupt descriptor as below: - * 'NDST' <-- vcpu->pre_pcpu * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR * - If 'ON' is set during this process, which means at least one * interrupt is posted for this vCPU, we cannot block it, in @@ -144,7 +143,6 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) */ int pi_pre_block(struct kvm_vcpu *vcpu) { - unsigned int dest; struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); @@ -156,10 +154,10 @@ int pi_pre_block(struct kvm_vcpu *vcpu) local_irq_disable(); vcpu->pre_pcpu = vcpu->cpu; - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu)); list_add_tail(&vcpu->blocked_vcpu_list, - &per_cpu(blocked_vcpu_on_cpu, vcpu->pre_pcpu)); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + &per_cpu(blocked_vcpu_on_cpu, vcpu->cpu)); + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu)); do { old.control = new.control = pi_desc->control; @@ -168,21 +166,6 @@ int pi_pre_block(struct kvm_vcpu *vcpu) "Warning: SN field of posted-interrupts " "is set before blocking\n"); - /* - * Since vCPU can be preempted during this process, - * vcpu->cpu could be different with pre_pcpu, we - * need to set pre_pcpu as the destination of wakeup - * notification event, then we can find the right vCPU - * to wakeup in wakeup handler if interrupts happen - * when the vCPU is in blocked state. - */ - dest = cpu_physical_id(vcpu->pre_pcpu); - - if (x2apic_mode) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - /* set 'NV' to 'wakeup vector' */ new.nv = POSTED_INTR_WAKEUP_VECTOR; } while (cmpxchg64(&pi_desc->control, old.control, From 89ef0f21cf96200dfa46cec92228ef435681589f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:18 -0700 Subject: [PATCH 092/268] KVM: VMX: Save/restore IRQs (instead of CLI/STI) during PI pre/post block Save/restore IRQs when disabling IRQs in posted interrupt pre/post block in preparation for moving the code into vcpu_put/load(), where it would be called with IRQs already disabled. No functional changed intended. 
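In short, the pattern changes from assuming IRQs are enabled to being agnostic about the caller's IRQ state (sketch of the hunks below):

  /* Before: only legal with IRQs enabled. */
  WARN_ON(irqs_disabled());
  local_irq_disable();
  /* ... */
  local_irq_enable();

  /* After: also safe from vcpu_put/load(), where IRQs are already disabled. */
  unsigned long flags;

  local_irq_save(flags);
  /* ... */
  local_irq_restore(flags);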
Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-26-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index bb93e9637494..b72dbe80f87a 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -145,13 +145,13 @@ int pi_pre_block(struct kvm_vcpu *vcpu) { struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); + unsigned long flags; if (!vmx_can_use_vtd_pi(vcpu->kvm) || vmx_interrupt_blocked(vcpu)) return 0; - WARN_ON(irqs_disabled()); - local_irq_disable(); + local_irq_save(flags); vcpu->pre_pcpu = vcpu->cpu; spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu)); @@ -175,19 +175,20 @@ int pi_pre_block(struct kvm_vcpu *vcpu) if (pi_test_on(pi_desc)) __pi_post_block(vcpu); - local_irq_enable(); + local_irq_restore(flags); return (vcpu->pre_pcpu == -1); } void pi_post_block(struct kvm_vcpu *vcpu) { + unsigned long flags; + if (vcpu->pre_pcpu == -1) return; - WARN_ON(irqs_disabled()); - local_irq_disable(); + local_irq_save(flags); __pi_post_block(vcpu); - local_irq_enable(); + local_irq_restore(flags); } /* From cfb0e1306a3790eb055ebf7cdb7b0ee8a23e9b6e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:19 -0700 Subject: [PATCH 093/268] KVM: VMX: Read Posted Interrupt "control" exactly once per loop iteration Use READ_ONCE() when loading the posted interrupt descriptor control field to ensure "old" and "new" have the same base value. If the compiler emits separate loads, and loads into "new" before "old", KVM could theoretically drop the ON bit if it were set between the loads. Fixes: 28b835d60fcc ("KVM: Update Posted-Interrupts Descriptor when vCPU is preempted") Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-27-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index b72dbe80f87a..a1ce598e9544 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -54,7 +54,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) /* The full case. */ do { - old.control = new.control = pi_desc->control; + old.control = new.control = READ_ONCE(pi_desc->control); dest = cpu_physical_id(cpu); @@ -107,7 +107,7 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) unsigned int dest; do { - old.control = new.control = pi_desc->control; + old.control = new.control = READ_ONCE(pi_desc->control); WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, "Wakeup handler not enabled while the VCPU is blocked\n"); @@ -160,7 +160,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu) spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu)); do { - old.control = new.control = pi_desc->control; + old.control = new.control = READ_ONCE(pi_desc->control); WARN((pi_desc->sn == 1), "Warning: SN field of posted-interrupts " From 724b3962ef808388005b860450bfbef6bade26b3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:20 -0700 Subject: [PATCH 094/268] KVM: VMX: Move Posted Interrupt ndst computation out of write loop Hoist the CPU => APIC ID conversion for the Posted Interrupt descriptor out of the loop to write the descriptor, preemption is disabled so the CPU won't change, and if the APIC ID changes KVM has bigger problems. 
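Condensed from the hunk below, the vmx_vcpu_pi_load() write loop becomes:

  /* Compute the destination once; preemption is disabled, so the CPU is stable. */
  dest = cpu_physical_id(cpu);
  if (!x2apic_mode)
          dest = (dest << 8) & 0xFF00;

  do {
          old.control = new.control = READ_ONCE(pi_desc->control);
          new.ndst = dest;
          new.sn = 0;
  } while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control);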
No functional change intended. Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-28-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index a1ce598e9544..e29a40042978 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -52,17 +52,15 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) goto after_clear_sn; } - /* The full case. */ + /* The full case. Set the new destination and clear SN. */ + dest = cpu_physical_id(cpu); + if (!x2apic_mode) + dest = (dest << 8) & 0xFF00; + do { old.control = new.control = READ_ONCE(pi_desc->control); - dest = cpu_physical_id(cpu); - - if (x2apic_mode) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; - + new.ndst = dest; new.sn = 0; } while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control); @@ -106,17 +104,16 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) struct pi_desc old, new; unsigned int dest; + dest = cpu_physical_id(vcpu->cpu); + if (!x2apic_mode) + dest = (dest << 8) & 0xFF00; + do { old.control = new.control = READ_ONCE(pi_desc->control); WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, "Wakeup handler not enabled while the VCPU is blocked\n"); - dest = cpu_physical_id(vcpu->cpu); - - if (x2apic_mode) - new.ndst = dest; - else - new.ndst = (dest << 8) & 0xFF00; + new.ndst = dest; /* set 'NV' to 'notification vector' */ new.nv = POSTED_INTR_VECTOR; From baed82c8e4893a3258267dad198e04691d2f7c09 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 8 Oct 2021 19:12:21 -0700 Subject: [PATCH 095/268] KVM: VMX: Remove vCPU from PI wakeup list before updating PID.NV Remove the vCPU from the wakeup list before updating the notification vector in the posted interrupt post-block helper. There is no need to wake the current vCPU as it is by definition not blocking. Practically speaking this is a nop as it only shaves a few meager cycles in the unlikely case that the vCPU was migrated and the previous pCPU gets a wakeup IRQ right before PID.NV is updated. The real motivation is to allow for more readable code in the future, when post-block is merged with vmx_vcpu_pi_load(), at which point removal from the list will be conditional on the old notification vector. Opportunistically add comments to document why KVM has a per-CPU spinlock that, at first glance, appears to be taken only on the owning CPU. Explicitly call out that the spinlock must be taken with IRQs disabled, a detail that was "lost" when KVM switched from spin_lock_irqsave() to spin_lock(), with IRQs disabled for the entirety of the relevant path. Signed-off-by: Sean Christopherson Message-Id: <20211009021236.4122790-29-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index e29a40042978..4db2b14ee7c6 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -11,10 +11,22 @@ #include "vmx.h" /* - * We maintain a per-CPU linked-list of vCPU, so in wakeup_handler() we - * can find which vCPU should be waken up. + * Maintain a per-CPU list of vCPUs that need to be awakened by wakeup_handler() + * when a WAKEUP_VECTOR interrupted is posted. 
vCPUs are added to the list when + * the vCPU is scheduled out and is blocking (e.g. in HLT) with IRQs enabled. + * The vCPUs posted interrupt descriptor is updated at the same time to set its + * notification vector to WAKEUP_VECTOR, so that posted interrupt from devices + * wake the target vCPUs. vCPUs are removed from the list and the notification + * vector is reset when the vCPU is scheduled in. */ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); +/* + * Protect the per-CPU list with a per-CPU spinlock to handle task migration. + * When a blocking vCPU is awakened _and_ migrated to a different pCPU, the + * ->sched_in() path will need to take the vCPU off the list of the _previous_ + * CPU. IRQs must be disabled when taking this lock, otherwise deadlock will + * occur if a wakeup IRQ arrives and attempts to acquire the lock. + */ static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) @@ -104,6 +116,14 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) struct pi_desc old, new; unsigned int dest; + /* + * Remove the vCPU from the wakeup list of the _previous_ pCPU, which + * will not be the same as the current pCPU if the task was migrated. + */ + spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + list_del(&vcpu->blocked_vcpu_list); + spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + dest = cpu_physical_id(vcpu->cpu); if (!x2apic_mode) dest = (dest << 8) & 0xFF00; @@ -120,9 +140,6 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) } while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control); - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - list_del(&vcpu->blocked_vcpu_list); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); vcpu->pre_pcpu = -1; } From b1d66dad65dcc8a6e5942db27027a086aa4f5c16 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Wed, 17 Nov 2021 16:03:04 +0800 Subject: [PATCH 096/268] KVM: x86/svm: Add module param to control PMU virtualization For Intel, the guest PMU can be disabled via clearing the PMU CPUID. For AMD, all hw implementations support the base set of four performance counters, with current mainstream hardware indicating the presence of two additional counters via X86_FEATURE_PERFCTR_CORE. In the virtualized world, the AMD guest driver may detect the presence of at least one counter MSR. Most hypervisor vendors would introduce a module param (like lbrv for svm) to disable PMU for all guests. Another control proposal per-VM is to pass PMU disable information via MSR_IA32_PERF_CAPABILITIES or one bit in CPUID Fn4000_00[FF:00]. Both of methods require some guest-side changes, so a module parameter may not be sufficiently granular, but practical enough. 
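As a stand-alone illustration of the coarse, host-wide control described above (not the kvm_amd code; module and parameter names are made up), a read-only bool module parameter is checked once at init time before the capability is advertised, so loading with pmu=0 disables the feature for every guest:

#include <linux/module.h>
#include <linux/init.h>

static bool pmu = true;
module_param(pmu, bool, 0444);  /* visible in sysfs, fixed after load */
MODULE_PARM_DESC(pmu, "Enable PMU virtualization (bool, default: true)");

static int __init pmu_param_demo_init(void)
{
    if (!pmu) {
        pr_info("pmu_param_demo: PMU virtualization is disabled\n");
        return 0;   /* skip advertising the capability */
    }
    pr_info("pmu_param_demo: PMU virtualization is enabled\n");
    return 0;
}

static void __exit pmu_param_demo_exit(void)
{
}

module_init(pmu_param_demo_init);
module_exit(pmu_param_demo_exit);
MODULE_LICENSE("GPL");
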
Signed-off-by: Like Xu Message-Id: <20211117080304.38989-1-likexu@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/svm/pmu.c | 4 ++++ arch/x86/kvm/svm/svm.c | 11 +++++++++++ arch/x86/kvm/svm/svm.h | 1 + 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 07e9215e911d..0b920e12bb6d 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -523,7 +523,7 @@ void kvm_set_cpu_caps(void) F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) | - F(TOPOEXT) | F(PERFCTR_CORE) + F(TOPOEXT) | 0 /* PERFCTR_CORE */ ); kvm_cpu_cap_mask(CPUID_8000_0001_EDX, diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index b4095dfeeee6..0cf05e4caa4c 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -16,6 +16,7 @@ #include "cpuid.h" #include "lapic.h" #include "pmu.h" +#include "svm.h" enum pmu_type { PMU_TYPE_COUNTER = 0, @@ -100,6 +101,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, { struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); + if (!pmu) + return NULL; + switch (msr) { case MSR_F15H_PERF_CTL0: case MSR_F15H_PERF_CTL1: diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 907ba85609a2..64a10cdb2356 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -192,6 +192,10 @@ module_param(vgif, int, 0444); static int lbrv = true; module_param(lbrv, int, 0444); +/* enable/disable PMU virtualization */ +bool pmu = true; +module_param(pmu, bool, 0444); + static int tsc_scaling = true; module_param(tsc_scaling, int, 0444); @@ -954,6 +958,10 @@ static __init void svm_set_cpu_caps(void) boot_cpu_has(X86_FEATURE_AMD_SSBD)) kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); + /* AMD PMU PERFCTR_CORE CPUID */ + if (pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) + kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE); + /* CPUID 0x8000001F (SME/SEV features) */ sev_set_cpu_caps(); } @@ -1087,6 +1095,9 @@ static __init int svm_hardware_setup(void) pr_info("LBR virtualization supported\n"); } + if (!pmu) + pr_info("PMU virtualization is disabled\n"); + svm_set_cpu_caps(); /* diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 929bd60d754d..a57390473013 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -32,6 +32,7 @@ extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; extern bool npt_enabled; extern bool intercept_smi; +extern bool pmu; /* * Clean bits in VMCB. From 2c5653caecc4807b8abfe9c41880ac38417be7bf Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:43:53 +0800 Subject: [PATCH 097/268] KVM: X86: Ensure that dirty PDPTRs are loaded For VMX with EPT, dirty PDPTRs need to be loaded before the next vmentry via vmx_load_mmu_pgd() But not all paths that call load_pdptrs() will cause vmx_load_mmu_pgd() to be invoked. Normally, kvm_mmu_reset_context() is used to cause KVM_REQ_LOAD_MMU_PGD, but sometimes it is skipped: * commit d81135a57aa6("KVM: x86: do not reset mmu if CR0.CD and CR0.NW are changed") skips kvm_mmu_reset_context() after load_pdptrs() when changing CR0.CD and CR0.NW. * commit 21823fbda552("KVM: x86: Invalidate all PGDs for the current PCID on MOV CR3 w/ flush") skips KVM_REQ_LOAD_MMU_PGD after load_pdptrs() when rewriting the CR3 with the same value. * commit a91a7c709600("KVM: X86: Don't reset mmu context when toggling X86_CR4_PGE") skips kvm_mmu_reset_context() after load_pdptrs() when changing CR4.PGE. 
Fixes: d81135a57aa6 ("KVM: x86: do not reset mmu if CR0.CD and CR0.NW are changed") Fixes: 21823fbda552 ("KVM: x86: Invalidate all PGDs for the current PCID on MOV CR3 w/ flush") Fixes: a91a7c709600 ("KVM: X86: Don't reset mmu context when toggling X86_CR4_PGE") Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-2-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 50450ebe709f..ba2704fce181 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -830,6 +830,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); + kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); vcpu->arch.pdptrs_from_userspace = false; return 1; From c0d6956e43054e397f4f661d0a62d490278ddef9 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:43:54 +0800 Subject: [PATCH 098/268] KVM: VMX: Mark VCPU_EXREG_PDPTR available in ept_save_pdptrs() mmu->pdptrs[] and vmcs.GUEST_PDPTR[0-3] are synced, so mmu->pdptrs is available and GUEST_PDPTR[0-3] is not dirty. Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-3-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b4defe000db8..cc87cd268eb4 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2989,7 +2989,7 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu) mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); - kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); + kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR); } #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \ From 40e49c4f5fb0699b4b5b5b1ee0a1bc88b4fec00d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:43:55 +0800 Subject: [PATCH 099/268] KVM: SVM: Track dirtiness of PDPTRs even if NPT is disabled Use the same logic to handle the availability of VCPU_EXREG_PDPTR as VMX, also removing a branch in svm_vcpu_run(). Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-4-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 64a10cdb2356..9b6386282198 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1598,10 +1598,16 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { + kvm_register_mark_available(vcpu, reg); + switch (reg) { case VCPU_EXREG_PDPTR: - BUG_ON(!npt_enabled); - load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); + /* + * When !npt_enabled, mmu->pdptrs[] is already available since + * it is always updated per SDM when moving to CRs. 
+ */ + if (npt_enabled) + load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); break; default: KVM_BUG_ON(1, vcpu->kvm); @@ -3974,8 +3980,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); - if (npt_enabled) - kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR); + kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR); /* * We need to handle MC intercepts here before the vcpu has a chance to From 5ec60aad547f716530ad308266eeab378a4e287c Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:43:56 +0800 Subject: [PATCH 100/268] KVM: VMX: Add and use X86_CR4_TLBFLUSH_BITS when !enable_ept In set_cr4_guest_host_mask(), X86_CR4_PGE is set to be intercepted when !enable_ept just because X86_CR4_PGE is the only bit that is responsible for flushing TLB but listed in KVM_POSSIBLE_CR4_GUEST_BITS. It is clearer and self-documented to use X86_CR4_TLBFLUSH_BITS instead. No functionality changed. Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-5-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/kvm_cache_regs.h | 2 ++ arch/x86/kvm/vmx/vmx.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 90e1ffdc05b7..828f55ce816b 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -9,6 +9,8 @@ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE) +#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP) + #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \ static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\ { \ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index cc87cd268eb4..35d4df40af2a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4045,7 +4045,7 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS & ~vcpu->arch.cr4_guest_rsvd_bits; if (!enable_ept) - vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PGE; + vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS; if (is_guest_mode(&vmx->vcpu)) vcpu->arch.cr4_guest_owned_bits &= ~get_vmcs12(vcpu)->cr4_guest_host_mask; From a37ebdce168f57732ff2917a685980fc21133417 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:43:57 +0800 Subject: [PATCH 101/268] KVM: VMX: Add and use X86_CR4_PDPTR_BITS when !enable_ept In set_cr4_guest_host_mask(), all cr4 pdptr bits are already set to be intercepted in an unclear way. Add X86_CR4_PDPTR_BITS to make it clear and self-documented. No functionality changed. 
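A stand-alone sketch of the idea (the CR4 bit positions below are architectural, everything else is illustrative and not the in-tree code): a named mask turns the "does this CR4 change require reloading the PDPTEs?" test into one self-documenting expression:

#include <stdbool.h>
#include <stdio.h>

#define CR4_PSE   (1ul << 4)
#define CR4_PAE   (1ul << 5)
#define CR4_PGE   (1ul << 7)
#define CR4_SMEP  (1ul << 20)

/* every bit whose value can affect the cached PDPTEs */
#define CR4_PDPTR_BITS (CR4_PGE | CR4_PSE | CR4_PAE | CR4_SMEP)

static bool cr4_change_touches_pdptrs(unsigned long old_cr4, unsigned long new_cr4)
{
    return (old_cr4 ^ new_cr4) & CR4_PDPTR_BITS;
}

int main(void)
{
    printf("%d\n", cr4_change_touches_pdptrs(CR4_PAE, 0));        /* 1: reload */
    printf("%d\n", cr4_change_touches_pdptrs(CR4_PAE, CR4_PAE));  /* 0: unchanged */
    return 0;
}
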
Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-6-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/kvm_cache_regs.h | 1 + arch/x86/kvm/vmx/vmx.c | 4 +++- arch/x86/kvm/x86.c | 4 +--- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 828f55ce816b..7c9f6455fc04 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -10,6 +10,7 @@ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE) #define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP) +#define X86_CR4_PDPTR_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP) #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \ static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 35d4df40af2a..77d79a3aa35a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4044,8 +4044,10 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS & ~vcpu->arch.cr4_guest_rsvd_bits; - if (!enable_ept) + if (!enable_ept) { vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS; + vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS; + } if (is_guest_mode(&vmx->vcpu)) vcpu->arch.cr4_guest_owned_bits &= ~get_vmcs12(vcpu)->cr4_guest_host_mask; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ba2704fce181..865938f1e94c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1051,8 +1051,6 @@ EXPORT_SYMBOL_GPL(kvm_post_set_cr4); int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP; if (!kvm_is_valid_cr4(vcpu, cr4)) return 1; @@ -1063,7 +1061,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if ((cr4 ^ old_cr4) & X86_CR4_LA57) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) - && ((cr4 ^ old_cr4) & pdptr_bits) + && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; From e63f315d74eeeb6ddf9096223d898730494da6f4 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:43:58 +0800 Subject: [PATCH 102/268] KVM: X86: Move CR0 pdptr_bits into header file as X86_CR0_PDPTR_BITS Not functionality changed. 
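A stand-alone sketch (illustrative mask values, not the in-tree constants) of what the static_assert() added by this patch buys: the non-overlap invariant between guest-owned CR0 bits and PDPTR-affecting CR0 bits is checked at build time instead of remaining an unwritten assumption:

#include <assert.h>

#define GUEST_OWNED_BITS (0x1u | 0x2u)   /* made-up guest-owned bits */
#define PDPTR_BITS       (0x4u | 0x8u)   /* made-up PDPTR-affecting bits */

/* fails to compile if a PDPTR-affecting bit is ever made guest-owned */
static_assert(!(GUEST_OWNED_BITS & PDPTR_BITS),
              "PDPTR-affecting bits must always be intercepted");

int main(void)
{
    return 0;
}
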
Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-7-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/kvm_cache_regs.h | 3 +++ arch/x86/kvm/x86.c | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 7c9f6455fc04..6e6d0d01f18d 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -9,9 +9,12 @@ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE) +#define X86_CR0_PDPTR_BITS (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG) #define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP) #define X86_CR4_PDPTR_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP) +static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS)); + #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \ static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\ { \ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 865938f1e94c..9e0e74a2eaeb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -857,7 +857,6 @@ EXPORT_SYMBOL_GPL(kvm_post_set_cr0); int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); - unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG; cr0 |= X86_CR0_ET; @@ -887,7 +886,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) } #endif if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && - is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) && + is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; From 8f29bf12a37807aa0a544485ab3a853481786203 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:43:59 +0800 Subject: [PATCH 103/268] KVM: SVM: Remove outdated comment in svm_load_mmu_pgd() The comment had been added in the commit 689f3bf21628 ("KVM: x86: unify callbacks to load paging root") and its related code was removed later, and it has nothing to do with the next line of code. So the comment should be removed too. Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-8-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 9b6386282198..b4874529f425 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4010,7 +4010,6 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, hv_track_root_tdp(vcpu, root_hpa); - /* Loading L2's CR3 is handled by enter_svm_guest_mode. */ if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) return; cr3 = vcpu->arch.cr3; From aec9c2402f74b898ad637a97360bfc001fb711e6 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:44:00 +0800 Subject: [PATCH 104/268] KVM: SVM: Remove references to VCPU_EXREG_CR3 VCPU_EXREG_CR3 is never cleared from vcpu->arch.regs_avail or vcpu->arch.regs_dirty in SVM; therefore, marking CR3 as available is merely a NOP, and testing it will likewise always succeed. 
Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-9-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/nested.c | 1 - arch/x86/kvm/svm/svm.c | 2 -- 2 files changed, 3 deletions(-) diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 598843cfe6c4..2d0a7dd7e2a6 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -468,7 +468,6 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, kvm_mmu_new_pgd(vcpu, cr3); vcpu->arch.cr3 = cr3; - kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */ kvm_init_mmu(vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index b4874529f425..7f493ffc1f8d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4010,8 +4010,6 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, hv_track_root_tdp(vcpu, root_hpa); - if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) - return; cr3 = vcpu->arch.cr3; } else if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); From 3883bc9d28ed348d419d2e405d11f0924783f721 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:44:02 +0800 Subject: [PATCH 105/268] KVM: X86: Mark CR3 dirty when vcpu->arch.cr3 is changed When vcpu->arch.cr3 is changed, it should be marked dirty unless it is being updated to the value of the architecture guest CR3 (i.e. VMX.GUEST_CR3 or vmcb->save.cr3 when tdp is enabled). This patch has no functionality changed because kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3) is superset of kvm_register_mark_available(vcpu, VCPU_EXREG_CR3) with additional change to vcpu->arch.regs_dirty, but no code uses regs_dirty for VCPU_EXREG_CR3. (vmx_load_mmu_pgd() uses vcpu->arch.regs_avail instead to test if VCPU_EXREG_CR3 dirty which means current code (ab)uses regs_avail for VCPU_EXREG_CR3 dirty information.) Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-11-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/x86.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index e885f557fcbe..b240776151c6 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1104,7 +1104,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, kvm_mmu_new_pgd(vcpu, cr3); vcpu->arch.cr3 = cr3; - kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. 
*/ kvm_init_mmu(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9e0e74a2eaeb..be3b4a073e72 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1158,7 +1158,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) kvm_mmu_new_pgd(vcpu, cr3); vcpu->arch.cr3 = cr3; - kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); handle_tlb_flush: /* @@ -10567,7 +10567,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, vcpu->arch.cr2 = sregs->cr2; *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; - kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); kvm_set_cr8(vcpu, sregs->cr8); From c62c7bd4f95b8f2a28098c4139a369670998aef2 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:44:03 +0800 Subject: [PATCH 106/268] KVM: VMX: Update vmcs.GUEST_CR3 only when the guest CR3 is dirty When vcpu->arch.cr3 is changed, it is marked dirty, so vmcs.GUEST_CR3 can be updated only when kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3). Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-12-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 77d79a3aa35a..ffe45435b77e 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3114,9 +3114,9 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, if (!enable_unrestricted_guest && !is_paging(vcpu)) guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; - else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) + else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3)) guest_cr3 = vcpu->arch.cr3; - else /* vmcs01.GUEST_CR3 is already up-to-date. */ + else /* vmcs.GUEST_CR3 is already up-to-date. */ update_guest_cr3 = false; vmx_ept_load_pdptrs(vcpu); } else { From 41e68b6964ebf20082af55ad1394523cf86c4c6a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 26 Nov 2021 07:00:15 -0500 Subject: [PATCH 107/268] KVM: vmx, svm: clean up mass updates to regs_avail/regs_dirty bits Document the meaning of the three combinations of regs_avail and regs_dirty. Update regs_dirty just after writeback instead of doing it later after vmexit. After vmexit, instead, we clear the regs_avail bits corresponding to lazily-loaded registers. 
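A stand-alone sketch of that protocol (plain C, not the in-tree kvm_register_* helpers; vmcs_read()/vmcs_write() are placeholders for the hardware accessors): reads lazily populate the cache and set avail, writes additionally set dirty, write-back clears dirty right away, and lazily-loaded registers drop avail again after a vmexit:

#include <stdint.h>
#include <stdio.h>

enum reg { REG_RIP, REG_RSP, NR_REGS };

struct reg_cache {
    uint64_t val[NR_REGS];
    uint32_t avail;   /* bit set: val[] holds the register */
    uint32_t dirty;   /* bit set: val[] must be written back */
};

/* placeholders for the real VMCS/VMCB accessors */
static uint64_t vmcs_read(enum reg r)              { return 0x1000 + r; }
static void     vmcs_write(enum reg r, uint64_t v) { (void)r; (void)v; }

static uint64_t cache_read(struct reg_cache *c, enum reg r)
{
    if (!(c->avail & (1u << r))) {   /* avail=0: value only in VMCS/VMCB */
        c->val[r] = vmcs_read(r);
        c->avail |= 1u << r;         /* now avail=1, dirty=0 */
    }
    return c->val[r];
}

static void cache_write(struct reg_cache *c, enum reg r, uint64_t v)
{
    c->val[r] = v;
    c->avail |= 1u << r;
    c->dirty |= 1u << r;             /* avail=1, dirty=1: needs write-back */
}

static void writeback_before_entry(struct reg_cache *c)
{
    unsigned int r;

    for (r = 0; r < NR_REGS; r++)
        if (c->dirty & (1u << r))
            vmcs_write((enum reg)r, c->val[r]);
    c->dirty = 0;                    /* cleared right after write-back */
    /* after vmexit, lazily-loaded registers would also clear their avail bit */
}

int main(void)
{
    struct reg_cache c = { { 0 }, 0, 0 };

    printf("rip = %#llx\n", (unsigned long long)cache_read(&c, REG_RIP));
    cache_write(&c, REG_RSP, 0x2000);
    writeback_before_entry(&c);
    return 0;
}
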
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/kvm_cache_regs.h | 7 +++++++ arch/x86/kvm/svm/svm.c | 3 ++- arch/x86/kvm/svm/svm.h | 10 ++++++++++ arch/x86/kvm/vmx/nested.c | 8 +++++++- arch/x86/kvm/vmx/vmx.c | 3 ++- arch/x86/kvm/vmx/vmx.h | 28 +++++++++++++++------------- 6 files changed, 43 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 6e6d0d01f18d..ac3d3bd662f4 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -43,6 +43,13 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14) BUILD_KVM_GPR_ACCESSORS(r15, R15) #endif +/* + * avail dirty + * 0 0 register in VMCS/VMCB + * 0 1 *INVALID* + * 1 0 register in vcpu->arch + * 1 1 register in vcpu->arch, needs to be stored back + */ static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu, enum kvm_reg reg) { diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7f493ffc1f8d..de872098071d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3946,6 +3946,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; } + vcpu->arch.regs_dirty = 0; if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_interrupt(vcpu); @@ -3980,7 +3981,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); - kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR); + vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET; /* * We need to handle MC intercepts here before the vcpu has a chance to diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index a57390473013..9f153c59f2c8 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -326,6 +326,16 @@ static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) return container_of(vcpu, struct vcpu_svm, vcpu); } +/* + * Only the PDPTRs are loaded on demand into the shadow MMU. All other + * fields are synchronized in handle_exit, because accessing the VMCB is cheap. + * + * CR3 might be out of date in the VMCB but it is not marked dirty; instead, + * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3 + * is changed. svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB. + */ +#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_EXREG_PDPTR) + static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit) { WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index b240776151c6..dc5041ad860f 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -269,7 +269,13 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) vmx_sync_vmcs_host_state(vmx, prev); put_cpu(); - vmx_register_cache_reset(vcpu); + vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET; + + /* + * All lazily updated registers will be reloaded from VMCS12 on both + * vmentry and vmexit. 
+ */ + vcpu->arch.regs_dirty = 0; } /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ffe45435b77e..c65ff62e11f5 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6649,6 +6649,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); + vcpu->arch.regs_dirty = 0; cr3 = __get_current_cr3_fast(); if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { @@ -6743,7 +6744,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) loadsegment(es, __USER_DS); #endif - vmx_register_cache_reset(vcpu); + vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET; pt_guest_exit(vmx); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 4df2ac24ffc1..f978699480e3 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -473,19 +473,21 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL) BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL) BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL) -static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu) -{ - vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) - | (1 << VCPU_EXREG_RFLAGS) - | (1 << VCPU_EXREG_PDPTR) - | (1 << VCPU_EXREG_SEGMENTS) - | (1 << VCPU_EXREG_CR0) - | (1 << VCPU_EXREG_CR3) - | (1 << VCPU_EXREG_CR4) - | (1 << VCPU_EXREG_EXIT_INFO_1) - | (1 << VCPU_EXREG_EXIT_INFO_2)); - vcpu->arch.regs_dirty = 0; -} +/* + * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the + * cache on demand. Other registers not listed here are synced to + * the cache immediately after VM-Exit. + */ +#define VMX_REGS_LAZY_LOAD_SET ((1 << VCPU_REGS_RIP) | \ + (1 << VCPU_REGS_RSP) | \ + (1 << VCPU_EXREG_RFLAGS) | \ + (1 << VCPU_EXREG_PDPTR) | \ + (1 << VCPU_EXREG_SEGMENTS) | \ + (1 << VCPU_EXREG_CR0) | \ + (1 << VCPU_EXREG_CR3) | \ + (1 << VCPU_EXREG_CR4) | \ + (1 << VCPU_EXREG_EXIT_INFO_1) | \ + (1 << VCPU_EXREG_EXIT_INFO_2)) static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm) { From 2e9ebd55096f70b76c2a5edf93903c8c2f778a9f Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Nov 2021 20:44:06 +0800 Subject: [PATCH 108/268] KVM: X86: Remove kvm_register_clear_available() It has no user. Signed-off-by: Lai Jiangshan Message-Id: <20211108124407.12187-15-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/kvm_cache_regs.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index ac3d3bd662f4..3febc342360c 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -68,13 +68,6 @@ static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu, __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); } -static inline void kvm_register_clear_available(struct kvm_vcpu *vcpu, - enum kvm_reg reg) -{ - __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); - __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); -} - static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg) { From 24cd19a28cb7174df502162641d6e1e12e7ffbd9 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 11 Nov 2021 22:45:26 +0800 Subject: [PATCH 109/268] KVM: X86: Update mmu->pdptrs only when it is changed It is unchanged in most cases. 
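A stand-alone sketch of the idea (not the KVM code): comparing before copying means a reload of identical PDPTRs never dirties the register or requests a PGD reload:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct pdptr_cache {
    unsigned long pdptrs[4];
    bool dirty;   /* stands in for mark_dirty() plus the reload request */
};

static void load_pdptrs(struct pdptr_cache *c, const unsigned long new_pdptrs[4])
{
    /* only pay the dirty/reload cost when the values actually changed */
    if (memcmp(c->pdptrs, new_pdptrs, sizeof(c->pdptrs))) {
        memcpy(c->pdptrs, new_pdptrs, sizeof(c->pdptrs));
        c->dirty = true;
    }
}

int main(void)
{
    struct pdptr_cache c = { { 1, 2, 3, 4 }, false };
    unsigned long same[4]  = { 1, 2, 3, 4 };
    unsigned long other[4] = { 1, 2, 3, 5 };

    load_pdptrs(&c, same);
    printf("dirty after identical load: %d\n", c.dirty);   /* 0 */
    load_pdptrs(&c, other);
    printf("dirty after changed load:   %d\n", c.dirty);   /* 1 */
    return 0;
}
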
Signed-off-by: Lai Jiangshan Message-Id: <20211111144527.88852-1-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index be3b4a073e72..d4f1dff258cc 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -828,9 +828,12 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) } } - memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); - kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); - kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); + kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR); + if (memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs))) { + memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); + kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); + kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); + } vcpu->arch.pdptrs_from_userspace = false; return 1; From 6ab8a4053f7114d130fe1f3485d71efec20f5806 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:01 +0800 Subject: [PATCH 110/268] KVM: VMX: Avoid to rdmsrl(MSR_IA32_SYSENTER_ESP) The value of host MSR_IA32_SYSENTER_ESP is known to be constant for each CPU: (cpu_entry_stack(cpu) + 1) when 32 bit syscall is enabled or NULL is 32 bit syscall is not enabled. So rdmsrl() can be avoided for the first case and both rdmsrl() and vmcs_writel() can be avoided for the second case. Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-3-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c65ff62e11f5..56c46a50d85a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1271,7 +1271,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, if (!already_loaded) { void *gdt = get_current_gdt_ro(); - unsigned long sysenter_esp; /* * Flush all EPTP/VPID contexts, the new pCPU may have stale @@ -1287,8 +1286,11 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ - rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); - vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ + if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) { + /* 22.2.3 */ + vmcs_writel(HOST_IA32_SYSENTER_ESP, + (unsigned long)(cpu_entry_stack(cpu) + 1)); + } vmx->loaded_vmcs->cpu = cpu; } @@ -4026,6 +4028,12 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx) rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32); + + /* + * If 32-bit syscall is enabled, vmx_vcpu_load_vcms rewrites + * HOST_IA32_SYSENTER_ESP. + */ + vmcs_writel(HOST_IA32_SYSENTER_ESP, 0); rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ From 3ab4ac877cfabd209d12cbd6af0aa02077bb778d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:02 +0800 Subject: [PATCH 111/268] KVM: VMX: Update msr value after kvm_set_user_return_msr() succeeds Aoid earlier modification. 
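A stand-alone sketch of the pattern (not the in-tree vmx_set_guest_uret_msr(); set_hw_msr() is a made-up stand-in for kvm_set_user_return_msr()): committing the cached value only after the hardware update succeeds removes the need to stash and restore the old value:

#include <stdint.h>
#include <stdio.h>

struct uret_msr {
    uint64_t data;   /* cached guest value */
};

/* made-up stand-in for the hardware update; nonzero means failure */
static int set_hw_msr(uint64_t data)
{
    return data == 0xdeadbeef ? -1 : 0;
}

static int set_guest_uret_msr(struct uret_msr *msr, uint64_t data)
{
    int ret = set_hw_msr(data);

    if (!ret)
        msr->data = data;   /* commit only on success */
    return ret;
}

int main(void)
{
    struct uret_msr msr = { .data = 1 };

    set_guest_uret_msr(&msr, 0xdeadbeef);   /* fails, cache untouched */
    printf("after failed write: %#llx\n", (unsigned long long)msr.data);
    set_guest_uret_msr(&msr, 2);            /* succeeds, cache updated */
    printf("after good write:   %#llx\n", (unsigned long long)msr.data);
    return 0;
}
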
Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-4-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 56c46a50d85a..73cb1322d7f1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -602,15 +602,13 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, unsigned int slot = msr - vmx->guest_uret_msrs; int ret = 0; - u64 old_msr_data = msr->data; - msr->data = data; if (msr->load_into_hardware) { preempt_disable(); - ret = kvm_set_user_return_msr(slot, msr->data, msr->mask); + ret = kvm_set_user_return_msr(slot, data, msr->mask); preempt_enable(); - if (ret) - msr->data = old_msr_data; } + if (!ret) + msr->data = data; return ret; } From 15ad9762d69fd8e40a4a51828c1d6b0c1b8fbea0 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:03 +0800 Subject: [PATCH 112/268] KVM: VMX: Save HOST_CR3 in vmx_prepare_switch_to_guest() The host CR3 in the vcpu thread can only be changed when scheduling. Moving the code in vmx_prepare_switch_to_guest() makes the code simpler. Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-5-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 8 +------- arch/x86/kvm/vmx/vmx.c | 17 ++++++++++------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index dc5041ad860f..b03df82fc57e 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3033,7 +3033,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long cr3, cr4; + unsigned long cr4; bool vm_fail; if (!nested_early_check) @@ -3056,12 +3056,6 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) */ vmcs_writel(GUEST_RFLAGS, 0); - cr3 = __get_current_cr3_fast(); - if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { - vmcs_writel(HOST_CR3, cr3); - vmx->loaded_vmcs->host_state.cr3 = cr3; - } - cr4 = cr4_read_shadow(); if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { vmcs_writel(HOST_CR4, cr4); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 73cb1322d7f1..1358770ca715 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1103,6 +1103,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) #ifdef CONFIG_X86_64 int cpu = raw_smp_processor_id(); #endif + unsigned long cr3; unsigned long fs_base, gs_base; u16 fs_sel, gs_sel; int i; @@ -1167,6 +1168,14 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) #endif vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base); + + /* Host CR3 including its PCID is stable when guest state is loaded. */ + cr3 = __get_current_cr3_fast(); + if (unlikely(cr3 != host_state->cr3)) { + vmcs_writel(HOST_CR3, cr3); + host_state->cr3 = cr3; + } + vmx->guest_state_loaded = true; } @@ -6612,7 +6621,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long cr3, cr4; + unsigned long cr4; /* Record the guest's net vcpu time for enforced NMI injections. 
*/ if (unlikely(!enable_vnmi && @@ -6657,12 +6666,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); vcpu->arch.regs_dirty = 0; - cr3 = __get_current_cr3_fast(); - if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { - vmcs_writel(HOST_CR3, cr3); - vmx->loaded_vmcs->host_state.cr3 = cr3; - } - cr4 = cr4_read_shadow(); if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { vmcs_writel(HOST_CR4, cr4); From ed07ef5a66e486215bf3f51037c44d10fc9a5a1c Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:05 +0800 Subject: [PATCH 113/268] KVM: VMX: Use kvm_set_msr_common() for MSR_IA32_TSC_ADJUST in the default way MSR_IA32_TSC_ADJUST can be left to the default way which also uese kvm_set_msr_common(). Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-7-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 1358770ca715..00f8203da726 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2104,9 +2104,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } ret = kvm_set_msr_common(vcpu, msr_info); break; - case MSR_IA32_TSC_ADJUST: - ret = kvm_set_msr_common(vcpu, msr_info); - break; case MSR_IA32_MCG_EXT_CTL: if ((!msr_info->host_initiated && !(to_vmx(vcpu)->msr_ia32_feature_control & From fe26f91d30fb129a56f68a880a03ad49d127c07a Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:06 +0800 Subject: [PATCH 114/268] KVM: VMX: Change comments about vmx_get_msr() The variable name is changed in the code. Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-8-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 00f8203da726..041ab537e160 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1757,7 +1757,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr) } /* - * Reads an msr value (of 'msr_index') into 'pdata'. + * Reads an msr value (of 'msr_info->index') into 'msr_info->data'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ From 1af4a1199a41f80b4a792ae76d4c79a01d0b5d41 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:07 +0800 Subject: [PATCH 115/268] KVM: SVM: Rename get_max_npt_level() to get_npt_level() It returns the only proper NPT level, so the "max" in the name is not appropriate. Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-9-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index de872098071d..7252dc389812 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -269,7 +269,7 @@ u32 svm_msrpm_offset(u32 msr) #define MAX_INST_SIZE 15 -static int get_max_npt_level(void) +static int get_npt_level(void) { #ifdef CONFIG_X86_64 return pgtable_l5_enabled() ? 
PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; @@ -1037,9 +1037,9 @@ static __init int svm_hardware_setup(void) if (!boot_cpu_has(X86_FEATURE_NPT)) npt_enabled = false; - /* Force VM NPT level equal to the host's max NPT level */ - kvm_configure_mmu(npt_enabled, get_max_npt_level(), - get_max_npt_level(), PG_LEVEL_1G); + /* Force VM NPT level equal to the host's paging level */ + kvm_configure_mmu(npt_enabled, get_npt_level(), + get_npt_level(), PG_LEVEL_1G); pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis"); /* Note, SEV setup consumes npt_enabled. */ From 58356767107a6b02c9277810809aca8a36c473d7 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:08 +0800 Subject: [PATCH 116/268] KVM: SVM: Allocate sd->save_area with __GFP_ZERO And remove clear_page() on it. Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-10-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7252dc389812..581b9dfe63f2 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -589,12 +589,10 @@ static int svm_cpu_init(int cpu) if (!sd) return ret; sd->cpu = cpu; - sd->save_area = alloc_page(GFP_KERNEL); + sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!sd->save_area) goto free_cpu_data; - clear_page(page_address(sd->save_area)); - ret = sev_cpu_init(sd); if (ret) goto free_save_area; From 27f4fca29f9cfd740dcb7b2bc577bcfd02ae367d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:09 +0800 Subject: [PATCH 117/268] KVM: X86: Skip allocating pae_root for vcpu->arch.guest_mmu when !tdp_enabled It is never used. Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-11-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ede63912b1ac..644a5cb4cad9 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5514,6 +5514,10 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */ + if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu) + return 0; + /* * When using PAE paging, the four PDPTEs are treated as 'root' pages, * while the PDP table is a per-vCPU construct that's allocated at MMU From 84432316cd9aec6923bb3368e86d8f6166b60067 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:10 +0800 Subject: [PATCH 118/268] KVM: X86: Fix comment in __kvm_mmu_create() The allocation of special roots is moved to mmu_alloc_special_roots(). Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-12-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 644a5cb4cad9..740e9f52d324 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5527,7 +5527,7 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) * generally doesn't use PAE paging and can skip allocating the PDP * table. The main exception, handled here, is SVM's 32-bit NPT. The * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit - * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots(). 
+ * KVM; that horror is handled on-demand by mmu_alloc_special_roots(). */ if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL) return 0; From 42f34c20a11333d031560bbb17d5e13e3bfcd44b Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:11 +0800 Subject: [PATCH 119/268] KVM: X86: Remove unused declaration of __kvm_mmu_free_some_pages() The body of __kvm_mmu_free_some_pages() has been removed. Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-13-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c07b30877de0..e28fce50bd02 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1762,7 +1762,6 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu); void kvm_update_dr7(struct kvm_vcpu *vcpu); int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); -void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, ulong roots_to_free); void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu); From 41e35604eaff2266ba8523787ebe99c5ca4c4045 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:12 +0800 Subject: [PATCH 120/268] KVM: X86: Remove useless code to set role.gpte_is_8_bytes when role.direct role.gpte_is_8_bytes is unused when role.direct; there is no point in changing a bit in the role, the value that was set when the MMU is initialized is just fine. Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-14-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 740e9f52d324..c1b6ade6b4a7 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2086,8 +2086,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, role = vcpu->arch.mmu->mmu_role.base; role.level = level; role.direct = direct; - if (role.direct) - role.gpte_is_8_bytes = true; role.access = access; if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); From b46a13cb7ea1137b2e01dfaafcacd5cd79db8390 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 18 Nov 2021 19:08:13 +0800 Subject: [PATCH 121/268] KVM: X86: Calculate quadrant when !role.gpte_is_8_bytes role.quadrant is only valid when gpte size is 4 bytes and only be calculated when gpte size is 4 bytes. 
Although "vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL" also means gpte size is 4 bytes, but using "!role.gpte_is_8_bytes" is clearer Signed-off-by: Lai Jiangshan Message-Id: <20211118110814.2568-15-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c1b6ade6b4a7..c22ed56ece25 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2087,7 +2087,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, role.level = level; role.direct = direct; role.access = access; - if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { + if (!direct_mmu && !role.gpte_is_8_bytes) { quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; role.quadrant = quadrant; From 1f5a21ee8400ccc82c67dc8c153301f694a04099 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 24 Nov 2021 20:20:44 +0800 Subject: [PATCH 122/268] KVM: X86: Add parameter struct kvm_mmu *mmu into mmu->gva_to_gpa() The mmu->gva_to_gpa() has no "struct kvm_mmu *mmu", so an extra FNAME(gva_to_gpa_nested) is needed. Add the parameter can simplify the code. And it makes it explicit that the walk is upon vcpu->arch.walk_mmu for gva and vcpu->arch.mmu for L2 gpa in translate_nested_gpa() via the new parameter. Signed-off-by: Lai Jiangshan Message-Id: <20211124122055.64424-3-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 5 ++-- arch/x86/kvm/mmu/mmu.c | 24 ++++++----------- arch/x86/kvm/mmu/paging_tmpl.h | 47 ++++++--------------------------- arch/x86/kvm/x86.c | 39 +++++++++++++++++---------- 4 files changed, 44 insertions(+), 71 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e28fce50bd02..7d0b79ad270d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -426,8 +426,9 @@ struct kvm_mmu { int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); void (*inject_page_fault)(struct kvm_vcpu *vcpu, struct x86_exception *fault); - gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa, - u32 access, struct x86_exception *exception); + gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + gpa_t gva_or_gpa, u32 access, + struct x86_exception *exception); gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, struct x86_exception *exception); int (*sync_page)(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c22ed56ece25..62e74296857f 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3732,21 +3732,13 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu) kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); } -static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr, - u32 access, struct x86_exception *exception) +static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + gpa_t vaddr, u32 access, + struct x86_exception *exception) { if (exception) exception->error_code = 0; - return vaddr; -} - -static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr, - u32 access, - struct x86_exception *exception) -{ - if (exception) - exception->error_code = 0; - return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); + return mmu->translate_gpa(vcpu, vaddr, access, exception); } static bool mmio_info_in_cache(struct 
kvm_vcpu *vcpu, u64 addr, bool direct) @@ -5001,13 +4993,13 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) * the gva_to_gpa functions between mmu and nested_mmu are swapped. */ if (!is_paging(vcpu)) - g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; + g_context->gva_to_gpa = nonpaging_gva_to_gpa; else if (is_long_mode(vcpu)) - g_context->gva_to_gpa = paging64_gva_to_gpa_nested; + g_context->gva_to_gpa = paging64_gva_to_gpa; else if (is_pae(vcpu)) - g_context->gva_to_gpa = paging64_gva_to_gpa_nested; + g_context->gva_to_gpa = paging64_gva_to_gpa; else - g_context->gva_to_gpa = paging32_gva_to_gpa_nested; + g_context->gva_to_gpa = paging32_gva_to_gpa; reset_guest_paging_metadata(vcpu, g_context); } diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 708a5d297fe1..d16fb5eda02c 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -547,16 +547,6 @@ static int FNAME(walk_addr)(struct guest_walker *walker, access); } -#if PTTYPE != PTTYPE_EPT -static int FNAME(walk_addr_nested)(struct guest_walker *walker, - struct kvm_vcpu *vcpu, gva_t addr, - u32 access) -{ - return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, - addr, access); -} -#endif - static bool FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte, bool no_dirty_log) @@ -1000,14 +990,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) } /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */ -static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access, +static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + gpa_t addr, u32 access, struct x86_exception *exception) { struct guest_walker walker; gpa_t gpa = UNMAPPED_GVA; int r; - r = FNAME(walk_addr)(&walker, vcpu, addr, access); +#ifndef CONFIG_X86_64 + /* A 64-bit GVA should be impossible on 32-bit KVM. */ + WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu); +#endif + + r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access); if (r) { gpa = gfn_to_gpa(walker.gfn); @@ -1018,33 +1014,6 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access, return gpa; } -#if PTTYPE != PTTYPE_EPT -/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */ -static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr, - u32 access, - struct x86_exception *exception) -{ - struct guest_walker walker; - gpa_t gpa = UNMAPPED_GVA; - int r; - -#ifndef CONFIG_X86_64 - /* A 64-bit GVA should be impossible on 32-bit KVM. 
*/ - WARN_ON_ONCE(vaddr >> 32); -#endif - - r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access); - - if (r) { - gpa = gfn_to_gpa(walker.gfn); - gpa |= vaddr & ~PAGE_MASK; - } else if (exception) - *exception = walker.fault; - - return gpa; -} -#endif - /* * Using the cached information from sp->gfns is safe because: * - The spte has a reference to the struct page, so the pfn for a given gfn diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d4f1dff258cc..7a7bad7ec3a7 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6506,13 +6506,14 @@ void kvm_get_segment(struct kvm_vcpu *vcpu, gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, struct x86_exception *exception) { + struct kvm_mmu *mmu = vcpu->arch.mmu; gpa_t t_gpa; BUG_ON(!mmu_is_nested(vcpu)); /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; - t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); + t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); return t_gpa; } @@ -6520,25 +6521,31 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; - return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); + return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); } EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_FETCH_MASK; - return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); + return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); } gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; - return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); + return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); } EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); @@ -6546,19 +6553,21 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { - return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; + + return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { - gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, - exception); + gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; @@ -6586,13 +6595,14 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; unsigned offset; int ret; /* Inline kvm_read_guest_virt_helper for speed. 
*/ - gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, - exception); + gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, + exception); if (unlikely(gpa == UNMAPPED_GVA)) return X86EMUL_PROPAGATE_FAULT; @@ -6651,13 +6661,12 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { - gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, - access, - exception); + gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; @@ -6744,6 +6753,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? PFERR_WRITE_MASK : 0); @@ -6761,7 +6771,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, return 1; } - *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); + *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); if (*gpa == UNMAPPED_GVA) return -1; @@ -12312,12 +12322,13 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) { + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; struct x86_exception fault; u32 access = error_code & (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK); if (!(error_code & PFERR_PRESENT_MASK) || - vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) { + mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != UNMAPPED_GVA) { /* * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page * tables probably do not match the TLB. Just proceed From c59a0f57fa32cfa77643daa17a8e55377cc9fe0b Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 24 Nov 2021 20:20:45 +0800 Subject: [PATCH 123/268] KVM: X86: Remove mmu->translate_gpa Reduce an indirect function call (retpoline) and some intialization code. 
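A stand-alone sketch of the idea (not the KVM code; names and the translation itself are made up): replacing a per-object function pointer with an inline helper that checks which MMU is being walked turns the common case into a direct, predictable branch rather than a retpolined indirect call:

#include <stdio.h>

struct mmu {
    int id;
};

static struct mmu root_mmu   = { .id = 0 };
static struct mmu nested_mmu = { .id = 1 };

/* placeholder for the expensive nested translation */
static unsigned long translate_nested(unsigned long gpa)
{
    return gpa ^ 0x1000;
}

/* was: mmu->translate_gpa(gpa) through a per-MMU function pointer */
static inline unsigned long translate_gpa(const struct mmu *mmu, unsigned long gpa)
{
    if (mmu != &nested_mmu)
        return gpa;               /* identity for non-nested MMUs */
    return translate_nested(gpa);
}

int main(void)
{
    printf("%#lx\n", translate_gpa(&root_mmu, 0x2000));    /* 0x2000 */
    printf("%#lx\n", translate_gpa(&nested_mmu, 0x2000));  /* 0x3000 */
    return 0;
}
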
Signed-off-by: Lai Jiangshan Message-Id: <20211124122055.64424-4-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 4 ---- arch/x86/kvm/mmu.h | 13 +++++++++++++ arch/x86/kvm/mmu/mmu.c | 11 +---------- arch/x86/kvm/mmu/paging_tmpl.h | 7 +++---- arch/x86/kvm/x86.c | 4 ++-- 5 files changed, 19 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7d0b79ad270d..f1414df18e11 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -429,8 +429,6 @@ struct kvm_mmu { gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t gva_or_gpa, u32 access, struct x86_exception *exception); - gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, - struct x86_exception *exception); int (*sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp); void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa); @@ -1766,8 +1764,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn); void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, ulong roots_to_free); void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu); -gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, - struct x86_exception *exception); gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception); gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 9ae6168d381e..97e13c2988b3 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -351,4 +351,17 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count) { atomic64_add(count, &kvm->stat.pages[level - 1]); } + +gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, + struct x86_exception *exception); + +static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu, + gpa_t gpa, u32 access, + struct x86_exception *exception) +{ + if (mmu != &vcpu->arch.nested_mmu) + return gpa; + return translate_nested_gpa(vcpu, gpa, access, exception); +} #endif diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 62e74296857f..327da013ab64 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -335,12 +335,6 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) return likely(kvm_gen == spte_gen); } -static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, - struct x86_exception *exception) -{ - return gpa; -} - static int is_cpuid_PSE36(void) { return 1; @@ -3738,7 +3732,7 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, { if (exception) exception->error_code = 0; - return mmu->translate_gpa(vcpu, vaddr, access, exception); + return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception); } static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) @@ -5500,7 +5494,6 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) mmu->root_hpa = INVALID_PAGE; mmu->root_pgd = 0; - mmu->translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; @@ -5562,8 +5555,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) vcpu->arch.mmu = &vcpu->arch.root_mmu; vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; - vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; - ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu); if (ret) return ret; diff --git 
a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index d16fb5eda02c..5b5bdac97c7b 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -403,9 +403,8 @@ retry_walk: walker->table_gfn[walker->level - 1] = table_gfn; walker->pte_gpa[walker->level - 1] = pte_gpa; - real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), - nested_access, - &walker->fault); + real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn), + nested_access, &walker->fault); /* * FIXME: This can happen if emulation (for of an INS/OUTS @@ -467,7 +466,7 @@ retry_walk: if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36()) gfn += pse36_gfn_delta(pte); - real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); + real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault); if (real_gpa == UNMAPPED_GVA) return 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7a7bad7ec3a7..432c6e3cb746 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -810,8 +810,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated * to an L1 GPA. */ - real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(pdpt_gfn), - PFERR_USER_MASK | PFERR_WRITE_MASK, NULL); + real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn), + PFERR_USER_MASK | PFERR_WRITE_MASK, NULL); if (real_gpa == UNMAPPED_GVA) return 0; From 84ea5c09a66d19eff2eaebffafa667e6bf9a7905 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 24 Nov 2021 20:20:48 +0800 Subject: [PATCH 124/268] KVM: X86: Add huge_page_level to __reset_rsvds_bits_mask_ept() Bit 7 on pte depends on the level of supported large page. Signed-off-by: Lai Jiangshan Message-Id: <20211124122055.64424-7-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 327da013ab64..ad7e3c5903e7 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4358,22 +4358,28 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, - u64 pa_bits_rsvd, bool execonly) + u64 pa_bits_rsvd, bool execonly, int huge_page_level) { u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); + u64 large_1g_rsvd = 0, large_2m_rsvd = 0; u64 bad_mt_xwr; + if (huge_page_level < PG_LEVEL_1G) + large_1g_rsvd = rsvd_bits(7, 7); + if (huge_page_level < PG_LEVEL_2M) + large_2m_rsvd = rsvd_bits(7, 7); + rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7); rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7); - rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6); - rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6); + rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd; + rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd; rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; /* large page */ rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; - rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29); - rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20); + rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | 
large_1g_rsvd; + rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd; rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ @@ -4389,10 +4395,11 @@ __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, } static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, - struct kvm_mmu *context, bool execonly) + struct kvm_mmu *context, bool execonly, int huge_page_level) { __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, - vcpu->arch.reserved_gpa_bits, execonly); + vcpu->arch.reserved_gpa_bits, execonly, + huge_page_level); } static inline u64 reserved_hpa_bits(void) @@ -4468,7 +4475,8 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, false, true); else __reset_rsvds_bits_mask_ept(shadow_zero_check, - reserved_hpa_bits(), false); + reserved_hpa_bits(), false, + max_huge_page_level); if (!shadow_me_mask) return; @@ -4488,7 +4496,8 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, - reserved_hpa_bits(), execonly); + reserved_hpa_bits(), execonly, + max_huge_page_level); } #define BYTE_MASK(access) \ @@ -4923,7 +4932,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, update_permission_bitmask(context, true); context->pkru_mask = 0; - reset_rsvds_bits_mask_ept(vcpu, context, execonly); + reset_rsvds_bits_mask_ept(vcpu, context, execonly, max_huge_page_level); reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); } EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); From cc022ae144c1ce318643f821461295337280a1c0 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 24 Nov 2021 20:20:49 +0800 Subject: [PATCH 125/268] KVM: X86: Add parameter huge_page_level to kvm_init_shadow_ept_mmu() The level of supported large page on nEPT affects the rsvds_bits_mask. 
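How the new level argument feeds the reserved-bit masks, as a standalone sketch (PG_LEVEL_* values and rsvd_bits() are simplified stand-ins for the kernel definitions): when the CPU cannot map a level as a huge page, bit 7 of that level's PTE is reserved and has to be flagged as such for the nested guest.

#include <stdio.h>
#include <stdint.h>

enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

static uint64_t rsvd_bits(int lo, int hi)
{
        return ((1ull << (hi - lo + 1)) - 1) << lo;
}

/* Bit 7 ("large page") is reserved at any level the CPU cannot map hugely. */
static void large_page_rsvd(int huge_page_level, uint64_t *pdpte_rsvd, uint64_t *pde_rsvd)
{
        *pdpte_rsvd = huge_page_level < PG_LEVEL_1G ? rsvd_bits(7, 7) : 0;
        *pde_rsvd   = huge_page_level < PG_LEVEL_2M ? rsvd_bits(7, 7) : 0;
}

int main(void)
{
        for (int lvl = PG_LEVEL_4K; lvl <= PG_LEVEL_1G; lvl++) {
                uint64_t r1g, r2m;

                large_page_rsvd(lvl, &r1g, &r2m);
                printf("huge_page_level=%d: PDPTE bit7 rsvd=%#llx, PDE bit7 rsvd=%#llx\n",
                       lvl, (unsigned long long)r1g, (unsigned long long)r2m);
        }
        return 0;
}
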
Signed-off-by: Lai Jiangshan Message-Id: <20211124122055.64424-8-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.h | 3 ++- arch/x86/kvm/mmu/mmu.c | 5 +++-- arch/x86/kvm/vmx/capabilities.h | 9 +++++++++ arch/x86/kvm/vmx/nested.c | 8 +++++--- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 97e13c2988b3..e9fbb2c8bbe2 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -71,7 +71,8 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu); void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, unsigned long cr4, u64 efer, gpa_t nested_cr3); void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, - bool accessed_dirty, gpa_t new_eptp); + int huge_page_level, bool accessed_dirty, + gpa_t new_eptp); bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ad7e3c5903e7..41613963a455 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4905,7 +4905,8 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, } void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, - bool accessed_dirty, gpa_t new_eptp) + int huge_page_level, bool accessed_dirty, + gpa_t new_eptp) { struct kvm_mmu *context = &vcpu->arch.guest_mmu; u8 level = vmx_eptp_page_walk_level(new_eptp); @@ -4932,7 +4933,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, update_permission_bitmask(context, true); context->pkru_mask = 0; - reset_rsvds_bits_mask_ept(vcpu, context, execonly, max_huge_page_level); + reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level); reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); } EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 4705ad55abb5..c8029b7845b6 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -312,6 +312,15 @@ static inline bool cpu_has_vmx_ept_1g_page(void) return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; } +static inline int ept_caps_to_lpage_level(u32 ept_caps) +{ + if (ept_caps & VMX_EPT_1GB_PAGE_BIT) + return PG_LEVEL_1G; + if (ept_caps & VMX_EPT_2MB_PAGE_BIT) + return PG_LEVEL_2M; + return PG_LEVEL_4K; +} + static inline bool cpu_has_vmx_ept_ad_bits(void) { return vmx_capability.ept & VMX_EPT_AD_BIT; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index b03df82fc57e..e6230cd55b44 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -397,9 +397,11 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, static void nested_ept_new_eptp(struct kvm_vcpu *vcpu) { - kvm_init_shadow_ept_mmu(vcpu, - to_vmx(vcpu)->nested.msrs.ept_caps & - VMX_EPT_EXECUTE_ONLY_BIT, + struct vcpu_vmx *vmx = to_vmx(vcpu); + bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT; + int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps); + + kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level, nested_ept_ad_enabled(vcpu), nested_ept_get_eptp(vcpu)); } From f8cd457f061d3ca79518f9061c4205590348a0a1 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 24 Nov 2021 20:20:50 +0800 Subject: [PATCH 126/268] KVM: VMX: Use ept_caps_to_lpage_level() in hardware_setup() Using ept_caps_to_lpage_level is simpler. 
Signed-off-by: Lai Jiangshan Message-Id: <20211124122055.64424-9-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 041ab537e160..cee69d895370 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7700,7 +7700,7 @@ static __init int hardware_setup(void) { unsigned long host_bndcfgs; struct desc_ptr dt; - int r, ept_lpage_level; + int r; store_idt(&dt); host_idt_base = dt.address; @@ -7797,16 +7797,8 @@ static __init int hardware_setup(void) kvm_mmu_set_ept_masks(enable_ept_ad_bits, cpu_has_vmx_ept_execute_only()); - if (!enable_ept) - ept_lpage_level = 0; - else if (cpu_has_vmx_ept_1g_page()) - ept_lpage_level = PG_LEVEL_1G; - else if (cpu_has_vmx_ept_2m_page()) - ept_lpage_level = PG_LEVEL_2M; - else - ept_lpage_level = PG_LEVEL_4K; kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(), - ept_lpage_level); + ept_caps_to_lpage_level(vmx_capability.ept)); /* * Only enable PML when hardware supports PML feature, and both EPT From bb3b394d35e80d7a58ce015191e4960a13f54ba5 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 24 Nov 2021 20:20:51 +0800 Subject: [PATCH 127/268] KVM: X86: Rename gpte_is_8_bytes to has_4_byte_gpte and invert the direction This bit is very close to mean "role.quadrant is not in use", except that it is false also when the MMU is mapping guest physical addresses directly. In that case, role.quadrant is indeed not in use, but there are no guest PTEs at all. Changing the name and direction of the bit removes the special case, since a guest with paging disabled, or not considering guest paging structures as is the case for two-dimensional paging, does not have to deal with 4-byte guest PTEs. Suggested-by: Paolo Bonzini Signed-off-by: Lai Jiangshan Message-Id: <20211124122055.64424-10-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/mmu.rst | 8 ++++---- arch/x86/include/asm/kvm_host.h | 8 ++++---- arch/x86/kvm/mmu/mmu.c | 12 ++++++------ arch/x86/kvm/mmu/mmutrace.h | 2 +- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/mmu.rst index f60f5488e121..5b1ebad24c77 100644 --- a/Documentation/virt/kvm/mmu.rst +++ b/Documentation/virt/kvm/mmu.rst @@ -161,7 +161,7 @@ Shadow pages contain the following information: If clear, this page corresponds to a guest page table denoted by the gfn field. role.quadrant: - When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit + When role.has_4_byte_gpte=1, the guest uses 32-bit gptes while the host uses 64-bit sptes. That means a guest page table contains more ptes than the host, so multiple shadow pages are needed to shadow one guest page. For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the @@ -177,9 +177,9 @@ Shadow pages contain the following information: The page is invalid and should not be used. It is a root page that is currently pinned (by a cpu hardware register pointing to it); once it is unpinned it will be destroyed. - role.gpte_is_8_bytes: - Reflects the size of the guest PTE for which the page is valid, i.e. '1' - if 64-bit gptes are in use, '0' if 32-bit gptes are in use. + role.has_4_byte_gpte: + Reflects the size of the guest PTE for which the page is valid, i.e. '0' + if direct map or 64-bit gptes are in use, '1' if 32-bit gptes are in use. 
role.efer_nx: Contains the value of efer.nx for which the page is valid. role.cr0_wp: diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f1414df18e11..5d48ba6d1487 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -296,14 +296,14 @@ struct kvm_kernel_irq_routing_entry; * * - invalid shadow pages are not accounted, so the bits are effectively 18 * - * - quadrant will only be used if gpte_is_8_bytes=0 (non-PAE paging); + * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging); * execonly and ad_disabled are only used for nested EPT which has - * gpte_is_8_bytes=1. Therefore, 2 bits are always unused. + * has_4_byte_gpte=0. Therefore, 2 bits are always unused. * * - the 4 bits of level are effectively limited to the values 2/3/4/5, * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE * paging has exactly one upper level, making level completely redundant - * when gpte_is_8_bytes=0. + * when has_4_byte_gpte=1. * * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if * cr0_wp=0, therefore these three bits only give rise to 5 possibilities. @@ -315,7 +315,7 @@ union kvm_mmu_page_role { u32 word; struct { unsigned level:4; - unsigned gpte_is_8_bytes:1; + unsigned has_4_byte_gpte:1; unsigned quadrant:2; unsigned direct:1; unsigned access:3; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 41613963a455..1ccee4d17481 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2081,7 +2081,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, role.level = level; role.direct = direct; role.access = access; - if (!direct_mmu && !role.gpte_is_8_bytes) { + if (role.has_4_byte_gpte) { quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; role.quadrant = quadrant; @@ -4746,7 +4746,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, role.base.ad_disabled = (shadow_accessed_mask == 0); role.base.level = kvm_mmu_get_tdp_level(vcpu); role.base.direct = true; - role.base.gpte_is_8_bytes = true; + role.base.has_4_byte_gpte = false; return role; } @@ -4791,7 +4791,7 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs); role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs); - role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs); + role.base.has_4_byte_gpte = ____is_cr0_pg(regs) && !____is_cr4_pae(regs); return role; } @@ -4890,7 +4890,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; role.base.level = level; - role.base.gpte_is_8_bytes = true; + role.base.has_4_byte_gpte = false; role.base.direct = false; role.base.ad_disabled = !accessed_dirty; role.base.guest_mode = true; @@ -5168,7 +5168,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, gpa, bytes, sp->role.word); offset = offset_in_page(gpa); - pte_size = sp->role.gpte_is_8_bytes ? 8 : 4; + pte_size = sp->role.has_4_byte_gpte ? 
4 : 8; /* * Sometimes, the OS only writes the last one bytes to update status @@ -5192,7 +5192,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) page_offset = offset_in_page(gpa); level = sp->role.level; *nspte = 1; - if (!sp->role.gpte_is_8_bytes) { + if (sp->role.has_4_byte_gpte) { page_offset <<= 1; /* 32->64 */ /* * A 32-bit pde maps 4MB while the shadow pdes map diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h index b8151bbca36a..de5e8e4e1aa7 100644 --- a/arch/x86/kvm/mmu/mmutrace.h +++ b/arch/x86/kvm/mmu/mmutrace.h @@ -35,7 +35,7 @@ " %snxe %sad root %u %s%c", \ __entry->mmu_valid_gen, \ __entry->gfn, role.level, \ - role.gpte_is_8_bytes ? 8 : 4, \ + role.has_4_byte_gpte ? 4 : 8, \ role.quadrant, \ role.direct ? " direct" : "", \ access_str[role.access], \ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 1db8496259ad..b69e47e68307 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -165,7 +165,7 @@ static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu, role = vcpu->arch.mmu->mmu_role.base; role.level = level; role.direct = true; - role.gpte_is_8_bytes = true; + role.has_4_byte_gpte = false; role.access = ACC_ALL; role.ad_disabled = !shadow_accessed_mask; From 2df4a5eb6c5a7aab471dc0b279efe0e49194ce77 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 24 Nov 2021 20:20:52 +0800 Subject: [PATCH 128/268] KVM: X86: Remove mmu parameter from load_pdptrs() It uses vcpu->arch.walk_mmu always; nested EPT does not have PDPTRs, and nested NPT treats them like all other non-leaf page table levels instead of caching them. Signed-off-by: Lai Jiangshan Message-Id: <20211124122055.64424-11-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/svm/nested.c | 4 ++-- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/vmx/nested.c | 4 ++-- arch/x86/kvm/x86.c | 12 ++++++------ 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5d48ba6d1487..361fc52d1c0a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1593,7 +1593,7 @@ void kvm_mmu_zap_all(struct kvm *kvm); void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages); -int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); +int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, const void *val, int bytes); diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 2d0a7dd7e2a6..cf206855ebf0 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -461,7 +461,7 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, return -EINVAL; if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) && - CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) + CC(!load_pdptrs(vcpu, cr3))) return -EINVAL; if (!nested_npt) @@ -1517,7 +1517,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) * the guest CR3 might be restored prior to setting the nested * state which can lead to a load of wrong PDPTRs. 
*/ - if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))) + if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) return false; if (!nested_svm_vmrun_msrpm(svm)) { diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 581b9dfe63f2..208566f63bce 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1605,7 +1605,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) * it is always updated per SDM when moving to CRs. */ if (npt_enabled) - load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); + load_pdptrs(vcpu, kvm_read_cr3(vcpu)); break; default: KVM_BUG_ON(1, vcpu->kvm); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index e6230cd55b44..08e785871985 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1103,7 +1103,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, * must not be dereferenced. */ if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) && - CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) { + CC(!load_pdptrs(vcpu, cr3))) { *entry_failure_code = ENTRY_FAIL_PDPTE; return -EINVAL; } @@ -3147,7 +3147,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) * the guest CR3 might be restored prior to setting the nested * state which can lead to a load of wrong PDPTRs. */ - if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))) + if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3))) return false; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 432c6e3cb746..2f232eb0c989 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -798,8 +798,9 @@ static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) /* * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise. */ -int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) +int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) { + struct kvm_mmu *mmu = vcpu->arch.walk_mmu; gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; gpa_t real_gpa; int i; @@ -890,7 +891,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) #endif if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) && - !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) + !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) return 1; if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) @@ -1064,8 +1065,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS) - && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, - kvm_read_cr3(vcpu))) + && !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) return 1; if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { @@ -1154,7 +1154,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) return 1; - if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) + if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) return 1; if (cr3 != kvm_read_cr3(vcpu)) @@ -10597,7 +10597,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, if (update_pdptrs) { idx = srcu_read_lock(&vcpu->kvm->srcu); if (is_pae_paging(vcpu)) { - load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); + load_pdptrs(vcpu, kvm_read_cr3(vcpu)); *mmu_reset_needed = 1; } srcu_read_unlock(&vcpu->kvm->srcu, idx); From ce5977b181c1613072eafbc7546bcb6c463ea68c Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 4 Nov 2021 19:56:13 +0800 Subject: [PATCH 129/268] 
KVM: x86: don't print when fail to read/write pv eoi memory If guest gives MSR_KVM_PV_EOI_EN a wrong value, this printk() will be trigged, and kernel log is spammed with the useless message Fixes: 0d88800d5472 ("kvm: x86: ioapic and apic debug macros cleanup") Reported-by: Vitaly Kuznetsov Reviewed-by: Vitaly Kuznetsov Signed-off-by: Li RongQing Cc: stable@kernel.org Message-Id: <1636026974-50555-1-git-send-email-lirongqing@baidu.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index bbac8477b3ec..8f4d872f3ffa 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -676,31 +676,25 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) { u8 val; - if (pv_eoi_get_user(vcpu, &val) < 0) { - printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n", - (unsigned long long)vcpu->arch.pv_eoi.msr_val); + if (pv_eoi_get_user(vcpu, &val) < 0) return false; - } + return val & KVM_PV_EOI_ENABLED; } static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) { - if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) { - printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n", - (unsigned long long)vcpu->arch.pv_eoi.msr_val); + if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) return; - } + __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); } static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) { - if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) { - printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n", - (unsigned long long)vcpu->arch.pv_eoi.msr_val); + if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) return; - } + __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); } From 51b1209c6125273c345aee6767ffaccb765e5e36 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 4 Nov 2021 19:56:14 +0800 Subject: [PATCH 130/268] KVM: Clear pv eoi pending bit only when it is set merge pv_eoi_get_pending and pv_eoi_clr_pending into a single function pv_eoi_test_and_clear_pending, which returns and clear the value of the pending bit. This makes it possible to clear the pending bit only if the guest had set it, and otherwise skip the call to pv_eoi_put_user(). This can save up to 300 nsec on AMD EPYC processors. 
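Where the saving comes from, as a userspace sketch (guest memory and the MSR accessors are faked with a plain byte and a write counter): the write back to guest memory now happens only when the pending bit was actually set.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_PV_EOI_ENABLED  1u
#define KVM_PV_EOI_DISABLED 0u

/* Fake "guest memory" backing the PV EOI byte, plus a write counter. */
static uint8_t pv_eoi_mem;
static unsigned int guest_writes;

static int pv_eoi_get_user(uint8_t *val) { *val = pv_eoi_mem; return 0; }
static int pv_eoi_put_user(uint8_t val)  { pv_eoi_mem = val; guest_writes++; return 0; }

/* Clear the pending bit, but only touch guest memory if it was set. */
static bool pv_eoi_test_and_clr_pending(void)
{
        uint8_t val;

        if (pv_eoi_get_user(&val) < 0)
                return false;

        val &= KVM_PV_EOI_ENABLED;
        if (val && pv_eoi_put_user(KVM_PV_EOI_DISABLED) < 0)
                return false;

        return val;
}

int main(void)
{
        pv_eoi_mem = 0;                         /* guest never set the bit */
        pv_eoi_test_and_clr_pending();
        printf("writes when already clear: %u\n", guest_writes);   /* 0 */

        pv_eoi_mem = KVM_PV_EOI_ENABLED;        /* guest set the bit */
        pv_eoi_test_and_clr_pending();
        printf("writes when set:           %u\n", guest_writes);   /* 1 */
        return 0;
}
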
Suggested-by: Vitaly Kuznetsov Suggested-by: Paolo Bonzini Signed-off-by: Li RongQing Message-Id: <1636026974-50555-2-git-send-email-lirongqing@baidu.com> Reviewed-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 40 +++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 8f4d872f3ffa..40270d7bc597 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -673,15 +673,6 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; } -static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) -{ - u8 val; - if (pv_eoi_get_user(vcpu, &val) < 0) - return false; - - return val & KVM_PV_EOI_ENABLED; -} - static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) { if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) @@ -690,12 +681,26 @@ static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); } -static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) +static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu) { - if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) - return; + u8 val; + if (pv_eoi_get_user(vcpu, &val) < 0) + return false; + + val &= KVM_PV_EOI_ENABLED; + + if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) + return false; + + /* + * Clear pending bit in any case: it will be set again on vmentry. + * While this might not be ideal from performance point of view, + * this makes sure pv eoi is only enabled when we know it's safe. + */ __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); + + return val; } static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr) @@ -2671,7 +2676,6 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, struct kvm_lapic *apic) { - bool pending; int vector; /* * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host @@ -2685,14 +2689,8 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, * -> host enabled PV EOI, guest executed EOI. */ BUG_ON(!pv_eoi_enabled(vcpu)); - pending = pv_eoi_get_pending(vcpu); - /* - * Clear pending bit in any case: it will be set again on vmentry. - * While this might not be ideal from performance point of view, - * this makes sure pv eoi is only enabled when we know it's safe. - */ - pv_eoi_clr_pending(vcpu); - if (pending) + + if (pv_eoi_test_and_clr_pending(vcpu)) return; vector = apic_set_eoi(apic); trace_kvm_pv_eoi(apic, vector); From 5e854864ee4384736f27a986633bae21731a4e4e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 2 Nov 2021 17:15:29 +0800 Subject: [PATCH 131/268] KVM: x86: Handle 32-bit wrap of EIP for EMULTYPE_SKIP with flat code seg Truncate the new EIP to a 32-bit value when handling EMULTYPE_SKIP as the decode phase does not truncate _eip. Wrapping the 32-bit boundary is legal if and only if CS is a flat code segment, but that check is implicitly handled in the form of limit checks in the decode phase. Opportunstically prepare for a future fix by storing the result of any truncation in "eip" instead of "_eip". 
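The truncation itself is small; a standalone sketch of the wrap behaviour (the decode-phase advance of _eip is folded into the helper for brevity): outside 64-bit mode the new value is chopped to 32 bits, so an instruction ending exactly at the 4G boundary wraps to a low address instead of leaking bits 63:32 into RIP. As the commit notes, the wrap is only legal with a flat code segment, which the decode-phase limit checks already cover.

#include <stdio.h>
#include <stdint.h>

enum x86emul_mode { MODE_PROT32, MODE_PROT64 };

/* Advance past a 'len'-byte instruction; EIP wraps at 4G outside 64-bit mode. */
static uint64_t skip_rip(enum x86emul_mode mode, uint64_t eip, unsigned int len)
{
        uint64_t new_eip = eip + len;

        if (mode != MODE_PROT64)
                new_eip = (uint32_t)new_eip;
        return new_eip;
}

int main(void)
{
        /* A 2-byte instruction ending exactly at the 4G boundary. */
        printf("32-bit: %#llx\n", (unsigned long long)skip_rip(MODE_PROT32, 0xfffffffeull, 2));
        printf("64-bit: %#llx\n", (unsigned long long)skip_rip(MODE_PROT64, 0xfffffffeull, 2));
        return 0;
}
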
Fixes: 1957aa63be53 ("KVM: VMX: Handle single-step #DB for EMULTYPE_SKIP on EPT misconfig") Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <093eabb1eab2965201c9b018373baf26ff256d85.1635842679.git.houwenlong93@linux.alibaba.com> --- arch/x86/kvm/x86.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2f232eb0c989..d0cc4051ee26 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8139,7 +8139,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, * updating interruptibility state and injecting single-step #DBs. */ if (emulation_type & EMULTYPE_SKIP) { - kvm_rip_write(vcpu, ctxt->_eip); + if (ctxt->mode != X86EMUL_MODE_PROT64) + ctxt->eip = (u32)ctxt->_eip; + else + ctxt->eip = ctxt->_eip; + + kvm_rip_write(vcpu, ctxt->eip); if (ctxt->eflags & X86_EFLAGS_RF) kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); return 1; From 906fa90416fdb703467926ca4f6f55438cd7ea82 Mon Sep 17 00:00:00 2001 From: Hou Wenlong Date: Tue, 2 Nov 2021 17:15:30 +0800 Subject: [PATCH 132/268] KVM: x86: Add an emulation type to handle completion of user exits The next patch would use kvm_emulate_instruction() with EMULTYPE_SKIP in complete_userspace_io callback to fix a problem in msr access emulation. However, EMULTYPE_SKIP only updates RIP, more things like updating interruptibility state and injecting single-step #DBs would be done in the callback. Since the emulator also does those things after x86_emulate_insn(), add a new emulation type to pair with EMULTYPE_SKIP to do those things for completion of user exits within the emulator. Suggested-by: Sean Christopherson Signed-off-by: Hou Wenlong Signed-off-by: Paolo Bonzini Message-Id: <8f8c8e268b65f31d55c2881a4b30670946ecfa0d.1635842679.git.houwenlong93@linux.alibaba.com> --- arch/x86/include/asm/kvm_host.h | 8 +++++++- arch/x86/kvm/x86.c | 13 ++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 361fc52d1c0a..d5fede05eb5f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1643,7 +1643,8 @@ extern u64 kvm_mce_cap_supported; * * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to * decode the instruction length. For use *only* by - * kvm_x86_ops.skip_emulated_instruction() implementations. + * kvm_x86_ops.skip_emulated_instruction() implementations if + * EMULTYPE_COMPLETE_USER_EXIT is not set. * * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to * retry native execution under certain conditions, @@ -1663,6 +1664,10 @@ extern u64 kvm_mce_cap_supported; * * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which * case the CR2/GPA value pass on the stack is valid. + * + * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility + * state and inject single-step #DBs after skipping + * an instruction (after completing userspace I/O). 
*/ #define EMULTYPE_NO_DECODE (1 << 0) #define EMULTYPE_TRAP_UD (1 << 1) @@ -1671,6 +1676,7 @@ extern u64 kvm_mce_cap_supported; #define EMULTYPE_TRAP_UD_FORCED (1 << 4) #define EMULTYPE_VMWARE_GP (1 << 5) #define EMULTYPE_PF (1 << 6) +#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7) int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d0cc4051ee26..4464aa7931cd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8134,9 +8134,10 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, } /* - * Note, EMULTYPE_SKIP is intended for use *only* by vendor callbacks - * for kvm_skip_emulated_instruction(). The caller is responsible for - * updating interruptibility state and injecting single-step #DBs. + * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for + * use *only* by vendor callbacks for kvm_skip_emulated_instruction(). + * The caller is responsible for updating interruptibility state and + * injecting single-step #DBs. */ if (emulation_type & EMULTYPE_SKIP) { if (ctxt->mode != X86EMUL_MODE_PROT64) @@ -8144,6 +8145,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, else ctxt->eip = ctxt->_eip; + if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) { + r = 1; + goto writeback; + } + kvm_rip_write(vcpu, ctxt->eip); if (ctxt->eflags & X86_EFLAGS_RF) kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); @@ -8213,6 +8219,7 @@ restart: else r = 1; +writeback: if (writeback) { unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); From d2f7d49826ae62b8b5c9829292e84861d2bda2b6 Mon Sep 17 00:00:00 2001 From: Hou Wenlong Date: Tue, 2 Nov 2021 17:15:31 +0800 Subject: [PATCH 133/268] KVM: x86: Use different callback if msr access comes from the emulator If msr access triggers an exit to userspace, the complete_userspace_io callback would skip instruction by vendor callback for kvm_skip_emulated_instruction(). However, when msr access comes from the emulator, e.g. if kvm.force_emulation_prefix is enabled and the guest uses rdmsr/wrmsr with kvm prefix, VM_EXIT_INSTRUCTION_LEN in vmcs is invalid and kvm_emulate_instruction() should be used to skip instruction instead. As Sean noted, unlike the previous case, there's no #UD if unrestricted guest is disabled and the guest accesses an MSR in Big RM. So the correct way to fix this is to attach a different callback when the msr access comes from the emulator. 
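The shape of the fix, sketched standalone (the structures and the queueing are faked; only the selection logic mirrors the patch): the completion callback is picked when the userspace MSR exit is queued, so the completion path automatically skips the instruction the right way for its origin.

#include <stdbool.h>
#include <stdio.h>

struct vcpu;
typedef int (*complete_fn)(struct vcpu *);

struct vcpu {
        complete_fn complete_userspace_io;
};

/* Fast path: the vendor callback can use the recorded instruction length. */
static int complete_fast_msr_access(struct vcpu *v)
{
        (void)v;
        printf("skip via vendor skip_emulated_instruction callback\n");
        return 1;
}

/* Emulator path: no valid instruction length, re-enter the emulator to skip. */
static int complete_emulated_msr_access(struct vcpu *v)
{
        (void)v;
        printf("skip via the emulator (EMULTYPE_SKIP path)\n");
        return 1;
}

/* Pick the callback at the point where the userspace MSR exit is queued. */
static void queue_msr_exit(struct vcpu *v, bool from_emulator)
{
        v->complete_userspace_io = from_emulator ? complete_emulated_msr_access
                                                 : complete_fast_msr_access;
}

int main(void)
{
        struct vcpu v;

        queue_msr_exit(&v, false);
        v.complete_userspace_io(&v);

        queue_msr_exit(&v, true);
        v.complete_userspace_io(&v);
        return 0;
}
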
Suggested-by: Sean Christopherson Signed-off-by: Hou Wenlong Signed-off-by: Paolo Bonzini Message-Id: <34208da8f51580a06e45afefac95afea0e3f96e3.1635842679.git.houwenlong93@linux.alibaba.com> --- arch/x86/kvm/x86.c | 87 ++++++++++++++++++++++++++-------------------- 1 file changed, 50 insertions(+), 37 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4464aa7931cd..16f7d20ed19c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -118,6 +118,7 @@ static void enter_smm(struct kvm_vcpu *vcpu); static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); static void store_regs(struct kvm_vcpu *vcpu); static int sync_regs(struct kvm_vcpu *vcpu); +static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu); static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2); static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2); @@ -710,6 +711,17 @@ int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) } EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); +static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err) +{ + if (err) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP | + EMULTYPE_COMPLETE_USER_EXIT); +} + void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { ++vcpu->stat.pf_guest; @@ -1815,22 +1827,36 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) } EXPORT_SYMBOL_GPL(kvm_set_msr); -static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) +static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) { - int err = vcpu->run->msr.error; - if (!err) { + if (!vcpu->run->msr.error) { kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); } - - return static_call(kvm_x86_complete_emulated_msr)(vcpu, err); } -static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu) +static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) +{ + return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); +} + +static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) +{ + complete_userspace_rdmsr(vcpu); + return complete_emulated_msr_access(vcpu); +} + +static int complete_fast_msr_access(struct kvm_vcpu *vcpu) { return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); } +static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) +{ + complete_userspace_rdmsr(vcpu); + return complete_fast_msr_access(vcpu); +} + static u64 kvm_msr_reason(int r) { switch (r) { @@ -1865,18 +1891,6 @@ static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, return 1; } -static int kvm_get_msr_user_space(struct kvm_vcpu *vcpu, u32 index, int r) -{ - return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_RDMSR, 0, - complete_emulated_rdmsr, r); -} - -static int kvm_set_msr_user_space(struct kvm_vcpu *vcpu, u32 index, u64 data, int r) -{ - return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_WRMSR, data, - complete_emulated_wrmsr, r); -} - int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) { u32 ecx = kvm_rcx_read(vcpu); @@ -1885,18 +1899,16 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) r = kvm_get_msr(vcpu, ecx, &data); - /* MSR read failed? See if we should ask user space */ - if (r && kvm_get_msr_user_space(vcpu, ecx, r)) { - /* Bounce to user space */ - return 0; - } - if (!r) { trace_kvm_msr_read(ecx, data); kvm_rax_write(vcpu, data & -1u); kvm_rdx_write(vcpu, (data >> 32) & -1u); } else { + /* MSR read failed? 
See if we should ask user space */ + if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, + complete_fast_rdmsr, r)) + return 0; trace_kvm_msr_read_ex(ecx); } @@ -1912,19 +1924,18 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) r = kvm_set_msr(vcpu, ecx, data); - /* MSR write failed? See if we should ask user space */ - if (r && kvm_set_msr_user_space(vcpu, ecx, data, r)) - /* Bounce to user space */ - return 0; - - /* Signal all other negative errors to userspace */ - if (r < 0) - return r; - - if (!r) + if (!r) { trace_kvm_msr_write(ecx, data); - else + } else { + /* MSR write failed? See if we should ask user space */ + if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, + complete_fast_msr_access, r)) + return 0; + /* Signal all other negative errors to userspace */ + if (r < 0) + return r; trace_kvm_msr_write_ex(ecx, data); + } return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); } @@ -7400,7 +7411,8 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, r = kvm_get_msr(vcpu, msr_index, pdata); - if (r && kvm_get_msr_user_space(vcpu, msr_index, r)) { + if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, + complete_emulated_rdmsr, r)) { /* Bounce to user space */ return X86EMUL_IO_NEEDED; } @@ -7416,7 +7428,8 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, r = kvm_set_msr(vcpu, msr_index, data); - if (r && kvm_set_msr_user_space(vcpu, msr_index, data, r)) { + if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, + complete_emulated_msr_access, r)) { /* Bounce to user space */ return X86EMUL_IO_NEEDED; } From adbfb12d4c4517a8adde23a7fc46538953d56eea Mon Sep 17 00:00:00 2001 From: Hou Wenlong Date: Tue, 2 Nov 2021 17:15:32 +0800 Subject: [PATCH 134/268] KVM: x86: Exit to userspace if emulation prepared a completion callback em_rdmsr() and em_wrmsr() return X86EMUL_IO_NEEDED if MSR accesses required an exit to userspace. However, x86_emulate_insn() doesn't return X86EMUL_*, so x86_emulate_instruction() doesn't directly act on X86EMUL_IO_NEEDED; instead, it looks for other signals to differentiate between PIO, MMIO, etc. causing RDMSR/WRMSR emulation not to exit to userspace now. Nevertheless, if the userspace_msr_exit_test testcase in selftests is changed to test RDMSR/WRMSR with a forced emulation prefix, the test passes. What happens is that first userspace exit information is filled but the userspace exit does not happen. Because x86_emulate_instruction() returns 1, the guest retries the instruction---but this time RIP has already been adjusted past the forced emulation prefix, so the guest executes RDMSR/WRMSR and the userspace exit finally happens. Since the X86EMUL_IO_NEEDED path has provided a complete_userspace_io callback, x86_emulate_instruction() can just return 0 if the callback is not NULL. Then RDMSR/WRMSR instruction emulation will exit to userspace directly, without the RDMSR/WRMSR vmexit. 
Fixes: 1ae099540e8c7 ("KVM: x86: Allow deflecting unknown MSR accesses to user space") Signed-off-by: Hou Wenlong Signed-off-by: Paolo Bonzini Message-Id: <56f9df2ee5c05a81155e2be366c9dc1f7adc8817.1635842679.git.houwenlong93@linux.alibaba.com> --- arch/x86/kvm/x86.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 16f7d20ed19c..1aaf37e1bd0f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8227,6 +8227,9 @@ restart: writeback = false; r = 0; vcpu->arch.complete_userspace_io = complete_emulated_mmio; + } else if (vcpu->arch.complete_userspace_io) { + writeback = false; + r = 0; } else if (r == EMULATION_RESTART) goto restart; else From b84155c38076b36d625043a06a2f1c90bde62903 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 29 Nov 2021 10:47:02 +0100 Subject: [PATCH 135/268] KVM: VMX: Introduce vmx_msr_bitmap_l01_changed() helper In preparation to enabling 'Enlightened MSR Bitmap' feature for Hyper-V guests move MSR bitmap update tracking to a dedicated helper. Note: vmx_msr_bitmap_l01_changed() is called when MSR bitmap might be updated. KVM doesn't check if the bit we're trying to set is already set (or the bit it's trying to clear is already cleared). Such situations should not be common and a few false positives should not be a problem. No functional change intended. Signed-off-by: Vitaly Kuznetsov Reviewed-by: Maxim Levitsky Reviewed-by: Sean Christopherson Message-Id: <20211129094704.326635-3-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9d7f26eaf05f..7ee50671191b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3688,6 +3688,17 @@ void free_vpid(int vpid) spin_unlock(&vmx_vpid_lock); } +static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx) +{ + /* + * When KVM is a nested hypervisor on top of Hyper-V and uses + * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR + * bitmap has changed. + */ + if (static_branch_unlikely(&enable_evmcs)) + evmcs_touch_msr_bitmap(); +} + void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -3696,8 +3707,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) if (!cpu_has_vmx_msr_bitmap()) return; - if (static_branch_unlikely(&enable_evmcs)) - evmcs_touch_msr_bitmap(); + vmx_msr_bitmap_l01_changed(vmx); /* * Mark the desired intercept state in shadow bitmap, this is needed @@ -3741,8 +3751,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) if (!cpu_has_vmx_msr_bitmap()) return; - if (static_branch_unlikely(&enable_evmcs)) - evmcs_touch_msr_bitmap(); + vmx_msr_bitmap_l01_changed(vmx); /* * Mark the desired intercept state in shadow bitmap, this is needed From ed2a4800ae9d491e4bf1b8b60b15001ce3b88fcd Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 29 Nov 2021 10:47:03 +0100 Subject: [PATCH 136/268] KVM: nVMX: Track whether changes in L0 require MSR bitmap for L2 to be rebuilt Introduce a flag to keep track of whether MSR bitmap for L2 needs to be rebuilt due to changes in MSR bitmap for L1 or switching to a different L2. This information will be used for Enlightened MSR Bitmap feature for Hyper-V guests. 
Note, setting msr_bitmap_changed to 'true' from set_current_vmptr() is not really needed for Enlightened MSR Bitmap as the feature can only be used in conjunction with Enlightened VMCS but let's keep tracking information complete, it's cheap and in the future similar PV feature can easily be implemented for KVM on KVM too. No functional change intended. Signed-off-by: Vitaly Kuznetsov Message-Id: <20211129094704.326635-4-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 9 ++++++++- arch/x86/kvm/vmx/vmx.c | 2 ++ arch/x86/kvm/vmx/vmx.h | 9 +++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 08e785871985..e7aa9576441c 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -672,6 +672,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false); + vmx->nested.force_msr_bitmap_recalc = false; + return true; } @@ -2029,10 +2031,13 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld( * Clean fields data can't be used on VMLAUNCH and when we switch * between different L2 guests as KVM keeps a single VMCS12 per L1. */ - if (from_launch || evmcs_gpa_changed) + if (from_launch || evmcs_gpa_changed) { vmx->nested.hv_evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; + vmx->nested.force_msr_bitmap_recalc = true; + } + return EVMPTRLD_SUCCEEDED; } @@ -5260,6 +5265,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) vmx->nested.need_vmcs12_to_shadow_sync = true; } vmx->nested.dirty_vmcs12 = true; + vmx->nested.force_msr_bitmap_recalc = true; } /* Emulate the VMPTRLD instruction */ @@ -6395,6 +6401,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, goto error_guest_mode; vmx->nested.dirty_vmcs12 = true; + vmx->nested.force_msr_bitmap_recalc = true; ret = nested_vmx_enter_non_root_mode(vcpu, false); if (ret) goto error_guest_mode; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 7ee50671191b..9c729d6d4e7d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3697,6 +3697,8 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx) */ if (static_branch_unlikely(&enable_evmcs)) evmcs_touch_msr_bitmap(); + + vmx->nested.force_msr_bitmap_recalc = true; } void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index f978699480e3..6c2c1aff1c3d 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -158,6 +158,15 @@ struct nested_vmx { bool need_vmcs12_to_shadow_sync; bool dirty_vmcs12; + /* + * Indicates whether MSR bitmap for L2 needs to be rebuilt due to + * changes in MSR bitmap for L1 or switching to a different L2. Note, + * this flag can only be used reliably in conjunction with a paravirt L1 + * which informs L0 whether any changes to MSR bitmap for L2 were done + * on its side. + */ + bool force_msr_bitmap_recalc; + /* * Indicates lazily loaded guest state has not yet been decached from * vmcs02. From 502d2bf5f2fd7c05adc2d4f057910bd5d4c4c63e Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 29 Nov 2021 10:47:04 +0100 Subject: [PATCH 137/268] KVM: nVMX: Implement Enlightened MSR Bitmap feature Updating MSR bitmap for L2 is not cheap and rearly needed. 
TLFS for Hyper-V offers 'Enlightened MSR Bitmap' feature which allows L1 hypervisor to inform L0 when it changes MSR bitmap, this eliminates the need to examine L1's MSR bitmap for L2 every time when 'real' MSR bitmap for L2 gets constructed. Use 'vmx->nested.msr_bitmap_changed' flag to implement the feature. Note, KVM already uses 'Enlightened MSR bitmap' feature when it runs as a nested hypervisor on top of Hyper-V. The newly introduced feature is going to be used by Hyper-V guests on KVM. When the feature is enabled for Win10+WSL2, it shaves off around 700 CPU cycles from a nested vmexit cost (tight cpuid loop test). Signed-off-by: Vitaly Kuznetsov Message-Id: <20211129094704.326635-5-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/hyperv.c | 2 ++ arch/x86/kvm/vmx/nested.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 7179fa645eda..a91424ed436d 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -2517,6 +2517,8 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, case HYPERV_CPUID_NESTED_FEATURES: ent->eax = evmcs_ver; + if (evmcs_ver) + ent->eax |= HV_X64_NESTED_MSR_BITMAP; break; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index e7aa9576441c..2f6f465e575f 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -599,6 +599,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, int msr; unsigned long *msr_bitmap_l1; unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap; + struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; struct kvm_host_map *map = &vmx->nested.msr_bitmap_map; /* Nothing to do if the MSR bitmap is not in use. */ @@ -606,6 +607,19 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) return false; + /* + * MSR bitmap update can be skipped when: + * - MSR bitmap for L1 hasn't changed. + * - Nested hypervisor (L1) is attempting to launch the same L2 as + * before. + * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature + * and tells KVM (L0) there were no changes in MSR bitmap for L2. + */ + if (!vmx->nested.force_msr_bitmap_recalc && evmcs && + evmcs->hv_enlightenments_control.msr_bitmap && + evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP) + return true; + if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map)) return false; From 142ff9bddbde757674c7081ffc238cfcffa1859b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 8 Dec 2021 14:46:41 +0000 Subject: [PATCH 138/268] KVM: arm64: Drop unused workaround_flags vcpu field workaround_flags is a leftover from our earlier Spectre-v4 workaround implementation, and now serves no purpose. Get rid of the field and the corresponding asm-offset definition. 
Fixes: 29e8910a566a ("KVM: arm64: Simplify handling of ARCH_WORKAROUND_2") Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 --- arch/arm64/kernel/asm-offsets.c | 1 - 2 files changed, 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2a5f7f38006f..3811d19e5674 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -298,9 +298,6 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; - /* State of various workarounds, see kvm_asm.h for bit assignment */ - u64 workaround_flags; - /* Miscellaneous vcpu state flags */ u64 flags; diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 6d0c3afd36b8..1197e7679882 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -111,7 +111,6 @@ int main(void) #ifdef CONFIG_KVM DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); - DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs)); DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1])); From 8e819d75cbcf541a833219521379114a76a645a6 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Thu, 9 Dec 2021 13:54:36 +0200 Subject: [PATCH 139/268] KVM: x86: add a tracepoint for APICv/AVIC interrupt delivery This allows to see how many interrupts were delivered via the APICv/AVIC from the host. Signed-off-by: Maxim Levitsky Message-Id: <20211209115440.394441-3-mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 3 +++ arch/x86/kvm/trace.h | 24 ++++++++++++++++++++++++ arch/x86/kvm/x86.c | 1 + 3 files changed, 28 insertions(+) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 40270d7bc597..c5028e6b0f96 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1100,6 +1100,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, kvm_lapic_set_irr(vector, apic); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_vcpu_kick(vcpu); + } else { + trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, + trig_mode, vector); } break; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 953b0fcb21ee..92e6f6702f00 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -1356,6 +1356,30 @@ TRACE_EVENT(kvm_apicv_update_request, __entry->bit) ); +TRACE_EVENT(kvm_apicv_accept_irq, + TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), + TP_ARGS(apicid, dm, tm, vec), + + TP_STRUCT__entry( + __field( __u32, apicid ) + __field( __u16, dm ) + __field( __u16, tm ) + __field( __u8, vec ) + ), + + TP_fast_assign( + __entry->apicid = apicid; + __entry->dm = dm; + __entry->tm = tm; + __entry->vec = vec; + ), + + TP_printk("apicid %x vec %u (%s|%s)", + __entry->apicid, __entry->vec, + __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), + __entry->tm ? 
"level" : "edge") +); + /* * Tracepoint for AMD AVIC */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1aaf37e1bd0f..26cb3a4cd0e9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12693,6 +12693,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request); +EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter); From 83c98007d9fb5c827cd954fc48e9cba034ef6fdc Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 8 Dec 2021 01:52:12 +0000 Subject: [PATCH 140/268] KVM: nVMX: Ensure vCPU honors event request if posting nested IRQ fails Add a memory barrier between writing vcpu->requests and reading vcpu->guest_mode to ensure the read is ordered after the write when (potentially) delivering an IRQ to L2 via nested posted interrupt. If the request were to be completed after reading vcpu->mode, it would be possible for the target vCPU to enter the guest without posting the interrupt and without handling the event request. Note, the barrier is only for documentation since atomic operations are serializing on x86. Suggested-by: Paolo Bonzini Fixes: 6b6977117f50 ("KVM: nVMX: Fix races when sending nested PI while dest enters/leaves L2") Fixes: 705699a13994 ("KVM: nVMX: Enable nested posted interrupt processing") Signed-off-by: Sean Christopherson Message-Id: <20211208015236.1616697-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9c729d6d4e7d..63615d242bdf 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3943,6 +3943,19 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, */ vmx->nested.pi_pending = true; kvm_make_request(KVM_REQ_EVENT, vcpu); + + /* + * This pairs with the smp_mb_*() after setting vcpu->mode in + * vcpu_enter_guest() to guarantee the vCPU sees the event + * request if triggering a posted interrupt "fails" because + * vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as + * the smb_wmb() in kvm_make_request() only ensures everything + * done before making the request is visible when the request + * is visible, it doesn't ensure ordering between the store to + * vcpu->requests and the load from vcpu->mode. + */ + smp_mb__after_atomic(); + /* the PIR and ON have been set by L1. */ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) kvm_vcpu_kick(vcpu); @@ -3976,6 +3989,12 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) if (pi_test_and_set_on(&vmx->pi_desc)) return 0; + /* + * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*() + * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is + * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a + * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE. 
+ */ if (vcpu != kvm_get_running_vcpu() && !kvm_vcpu_trigger_posted_interrupt(vcpu, false)) kvm_vcpu_kick(vcpu); From 45af1bb99b72e36c16714390a8a3c9445e432938 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 8 Dec 2021 01:52:13 +0000 Subject: [PATCH 141/268] KVM: VMX: Clean up PI pre/post-block WARNs Move the WARN sanity checks out of the PI descriptor update loop so as not to spam the kernel log if the condition is violated and the update takes multiple attempts due to another writer. This also eliminates a few extra uops from the retry path. Technically not checking every attempt could mean KVM will now fail to WARN in a scenario that would have failed before, but any such failure would be inherently racy as some other agent (CPU or device) would have to concurrent modify the PI descriptor. Add a helper to handle the actual write and more importantly to document why the write may need to be retried. Signed-off-by: Sean Christopherson Message-Id: <20211208015236.1616697-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/posted_intr.c | 35 ++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 4db2b14ee7c6..88c53c521094 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -34,6 +34,20 @@ static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) return &(to_vmx(vcpu)->pi_desc); } +static int pi_try_set_control(struct pi_desc *pi_desc, u64 old, u64 new) +{ + /* + * PID.ON can be set at any time by a different vCPU or by hardware, + * e.g. a device. PID.control must be written atomically, and the + * update must be retried with a fresh snapshot an ON change causes + * the cmpxchg to fail. 
+ */ + if (cmpxchg64(&pi_desc->control, old, new) != old) + return -EBUSY; + + return 0; +} + void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); @@ -74,8 +88,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) new.ndst = dest; new.sn = 0; - } while (cmpxchg64(&pi_desc->control, old.control, - new.control) != old.control); + } while (pi_try_set_control(pi_desc, old.control, new.control)); after_clear_sn: @@ -128,17 +141,17 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) if (!x2apic_mode) dest = (dest << 8) & 0xFF00; + WARN(pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR, + "Wakeup handler not enabled while the vCPU was blocking"); + do { old.control = new.control = READ_ONCE(pi_desc->control); - WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, - "Wakeup handler not enabled while the VCPU is blocked\n"); new.ndst = dest; /* set 'NV' to 'notification vector' */ new.nv = POSTED_INTR_VECTOR; - } while (cmpxchg64(&pi_desc->control, old.control, - new.control) != old.control); + } while (pi_try_set_control(pi_desc, old.control, new.control)); vcpu->pre_pcpu = -1; } @@ -173,17 +186,15 @@ int pi_pre_block(struct kvm_vcpu *vcpu) &per_cpu(blocked_vcpu_on_cpu, vcpu->cpu)); spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu)); + WARN(pi_desc->sn == 1, + "Posted Interrupt Suppress Notification set before blocking"); + do { old.control = new.control = READ_ONCE(pi_desc->control); - WARN((pi_desc->sn == 1), - "Warning: SN field of posted-interrupts " - "is set before blocking\n"); - /* set 'NV' to 'wakeup vector' */ new.nv = POSTED_INTR_WAKEUP_VECTOR; - } while (cmpxchg64(&pi_desc->control, old.control, - new.control) != old.control); + } while (pi_try_set_control(pi_desc, old.control, new.control)); /* We should not block the vCPU if an interrupt is posted for it. */ if (pi_test_on(pi_desc)) From dc70ec217cec504e6f8fee8fd91bf5c118af05f2 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:40 +0000 Subject: [PATCH 142/268] KVM: Introduce CONFIG_HAVE_KVM_DIRTY_RING I'd like to make the build include dirty_ring.c based on whether the arch wants it or not. That's a whole lot simpler if there's a config symbol instead of doing it implicitly on KVM_DIRTY_LOG_PAGE_OFFSET being set to something non-zero. Signed-off-by: David Woodhouse Message-Id: <20211121125451.9489-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Kconfig | 1 + include/linux/kvm_dirty_ring.h | 8 ++++---- virt/kvm/Kconfig | 3 +++ virt/kvm/kvm_main.c | 4 ++-- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 7618bef0a4a9..03b2ce34e7f4 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -27,6 +27,7 @@ config KVM select MMU_NOTIFIER select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD + select HAVE_KVM_DIRTY_RING select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS select HAVE_KVM_IRQ_ROUTING diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h index 120e5e90fa1d..4da8d4a4140b 100644 --- a/include/linux/kvm_dirty_ring.h +++ b/include/linux/kvm_dirty_ring.h @@ -27,9 +27,9 @@ struct kvm_dirty_ring { int index; }; -#if (KVM_DIRTY_LOG_PAGE_OFFSET == 0) +#ifndef CONFIG_HAVE_KVM_DIRTY_RING /* - * If KVM_DIRTY_LOG_PAGE_OFFSET not defined, kvm_dirty_ring.o should + * If CONFIG_HAVE_HVM_DIRTY_RING not defined, kvm_dirty_ring.o should * not be included as well, so define these nop functions for the arch. 
*/ static inline u32 kvm_dirty_ring_get_rsvd_entries(void) @@ -74,7 +74,7 @@ static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring) return true; } -#else /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */ +#else /* CONFIG_HAVE_KVM_DIRTY_RING */ u32 kvm_dirty_ring_get_rsvd_entries(void); int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size); @@ -98,6 +98,6 @@ struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset); void kvm_dirty_ring_free(struct kvm_dirty_ring *ring); bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring); -#endif /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */ +#endif /* CONFIG_HAVE_KVM_DIRTY_RING */ #endif /* KVM_DIRTY_RING_H */ diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 62b39149b8c8..97cf5413ac25 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -13,6 +13,9 @@ config HAVE_KVM_IRQFD config HAVE_KVM_IRQ_ROUTING bool +config HAVE_KVM_DIRTY_RING + bool + config HAVE_KVM_EVENTFD bool select EVENTFD diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f3acff708bf5..b0f7e6eb00ff 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3600,7 +3600,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) { -#if KVM_DIRTY_LOG_PAGE_OFFSET > 0 +#ifdef CONFIG_HAVE_KVM_DIRTY_RING return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) && (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET + kvm->dirty_ring_size / PAGE_SIZE); @@ -4305,7 +4305,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) case KVM_CAP_NR_MEMSLOTS: return KVM_USER_MEM_SLOTS; case KVM_CAP_DIRTY_LOG_RING: -#if KVM_DIRTY_LOG_PAGE_OFFSET > 0 +#ifdef CONFIG_HAVE_KVM_DIRTY_RING return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn); #else return 0; From 6f2cdbdba43e4afad8df1ab06797c83e3af4a3dc Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:41 +0000 Subject: [PATCH 143/268] KVM: Add Makefile.kvm for common files, use it for x86 Splitting kvm_main.c out into smaller and better-organized files is slightly non-trivial when it involves editing a bunch of per-arch KVM makefiles. Provide virt/kvm/Makefile.kvm for them to include. 
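For reference, the common list pulled in by the new makefile (added below) builds kvm_main.o, eventfd.o and binary_stats.o unconditionally, and gates vfio.o, coalesced_mmio.o, async_pf.o, irqchip.o and dirty_ring.o on CONFIG_KVM_VFIO, CONFIG_KVM_MMIO, CONFIG_KVM_ASYNC_PF, CONFIG_HAVE_KVM_IRQ_ROUTING and the new CONFIG_HAVE_KVM_DIRTY_RING respectively, so each architecture only has to append its own objects to $(kvm-y).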
Signed-off-by: David Woodhouse Acked-by: Marc Zyngier Message-Id: <20211121125451.9489-3-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Makefile | 7 +------ virt/kvm/Makefile.kvm | 13 +++++++++++++ 2 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 virt/kvm/Makefile.kvm diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 75dfd27b6e8a..30f244b64523 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -7,12 +7,7 @@ ifeq ($(CONFIG_FRAME_POINTER),y) OBJECT_FILES_NON_STANDARD_vmenter.o := y endif -KVM := ../../../virt/kvm - -kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ - $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \ - $(KVM)/dirty_ring.o $(KVM)/binary_stats.o -kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o +include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm new file mode 100644 index 000000000000..ffdcad3cc97a --- /dev/null +++ b/virt/kvm/Makefile.kvm @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Kernel-based Virtual Machine module +# + +KVM ?= ../../../virt/kvm + +kvm-y := $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o +kvm-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o +kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o +kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o +kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o +kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o From f786ab1bf17af973d80ab648ee22d037d1ffebec Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:42 +0000 Subject: [PATCH 144/268] KVM: s390: Use Makefile.kvm for common files Signed-off-by: David Woodhouse Reviewed-by: Christian Borntraeger Message-Id: <20211121125451.9489-4-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/s390/kvm/Makefile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile index b3aaadc60ead..26f4a74e5ce4 100644 --- a/arch/s390/kvm/Makefile +++ b/arch/s390/kvm/Makefile @@ -3,13 +3,11 @@ # # Copyright IBM Corp. 
2008 -KVM := ../../../virt/kvm -common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o \ - $(KVM)/irqchip.o $(KVM)/vfio.o $(KVM)/binary_stats.o +include $(srctree)/virt/kvm/Makefile.kvm ccflags-y := -Ivirt/kvm -Iarch/s390/kvm -kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o -kvm-objs += diag.o gaccess.o guestdbg.o vsie.o pv.o +kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o +kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o obj-$(CONFIG_KVM) += kvm.o From ae1b606e6207476d97d642010b2775a9465a46d6 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:43 +0000 Subject: [PATCH 145/268] KVM: mips: Use Makefile.kvm for common files Signed-off-by: David Woodhouse Message-Id: <20211121125451.9489-5-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index d3710959da55..21ff75bcdbc4 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -2,9 +2,10 @@ # Makefile for KVM support for MIPS # +include $(srctree)/virt/kvm/Makefile.kvm + ccflags-y += -Ivirt/kvm -Iarch/mips/kvm -kvm-y := $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o binary_stats.o) kvm-$(CONFIG_CPU_HAS_MSA) += msa.o kvm-y += mips.o emulate.o entry.o \ From 3e3aa26fd4c44c4aec44b494b69b74dacbeb4685 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:44 +0000 Subject: [PATCH 146/268] KVM: RISC-V: Use Makefile.kvm for common files Signed-off-by: David Woodhouse Message-Id: <20211121125451.9489-6-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/riscv/kvm/Makefile | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 30cdd1df0098..300590225348 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -5,14 +5,10 @@ ccflags-y += -I $(srctree)/$(src) -KVM := ../../../virt/kvm +include $(srctree)/virt/kvm/Makefile.kvm obj-$(CONFIG_KVM) += kvm.o -kvm-y += $(KVM)/kvm_main.o -kvm-y += $(KVM)/coalesced_mmio.o -kvm-y += $(KVM)/binary_stats.o -kvm-y += $(KVM)/eventfd.o kvm-y += main.o kvm-y += vm.o kvm-y += vmid.o From 5f33868af8f4cd688cadff44a67d934684548011 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:45 +0000 Subject: [PATCH 147/268] KVM: powerpc: Use Makefile.kvm for common files It's all fairly baroque but in the end, I don't think there's any reason for $(KVM)/irqchip.o to have been handled differently, as they all end up in $(kvm-y) in the end anyway, regardless of whether they get there via $(common-objs-y) and the CPU-specific object lists. 
Signed-off-by: David Woodhouse Acked-by: Michael Ellerman (powerpc) Message-Id: <20211121125451.9489-7-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/powerpc/kvm/Makefile | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 583c14ef596e..9bdfc8b50899 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -4,11 +4,8 @@ # ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm -KVM := ../../../virt/kvm -common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o -common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o -common-objs-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o +include $(srctree)/virt/kvm/Makefile.kvm common-objs-y += powerpc.o emulate_loadstore.o obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o @@ -125,9 +122,8 @@ kvm-book3s_32-objs := \ kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs) kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o -kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o -kvm-objs := $(kvm-objs-m) $(kvm-objs-y) +kvm-y += $(kvm-objs-m) $(kvm-objs-y) obj-$(CONFIG_KVM_E500V2) += kvm.o obj-$(CONFIG_KVM_E500MC) += kvm.o From d8f6ef45a623d650f9b97e11553adb4978f6aa70 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sun, 21 Nov 2021 12:54:46 +0000 Subject: [PATCH 148/268] KVM: arm64: Use Makefile.kvm for common files Signed-off-by: David Woodhouse Acked-by: Marc Zyngier Message-Id: <20211121125451.9489-8-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/Makefile | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 989bb5dad2c8..04a53f71a6b6 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -5,14 +5,12 @@ ccflags-y += -I $(srctree)/$(src) -KVM=../../../virt/kvm +include $(srctree)/virt/kvm/Makefile.kvm obj-$(CONFIG_KVM) += kvm.o obj-$(CONFIG_KVM) += hyp/ -kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \ - $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o \ - arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \ +kvm-y += arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \ inject_fault.o va_layout.o handle_exit.o \ guest.o debug.o reset.o sys_regs.o \ vgic-sys-reg-v3.o fpsimd.o pmu.o \ From 34b43a8849229e8363c19236ecdf463b7a89d085 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 8 Dec 2021 15:22:54 +0000 Subject: [PATCH 149/268] KVM: arm64: pkvm: Fix hyp_pool max order The EL2 page allocator in protected mode maintains a per-pool max order value to optimize allocations when the memory region it covers is small. However, the max order value is currently under-estimated whenever the number of pages in the region is a power of two. Fix the estimation. 
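To make the off-by-one concrete (assuming 4KiB pages, and noting that the allocator treats max_order as an exclusive bound, since the initialisation loop below only sets up free_area[0..max_order-1]): for nr_pages = 16, get_order(16 << PAGE_SHIFT) = 4, so only orders 0..3 get a free list and an order-4 (16-page) allocation can never be served, even though the region is exactly 16 contiguous pages. get_order((16 + 1) << PAGE_SHIFT) = 5 makes order 4 usable again, while for any non-power-of-two nr_pages the extra page does not change the result of get_order().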
Signed-off-by: Quentin Perret Acked-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211208152300.2478542-2-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c index 0bd7701ad1df..543cad6c376a 100644 --- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c @@ -241,7 +241,7 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, int i; hyp_spin_lock_init(&pool->lock); - pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT)); + pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT)); for (i = 0; i < pool->max_order; i++) INIT_LIST_HEAD(&pool->free_area[i]); pool->range_start = phys; From a770ee80e66270a7df183dda5ad6df4e8c8ab615 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 8 Dec 2021 15:22:55 +0000 Subject: [PATCH 150/268] KVM: arm64: pkvm: Disable GICv2 support GICv2 requires having device mappings in guests and the hypervisor, which is incompatible with the current pKVM EL2 page ownership model which only covers memory. While it would be desirable to support pKVM with GICv2, this will require a lot more work, so let's make the current assumption clear until then. Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Quentin Perret Acked-by: Will Deacon Link: https://lore.kernel.org/r/20211208152300.2478542-3-qperret@google.com --- arch/arm64/kvm/vgic/vgic-v2.c | 5 +++++ arch/arm64/kvm/vgic/vgic-v3.c | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c index 95a18cec14a3..8e337a0d7817 100644 --- a/arch/arm64/kvm/vgic/vgic-v2.c +++ b/arch/arm64/kvm/vgic/vgic-v2.c @@ -345,6 +345,11 @@ int vgic_v2_probe(const struct gic_kvm_info *info) int ret; u32 vtr; + if (is_protected_kvm_enabled()) { + kvm_err("GICv2 not supported in protected mode\n"); + return -ENXIO; + } + if (!info->vctrl.start) { kvm_err("GICH not present in the firmware table\n"); return -ENXIO; diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 04f62c4b07fb..debad4e6e6c9 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -651,7 +651,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info) } else if (!PAGE_ALIGNED(info->vcpu.start)) { pr_warn("GICV physical address 0x%llx not page aligned\n", (unsigned long long)info->vcpu.start); - } else { + } else if (kvm_get_mode() != KVM_MODE_PROTECTED) { kvm_vgic_global_state.vcpu_base = info->vcpu.start; kvm_vgic_global_state.can_emulate_gicv2 = true; ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); From 53a563b01fa2ae2376a0b7d547f26a0ae9c78b5c Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 8 Dec 2021 15:22:56 +0000 Subject: [PATCH 151/268] KVM: arm64: Make the hyp memory pool static The hyp memory pool struct is sized to fit exactly the needs of the hypervisor stage-1 page-table allocator, so it is important it is not used for anything else. As it is currently used only from setup.c, reduce its visibility by marking it static. 
Signed-off-by: Quentin Perret Reviewed-by: Andrew Walbran Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211208152300.2478542-4-qperret@google.com --- arch/arm64/kvm/hyp/include/nvhe/mm.h | 1 - arch/arm64/kvm/hyp/nvhe/setup.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h index c9a8f535212e..75f58b783fd7 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -15,7 +15,6 @@ extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); extern struct kvm_pgtable pkvm_pgtable; extern hyp_spinlock_t pkvm_pgd_lock; -extern struct hyp_pool hpool; extern u64 __io_map_base; int hyp_create_idmap(u32 hyp_va_bits); diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 578f71798c2e..e35709367598 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -17,7 +17,6 @@ #include #include -struct hyp_pool hpool; unsigned long hyp_nr_cpus; #define hyp_percpu_size ((unsigned long)__per_cpu_end - \ @@ -27,6 +26,7 @@ static void *vmemmap_base; static void *hyp_pgt_base; static void *host_s2_pgt_base; static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; +static struct hyp_pool hpool; static int divide_memory_pool(void *virt, unsigned long size) { From 473a3efbafaa9ffd06c8b8f653f24c97b5ac3ff0 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 8 Dec 2021 15:22:57 +0000 Subject: [PATCH 152/268] KVM: arm64: Make __io_map_base static The __io_map_base variable is used at EL2 to track the end of the hypervisor's "private" VA range in nVHE protected mode. However it doesn't need to be used outside of mm.c, so let's make it static to keep all the hyp VA allocation logic in one place. Signed-off-by: Quentin Perret Acked-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211208152300.2478542-5-qperret@google.com --- arch/arm64/kvm/hyp/include/nvhe/mm.h | 1 - arch/arm64/kvm/hyp/nvhe/mm.c | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h index 75f58b783fd7..5ed5ba637d00 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -15,7 +15,6 @@ extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); extern struct kvm_pgtable pkvm_pgtable; extern hyp_spinlock_t pkvm_pgd_lock; -extern u64 __io_map_base; int hyp_create_idmap(u32 hyp_va_bits); int hyp_map_vectors(void); diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c index 2fabeceb889a..e78fd8e1beef 100644 --- a/arch/arm64/kvm/hyp/nvhe/mm.c +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -18,11 +18,12 @@ struct kvm_pgtable pkvm_pgtable; hyp_spinlock_t pkvm_pgd_lock; -u64 __io_map_base; struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS]; unsigned int hyp_memblock_nr; +static u64 __io_map_base; + static int __pkvm_create_mappings(unsigned long start, unsigned long size, unsigned long phys, enum kvm_pgtable_prot prot) { From bff01cb6b1bf68052739eb6155132f7d6d974208 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 8 Dec 2021 15:22:58 +0000 Subject: [PATCH 153/268] KVM: arm64: pkvm: Stub io map functions Now that GICv2 is disabled in nVHE protected mode there should be no other reason for the host to use create_hyp_io_mappings() or kvm_phys_addr_ioremap(). 
Add sanity checks to make sure that assumption remains true looking forward. Signed-off-by: Quentin Perret Acked-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211208152300.2478542-6-qperret@google.com --- arch/arm64/kvm/mmu.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 326cdfec74a1..605c104eb030 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -407,6 +407,9 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, unsigned long addr; int ret; + if (is_protected_kvm_enabled()) + return -EPERM; + *kaddr = ioremap(phys_addr, size); if (!*kaddr) return -ENOMEM; @@ -650,6 +653,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, KVM_PGTABLE_PROT_R | (writable ? KVM_PGTABLE_PROT_W : 0); + if (is_protected_kvm_enabled()) + return -EPERM; + size += offset_in_page(guest_ipa); guest_ipa &= PAGE_MASK; From 64a1fbda59f4b14adde7f21cda687e2b9703b7bb Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 8 Dec 2021 15:22:59 +0000 Subject: [PATCH 154/268] KVM: arm64: pkvm: Make kvm_host_owns_hyp_mappings() robust to VHE The kvm_host_owns_hyp_mappings() function should return true if and only if the host kernel is responsible for creating the hypervisor stage-1 mappings. That is only possible in standard non-VHE mode, or during boot in protected nVHE mode. But either way, none of this makes sense in VHE, so make sure to catch this case as well, hence making the function return sensible values in any context (VHE or not). Suggested-by: Marc Zyngier Signed-off-by: Quentin Perret Acked-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211208152300.2478542-7-qperret@google.com --- arch/arm64/kvm/mmu.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 605c104eb030..ea840fa223b5 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -239,6 +239,9 @@ void free_hyp_pgds(void) static bool kvm_host_owns_hyp_mappings(void) { + if (is_kernel_in_hyp_mode()) + return false; + if (static_branch_likely(&kvm_protected_mode_initialized)) return false; From c95b1d7ca794ac3ea49a8f85c729f9841444d5e5 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 16 Dec 2021 10:45:26 +0000 Subject: [PATCH 155/268] KVM: arm64: vgic-v3: Fix vcpu index comparison When handling an error at the point where we try and register all the redistributors, we unregister all the previously registered frames by counting down from the failing index. However, the way the code is written relies on that index being a signed value. Which won't be true once we switch to an xarray-based vcpu set. Since this code is pretty awkward the first place, and that the failure mode is hard to spot, rewrite this loop to iterate over the vcpus upwards rather than downwards. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211216104526.1482124-1-maz@kernel.org --- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index bf7ec4a78497..9943a3fe1b0a 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -763,10 +763,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm) } if (ret) { - /* The current c failed, so we start with the previous one. */ + /* The current c failed, so iterate over the previous ones. 
*/ + int i; + mutex_lock(&kvm->slots_lock); - for (c--; c >= 0; c--) { - vcpu = kvm_get_vcpu(kvm, c); + for (i = 0; i < c; i++) { + vcpu = kvm_get_vcpu(kvm, i); vgic_unregister_redist_iodev(vcpu); } mutex_unlock(&kvm->slots_lock); From 440523b92be6a25e53f9ba1f3b418345fe465b51 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 16 Dec 2021 10:45:07 +0000 Subject: [PATCH 156/268] KVM: arm64: vgic: Demote userspace-triggered console prints to kvm_debug() Running the KVM selftests results in these messages being dumped in the kernel console: [ 188.051073] kvm [469]: VGIC redist and dist frames overlap [ 188.056820] kvm [469]: VGIC redist and dist frames overlap [ 188.076199] kvm [469]: VGIC redist and dist frames overlap Being amle to trigger this from userspace is definitely not on, so demote these warnings to kvm_debug(). Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211216104507.1482017-1-maz@kernel.org --- arch/arm64/kvm/vgic/vgic-v2.c | 4 ++-- arch/arm64/kvm/vgic/vgic-v3.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c index 95a18cec14a3..280a708631cd 100644 --- a/arch/arm64/kvm/vgic/vgic-v2.c +++ b/arch/arm64/kvm/vgic/vgic-v2.c @@ -293,12 +293,12 @@ int vgic_v2_map_resources(struct kvm *kvm) if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) || IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) { - kvm_err("Need to set vgic cpu and dist addresses first\n"); + kvm_debug("Need to set vgic cpu and dist addresses first\n"); return -ENXIO; } if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) { - kvm_err("VGIC CPU and dist frames overlap\n"); + kvm_debug("VGIC CPU and dist frames overlap\n"); return -EINVAL; } diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 04f62c4b07fb..97ab7c02e189 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -554,12 +554,12 @@ int vgic_v3_map_resources(struct kvm *kvm) } if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) { - kvm_err("Need to set vgic distributor addresses first\n"); + kvm_debug("Need to set vgic distributor addresses first\n"); return -ENXIO; } if (!vgic_v3_check_base(kvm)) { - kvm_err("VGIC redist and dist frames overlap\n"); + kvm_debug("VGIC redist and dist frames overlap\n"); return -EINVAL; } From 1fac3cfb9cc60d71b66ee5127b2bc5b5f9f79df8 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 15 Dec 2021 16:12:18 +0000 Subject: [PATCH 157/268] KVM: arm64: Provide {get,put}_page() stubs for early hyp allocator In nVHE protected mode, the EL2 code uses a temporary allocator during boot while re-creating its stage-1 page-table. Unfortunately, the hyp_vmmemap is not ready to use at this stage, so refcounting pages is not possible. That is not currently a problem because hyp stage-1 mappings are never removed, which implies refcounting of page-table pages is unnecessary. In preparation for allowing hypervisor stage-1 mappings to be removed, provide stub implementations for {get,put}_page() in the early allocator. 
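The pattern is easiest to see in isolation. The following is a minimal userspace sketch with made-up names (illustrative only, not the kernel's kvm_pgtable_mm_ops): the point is that generic table-walking code can call the refcount hooks unconditionally (as the following refcount patch does), because an allocator that cannot refcount yet supplies do-nothing callbacks instead of forcing NULL checks into every walker.

  #include <stdio.h>

  /* Hypothetical ops table, loosely modelled on a page-table mm_ops vtable. */
  struct pt_mm_ops {
  	void (*get_page)(void *addr);
  	void (*put_page)(void *addr);
  };

  /* The early allocator never frees, so refcounting can safely be a no-op. */
  static void early_get_page(void *addr) { }
  static void early_put_page(void *addr) { }

  static void install_table_entry(const struct pt_mm_ops *ops, void *table_page)
  {
  	/* ...write the entry... then take a reference on the table page. */
  	ops->get_page(table_page);	/* uniform call, no "if (ops->get_page)" needed */
  }

  int main(void)
  {
  	struct pt_mm_ops early_ops = {
  		.get_page = early_get_page,
  		.put_page = early_put_page,
  	};
  	char fake_table_page[64];

  	install_table_entry(&early_ops, fake_table_page);
  	puts("walked with no-op refcount hooks");
  	return 0;
  }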
Acked-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-2-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/early_alloc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/kvm/hyp/nvhe/early_alloc.c b/arch/arm64/kvm/hyp/nvhe/early_alloc.c index 1306c430ab87..00de04153cc6 100644 --- a/arch/arm64/kvm/hyp/nvhe/early_alloc.c +++ b/arch/arm64/kvm/hyp/nvhe/early_alloc.c @@ -43,6 +43,9 @@ void *hyp_early_alloc_page(void *arg) return hyp_early_alloc_contig(1); } +static void hyp_early_alloc_get_page(void *addr) { } +static void hyp_early_alloc_put_page(void *addr) { } + void hyp_early_alloc_init(void *virt, unsigned long size) { base = cur = (unsigned long)virt; @@ -51,4 +54,6 @@ void hyp_early_alloc_init(void *virt, unsigned long size) hyp_early_alloc_mm_ops.zalloc_page = hyp_early_alloc_page; hyp_early_alloc_mm_ops.phys_to_virt = hyp_phys_to_virt; hyp_early_alloc_mm_ops.virt_to_phys = hyp_virt_to_phys; + hyp_early_alloc_mm_ops.get_page = hyp_early_alloc_get_page; + hyp_early_alloc_mm_ops.put_page = hyp_early_alloc_put_page; } From 2ea2ff91e82293909d4879b0b4c6c94b02d52b7e Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 15 Dec 2021 16:12:19 +0000 Subject: [PATCH 158/268] KVM: arm64: Refcount hyp stage-1 pgtable pages To prepare the ground for allowing hyp stage-1 mappings to be removed at run-time, update the KVM page-table code to maintain a correct refcount using the ->{get,put}_page() function callbacks. Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-3-qperret@google.com --- arch/arm64/kvm/hyp/pgtable.c | 41 ++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index f8ceebe4982e..e50e9158fc56 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -383,21 +383,6 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte) return prot; } -static bool hyp_pte_needs_update(kvm_pte_t old, kvm_pte_t new) -{ - /* - * Tolerate KVM recreating the exact same mapping, or changing software - * bits if the existing mapping was valid. 
- */ - if (old == new) - return false; - - if (!kvm_pte_valid(old)) - return true; - - return !WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW); -} - static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct hyp_map_data *data) { @@ -407,11 +392,16 @@ static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level, if (!kvm_block_mapping_supported(addr, end, phys, level)) return false; - new = kvm_init_valid_leaf_pte(phys, data->attr, level); - if (hyp_pte_needs_update(old, new)) - smp_store_release(ptep, new); - data->phys += granule; + new = kvm_init_valid_leaf_pte(phys, data->attr, level); + if (old == new) + return true; + if (!kvm_pte_valid(old)) + data->mm_ops->get_page(ptep); + else if (WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW)) + return false; + + smp_store_release(ptep, new); return true; } @@ -433,6 +423,7 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, return -ENOMEM; kvm_set_table_pte(ptep, childp, mm_ops); + mm_ops->get_page(ptep); return 0; } @@ -482,8 +473,16 @@ static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { struct kvm_pgtable_mm_ops *mm_ops = arg; + kvm_pte_t pte = *ptep; + + if (!kvm_pte_valid(pte)) + return 0; + + mm_ops->put_page(ptep); + + if (kvm_pte_table(pte, level)) + mm_ops->put_page(kvm_pte_follow(pte, mm_ops)); - mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops)); return 0; } @@ -491,7 +490,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) { struct kvm_pgtable_walker walker = { .cb = hyp_free_walker, - .flags = KVM_PGTABLE_WALK_TABLE_POST, + .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, .arg = pgt->mm_ops, }; From d6b4bd3f4897f3b60ac9e8c9e2f0300e739b3392 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 15 Dec 2021 16:12:20 +0000 Subject: [PATCH 159/268] KVM: arm64: Fixup hyp stage-1 refcount In nVHE-protected mode, the hyp stage-1 page-table refcount is broken due to the lack of refcount support in the early allocator. Fix-up the refcount in the finalize walker, once the 'hyp_vmemmap' is up and running. Acked-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-4-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/setup.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 578f71798c2e..875b5174342f 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -165,6 +165,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level, enum kvm_pgtable_walk_flags flag, void * const arg) { + struct kvm_pgtable_mm_ops *mm_ops = arg; enum kvm_pgtable_prot prot; enum pkvm_page_state state; kvm_pte_t pte = *ptep; @@ -173,6 +174,15 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level, if (!kvm_pte_valid(pte)) return 0; + /* + * Fix-up the refcount for the page-table pages as the early allocator + * was unable to access the hyp_vmemmap and so the buddy allocator has + * initialised the refcount to '1'. 
+ */ + mm_ops->get_page(ptep); + if (flag != KVM_PGTABLE_WALK_LEAF) + return 0; + if (level != (KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; @@ -205,7 +215,8 @@ static int finalize_host_mappings(void) { struct kvm_pgtable_walker walker = { .cb = finalize_host_mappings_walker, - .flags = KVM_PGTABLE_WALK_LEAF, + .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, + .arg = pkvm_pgtable.mm_ops, }; int i, ret; @@ -240,10 +251,6 @@ void __noreturn __pkvm_init_finalise(void) if (ret) goto out; - ret = finalize_host_mappings(); - if (ret) - goto out; - pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) { .zalloc_page = hyp_zalloc_hyp_page, .phys_to_virt = hyp_phys_to_virt, @@ -253,6 +260,10 @@ void __noreturn __pkvm_init_finalise(void) }; pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops; + ret = finalize_host_mappings(); + if (ret) + goto out; + out: /* * We tail-called to here from handle___pkvm_init() and will not return, From 34ec7cbf1ee0c45e66a0c24311bcd5b83b7109f5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Dec 2021 16:12:21 +0000 Subject: [PATCH 160/268] KVM: arm64: Hook up ->page_count() for hypervisor stage-1 page-table kvm_pgtable_hyp_unmap() relies on the ->page_count() function callback being provided by the memory-management operations for the page-table. Wire up this callback for the hypervisor stage-1 page-table. Signed-off-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-5-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/setup.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 875b5174342f..855a19056627 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -257,6 +257,7 @@ void __noreturn __pkvm_init_finalise(void) .virt_to_phys = hyp_virt_to_phys, .get_page = hpool_get_page, .put_page = hpool_put_page, + .page_count = hyp_page_count, }; pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops; From 82bb02445de57bb3072052705f6f5dea9465592e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Dec 2021 16:12:22 +0000 Subject: [PATCH 161/268] KVM: arm64: Implement kvm_pgtable_hyp_unmap() at EL2 Implement kvm_pgtable_hyp_unmap() which can be used to remove hypervisor stage-1 mappings at EL2. Signed-off-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-6-qperret@google.com --- arch/arm64/include/asm/kvm_pgtable.h | 21 ++++++++++ arch/arm64/kvm/hyp/pgtable.c | 63 ++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 027783829584..9d076f36401d 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -251,6 +251,27 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt); int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot); +/** + * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table. + * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init(). + * @addr: Virtual address from which to remove the mapping. + * @size: Size of the mapping. + * + * The offset of @addr within a page is ignored, @size is rounded-up to + * the next page boundary and @phys is rounded-down to the previous page + * boundary. 
+ * + * TLB invalidation is performed for each page-table entry cleared during the + * unmapping operation and the reference count for the page-table page + * containing the cleared entry is decremented, with unreferenced pages being + * freed. The unmapping operation will stop early if it encounters either an + * invalid page-table entry or a valid block mapping which maps beyond the range + * being unmapped. + * + * Return: Number of bytes unmapped, which may be 0. + */ +u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size); + /** * kvm_get_vtcr() - Helper to construct VTCR_EL2 * @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register. diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index e50e9158fc56..adc73f8cd24f 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -451,6 +451,69 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, return ret; } +struct hyp_unmap_data { + u64 unmapped; + struct kvm_pgtable_mm_ops *mm_ops; +}; + +static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, + enum kvm_pgtable_walk_flags flag, void * const arg) +{ + kvm_pte_t pte = *ptep, *childp = NULL; + u64 granule = kvm_granule_size(level); + struct hyp_unmap_data *data = arg; + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; + + if (!kvm_pte_valid(pte)) + return -EINVAL; + + if (kvm_pte_table(pte, level)) { + childp = kvm_pte_follow(pte, mm_ops); + + if (mm_ops->page_count(childp) != 1) + return 0; + + kvm_clear_pte(ptep); + dsb(ishst); + __tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level); + } else { + if (end - addr < granule) + return -EINVAL; + + kvm_clear_pte(ptep); + dsb(ishst); + __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level); + data->unmapped += granule; + } + + dsb(ish); + isb(); + mm_ops->put_page(ptep); + + if (childp) + mm_ops->put_page(childp); + + return 0; +} + +u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) +{ + struct hyp_unmap_data unmap_data = { + .mm_ops = pgt->mm_ops, + }; + struct kvm_pgtable_walker walker = { + .cb = hyp_unmap_walker, + .arg = &unmap_data, + .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, + }; + + if (!pgt->mm_ops->page_count) + return 0; + + kvm_pgtable_walk(pgt, addr, size, &walker); + return unmap_data.unmapped; +} + int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, struct kvm_pgtable_mm_ops *mm_ops) { From 3f868e142c0bb052a1c15fd3ceca1391604e2e69 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 15 Dec 2021 16:12:23 +0000 Subject: [PATCH 162/268] KVM: arm64: Introduce kvm_share_hyp() The create_hyp_mappings() function can currently be called at any point in time. However, its behaviour in protected mode changes widely depending on when it is being called. Prior to KVM init, it is used to create the temporary page-table used to bring-up the hypervisor, and later on it is transparently turned into a 'share' hypercall when the kernel has lost control over the hypervisor stage-1. In order to prepare the ground for also unsharing pages with the hypervisor during guest teardown, introduce a kvm_share_hyp() function to make it clear in which places a share hypercall should be expected, as we will soon need a matching unshare hypercall in all those places. 
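Concretely, the new helper (see the mmu.c hunk below) does nothing on VHE since the kernel already runs at EL2, returns -EINVAL for vmalloc/module addresses because the share hypercall operates on the fixed-offset hyp VA mapping and therefore needs physically contiguous objects, falls back to create_hyp_mappings() while the host still owns the hyp stage-1, and otherwise issues the share hypercall for each page in the range.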
Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-7-qperret@google.com --- arch/arm64/include/asm/kvm_mmu.h | 1 + arch/arm64/kvm/arm.c | 4 ++-- arch/arm64/kvm/fpsimd.c | 2 +- arch/arm64/kvm/mmu.c | 27 +++++++++++++++++++++------ arch/arm64/kvm/reset.c | 2 +- 5 files changed, 26 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 02d378887743..185d0f62b724 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -150,6 +150,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) #include #include +int kvm_share_hyp(void *from, void *to); int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot); int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **kaddr, diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 9b745d2bc89a..c202abb448b1 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -146,7 +146,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (ret) return ret; - ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP); + ret = kvm_share_hyp(kvm, kvm + 1); if (ret) goto out_free_stage2_pgd; @@ -342,7 +342,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) return err; - return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP); + return kvm_share_hyp(vcpu, vcpu + 1); } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 5526d79c7b47..86899d3aa9a9 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -30,7 +30,7 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) struct user_fpsimd_state *fpsimd = ¤t->thread.uw.fpsimd_state; /* Make sure the host task fpsimd state is visible to hyp: */ - ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP); + ret = kvm_share_hyp(fpsimd, fpsimd + 1); if (!ret) vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 326cdfec74a1..ccb0cfb25868 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -296,6 +296,25 @@ static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end) return 0; } +int kvm_share_hyp(void *from, void *to) +{ + if (is_kernel_in_hyp_mode()) + return 0; + + /* + * The share hcall maps things in the 'fixed-offset' region of the hyp + * VA space, so we can only share physically contiguous data-structures + * for now. 
+ */ + if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to)) + return -EINVAL; + + if (kvm_host_owns_hyp_mappings()) + return create_hyp_mappings(from, to, PAGE_HYP); + + return pkvm_share_hyp(__pa(from), __pa(to)); +} + /** * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode * @from: The virtual kernel start address of the range @@ -316,12 +335,8 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot) if (is_kernel_in_hyp_mode()) return 0; - if (!kvm_host_owns_hyp_mappings()) { - if (WARN_ON(prot != PAGE_HYP)) - return -EPERM; - return pkvm_share_hyp(kvm_kaddr_to_phys(from), - kvm_kaddr_to_phys(to)); - } + if (!kvm_host_owns_hyp_mappings()) + return -EPERM; start = start & PAGE_MASK; end = PAGE_ALIGN(end); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index c7a0249df840..e3e2a79fbd75 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -113,7 +113,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) if (!buf) return -ENOMEM; - ret = create_hyp_mappings(buf, buf + reg_sz, PAGE_HYP); + ret = kvm_share_hyp(buf, buf + reg_sz); if (ret) { kfree(buf); return ret; From a83e2191b7f1894dd0b4b3816ceb9caf4e0cd7e5 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 15 Dec 2021 16:12:24 +0000 Subject: [PATCH 163/268] KVM: arm64: pkvm: Refcount the pages shared with EL2 In order to simplify the page tracking infrastructure at EL2 in nVHE protected mode, move the responsibility of refcounting pages that are shared multiple times on the host. In order to do so, let's create a red-black tree tracking all the PFNs that have been shared, along with a refcount. Acked-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-8-qperret@google.com --- arch/arm64/kvm/mmu.c | 80 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 69 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index ccb0cfb25868..cf6c52ff8b87 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -281,23 +281,72 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr) } } -static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end) -{ - phys_addr_t addr; - int ret; +struct hyp_shared_pfn { + u64 pfn; + int count; + struct rb_node node; +}; - for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) { - ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, - __phys_to_pfn(addr)); - if (ret) - return ret; +static DEFINE_MUTEX(hyp_shared_pfns_lock); +static struct rb_root hyp_shared_pfns = RB_ROOT; + +static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node, + struct rb_node **parent) +{ + struct hyp_shared_pfn *this; + + *node = &hyp_shared_pfns.rb_node; + *parent = NULL; + while (**node) { + this = container_of(**node, struct hyp_shared_pfn, node); + *parent = **node; + if (this->pfn < pfn) + *node = &((**node)->rb_left); + else if (this->pfn > pfn) + *node = &((**node)->rb_right); + else + return this; } - return 0; + return NULL; +} + +static int share_pfn_hyp(u64 pfn) +{ + struct rb_node **node, *parent; + struct hyp_shared_pfn *this; + int ret = 0; + + mutex_lock(&hyp_shared_pfns_lock); + this = find_shared_pfn(pfn, &node, &parent); + if (this) { + this->count++; + goto unlock; + } + + this = kzalloc(sizeof(*this), GFP_KERNEL); + if (!this) { + ret = -ENOMEM; + goto unlock; + } + + this->pfn = pfn; + this->count = 1; + rb_link_node(&this->node, parent, node); + 
rb_insert_color(&this->node, &hyp_shared_pfns); + ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1); +unlock: + mutex_unlock(&hyp_shared_pfns_lock); + + return ret; } int kvm_share_hyp(void *from, void *to) { + phys_addr_t start, end, cur; + u64 pfn; + int ret; + if (is_kernel_in_hyp_mode()) return 0; @@ -312,7 +361,16 @@ int kvm_share_hyp(void *from, void *to) if (kvm_host_owns_hyp_mappings()) return create_hyp_mappings(from, to, PAGE_HYP); - return pkvm_share_hyp(__pa(from), __pa(to)); + start = ALIGN_DOWN(__pa(from), PAGE_SIZE); + end = PAGE_ALIGN(__pa(to)); + for (cur = start; cur < end; cur += PAGE_SIZE) { + pfn = __phys_to_pfn(cur); + ret = share_pfn_hyp(pfn); + if (ret) + return ret; + } + + return 0; } /** From 3d467f7b8c0a179a10aa4e9f17cd2d3c3b7e5403 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Dec 2021 16:12:25 +0000 Subject: [PATCH 164/268] KVM: arm64: Extend pkvm_page_state enumeration to handle absent pages Explicitly name the combination of SW0 | SW1 as reserved in the pte and introduce a new PKVM_NOPAGE meta-state which, although not directly stored in the software bits of the pte, can be used to represent an entry for which there is no underlying page. This is distinct from an invalid pte, as stage-2 identity mappings for the host are created lazily and so an invalid pte there is the same as a valid mapping for the purposes of ownership information. This state will be used for permission checking during page transitions in later patches. Reviewed-by: Andrew Walbran Signed-off-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-9-qperret@google.com --- arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h index b58c910babaf..56445586c755 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h @@ -24,6 +24,11 @@ enum pkvm_page_state { PKVM_PAGE_OWNED = 0ULL, PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0, PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1, + __PKVM_PAGE_RESERVED = KVM_PGTABLE_PROT_SW0 | + KVM_PGTABLE_PROT_SW1, + + /* Meta-states which aren't encoded directly in the PTE's SW bits */ + PKVM_NOPAGE, }; #define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1) From 61d99e33e757a21b47b8b130e49dcbdfaa5d2b1c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Dec 2021 16:12:26 +0000 Subject: [PATCH 165/268] KVM: arm64: Introduce wrappers for host and hyp spin lock accessors In preparation for adding additional locked sections for manipulating page-tables at EL2, introduce some simple wrappers around the host and hypervisor locks so that it's a bit easier to read and bit more difficult to take the wrong lock (or even take them in the wrong order). 
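The wrappers also make the intended nesting explicit: paths that need both locks take the host component lock first and the hypervisor component lock second, and release them in the opposite order, as the pkvm_component_id comment ("This corresponds to locking order") and the reworked __pkvm_host_share_hyp() in the following patches show.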
Signed-off-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-10-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 32 ++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index c1a90dd022b8..757dfefe3aeb 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -27,6 +27,26 @@ static struct hyp_pool host_s2_pool; const u8 pkvm_hyp_id = 1; +static void host_lock_component(void) +{ + hyp_spin_lock(&host_kvm.lock); +} + +static void host_unlock_component(void) +{ + hyp_spin_unlock(&host_kvm.lock); +} + +static void hyp_lock_component(void) +{ + hyp_spin_lock(&pkvm_pgd_lock); +} + +static void hyp_unlock_component(void) +{ + hyp_spin_unlock(&pkvm_pgd_lock); +} + static void *host_s2_zalloc_pages_exact(size_t size) { void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size)); @@ -338,14 +358,14 @@ static int host_stage2_idmap(u64 addr) prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT; - hyp_spin_lock(&host_kvm.lock); + host_lock_component(); ret = host_stage2_adjust_range(addr, &range); if (ret) goto unlock; ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot); unlock: - hyp_spin_unlock(&host_kvm.lock); + host_unlock_component(); return ret; } @@ -369,8 +389,8 @@ int __pkvm_host_share_hyp(u64 pfn) if (!addr_is_memory(addr)) return -EINVAL; - hyp_spin_lock(&host_kvm.lock); - hyp_spin_lock(&pkvm_pgd_lock); + host_lock_component(); + hyp_lock_component(); ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL); if (ret) @@ -432,8 +452,8 @@ map_shared: BUG_ON(ret); unlock: - hyp_spin_unlock(&pkvm_pgd_lock); - hyp_spin_unlock(&host_kvm.lock); + hyp_unlock_component(); + host_unlock_component(); return ret; } From e82edcc75c4e2389a3d7223c4ef1737bd9a07e5d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Dec 2021 16:12:27 +0000 Subject: [PATCH 166/268] KVM: arm64: Implement do_share() helper for sharing memory By default, protected KVM isolates memory pages so that they are accessible only to their owner: be it the host kernel, the hypervisor at EL2 or (in future) the guest. Establishing shared-memory regions between these components therefore involves a transition for each page so that the owner can share memory with a borrower under a certain set of permissions. Introduce a do_share() helper for safely sharing a memory region between two components. Currently, only host-to-hyp sharing is implemented, but the code is easily extended to handle other combinations and the permission checks for each component are reusable. 
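The state machine is small enough to model in a few lines. The sketch below is a toy userspace approximation (simplified names and error handling, not the hypervisor code): check_share() validates both components without touching anything, and only if every check passes does the commit step perform the OWNED to SHARED_OWNED and NOPAGE to SHARED_BORROWED transitions.

  #include <stdio.h>

  enum pkvm_page_state_model { NOPAGE, OWNED, SHARED_OWNED, SHARED_BORROWED };

  /* One page as seen by the two components involved in the transition. */
  struct page_model {
  	enum pkvm_page_state_model host;	/* initiator (owner)    */
  	enum pkvm_page_state_model hyp;		/* completer (borrower) */
  };

  /* Phase 1: check expected states on both sides, modify nothing. */
  static int check_share(const struct page_model *p)
  {
  	if (p->host != OWNED)		/* initiator must own the page outright */
  		return -1;
  	if (p->hyp != NOPAGE)		/* completer must not map it already    */
  		return -1;
  	return 0;
  }

  /* Phase 2: commit the transition, only reached once the checks passed. */
  static void commit_share(struct page_model *p)
  {
  	p->host = SHARED_OWNED;		/* owner keeps the page, marked shared   */
  	p->hyp = SHARED_BORROWED;	/* borrower gains access it does not own */
  }

  static int do_share_model(struct page_model *p)
  {
  	if (check_share(p))
  		return -1;
  	commit_share(p);
  	return 0;
  }

  int main(void)
  {
  	struct page_model p = { .host = OWNED, .hyp = NOPAGE };

  	printf("first share:  %s\n", do_share_model(&p) ? "rejected" : "ok");
  	printf("second share: %s\n", do_share_model(&p) ? "rejected" : "ok");
  	return 0;
  }

Note how a second share attempt fails the check phase (the host side is no longer OWNED and the hyp side is no longer NOPAGE), which is exactly the behaviour the next patch relies on when it stops permitting double-sharing.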
Reviewed-by: Andrew Walbran Signed-off-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-11-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 237 ++++++++++++++++++++++++++ 1 file changed, 237 insertions(+) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 757dfefe3aeb..e612fd9d8975 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -471,3 +471,240 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt) ret = host_stage2_idmap(addr); BUG_ON(ret && ret != -EAGAIN); } + +/* This corresponds to locking order */ +enum pkvm_component_id { + PKVM_ID_HOST, + PKVM_ID_HYP, +}; + +struct pkvm_mem_transition { + u64 nr_pages; + + struct { + enum pkvm_component_id id; + /* Address in the initiator's address space */ + u64 addr; + + union { + struct { + /* Address in the completer's address space */ + u64 completer_addr; + } host; + }; + } initiator; + + struct { + enum pkvm_component_id id; + } completer; +}; + +struct pkvm_mem_share { + const struct pkvm_mem_transition tx; + const enum kvm_pgtable_prot completer_prot; +}; + +struct check_walk_data { + enum pkvm_page_state desired; + enum pkvm_page_state (*get_page_state)(kvm_pte_t pte); +}; + +static int __check_page_state_visitor(u64 addr, u64 end, u32 level, + kvm_pte_t *ptep, + enum kvm_pgtable_walk_flags flag, + void * const arg) +{ + struct check_walk_data *d = arg; + kvm_pte_t pte = *ptep; + + if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte))) + return -EINVAL; + + return d->get_page_state(pte) == d->desired ? 0 : -EPERM; +} + +static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, + struct check_walk_data *data) +{ + struct kvm_pgtable_walker walker = { + .cb = __check_page_state_visitor, + .arg = data, + .flags = KVM_PGTABLE_WALK_LEAF, + }; + + return kvm_pgtable_walk(pgt, addr, size, &walker); +} + +static enum pkvm_page_state host_get_page_state(kvm_pte_t pte) +{ + if (!kvm_pte_valid(pte) && pte) + return PKVM_NOPAGE; + + return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)); +} + +static int __host_check_page_state_range(u64 addr, u64 size, + enum pkvm_page_state state) +{ + struct check_walk_data d = { + .desired = state, + .get_page_state = host_get_page_state, + }; + + hyp_assert_lock_held(&host_kvm.lock); + return check_page_state_range(&host_kvm.pgt, addr, size, &d); +} + +static int __host_set_page_state_range(u64 addr, u64 size, + enum pkvm_page_state state) +{ + enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state); + + return host_stage2_idmap_locked(addr, size, prot); +} + +static int host_request_owned_transition(u64 *completer_addr, + const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + u64 addr = tx->initiator.addr; + + *completer_addr = tx->initiator.host.completer_addr; + return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED); +} + +static int host_initiate_share(u64 *completer_addr, + const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + u64 addr = tx->initiator.addr; + + *completer_addr = tx->initiator.host.completer_addr; + return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED); +} + +static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte) +{ + if (!kvm_pte_valid(pte)) + return PKVM_NOPAGE; + + return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)); +} + +static int 
__hyp_check_page_state_range(u64 addr, u64 size, + enum pkvm_page_state state) +{ + struct check_walk_data d = { + .desired = state, + .get_page_state = hyp_get_page_state, + }; + + hyp_assert_lock_held(&pkvm_pgd_lock); + return check_page_state_range(&pkvm_pgtable, addr, size, &d); +} + +static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx) +{ + return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) || + tx->initiator.id != PKVM_ID_HOST); +} + +static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx, + enum kvm_pgtable_prot perms) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + + if (perms != PAGE_HYP) + return -EPERM; + + if (__hyp_ack_skip_pgtable_check(tx)) + return 0; + + return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE); +} + +static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx, + enum kvm_pgtable_prot perms) +{ + void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE); + enum kvm_pgtable_prot prot; + + prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED); + return pkvm_create_mappings_locked(start, end, prot); +} + +static int check_share(struct pkvm_mem_share *share) +{ + const struct pkvm_mem_transition *tx = &share->tx; + u64 completer_addr; + int ret; + + switch (tx->initiator.id) { + case PKVM_ID_HOST: + ret = host_request_owned_transition(&completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + if (ret) + return ret; + + switch (tx->completer.id) { + case PKVM_ID_HYP: + ret = hyp_ack_share(completer_addr, tx, share->completer_prot); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int __do_share(struct pkvm_mem_share *share) +{ + const struct pkvm_mem_transition *tx = &share->tx; + u64 completer_addr; + int ret; + + switch (tx->initiator.id) { + case PKVM_ID_HOST: + ret = host_initiate_share(&completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + if (ret) + return ret; + + switch (tx->completer.id) { + case PKVM_ID_HYP: + ret = hyp_complete_share(completer_addr, tx, share->completer_prot); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +/* + * do_share(): + * + * The page owner grants access to another component with a given set + * of permissions. + * + * Initiator: OWNED => SHARED_OWNED + * Completer: NOPAGE => SHARED_BORROWED + */ +static int do_share(struct pkvm_mem_share *share) +{ + int ret; + + ret = check_share(share); + if (ret) + return ret; + + return WARN_ON(__do_share(share)); +} From 1ee32109fd78720259f7431740897d37ebcd84f6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Dec 2021 16:12:28 +0000 Subject: [PATCH 167/268] KVM: arm64: Implement __pkvm_host_share_hyp() using do_share() __pkvm_host_share_hyp() shares memory between the host and the hypervisor so implement it as an invocation of the new do_share() mechanism. Note that double-sharing is no longer permitted (as this allows us to reduce the number of page-table walks significantly), but is thankfully no longer relied upon by the host. 
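The host side can live with this restriction thanks to the earlier "Refcount the pages shared with EL2" change: share_pfn_hyp() keeps a per-PFN refcount in a red-black tree and only issues the share hypercall the first time a given PFN is shared, so EL2 never actually sees a second share request for the same page.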
Signed-off-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-12-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 121 +++++++------------------- 1 file changed, 33 insertions(+), 88 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index e612fd9d8975..492b9930609d 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -370,94 +370,6 @@ unlock: return ret; } -static inline bool check_prot(enum kvm_pgtable_prot prot, - enum kvm_pgtable_prot required, - enum kvm_pgtable_prot denied) -{ - return (prot & (required | denied)) == required; -} - -int __pkvm_host_share_hyp(u64 pfn) -{ - phys_addr_t addr = hyp_pfn_to_phys(pfn); - enum kvm_pgtable_prot prot, cur; - void *virt = __hyp_va(addr); - enum pkvm_page_state state; - kvm_pte_t pte; - int ret; - - if (!addr_is_memory(addr)) - return -EINVAL; - - host_lock_component(); - hyp_lock_component(); - - ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL); - if (ret) - goto unlock; - if (!pte) - goto map_shared; - - /* - * Check attributes in the host stage-2 PTE. We need the page to be: - * - mapped RWX as we're sharing memory; - * - not borrowed, as that implies absence of ownership. - * Otherwise, we can't let it got through - */ - cur = kvm_pgtable_stage2_pte_prot(pte); - prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED); - if (!check_prot(cur, PKVM_HOST_MEM_PROT, prot)) { - ret = -EPERM; - goto unlock; - } - - state = pkvm_getstate(cur); - if (state == PKVM_PAGE_OWNED) - goto map_shared; - - /* - * Tolerate double-sharing the same page, but this requires - * cross-checking the hypervisor stage-1. - */ - if (state != PKVM_PAGE_SHARED_OWNED) { - ret = -EPERM; - goto unlock; - } - - ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, NULL); - if (ret) - goto unlock; - - /* - * If the page has been shared with the hypervisor, it must be - * already mapped as SHARED_BORROWED in its stage-1. - */ - cur = kvm_pgtable_hyp_pte_prot(pte); - prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED); - if (!check_prot(cur, prot, ~prot)) - ret = -EPERM; - goto unlock; - -map_shared: - /* - * If the page is not yet shared, adjust mappings in both page-tables - * while both locks are held. 
- */ - prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED); - ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot); - BUG_ON(ret); - - prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED); - ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot); - BUG_ON(ret); - -unlock: - hyp_unlock_component(); - host_unlock_component(); - - return ret; -} - void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt) { struct kvm_vcpu_fault_info fault; @@ -708,3 +620,36 @@ static int do_share(struct pkvm_mem_share *share) return WARN_ON(__do_share(share)); } + +int __pkvm_host_share_hyp(u64 pfn) +{ + int ret; + u64 host_addr = hyp_pfn_to_phys(pfn); + u64 hyp_addr = (u64)__hyp_va(host_addr); + struct pkvm_mem_share share = { + .tx = { + .nr_pages = 1, + .initiator = { + .id = PKVM_ID_HOST, + .addr = host_addr, + .host = { + .completer_addr = hyp_addr, + }, + }, + .completer = { + .id = PKVM_ID_HYP, + }, + }, + .completer_prot = PAGE_HYP, + }; + + host_lock_component(); + hyp_lock_component(); + + ret = do_share(&share); + + hyp_unlock_component(); + host_unlock_component(); + + return ret; +} From 376a240f037959c2b9a2486e53bcd8d388cbec17 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Dec 2021 16:12:29 +0000 Subject: [PATCH 168/268] KVM: arm64: Implement do_unshare() helper for unsharing memory Tearing down a previously shared memory region results in the borrower losing access to the underlying pages and returning them to the "owned" state in the owner. Implement a do_unshare() helper, along the same lines as do_share(), to provide this functionality for the host-to-hyp case. Reviewed-by: Andrew Walbran Signed-off-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-13-qperret@google.com --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 115 ++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 492b9930609d..06973a93db00 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -485,6 +485,16 @@ static int host_request_owned_transition(u64 *completer_addr, return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED); } +static int host_request_unshare(u64 *completer_addr, + const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + u64 addr = tx->initiator.addr; + + *completer_addr = tx->initiator.host.completer_addr; + return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED); +} + static int host_initiate_share(u64 *completer_addr, const struct pkvm_mem_transition *tx) { @@ -495,6 +505,16 @@ static int host_initiate_share(u64 *completer_addr, return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED); } +static int host_initiate_unshare(u64 *completer_addr, + const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + u64 addr = tx->initiator.addr; + + *completer_addr = tx->initiator.host.completer_addr; + return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED); +} + static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte) { if (!kvm_pte_valid(pte)) @@ -535,6 +555,17 @@ static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx, return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE); } +static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + + if (__hyp_ack_skip_pgtable_check(tx)) + 
return 0; + + return __hyp_check_page_state_range(addr, size, + PKVM_PAGE_SHARED_BORROWED); +} + static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx, enum kvm_pgtable_prot perms) { @@ -545,6 +576,14 @@ static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx, return pkvm_create_mappings_locked(start, end, prot); } +static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx) +{ + u64 size = tx->nr_pages * PAGE_SIZE; + int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size); + + return (ret != size) ? -EFAULT : 0; +} + static int check_share(struct pkvm_mem_share *share) { const struct pkvm_mem_transition *tx = &share->tx; @@ -621,6 +660,82 @@ static int do_share(struct pkvm_mem_share *share) return WARN_ON(__do_share(share)); } +static int check_unshare(struct pkvm_mem_share *share) +{ + const struct pkvm_mem_transition *tx = &share->tx; + u64 completer_addr; + int ret; + + switch (tx->initiator.id) { + case PKVM_ID_HOST: + ret = host_request_unshare(&completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + if (ret) + return ret; + + switch (tx->completer.id) { + case PKVM_ID_HYP: + ret = hyp_ack_unshare(completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int __do_unshare(struct pkvm_mem_share *share) +{ + const struct pkvm_mem_transition *tx = &share->tx; + u64 completer_addr; + int ret; + + switch (tx->initiator.id) { + case PKVM_ID_HOST: + ret = host_initiate_unshare(&completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + if (ret) + return ret; + + switch (tx->completer.id) { + case PKVM_ID_HYP: + ret = hyp_complete_unshare(completer_addr, tx); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +/* + * do_unshare(): + * + * The page owner revokes access from another component for a range of + * pages which were previously shared using do_share(). + * + * Initiator: SHARED_OWNED => OWNED + * Completer: SHARED_BORROWED => NOPAGE + */ +static int do_unshare(struct pkvm_mem_share *share) +{ + int ret; + + ret = check_unshare(share); + if (ret) + return ret; + + return WARN_ON(__do_unshare(share)); +} + int __pkvm_host_share_hyp(u64 pfn) { int ret; From b8cc6eb5bded7078f796b2ebf548f79850281eb6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 15 Dec 2021 16:12:30 +0000 Subject: [PATCH 169/268] KVM: arm64: Expose unshare hypercall to the host Introduce an unshare hypercall which can be used to unmap memory from the hypervisor stage-1 in nVHE protected mode. This will be useful to update the EL2 ownership state of pages during guest teardown, and avoids keeping dangling mappings to unreferenced portions of memory. 
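
As with the share hypercall, the new entry point is wired up through the host_hcall[] table in hyp-main.c: the SMCCC function ID selects a handler, which decodes its arguments from the host context registers and writes the result back into register 1. A rough stand-alone model of that table-driven dispatch, with made-up handler and register names rather than the real hyp-main definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_REGS 4

    struct cpu_context { uint64_t regs[NR_REGS]; };

    typedef void (*hcall_t)(struct cpu_context *ctxt);

    /* Each handler reads its arguments from the context and writes the
     * return value back into regs[1], mirroring cpu_reg(host_ctxt, 1). */
    static void handle_share(struct cpu_context *ctxt)
    {
        uint64_t pfn = ctxt->regs[1];
        ctxt->regs[1] = (pfn != 0) ? 0 : (uint64_t)-1; /* pretend success */
    }

    static void handle_unshare(struct cpu_context *ctxt)
    {
        uint64_t pfn = ctxt->regs[1];
        ctxt->regs[1] = (pfn != 0) ? 0 : (uint64_t)-1;
    }

    enum { FUNC_SHARE, FUNC_UNSHARE, NR_FUNCS };

    static const hcall_t hcall_table[NR_FUNCS] = {
        [FUNC_SHARE]   = handle_share,
        [FUNC_UNSHARE] = handle_unshare,
    };

    static void handle_hcall(unsigned int id, struct cpu_context *ctxt)
    {
        if (id < NR_FUNCS && hcall_table[id])
            hcall_table[id](ctxt);
        else
            ctxt->regs[1] = (uint64_t)-1; /* unknown hypercall (simplified) */
    }

    int main(void)
    {
        struct cpu_context ctxt = { .regs = { 0, 42, 0, 0 } };

        handle_hcall(FUNC_UNSHARE, &ctxt);
        printf("ret = %ld\n", (long)ctxt.regs[1]); /* 0 */
        return 0;
    }

The real handlers use DECLARE_REG() to pull typed arguments out of the context, but the indexing and return convention are the same.
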
Signed-off-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-14-qperret@google.com --- arch/arm64/include/asm/kvm_asm.h | 1 + arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 1 + arch/arm64/kvm/hyp/nvhe/hyp-main.c | 8 +++++ arch/arm64/kvm/hyp/nvhe/mem_protect.c | 33 +++++++++++++++++++ 4 files changed, 43 insertions(+) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 50d5e4de244c..d5b0386ef765 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -63,6 +63,7 @@ enum __kvm_host_smccc_func { /* Hypercalls available after pKVM finalisation */ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, + __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp, __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run, __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context, diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h index 56445586c755..80e99836eac7 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h @@ -55,6 +55,7 @@ extern const u8 pkvm_hyp_id; int __pkvm_prot_finalize(void); int __pkvm_host_share_hyp(u64 pfn); +int __pkvm_host_unshare_hyp(u64 pfn); bool addr_is_memory(phys_addr_t phys); int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot); diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index b096bf009144..5e2197db0d32 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -147,6 +147,13 @@ static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt) cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn); } +static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(u64, pfn, host_ctxt, 1); + + cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn); +} + static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(phys_addr_t, phys, host_ctxt, 1); @@ -184,6 +191,7 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__pkvm_prot_finalize), HANDLE_FUNC(__pkvm_host_share_hyp), + HANDLE_FUNC(__pkvm_host_unshare_hyp), HANDLE_FUNC(__kvm_adjust_pc), HANDLE_FUNC(__kvm_vcpu_run), HANDLE_FUNC(__kvm_flush_vm_context), diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 06973a93db00..33c105ddb6b7 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -768,3 +768,36 @@ int __pkvm_host_share_hyp(u64 pfn) return ret; } + +int __pkvm_host_unshare_hyp(u64 pfn) +{ + int ret; + u64 host_addr = hyp_pfn_to_phys(pfn); + u64 hyp_addr = (u64)__hyp_va(host_addr); + struct pkvm_mem_share share = { + .tx = { + .nr_pages = 1, + .initiator = { + .id = PKVM_ID_HOST, + .addr = host_addr, + .host = { + .completer_addr = hyp_addr, + }, + }, + .completer = { + .id = PKVM_ID_HYP, + }, + }, + .completer_prot = PAGE_HYP, + }; + + host_lock_component(); + hyp_lock_component(); + + ret = do_unshare(&share); + + hyp_unlock_component(); + host_unlock_component(); + + return ret; +} From 52b28657ebd7cd20e931ce71190f235d0fa018a6 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 15 Dec 2021 16:12:31 +0000 Subject: [PATCH 170/268] KVM: arm64: pkvm: Unshare guest structs during teardown Make use of the newly introduced unshare hypercall during guest teardown to unmap guest-related data structures from the hyp 
stage-1. Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211215161232.1480836-15-qperret@google.com --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/include/asm/kvm_mmu.h | 1 + arch/arm64/kvm/arm.c | 2 ++ arch/arm64/kvm/fpsimd.c | 34 ++++++++++++++++++++++--- arch/arm64/kvm/mmu.c | 42 +++++++++++++++++++++++++++++++ arch/arm64/kvm/reset.c | 8 +++++- 6 files changed, 85 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cf858a7e3533..9360a2804df1 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -321,6 +321,7 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch external_debug_state; struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ + struct task_struct *parent_task; struct { /* {Break,watch}point registers */ @@ -737,6 +738,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); +void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu); static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) { diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 185d0f62b724..81839e9a8a24 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -151,6 +151,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) #include int kvm_share_hyp(void *from, void *to); +void kvm_unshare_hyp(void *from, void *to); int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot); int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **kaddr, diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index c202abb448b1..6057f3c5aafe 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -188,6 +188,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) } } atomic_set(&kvm->online_vcpus, 0); + + kvm_unshare_hyp(kvm, kvm + 1); } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 86899d3aa9a9..2f48fd362a8c 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -14,6 +14,19 @@ #include #include +void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu) +{ + struct task_struct *p = vcpu->arch.parent_task; + struct user_fpsimd_state *fpsimd; + + if (!is_protected_kvm_enabled() || !p) + return; + + fpsimd = &p->thread.uw.fpsimd_state; + kvm_unshare_hyp(fpsimd, fpsimd + 1); + put_task_struct(p); +} + /* * Called on entry to KVM_RUN unless this vcpu previously ran at least * once and the most recent prior KVM_RUN for this vcpu was called from @@ -29,12 +42,27 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) struct user_fpsimd_state *fpsimd = ¤t->thread.uw.fpsimd_state; + kvm_vcpu_unshare_task_fp(vcpu); + /* Make sure the host task fpsimd state is visible to hyp: */ ret = kvm_share_hyp(fpsimd, fpsimd + 1); - if (!ret) - vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); + if (ret) + return ret; - return ret; + vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); + + /* + * We need to keep current's task_struct pinned until its data has been + * unshared with the hypervisor to make sure it is not re-used by the + * kernel and donated to someone else while already shared -- see + * kvm_vcpu_unshare_task_fp() for the matching put_task_struct(). 
+ */ + if (is_protected_kvm_enabled()) { + get_task_struct(current); + vcpu->arch.parent_task = current; + } + + return 0; } /* diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index cf6c52ff8b87..b1edcc950d08 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -341,6 +341,32 @@ unlock: return ret; } +static int unshare_pfn_hyp(u64 pfn) +{ + struct rb_node **node, *parent; + struct hyp_shared_pfn *this; + int ret = 0; + + mutex_lock(&hyp_shared_pfns_lock); + this = find_shared_pfn(pfn, &node, &parent); + if (WARN_ON(!this)) { + ret = -ENOENT; + goto unlock; + } + + this->count--; + if (this->count) + goto unlock; + + rb_erase(&this->node, &hyp_shared_pfns); + kfree(this); + ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1); +unlock: + mutex_unlock(&hyp_shared_pfns_lock); + + return ret; +} + int kvm_share_hyp(void *from, void *to) { phys_addr_t start, end, cur; @@ -373,6 +399,22 @@ int kvm_share_hyp(void *from, void *to) return 0; } +void kvm_unshare_hyp(void *from, void *to) +{ + phys_addr_t start, end, cur; + u64 pfn; + + if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from) + return; + + start = ALIGN_DOWN(__pa(from), PAGE_SIZE); + end = PAGE_ALIGN(__pa(to)); + for (cur = start; cur < end; cur += PAGE_SIZE) { + pfn = __phys_to_pfn(cur); + WARN_ON(unshare_pfn_hyp(pfn)); + } +} + /** * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode * @from: The virtual kernel start address of the range diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index e3e2a79fbd75..798a84eddbde 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -150,7 +150,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) { - kfree(vcpu->arch.sve_state); + void *sve_state = vcpu->arch.sve_state; + + kvm_vcpu_unshare_task_fp(vcpu); + kvm_unshare_hyp(vcpu, vcpu + 1); + if (sve_state) + kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu)); + kfree(sve_state); } static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) From 9d8604b28575ccab3afd8d6f56cab9a6c0d281ef Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 29 Nov 2021 20:00:45 +0000 Subject: [PATCH 171/268] KVM: arm64: Rework kvm_pgtable initialisation Ganapatrao reported that the kvm_pgtable->mmu pointer is more or less hardcoded to the main S2 mmu structure, while the nested code needs it to point to other instances (as we have one instance per nested context). Rework the initialisation of the kvm_pgtable structure so that this assumtion doesn't hold true anymore. This requires some minor changes to the order in which things are initialised (the mmu->arch pointer being the critical one). Reported-by: Ganapatrao Kulkarni Reviewed-by: Ganapatrao Kulkarni Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211129200150.351436-5-maz@kernel.org --- arch/arm64/include/asm/kvm_pgtable.h | 9 ++++----- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 4 ++-- arch/arm64/kvm/hyp/pgtable.c | 6 +++--- arch/arm64/kvm/mmu.c | 4 ++-- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 027783829584..8b808e70c2b2 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -270,8 +270,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); /** * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table. * @pgt: Uninitialised page-table structure to initialise. 
- * @arch: Arch-specific KVM structure representing the guest virtual - * machine. + * @mmu: S2 MMU context for this S2 translation * @mm_ops: Memory management callbacks. * @flags: Stage-2 configuration flags. * @force_pte_cb: Function that returns true if page level mappings must @@ -279,13 +278,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); * * Return: 0 on success, negative error code on failure. */ -int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, +int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, enum kvm_pgtable_stage2_flags flags, kvm_pgtable_force_pte_cb_t force_pte_cb); -#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \ - __kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL) +#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \ + __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL) /** * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table. diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index c1a90dd022b8..7d7b7037dc68 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -103,19 +103,19 @@ int kvm_host_prepare_stage2(void *pgt_pool_base) prepare_host_vtcr(); hyp_spin_lock_init(&host_kvm.lock); + mmu->arch = &host_kvm.arch; ret = prepare_s2_pool(pgt_pool_base); if (ret) return ret; - ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch, + ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu, &host_kvm.mm_ops, KVM_HOST_S2_FLAGS, host_stage2_force_pte_cb); if (ret) return ret; mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd); - mmu->arch = &host_kvm.arch; mmu->pgt = &host_kvm.pgt; WRITE_ONCE(mmu->vmid.vmid_gen, 0); WRITE_ONCE(mmu->vmid.vmid, 0); diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index f8ceebe4982e..8cdbc43fa651 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1116,13 +1116,13 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) } -int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, +int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, enum kvm_pgtable_stage2_flags flags, kvm_pgtable_force_pte_cb_t force_pte_cb) { size_t pgd_sz; - u64 vtcr = arch->vtcr; + u64 vtcr = mmu->arch->vtcr; u32 ia_bits = VTCR_EL2_IPA(vtcr); u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; @@ -1135,7 +1135,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, pgt->ia_bits = ia_bits; pgt->start_level = start_level; pgt->mm_ops = mm_ops; - pgt->mmu = &arch->mmu; + pgt->mmu = mmu; pgt->flags = flags; pgt->force_pte_cb = force_pte_cb; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 326cdfec74a1..9eec548fccd1 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -516,7 +516,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) if (!pgt) return -ENOMEM; - err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops); + mmu->arch = &kvm->arch; + err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops); if (err) goto out_free_pgtable; @@ -529,7 +530,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) for_each_possible_cpu(cpu) *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; - mmu->arch = &kvm->arch; mmu->pgt = pgt; mmu->pgd_phys = __pa(pgt->pgd); WRITE_ONCE(mmu->vmid.vmid_gen, 0); From 
416e7f0c9d613bf84e182eba9547ae8f9f5bfa4c Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 26 Nov 2021 17:45:47 +0100 Subject: [PATCH 172/268] KVM: s390: gaccess: Refactor gpa and length calculation Improve readability by renaming the length variable and not calculating the offset manually. Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Janosch Frank Reviewed-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Message-Id: <20211126164549.7046-2-scgl@linux.ibm.com> Signed-off-by: Janosch Frank --- arch/s390/kvm/gaccess.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 6af59c59cc1b..45966fbba182 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -831,8 +831,9 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, unsigned long len, enum gacc_mode mode) { psw_t *psw = &vcpu->arch.sie_block->gpsw; - unsigned long _len, nr_pages, gpa, idx; + unsigned long nr_pages, gpa, idx; unsigned long pages_array[2]; + unsigned int fragment_len; unsigned long *pages; int need_ipte_lock; union asce asce; @@ -855,15 +856,15 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, ipte_lock(vcpu); rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode); for (idx = 0; idx < nr_pages && !rc; idx++) { - gpa = *(pages + idx) + (ga & ~PAGE_MASK); - _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); + gpa = pages[idx] + offset_in_page(ga); + fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len); if (mode == GACC_STORE) - rc = kvm_write_guest(vcpu->kvm, gpa, data, _len); + rc = kvm_write_guest(vcpu->kvm, gpa, data, fragment_len); else - rc = kvm_read_guest(vcpu->kvm, gpa, data, _len); - len -= _len; - ga += _len; - data += _len; + rc = kvm_read_guest(vcpu->kvm, gpa, data, fragment_len); + len -= fragment_len; + ga += fragment_len; + data += fragment_len; } if (need_ipte_lock) ipte_unlock(vcpu); @@ -875,19 +876,20 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, unsigned long len, enum gacc_mode mode) { - unsigned long _len, gpa; + unsigned int fragment_len; + unsigned long gpa; int rc = 0; while (len && !rc) { gpa = kvm_s390_real_to_abs(vcpu, gra); - _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); + fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len); if (mode) - rc = write_guest_abs(vcpu, gpa, data, _len); + rc = write_guest_abs(vcpu, gpa, data, fragment_len); else - rc = read_guest_abs(vcpu, gpa, data, _len); - len -= _len; - gra += _len; - data += _len; + rc = read_guest_abs(vcpu, gpa, data, fragment_len); + len -= fragment_len; + gra += fragment_len; + data += fragment_len; } return rc; } From 7faa543df19bf62d4583a64d3902705747f2ad29 Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 26 Nov 2021 17:45:48 +0100 Subject: [PATCH 173/268] KVM: s390: gaccess: Refactor access address range check Do not round down the first address to the page boundary, just translate it normally, which gives the value we care about in the first place. Given this, translating a single address is just the special case of translating a range spanning a single page. Make the output optional, so the function can be used to just check a range. 
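
The resulting walk proceeds fragment by fragment: the first fragment runs from the starting offset to the end of its page, and every later fragment starts at a page boundary. The arithmetic is easy to check in isolation (the page size and addresses below are arbitrary example values, not anything taken from the guest access code):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static unsigned long offset_in_page(unsigned long addr)
    {
        return addr & ~PAGE_MASK;
    }

    int main(void)
    {
        unsigned long ga = 0x1ff0;  /* 16 bytes before a page boundary */
        unsigned long len = 100;

        while (len) {
            unsigned long space = PAGE_SIZE - offset_in_page(ga);
            unsigned long fragment_len = space < len ? space : len;

            printf("fragment: ga=0x%lx len=%lu\n", ga, fragment_len);
            ga += fragment_len;
            len -= fragment_len;
        }
        return 0;
    }

For a 100-byte access starting 16 bytes before a page boundary this yields two fragments, of 16 and 84 bytes, each of which is then translated and accessed separately.
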
Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Janosch Frank Reviewed-by: Claudio Imbrenda Message-Id: <20211126164549.7046-3-scgl@linux.ibm.com> Signed-off-by: Janosch Frank --- arch/s390/kvm/gaccess.c | 122 +++++++++++++++++++++++----------------- 1 file changed, 69 insertions(+), 53 deletions(-) diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 45966fbba182..ca7f22a9e0c5 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -794,35 +794,74 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu, return 1; } -static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, - unsigned long *pages, unsigned long nr_pages, - const union asce asce, enum gacc_mode mode) +/** + * guest_range_to_gpas() - Calculate guest physical addresses of page fragments + * covering a logical range + * @vcpu: virtual cpu + * @ga: guest address, start of range + * @ar: access register + * @gpas: output argument, may be NULL + * @len: length of range in bytes + * @asce: address-space-control element to use for translation + * @mode: access mode + * + * Translate a logical range to a series of guest absolute addresses, + * such that the concatenation of page fragments starting at each gpa make up + * the whole range. + * The translation is performed as if done by the cpu for the given @asce, @ar, + * @mode and state of the @vcpu. + * If the translation causes an exception, its program interruption code is + * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified + * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject + * a correct exception into the guest. + * The resulting gpas are stored into @gpas, unless it is NULL. + * + * Note: All fragments except the first one start at the beginning of a page. + * When deriving the boundaries of a fragment from a gpa, all but the last + * fragment end at the end of the page. + * + * Return: + * * 0 - success + * * <0 - translation could not be performed, for example if guest + * memory could not be accessed + * * >0 - an access exception occurred. In this case the returned value + * is the program interruption code and the contents of pgm may + * be used to inject an exception into the guest. 
+ */ +static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, + unsigned long *gpas, unsigned long len, + const union asce asce, enum gacc_mode mode) { psw_t *psw = &vcpu->arch.sie_block->gpsw; + unsigned int offset = offset_in_page(ga); + unsigned int fragment_len; int lap_enabled, rc = 0; enum prot_type prot; + unsigned long gpa; lap_enabled = low_address_protection_enabled(vcpu, asce); - while (nr_pages) { + while (min(PAGE_SIZE - offset, len) > 0) { + fragment_len = min(PAGE_SIZE - offset, len); ga = kvm_s390_logical_to_effective(vcpu, ga); if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode, PROT_TYPE_LA); - ga &= PAGE_MASK; if (psw_bits(*psw).dat) { - rc = guest_translate(vcpu, ga, pages, asce, mode, &prot); + rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot); if (rc < 0) return rc; } else { - *pages = kvm_s390_real_to_abs(vcpu, ga); - if (kvm_is_error_gpa(vcpu->kvm, *pages)) + gpa = kvm_s390_real_to_abs(vcpu, ga); + if (kvm_is_error_gpa(vcpu->kvm, gpa)) rc = PGM_ADDRESSING; } if (rc) return trans_exc(vcpu, rc, ga, ar, mode, prot); - ga += PAGE_SIZE; - pages++; - nr_pages--; + if (gpas) + *gpas++ = gpa; + offset = 0; + ga += fragment_len; + len -= fragment_len; } return 0; } @@ -831,10 +870,10 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, unsigned long len, enum gacc_mode mode) { psw_t *psw = &vcpu->arch.sie_block->gpsw; - unsigned long nr_pages, gpa, idx; - unsigned long pages_array[2]; + unsigned long nr_pages, idx; + unsigned long gpa_array[2]; unsigned int fragment_len; - unsigned long *pages; + unsigned long *gpas; int need_ipte_lock; union asce asce; int rc; @@ -846,30 +885,28 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, if (rc) return rc; nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; - pages = pages_array; - if (nr_pages > ARRAY_SIZE(pages_array)) - pages = vmalloc(array_size(nr_pages, sizeof(unsigned long))); - if (!pages) + gpas = gpa_array; + if (nr_pages > ARRAY_SIZE(gpa_array)) + gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long))); + if (!gpas) return -ENOMEM; need_ipte_lock = psw_bits(*psw).dat && !asce.r; if (need_ipte_lock) ipte_lock(vcpu); - rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode); + rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode); for (idx = 0; idx < nr_pages && !rc; idx++) { - gpa = pages[idx] + offset_in_page(ga); - fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len); + fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len); if (mode == GACC_STORE) - rc = kvm_write_guest(vcpu->kvm, gpa, data, fragment_len); + rc = kvm_write_guest(vcpu->kvm, gpas[idx], data, fragment_len); else - rc = kvm_read_guest(vcpu->kvm, gpa, data, fragment_len); + rc = kvm_read_guest(vcpu->kvm, gpas[idx], data, fragment_len); len -= fragment_len; - ga += fragment_len; data += fragment_len; } if (need_ipte_lock) ipte_unlock(vcpu); - if (nr_pages > ARRAY_SIZE(pages_array)) - vfree(pages); + if (nr_pages > ARRAY_SIZE(gpa_array)) + vfree(gpas); return rc; } @@ -911,8 +948,6 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, unsigned long *gpa, enum gacc_mode mode) { - psw_t *psw = &vcpu->arch.sie_block->gpsw; - enum prot_type prot; union asce asce; int rc; @@ -920,23 +955,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, rc = 
get_vcpu_asce(vcpu, &asce, gva, ar, mode); if (rc) return rc; - if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) { - if (mode == GACC_STORE) - return trans_exc(vcpu, PGM_PROTECTION, gva, 0, - mode, PROT_TYPE_LA); - } - - if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? */ - rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot); - if (rc > 0) - return trans_exc(vcpu, rc, gva, 0, mode, prot); - } else { - *gpa = kvm_s390_real_to_abs(vcpu, gva); - if (kvm_is_error_gpa(vcpu->kvm, *gpa)) - return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0); - } - - return rc; + return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode); } /** @@ -950,17 +969,14 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, unsigned long length, enum gacc_mode mode) { - unsigned long gpa; - unsigned long currlen; + union asce asce; int rc = 0; + rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode); + if (rc) + return rc; ipte_lock(vcpu); - while (length > 0 && !rc) { - currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE)); - rc = guest_translate_address(vcpu, gva, ar, &gpa, mode); - gva += currlen; - length -= currlen; - } + rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode); ipte_unlock(vcpu); return rc; From bad13799e0305deb258372b7298a86be4c78aaba Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 26 Nov 2021 17:45:49 +0100 Subject: [PATCH 174/268] KVM: s390: gaccess: Cleanup access to guest pages Introduce a helper function for guest frame access. Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Janosch Frank Reviewed-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Message-Id: <20211126164549.7046-4-scgl@linux.ibm.com> Signed-off-by: Janosch Frank --- arch/s390/kvm/gaccess.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index ca7f22a9e0c5..4460808c3b9a 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -866,6 +866,20 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, return 0; } +static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa, + void *data, unsigned int len) +{ + const unsigned int offset = offset_in_page(gpa); + const gfn_t gfn = gpa_to_gfn(gpa); + int rc; + + if (mode == GACC_STORE) + rc = kvm_write_guest_page(kvm, gfn, data, offset, len); + else + rc = kvm_read_guest_page(kvm, gfn, data, offset, len); + return rc; +} + int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, unsigned long len, enum gacc_mode mode) { @@ -896,10 +910,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode); for (idx = 0; idx < nr_pages && !rc; idx++) { fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len); - if (mode == GACC_STORE) - rc = kvm_write_guest(vcpu->kvm, gpas[idx], data, fragment_len); - else - rc = kvm_read_guest(vcpu->kvm, gpas[idx], data, fragment_len); + rc = access_guest_page(vcpu->kvm, mode, gpas[idx], data, fragment_len); len -= fragment_len; data += fragment_len; } @@ -920,10 +931,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, while (len && !rc) { gpa = kvm_s390_real_to_abs(vcpu, gra); fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len); - if (mode) - rc = write_guest_abs(vcpu, gpa, data, fragment_len); - else - rc = read_guest_abs(vcpu, gpa, 
data, fragment_len); + rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len); len -= fragment_len; gra += fragment_len; data += fragment_len; From 3c724f1a1caaee40c99422e22e22133e1496ffc3 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Mon, 7 Jun 2021 08:07:13 +0000 Subject: [PATCH 175/268] s390: uv: Add offset comments to UV query struct and fix naming Changes to the struct are easier to manage with offset comments so let's add some. And now that we know that the last struct member has the wrong name let's also fix this. Signed-off-by: Janosch Frank Reviewed-by: Claudio Imbrenda --- arch/s390/include/asm/uv.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index 72d3e49c2860..86218382d29c 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -91,23 +91,23 @@ struct uv_cb_header { /* Query Ultravisor Information */ struct uv_cb_qui { - struct uv_cb_header header; - u64 reserved08; - u64 inst_calls_list[4]; - u64 reserved30[2]; - u64 uv_base_stor_len; - u64 reserved48; - u64 conf_base_phys_stor_len; - u64 conf_base_virt_stor_len; - u64 conf_virt_var_stor_len; - u64 cpu_stor_len; - u32 reserved70[3]; - u32 max_num_sec_conf; - u64 max_guest_stor_addr; - u8 reserved88[158 - 136]; - u16 max_guest_cpu_id; - u64 uv_feature_indications; - u8 reserveda0[200 - 168]; + struct uv_cb_header header; /* 0x0000 */ + u64 reserved08; /* 0x0008 */ + u64 inst_calls_list[4]; /* 0x0010 */ + u64 reserved30[2]; /* 0x0030 */ + u64 uv_base_stor_len; /* 0x0040 */ + u64 reserved48; /* 0x0048 */ + u64 conf_base_phys_stor_len; /* 0x0050 */ + u64 conf_base_virt_stor_len; /* 0x0058 */ + u64 conf_virt_var_stor_len; /* 0x0060 */ + u64 cpu_stor_len; /* 0x0068 */ + u32 reserved70[3]; /* 0x0070 */ + u32 max_num_sec_conf; /* 0x007c */ + u64 max_guest_stor_addr; /* 0x0080 */ + u8 reserved88[158 - 136]; /* 0x0088 */ + u16 max_guest_cpu_id; /* 0x009e */ + u64 uv_feature_indications; /* 0x00a0 */ + u8 reserveda8[200 - 168]; /* 0x00a8 */ } __packed __aligned(8); /* Initialize Ultravisor */ From 812de04661c4daa7ac385c0dfd62594540538034 Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Mon, 13 Dec 2021 22:05:50 +0100 Subject: [PATCH 176/268] KVM: s390: Clarify SIGP orders versus STOP/RESTART With KVM_CAP_S390_USER_SIGP, there are only five Signal Processor orders (CONDITIONAL EMERGENCY SIGNAL, EMERGENCY SIGNAL, EXTERNAL CALL, SENSE, and SENSE RUNNING STATUS) which are intended for frequent use and thus are processed in-kernel. The remainder are sent to userspace with the KVM_CAP_S390_USER_SIGP capability. Of those, three orders (RESTART, STOP, and STOP AND STORE STATUS) have the potential to inject work back into the kernel, and thus are asynchronous. Let's look for those pending IRQs when processing one of the in-kernel SIGP orders, and return BUSY (CC2) if one is in process. This is in agreement with the Principles of Operation, which states that only one order can be "active" on a CPU at a time. 
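
Concretely, a SIGP SENSE that arrives while an earlier STOP or RESTART is still being processed has to be answered with condition code 2 (busy) rather than with a possibly stale stopped/operating indication. A condensed model of that check, with illustrative structure and constant names rather than the in-kernel definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define SIGP_CC_ORDER_CODE_ACCEPTED 0
    #define SIGP_CC_BUSY                2

    struct vcpu {
        bool stop_irq_pending;      /* asynchronous STOP not yet completed */
        bool restart_irq_pending;   /* asynchronous RESTART not yet completed */
        bool stopped;
    };

    static int handle_sigp_sense(const struct vcpu *dst)
    {
        /*
         * While an asynchronous order is still in flight, any further
         * non-reset order aimed at this CPU is told "busy", so it cannot
         * observe an intermediate state.
         */
        if (dst->stop_irq_pending || dst->restart_irq_pending)
            return SIGP_CC_BUSY;

        return SIGP_CC_ORDER_CODE_ACCEPTED; /* plus the actual sense result */
    }

    int main(void)
    {
        struct vcpu dst = { .stop_irq_pending = true };

        printf("cc=%d\n", handle_sigp_sense(&dst)); /* 2 */
        return 0;
    }
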
Cc: stable@vger.kernel.org Suggested-by: David Hildenbrand Signed-off-by: Eric Farman Reviewed-by: Christian Borntraeger Acked-by: David Hildenbrand Link: https://lore.kernel.org/r/20211213210550.856213-2-farman@linux.ibm.com [borntraeger@linux.ibm.com: add stable tag] Signed-off-by: Christian Borntraeger --- arch/s390/kvm/interrupt.c | 7 +++++++ arch/s390/kvm/kvm-s390.c | 9 +++++++-- arch/s390/kvm/kvm-s390.h | 1 + arch/s390/kvm/sigp.c | 28 ++++++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 2 deletions(-) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index c3bd993fdd0c..0576d5c99138 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2115,6 +2115,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); } +int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + return test_bit(IRQ_PEND_RESTART, &li->pending_irqs); +} + void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 14a18ba5ff2c..ef299aad4009 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4645,10 +4645,15 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) } } - /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ + /* + * Set the VCPU to STOPPED and THEN clear the interrupt flag, + * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders + * have been fully processed. This will ensure that the VCPU + * is kept BUSY if another VCPU is inquiring with SIGP SENSE. + */ + kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); kvm_s390_clear_stop_irq(vcpu); - kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); __disable_ibs_on_vcpu(vcpu); for (i = 0; i < online_vcpus; i++) { diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c07a050d757d..1876ab0c293f 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -427,6 +427,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm); int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); extern struct kvm_device_ops kvm_flic_ops; int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); +int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu); void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *buf, int len); diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index cf4de80bd541..8aaee2892ec3 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -276,6 +276,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, if (!dst_vcpu) return SIGP_CC_NOT_OPERATIONAL; + /* + * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders + * are processed asynchronously. Until the affected VCPU finishes + * its work and calls back into KVM to clear the (RESTART or STOP) + * interrupt, we need to return any new non-reset orders "busy". + * + * This is important because a single VCPU could issue: + * 1) SIGP STOP $DESTINATION + * 2) SIGP SENSE $DESTINATION + * + * If the SIGP SENSE would not be rejected as "busy", it could + * return an incorrect answer as to whether the VCPU is STOPPED + * or OPERATING. + */ + if (order_code != SIGP_INITIAL_CPU_RESET && + order_code != SIGP_CPU_RESET) { + /* + * Lockless check. 
Both SIGP STOP and SIGP (RE)START + * properly synchronize everything while processing + * their orders, while the guest cannot observe a + * difference when issuing other orders from two + * different VCPUs. + */ + if (kvm_s390_is_stop_irq_pending(dst_vcpu) || + kvm_s390_is_restart_irq_pending(dst_vcpu)) + return SIGP_CC_BUSY; + } + switch (order_code) { case SIGP_SENSE: vcpu->stat.instruction_sigp_sense++; From 500ca5241bf8054c8a973e54cb28629614f43178 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Wed, 8 Dec 2021 19:28:10 +0000 Subject: [PATCH 177/268] KVM: arm64: Use defined value for SCTLR_ELx_EE Replace the hardcoded value with the existing definition. No functional change intended. Signed-off-by: Fuad Tabba Acked-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211208192810.657360-1-tabba@google.com --- arch/arm64/include/asm/kvm_emulate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index f4871e47b2d0..cd5fd9b92b04 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -386,7 +386,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT; } else { u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); - sctlr |= (1 << 25); + sctlr |= SCTLR_ELx_EE; vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1); } } From a080e323be8d66415944ad862fcf750825f871e7 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Wed, 8 Dec 2021 19:32:56 +0000 Subject: [PATCH 178/268] KVM: arm64: Fix comment for kvm_reset_vcpu() The comment for kvm_reset_vcpu() refers to the sysreg table as being the table above, probably because of the code extracted at commit f4672752c321ea36 ("arm64: KVM: virtual CPU reset"). Fix the comment to remove the potentially confusing reference. Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211208193257.667613-2-tabba@google.com --- arch/arm64/kvm/reset.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 426bd7fbc3fd..2bbfeadeb6ae 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -193,10 +193,9 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu) * kvm_reset_vcpu - sets core registers and sys_regs to reset value * @vcpu: The VCPU pointer * - * This function finds the right table above and sets the registers on - * the virtual CPU struct to their architecturally defined reset - * values, except for registers whose reset is deferred until - * kvm_arm_vcpu_finalize(). + * This function sets the registers on the virtual CPU struct to their + * architecturally defined reset values, except for registers whose reset is + * deferred until kvm_arm_vcpu_finalize(). * * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT * ioctl or as part of handling a request issued by another VCPU in the PSCI From dda0190d7ff7f26c221f9ab1659a73d4517920e7 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Wed, 8 Dec 2021 19:32:57 +0000 Subject: [PATCH 179/268] KVM: arm64: Fix comment on barrier in kvm_psci_vcpu_on() The barrier is there for power_off rather than power_state. Probably typo in commit 358b28f09f0ab074 ("arm/arm64: KVM: Allow a VCPU to fully reset itself"). 
Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211208193257.667613-3-tabba@google.com --- arch/arm64/kvm/psci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 74c47d420253..9b82d6878aac 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -109,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) /* * Make sure the reset request is observed if the change to - * power_state is observed. + * power_off is observed. */ smp_wmb(); From cb7c4f364abd09abd1865fa049ef492fb43e6bf3 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 27 Dec 2021 12:48:04 +0000 Subject: [PATCH 180/268] KVM: selftests: arm64: Initialise default guest mode at test startup time As we are going to add support for a variable default mode on arm64, let's make sure it is setup first by using a constructor that gets called before the actual test runs. Suggested-by: Andrew Jones Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20211227124809.1335409-2-maz@kernel.org --- tools/testing/selftests/kvm/lib/aarch64/processor.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index b4eeeafd2a70..b509341b8411 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -432,3 +432,12 @@ uint32_t guest_get_vcpuid(void) { return read_sysreg(tpidr_el1); } + +/* + * arm64 doesn't have a true default mode, so start by computing the + * available IPA space and page sizes early. + */ +void __attribute__((constructor)) init_guest_modes(void) +{ + guest_modes_append_default(); +} From 357c628e1248dd53f5c43a768246a83478a7f489 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 27 Dec 2021 12:48:05 +0000 Subject: [PATCH 181/268] KVM: selftests: arm64: Introduce a variable default IPA size Contrary to popular belief, there is no such thing as a default IPA size on arm64. Anything goes, and implementations are the usual Wild West. The selftest infrastructure default to 40bit IPA, which obviously doesn't work for some systems out there. Turn VM_MODE_DEFAULT from a constant into a variable, and let guest_modes_append_default() populate it, depending on what the HW can do. In order to preserve the current behaviour, we still pick 40bits IPA as the default if it is available, and the largest supported IPA space otherwise. 
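
The selection logic therefore becomes: register every mode the host's IPA limit allows, prefer the 40-bit/4K combination as the default when it is usable, and otherwise fall back to the first mode that ended up both supported and enabled. A compact stand-alone rendition of that fallback scan (the mode array contents are invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_VM_MODES 4

    struct guest_mode { bool supported, enabled; };

    int main(void)
    {
        /* Pretend the preferred 40-bit/4K mode (index 2) is unavailable. */
        struct guest_mode modes[NUM_VM_MODES] = {
            { false, false }, { true, true }, { false, false }, { true, true },
        };
        int preferred = 2, def = NUM_VM_MODES, i;

        if (modes[preferred].supported && modes[preferred].enabled)
            def = preferred;

        /* Pick the first supported mode if the preferred one isn't there. */
        for (i = 0; def == NUM_VM_MODES && i < NUM_VM_MODES; i++)
            if (modes[i].supported && modes[i].enabled)
                def = i;

        if (def == NUM_VM_MODES)
            fprintf(stderr, "No supported mode!\n");
        else
            printf("default mode: %d\n", def); /* 1 */
        return 0;
    }
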
Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20211227124809.1335409-3-maz@kernel.org --- .../testing/selftests/kvm/include/kvm_util.h | 4 ++- tools/testing/selftests/kvm/lib/guest_modes.c | 30 +++++++++++++++++-- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 6a1a37f30494..ec070d800731 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -53,7 +53,9 @@ enum vm_guest_mode { #if defined(__aarch64__) -#define VM_MODE_DEFAULT VM_MODE_P40V48_4K +extern enum vm_guest_mode vm_mode_default; + +#define VM_MODE_DEFAULT vm_mode_default #define MIN_PAGE_SHIFT 12U #define ptes_per_page(page_size) ((page_size) / 8) diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index c330f414ef96..5e3fdbd992fd 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -4,22 +4,46 @@ */ #include "guest_modes.h" +#ifdef __aarch64__ +enum vm_guest_mode vm_mode_default; +#endif + struct guest_mode guest_modes[NUM_VM_MODES]; void guest_modes_append_default(void) { +#ifndef __aarch64__ guest_mode_append(VM_MODE_DEFAULT, true, true); - -#ifdef __aarch64__ - guest_mode_append(VM_MODE_P40V48_64K, true, true); +#else { unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); + int i; + + vm_mode_default = NUM_VM_MODES; + if (limit >= 52) guest_mode_append(VM_MODE_P52V48_64K, true, true); if (limit >= 48) { guest_mode_append(VM_MODE_P48V48_4K, true, true); guest_mode_append(VM_MODE_P48V48_64K, true, true); } + if (limit >= 40) { + guest_mode_append(VM_MODE_P40V48_4K, true, true); + guest_mode_append(VM_MODE_P40V48_64K, true, true); + vm_mode_default = VM_MODE_P40V48_4K; + } + + /* + * Pick the first supported IPA size if the default + * isn't available. + */ + for (i = 0; vm_mode_default == NUM_VM_MODES && i < NUM_VM_MODES; i++) { + if (guest_modes[i].supported && guest_modes[i].enabled) + vm_mode_default = i; + } + + TEST_ASSERT(vm_mode_default != NUM_VM_MODES, + "No supported mode!"); } #endif #ifdef __s390x__ From 0303ffdb9ecffac4654b16bbf69ba84d131eb8b7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 27 Dec 2021 12:48:06 +0000 Subject: [PATCH 182/268] KVM: selftests: arm64: Check for supported page sizes Just as arm64 implemenations don't necessary support all IPA ranges, they don't all support the same page sizes either. Fun. Create a dummy VM to snapshot the page sizes supported by the host, and filter the supported modes. 
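
The probe reads ID_AA64MMFR0_EL1 from the throw-away vcpu and decodes its three granule-support fields; that decoding can be exercised on its own (the register value below is invented for the example):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decode the TGran fields of ID_AA64MMFR0_EL1 as the patch does. */
    static void decode_granules(uint64_t mmfr0, bool *ps4k, bool *ps16k, bool *ps64k)
    {
        *ps4k  = ((mmfr0 >> 28) & 0xf) != 0xf; /* TGran4:  0xf means not supported */
        *ps64k = ((mmfr0 >> 24) & 0xf) == 0x0; /* TGran64: 0x0 means supported */
        *ps16k = ((mmfr0 >> 20) & 0xf) != 0x0; /* TGran16: 0x0 means not supported */
    }

    int main(void)
    {
        /* Hypothetical value: 4K and 16K supported, 64K not. */
        uint64_t mmfr0 = (0x0ULL << 28) | (0xfULL << 24) | (0x1ULL << 20);
        bool ps4k, ps16k, ps64k;

        decode_granules(mmfr0, &ps4k, &ps16k, &ps64k);
        printf("4K=%d 16K=%d 64K=%d\n", ps4k, ps16k, ps64k);
        return 0;
    }

Note the asymmetry in the architected encodings: the 16K field uses 0 for "not implemented", while the 4K and 64K fields use 0 for "implemented", which is why the three comparisons differ.
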
Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20211227124809.1335409-4-maz@kernel.org --- .../selftests/kvm/include/aarch64/processor.h | 3 ++ .../selftests/kvm/lib/aarch64/processor.c | 36 +++++++++++++++++++ tools/testing/selftests/kvm/lib/guest_modes.c | 17 +++++---- 3 files changed, 50 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index 27d8e1bb5b36..8f9f46979a00 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -113,6 +113,9 @@ enum { #define ESR_EC_WP_CURRENT 0x35 #define ESR_EC_BRK_INS 0x3c +void aarch64_get_supported_page_sizes(uint32_t ipa, + bool *ps4k, bool *ps16k, bool *ps64k); + void vm_init_descriptor_tables(struct kvm_vm *vm); void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid); diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index b509341b8411..49fcfe9768e0 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -8,6 +8,7 @@ #include #include +#include "guest_modes.h" #include "kvm_util.h" #include "../kvm_util_internal.h" #include "processor.h" @@ -433,6 +434,41 @@ uint32_t guest_get_vcpuid(void) return read_sysreg(tpidr_el1); } +void aarch64_get_supported_page_sizes(uint32_t ipa, + bool *ps4k, bool *ps16k, bool *ps64k) +{ + struct kvm_vcpu_init preferred_init; + int kvm_fd, vm_fd, vcpu_fd, err; + uint64_t val; + struct kvm_one_reg reg = { + .id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1), + .addr = (uint64_t)&val, + }; + + kvm_fd = open_kvm_dev_path_or_exit(); + vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, ipa); + TEST_ASSERT(vm_fd >= 0, "Can't create VM"); + + vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0); + TEST_ASSERT(vcpu_fd >= 0, "Can't create vcpu"); + + err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init); + TEST_ASSERT(err == 0, "Can't get target"); + err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init); + TEST_ASSERT(err == 0, "Can't get init vcpu"); + + err = ioctl(vcpu_fd, KVM_GET_ONE_REG, ®); + TEST_ASSERT(err == 0, "Can't get MMFR0"); + + *ps4k = ((val >> 28) & 0xf) != 0xf; + *ps64k = ((val >> 24) & 0xf) == 0; + *ps16k = ((val >> 20) & 0xf) != 0; + + close(vcpu_fd); + close(vm_fd); + close(kvm_fd); +} + /* * arm64 doesn't have a true default mode, so start by computing the * available IPA space and page sizes early. 
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index 5e3fdbd992fd..67144fdac433 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -5,6 +5,7 @@ #include "guest_modes.h" #ifdef __aarch64__ +#include "processor.h" enum vm_guest_mode vm_mode_default; #endif @@ -17,20 +18,24 @@ void guest_modes_append_default(void) #else { unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); + bool ps4k, ps16k, ps64k; int i; + aarch64_get_supported_page_sizes(limit, &ps4k, &ps16k, &ps64k); + vm_mode_default = NUM_VM_MODES; if (limit >= 52) - guest_mode_append(VM_MODE_P52V48_64K, true, true); + guest_mode_append(VM_MODE_P52V48_64K, ps64k, ps64k); if (limit >= 48) { - guest_mode_append(VM_MODE_P48V48_4K, true, true); - guest_mode_append(VM_MODE_P48V48_64K, true, true); + guest_mode_append(VM_MODE_P48V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P48V48_64K, ps64k, ps64k); } if (limit >= 40) { - guest_mode_append(VM_MODE_P40V48_4K, true, true); - guest_mode_append(VM_MODE_P40V48_64K, true, true); - vm_mode_default = VM_MODE_P40V48_4K; + guest_mode_append(VM_MODE_P40V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P40V48_64K, ps64k, ps64k); + if (ps4k) + vm_mode_default = VM_MODE_P40V48_4K; } /* From 2f41a61c54fb6410202b2cc08be80ae9554d599d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 27 Dec 2021 12:48:07 +0000 Subject: [PATCH 183/268] KVM: selftests: arm64: Rework TCR_EL1 configuration The current way we initialise TCR_EL1 is a bit cumbersome, as we mix setting TG0 and IPS in the same swtch statement. Split it into two statements (one for the base granule size, and another for the IPA size), allowing new modes to be added in a more elegant way. No functional change intended. 
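
After the split, the granule and the output size are two independent field updates on TCR_EL1: TG0 occupies bits [15:14] (0 for 4KB, 1 for 64KB, 2 for 16KB) and IPS occupies bits [34:32]. The encodings used in the two switch statements can be checked with a trivial stand-alone program (64-bit constants are used here so the shift is well defined on any host):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t tcr_el1 = 0;

        /* Base granule size: TG0, bits [15:14]. */
        tcr_el1 |= 0ull << 14;  /* TG0 = 4KB */

        /* Output size: IPS, bits [34:32]. */
        tcr_el1 |= 2ull << 32;  /* IPS = 40 bits */

        printf("TCR_EL1 = 0x%016llx\n", (unsigned long long)tcr_el1);
        return 0;
    }

Adding a new mode then only needs a case in whichever of the two switches it affects, rather than a new combined TG0+IPS case.
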
Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20211227124809.1335409-5-maz@kernel.org --- .../selftests/kvm/lib/aarch64/processor.c | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 49fcfe9768e0..d005543aa3e2 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -238,6 +238,7 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); + /* Configure base granule size */ switch (vm->mode) { case VM_MODE_P52V48_4K: TEST_FAIL("AArch64 does not support 4K sized pages " @@ -246,23 +247,29 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init TEST_FAIL("AArch64 does not support 4K sized pages " "with ANY-bit physical address ranges"); case VM_MODE_P52V48_64K: + case VM_MODE_P48V48_64K: + case VM_MODE_P40V48_64K: tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ + break; + case VM_MODE_P48V48_4K: + case VM_MODE_P40V48_4K: + tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ + break; + default: + TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); + } + + /* Configure output size */ + switch (vm->mode) { + case VM_MODE_P52V48_64K: tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ break; case VM_MODE_P48V48_4K: - tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ - tcr_el1 |= 5ul << 32; /* IPS = 48 bits */ - break; case VM_MODE_P48V48_64K: - tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ tcr_el1 |= 5ul << 32; /* IPS = 48 bits */ break; case VM_MODE_P40V48_4K: - tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ - tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ - break; case VM_MODE_P40V48_64K: - tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ break; default: From e7f58a6bd28bfd2e4f60312abf48f07de2c4121c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 27 Dec 2021 12:48:08 +0000 Subject: [PATCH 184/268] KVM: selftests: arm64: Add support for VM_MODE_P36V48_{4K,64K} Some of the arm64 systems out there have an IPA space that is positively tiny. Nonetheless, they make great KVM hosts. Add support for 36bit IPA support with 4kB pages, which makes some of the fruity machines happy. Whilst we're at it, add support for 64kB pages as well, though these boxes have no support for it. 
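
The page-table level counts chosen for the new modes follow from the VA width and the granule: each descriptor is 8 bytes, so one level resolves page_shift - 3 bits of virtual address. A quick way to sanity-check the pgtable_levels values used below (the helper is written for this illustration and is not part of the selftest library):

    #include <stdio.h>

    /* Levels needed to translate va_bits with the given page shift,
     * assuming 8-byte descriptors (page_shift - 3 bits per level). */
    static int pgtable_levels(int va_bits, int page_shift)
    {
        int bits_per_level = page_shift - 3;

        return (va_bits - page_shift + bits_per_level - 1) / bits_per_level;
    }

    int main(void)
    {
        printf("48-bit VA, 4K:  %d levels\n", pgtable_levels(48, 12)); /* 4 */
        printf("48-bit VA, 64K: %d levels\n", pgtable_levels(48, 16)); /* 3 */
        return 0;
    }

For a 48-bit VA this gives 4 levels with 4K pages and 3 levels with 64K pages, matching the vm_create() cases in the diff.
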
Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211227124809.1335409-6-maz@kernel.org --- tools/testing/selftests/kvm/include/kvm_util.h | 2 ++ tools/testing/selftests/kvm/lib/aarch64/processor.c | 6 ++++++ tools/testing/selftests/kvm/lib/guest_modes.c | 4 ++++ tools/testing/selftests/kvm/lib/kvm_util.c | 6 ++++++ 4 files changed, 18 insertions(+) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index ec070d800731..4e9e01631bc3 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -48,6 +48,8 @@ enum vm_guest_mode { VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */ VM_MODE_P47V64_4K, VM_MODE_P44V64_4K, + VM_MODE_P36V48_4K, + VM_MODE_P36V48_64K, NUM_VM_MODES, }; diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index d005543aa3e2..70395c777ea4 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -249,10 +249,12 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init case VM_MODE_P52V48_64K: case VM_MODE_P48V48_64K: case VM_MODE_P40V48_64K: + case VM_MODE_P36V48_64K: tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ break; case VM_MODE_P48V48_4K: case VM_MODE_P40V48_4K: + case VM_MODE_P36V48_4K: tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ break; default: @@ -272,6 +274,10 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init case VM_MODE_P40V48_64K: tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ break; + case VM_MODE_P36V48_4K: + case VM_MODE_P36V48_64K: + tcr_el1 |= 1ul << 32; /* IPS = 36 bits */ + break; default: TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); } diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index 67144fdac433..240f2d2e2d23 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -37,6 +37,10 @@ void guest_modes_append_default(void) if (ps4k) vm_mode_default = VM_MODE_P40V48_4K; } + if (limit >= 36) { + guest_mode_append(VM_MODE_P36V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P36V48_64K, ps64k, ps64k); + } /* * Pick the first supported IPA size if the default diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 8f2e0bb1ef96..643cb2e9a653 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -172,6 +172,8 @@ const char *vm_guest_mode_string(uint32_t i) [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", + [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages", + [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages", }; _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES, "Missing new mode strings?"); @@ -191,6 +193,8 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = { [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 }, [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 }, [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 }, + [VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 }, + [VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 }, }; _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, "Missing new mode params?"); @@ -252,9 +256,11 @@ struct 
kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) vm->pgtable_levels = 3; break; case VM_MODE_P40V48_4K: + case VM_MODE_P36V48_4K: vm->pgtable_levels = 4; break; case VM_MODE_P40V48_64K: + case VM_MODE_P36V48_64K: vm->pgtable_levels = 3; break; case VM_MODE_PXXV48_4K: From aa674de1dc3d2bdf2c67ad195dc81977972323c6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 27 Dec 2021 12:48:09 +0000 Subject: [PATCH 185/268] KVM: selftests: arm64: Add support for various modes with 16kB page size The 16kB page size is not a popular choice, due to only a few CPUs actually implementing support for it. However, it can lead to some interesting performance improvements given the right uarch choices. Add support for this page size for various PA/VA combinations. Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20211227124809.1335409-7-maz@kernel.org --- tools/testing/selftests/kvm/include/kvm_util.h | 4 ++++ .../selftests/kvm/lib/aarch64/processor.c | 10 ++++++++++ tools/testing/selftests/kvm/lib/guest_modes.c | 4 ++++ tools/testing/selftests/kvm/lib/kvm_util.c | 16 ++++++++++++++++ 4 files changed, 34 insertions(+) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 4e9e01631bc3..4fee66da597c 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -42,14 +42,18 @@ enum vm_guest_mode { VM_MODE_P52V48_4K, VM_MODE_P52V48_64K, VM_MODE_P48V48_4K, + VM_MODE_P48V48_16K, VM_MODE_P48V48_64K, VM_MODE_P40V48_4K, + VM_MODE_P40V48_16K, VM_MODE_P40V48_64K, VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */ VM_MODE_P47V64_4K, VM_MODE_P44V64_4K, VM_MODE_P36V48_4K, + VM_MODE_P36V48_16K, VM_MODE_P36V48_64K, + VM_MODE_P36V47_16K, NUM_VM_MODES, }; diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 70395c777ea4..9343d82519b4 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -252,6 +252,12 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init case VM_MODE_P36V48_64K: tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ break; + case VM_MODE_P48V48_16K: + case VM_MODE_P40V48_16K: + case VM_MODE_P36V48_16K: + case VM_MODE_P36V47_16K: + tcr_el1 |= 2ul << 14; /* TG0 = 16KB */ + break; case VM_MODE_P48V48_4K: case VM_MODE_P40V48_4K: case VM_MODE_P36V48_4K: @@ -267,15 +273,19 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ break; case VM_MODE_P48V48_4K: + case VM_MODE_P48V48_16K: case VM_MODE_P48V48_64K: tcr_el1 |= 5ul << 32; /* IPS = 48 bits */ break; case VM_MODE_P40V48_4K: + case VM_MODE_P40V48_16K: case VM_MODE_P40V48_64K: tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ break; case VM_MODE_P36V48_4K: + case VM_MODE_P36V48_16K: case VM_MODE_P36V48_64K: + case VM_MODE_P36V47_16K: tcr_el1 |= 1ul << 32; /* IPS = 36 bits */ break; default: diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index 240f2d2e2d23..da315d3373a3 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -29,17 +29,21 @@ void guest_modes_append_default(void) guest_mode_append(VM_MODE_P52V48_64K, ps64k, ps64k); if (limit >= 48) { guest_mode_append(VM_MODE_P48V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P48V48_16K, ps16k, 
ps16k); guest_mode_append(VM_MODE_P48V48_64K, ps64k, ps64k); } if (limit >= 40) { guest_mode_append(VM_MODE_P40V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P40V48_16K, ps16k, ps16k); guest_mode_append(VM_MODE_P40V48_64K, ps64k, ps64k); if (ps4k) vm_mode_default = VM_MODE_P40V48_4K; } if (limit >= 36) { guest_mode_append(VM_MODE_P36V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P36V48_16K, ps16k, ps16k); guest_mode_append(VM_MODE_P36V48_64K, ps64k, ps64k); + guest_mode_append(VM_MODE_P36V47_16K, ps16k, ps16k); } /* diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 643cb2e9a653..6c8766c9c166 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -166,14 +166,18 @@ const char *vm_guest_mode_string(uint32_t i) [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", + [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages", [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", + [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages", [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages", + [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages", [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages", + [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages", }; _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES, "Missing new mode strings?"); @@ -187,14 +191,18 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = { [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 }, [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 }, [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 }, + [VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 }, [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 }, [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 }, + [VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 }, [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 }, [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 }, [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 }, [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 }, [VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 }, + [VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 }, [VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 }, + [VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 }, }; _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, "Missing new mode params?"); @@ -263,6 +271,14 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) case VM_MODE_P36V48_64K: vm->pgtable_levels = 3; break; + case VM_MODE_P48V48_16K: + case VM_MODE_P40V48_16K: + case VM_MODE_P36V48_16K: + vm->pgtable_levels = 4; + break; + case VM_MODE_P36V47_16K: + vm->pgtable_levels = 3; + break; case VM_MODE_PXXV48_4K: #ifdef __x86_64__ kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); From 33a1ca736e74839d08948973d30f6def820b8b14 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:50 -0800 Subject: [PATCH 186/268] KVM: selftests: aarch64: Move gic_v3.h to shared headers Move gic_v3.h to the shared headers location. There are some definitions that will be used in the vgic-irq test. 
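Once shared, test code can pull in the header directly; for example (as the vgic-irq test does later in the series):

	#include "gic_v3.h"

	/* e.g. read the ICV view of the active priorities register */
	uint64_t ap1r0 = read_sysreg_s(SYS_ICV_AP1R0_EL1);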
Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-2-ricarkol@google.com --- tools/testing/selftests/kvm/{lib => include}/aarch64/gic_v3.h | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tools/testing/selftests/kvm/{lib => include}/aarch64/gic_v3.h (100%) diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h b/tools/testing/selftests/kvm/include/aarch64/gic_v3.h similarity index 100% rename from tools/testing/selftests/kvm/lib/aarch64/gic_v3.h rename to tools/testing/selftests/kvm/include/aarch64/gic_v3.h From 745068367ccbf33d69cf4acf7b1a3d5478978e8e Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:51 -0800 Subject: [PATCH 187/268] KVM: selftests: aarch64: Add function for accessing GICv3 dist and redist registers Add a generic library function for reading and writing GICv3 distributor and redistributor registers. Then adapt some functions to use it; more will come and use it in the next commit. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-3-ricarkol@google.com --- .../selftests/kvm/lib/aarch64/gic_v3.c | 120 +++++++++++++++--- 1 file changed, 99 insertions(+), 21 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c index 2dbf3339b62e..00e944fd8148 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c @@ -19,7 +19,8 @@ struct gicv3_data { unsigned int nr_spis; }; -#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) +#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) +#define DIST_BIT (1U << 31) enum gicv3_intid_range { SGI_RANGE, @@ -50,6 +51,14 @@ static void gicv3_gicr_wait_for_rwp(void *redist_base) } } +static void gicv3_wait_for_rwp(uint32_t cpu_or_dist) +{ + if (cpu_or_dist & DIST_BIT) + gicv3_gicd_wait_for_rwp(); + else + gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]); +} + static enum gicv3_intid_range get_intid_range(unsigned int intid) { switch (intid) { @@ -81,39 +90,108 @@ static void gicv3_write_eoir(uint32_t irq) isb(); } -static void -gicv3_config_irq(unsigned int intid, unsigned int offset) +uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset) +{ + void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base + : sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]); + return readl(base + offset); +} + +void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val) +{ + void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base + : sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]); + writel(reg_val, base + offset); +} + +uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask) +{ + return gicv3_reg_readl(cpu_or_dist, offset) & mask; +} + +void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset, + uint32_t mask, uint32_t reg_val) +{ + uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; + + tmp |= (reg_val & mask); + gicv3_reg_writel(cpu_or_dist, offset, tmp); +} + +/* + * We use a single offset for the distributor and redistributor maps as they + * have the same value in both. The only exceptions are registers that only + * exist in one and not the other, like GICR_WAKER that doesn't exist in the + * distributor map. 
Such registers are conveniently marked as reserved in the + * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being + * marked as "Reserved" in the Distributor map. + */ +static void gicv3_access_reg(uint32_t intid, uint64_t offset, + uint32_t reg_bits, uint32_t bits_per_field, + bool write, uint32_t *val) { uint32_t cpu = guest_get_vcpuid(); - uint32_t mask = 1 << (intid % 32); enum gicv3_intid_range intid_range = get_intid_range(intid); - void *reg; + uint32_t fields_per_reg, index, mask, shift; + uint32_t cpu_or_dist; - /* We care about 'cpu' only for SGIs or PPIs */ - if (intid_range == SGI_RANGE || intid_range == PPI_RANGE) { - GUEST_ASSERT(cpu < gicv3_data.nr_cpus); + GUEST_ASSERT(bits_per_field <= reg_bits); + GUEST_ASSERT(*val < (1U << bits_per_field)); + /* Some registers like IROUTER are 64 bit long. Those are currently not + * supported by readl nor writel, so just asserting here until then. + */ + GUEST_ASSERT(reg_bits == 32); - reg = sgi_base_from_redist(gicv3_data.redist_base[cpu]) + - offset; - writel(mask, reg); - gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu]); - } else if (intid_range == SPI_RANGE) { - reg = gicv3_data.dist_base + offset + (intid / 32) * 4; - writel(mask, reg); - gicv3_gicd_wait_for_rwp(); - } else { - GUEST_ASSERT(0); - } + fields_per_reg = reg_bits / bits_per_field; + index = intid % fields_per_reg; + shift = index * bits_per_field; + mask = ((1U << bits_per_field) - 1) << shift; + + /* Set offset to the actual register holding intid's config. */ + offset += (intid / fields_per_reg) * (reg_bits / 8); + + cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu; + + if (write) + gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift); + *val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift; +} + +static void gicv3_write_reg(uint32_t intid, uint64_t offset, + uint32_t reg_bits, uint32_t bits_per_field, uint32_t val) +{ + gicv3_access_reg(intid, offset, reg_bits, + bits_per_field, true, &val); +} + +static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset, + uint32_t reg_bits, uint32_t bits_per_field) +{ + uint32_t val; + + gicv3_access_reg(intid, offset, reg_bits, + bits_per_field, false, &val); + return val; } static void gicv3_irq_enable(unsigned int intid) { - gicv3_config_irq(intid, GICD_ISENABLER); + bool is_spi = get_intid_range(intid) == SPI_RANGE; + unsigned int val = 1; + uint32_t cpu = guest_get_vcpuid(); + + gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, val); + gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); } static void gicv3_irq_disable(unsigned int intid) { - gicv3_config_irq(intid, GICD_ICENABLER); + bool is_spi = get_intid_range(intid) == SPI_RANGE; + uint32_t val = 1; + uint32_t cpu = guest_get_vcpuid(); + + gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, val); + gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); } static void gicv3_enable_redist(void *redist_base) From 17ce617bf76a7c1d3b553ed01607706434b9ed59 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:52 -0800 Subject: [PATCH 188/268] KVM: selftests: aarch64: Add GICv3 register accessor library functions Add library functions for accessing GICv3 registers: DIR, PMR, CTLR, ISACTIVER, ISPENDR. 
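A rough guest-side sketch of how these accessors are meant to be combined (the real call sites appear in the vgic_irq test added later in the series; SPI 32 is just an arbitrary example):

	/* Illustrative only: prioritise, configure and pend an SPI. */
	gic_set_priority_mask(0xf0);	/* accept interrupts with priority value below 0xf0 */
	gic_set_priority(32, 0x08);
	gic_irq_set_config(32, true);	/* edge-triggered */
	gic_irq_set_pending(32);
	GUEST_ASSERT(gic_irq_get_pending(32));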
Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-4-ricarkol@google.com --- .../selftests/kvm/include/aarch64/gic.h | 16 ++++ .../selftests/kvm/include/aarch64/gic_v3.h | 12 +++ tools/testing/selftests/kvm/lib/aarch64/gic.c | 66 ++++++++++++++ .../selftests/kvm/lib/aarch64/gic_private.h | 11 +++ .../selftests/kvm/lib/aarch64/gic_v3.c | 90 +++++++++++++++++-- 5 files changed, 189 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/kvm/include/aarch64/gic.h b/tools/testing/selftests/kvm/include/aarch64/gic.h index 85dd1e53048e..c932cf3d0771 100644 --- a/tools/testing/selftests/kvm/include/aarch64/gic.h +++ b/tools/testing/selftests/kvm/include/aarch64/gic.h @@ -17,5 +17,21 @@ void gic_irq_enable(unsigned int intid); void gic_irq_disable(unsigned int intid); unsigned int gic_get_and_ack_irq(void); void gic_set_eoi(unsigned int intid); +void gic_set_dir(unsigned int intid); + +/* + * Sets the EOI mode. When split is false, EOI just drops the priority. When + * split is true, EOI drops the priority and deactivates the interrupt. + */ +void gic_set_eoi_split(bool split); +void gic_set_priority_mask(uint64_t mask); +void gic_set_priority(uint32_t intid, uint32_t prio); +void gic_irq_set_active(unsigned int intid); +void gic_irq_clear_active(unsigned int intid); +bool gic_irq_get_active(unsigned int intid); +void gic_irq_set_pending(unsigned int intid); +void gic_irq_clear_pending(unsigned int intid); +bool gic_irq_get_pending(unsigned int intid); +void gic_irq_set_config(unsigned int intid, bool is_edge); #endif /* SELFTEST_KVM_GIC_H */ diff --git a/tools/testing/selftests/kvm/include/aarch64/gic_v3.h b/tools/testing/selftests/kvm/include/aarch64/gic_v3.h index b51536d469a6..ba0886e8a2bb 100644 --- a/tools/testing/selftests/kvm/include/aarch64/gic_v3.h +++ b/tools/testing/selftests/kvm/include/aarch64/gic_v3.h @@ -16,8 +16,12 @@ #define GICD_IGROUPR 0x0080 #define GICD_ISENABLER 0x0100 #define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 #define GICD_ICACTIVER 0x0380 +#define GICD_ISACTIVER 0x0300 #define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 /* * The assumption is that the guest runs in a non-secure mode. 
@@ -49,16 +53,24 @@ #define GICR_IGROUPR0 GICD_IGROUPR #define GICR_ISENABLER0 GICD_ISENABLER #define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER #define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_ICENABLER GICD_ICENABLER +#define GICR_ICACTIVER GICD_ICACTIVER #define GICR_IPRIORITYR0 GICD_IPRIORITYR /* CPU interface registers */ #define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) #define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) #define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) +#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) +#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) #define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) #define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) +#define SYS_ICV_AP1R0_EL1 sys_reg(3, 0, 12, 9, 0) + #define ICC_PMR_DEF_PRIO 0xf0 #define ICC_SRE_EL1_SRE (1U << 0) diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic.c b/tools/testing/selftests/kvm/lib/aarch64/gic.c index fff4fc27504d..55668631d546 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/gic.c +++ b/tools/testing/selftests/kvm/lib/aarch64/gic.c @@ -93,3 +93,69 @@ void gic_set_eoi(unsigned int intid) GUEST_ASSERT(gic_common_ops); gic_common_ops->gic_write_eoir(intid); } + +void gic_set_dir(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_write_dir(intid); +} + +void gic_set_eoi_split(bool split) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_set_eoi_split(split); +} + +void gic_set_priority_mask(uint64_t pmr) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_set_priority_mask(pmr); +} + +void gic_set_priority(unsigned int intid, unsigned int prio) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_set_priority(intid, prio); +} + +void gic_irq_set_active(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_irq_set_active(intid); +} + +void gic_irq_clear_active(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_irq_clear_active(intid); +} + +bool gic_irq_get_active(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + return gic_common_ops->gic_irq_get_active(intid); +} + +void gic_irq_set_pending(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_irq_set_pending(intid); +} + +void gic_irq_clear_pending(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_irq_clear_pending(intid); +} + +bool gic_irq_get_pending(unsigned int intid) +{ + GUEST_ASSERT(gic_common_ops); + return gic_common_ops->gic_irq_get_pending(intid); +} + +void gic_irq_set_config(unsigned int intid, bool is_edge) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_irq_set_config(intid, is_edge); +} diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_private.h b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h index d81d739433dc..75d07313c893 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/gic_private.h +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h @@ -14,6 +14,17 @@ struct gic_common_ops { void (*gic_irq_disable)(unsigned int intid); uint64_t (*gic_read_iar)(void); void (*gic_write_eoir)(uint32_t irq); + void (*gic_write_dir)(uint32_t irq); + void (*gic_set_eoi_split)(bool split); + void (*gic_set_priority_mask)(uint64_t mask); + void (*gic_set_priority)(uint32_t intid, uint32_t prio); + void (*gic_irq_set_active)(uint32_t intid); + void (*gic_irq_clear_active)(uint32_t intid); + bool (*gic_irq_get_active)(uint32_t intid); + void (*gic_irq_set_pending)(uint32_t intid); + void 
(*gic_irq_clear_pending)(uint32_t intid); + bool (*gic_irq_get_pending)(uint32_t intid); + void (*gic_irq_set_config)(uint32_t intid, bool is_edge); }; extern const struct gic_common_ops gicv3_ops; diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c index 00e944fd8148..00f613c0583c 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c @@ -90,6 +90,29 @@ static void gicv3_write_eoir(uint32_t irq) isb(); } +static void gicv3_write_dir(uint32_t irq) +{ + write_sysreg_s(irq, SYS_ICC_DIR_EL1); + isb(); +} + +static void gicv3_set_priority_mask(uint64_t mask) +{ + write_sysreg_s(mask, SYS_ICC_PMR_EL1); +} + +static void gicv3_set_eoi_split(bool split) +{ + uint32_t val; + + /* All other fields are read-only, so no need to read CTLR first. In + * fact, the kernel does the same. + */ + val = split ? (1U << 1) : 0; + write_sysreg_s(val, SYS_ICC_CTLR_EL1); + isb(); +} + uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset) { void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base @@ -174,26 +197,70 @@ static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset, return val; } -static void gicv3_irq_enable(unsigned int intid) +static void gicv3_set_priority(uint32_t intid, uint32_t prio) +{ + gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio); +} + +/* Sets the intid to be level-sensitive or edge-triggered. */ +static void gicv3_irq_set_config(uint32_t intid, bool is_edge) +{ + uint32_t val; + + /* N/A for private interrupts. */ + GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE); + val = is_edge ? 2 : 0; + gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val); +} + +static void gicv3_irq_enable(uint32_t intid) { bool is_spi = get_intid_range(intid) == SPI_RANGE; - unsigned int val = 1; uint32_t cpu = guest_get_vcpuid(); - gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, val); + gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1); gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); } -static void gicv3_irq_disable(unsigned int intid) +static void gicv3_irq_disable(uint32_t intid) { bool is_spi = get_intid_range(intid) == SPI_RANGE; - uint32_t val = 1; uint32_t cpu = guest_get_vcpuid(); - gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, val); + gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1); gicv3_wait_for_rwp(is_spi ? 
DIST_BIT : cpu); } +static void gicv3_irq_set_active(uint32_t intid) +{ + gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1); +} + +static void gicv3_irq_clear_active(uint32_t intid) +{ + gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1); +} + +static bool gicv3_irq_get_active(uint32_t intid) +{ + return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1); +} + +static void gicv3_irq_set_pending(uint32_t intid) +{ + gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1); +} + +static void gicv3_irq_clear_pending(uint32_t intid) +{ + gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1); +} + +static bool gicv3_irq_get_pending(uint32_t intid) +{ + return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1); +} + static void gicv3_enable_redist(void *redist_base) { uint32_t val = readl(redist_base + GICR_WAKER); @@ -315,4 +382,15 @@ const struct gic_common_ops gicv3_ops = { .gic_irq_disable = gicv3_irq_disable, .gic_read_iar = gicv3_read_iar, .gic_write_eoir = gicv3_write_eoir, + .gic_write_dir = gicv3_write_dir, + .gic_set_priority_mask = gicv3_set_priority_mask, + .gic_set_eoi_split = gicv3_set_eoi_split, + .gic_set_priority = gicv3_set_priority, + .gic_irq_set_active = gicv3_irq_set_active, + .gic_irq_clear_active = gicv3_irq_clear_active, + .gic_irq_get_active = gicv3_irq_get_active, + .gic_irq_set_pending = gicv3_irq_set_pending, + .gic_irq_clear_pending = gicv3_irq_clear_pending, + .gic_irq_get_pending = gicv3_irq_get_pending, + .gic_irq_set_config = gicv3_irq_set_config, }; From 227895ed6d03b46fa619614a41a3b8e1074d6151 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:53 -0800 Subject: [PATCH 189/268] KVM: selftests: Add kvm_irq_line library function Add an architecture independent wrapper function for the KVM_IRQ_LINE ioctl. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-5-ricarkol@google.com --- .../testing/selftests/kvm/include/kvm_util.h | 2 ++ tools/testing/selftests/kvm/lib/kvm_util.c | 21 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 6a1a37f30494..4ba8fd44cf95 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -240,6 +240,8 @@ int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, void *val, bool write); int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, void *val, bool write); +void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); +int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, uint64_t attr); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 8f2e0bb1ef96..9f7210c2d75e 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -2086,6 +2086,27 @@ int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, return ret; } +/* + * IRQ related functions. 
+ */ + +int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) +{ + struct kvm_irq_level irq_level = { + .irq = irq, + .level = level, + }; + + return _vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); +} + +void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) +{ + int ret = _kvm_irq_line(vm, irq, level); + + TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno); +} + /* * VM Dump * From e95def3a904dea467309bbe382a9032d301ba9cd Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:54 -0800 Subject: [PATCH 190/268] KVM: selftests: aarch64: Add vGIC library functions to deal with vIRQ state Add a set of library functions for userspace code in selftests to deal with vIRQ state (i.e., ioctl wrappers). Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-6-ricarkol@google.com --- .../selftests/kvm/include/aarch64/gic.h | 10 ++ .../selftests/kvm/include/aarch64/vgic.h | 14 ++- .../testing/selftests/kvm/lib/aarch64/vgic.c | 93 +++++++++++++++++++ 3 files changed, 116 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/include/aarch64/gic.h b/tools/testing/selftests/kvm/include/aarch64/gic.h index c932cf3d0771..b217ea17cac5 100644 --- a/tools/testing/selftests/kvm/include/aarch64/gic.h +++ b/tools/testing/selftests/kvm/include/aarch64/gic.h @@ -11,6 +11,16 @@ enum gic_type { GIC_TYPE_MAX, }; +#define MIN_SGI 0 +#define MIN_PPI 16 +#define MIN_SPI 32 +#define MAX_SPI 1019 +#define IAR_SPURIOUS 1023 + +#define INTID_IS_SGI(intid) (0 <= (intid) && (intid) < MIN_PPI) +#define INTID_IS_PPI(intid) (MIN_PPI <= (intid) && (intid) < MIN_SPI) +#define INTID_IS_SPI(intid) (MIN_SPI <= (intid) && (intid) <= MAX_SPI) + void gic_init(enum gic_type type, unsigned int nr_cpus, void *dist_base, void *redist_base); void gic_irq_enable(unsigned int intid); diff --git a/tools/testing/selftests/kvm/include/aarch64/vgic.h b/tools/testing/selftests/kvm/include/aarch64/vgic.h index 0ecfb253893c..ec8744bb2d4b 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vgic.h +++ b/tools/testing/selftests/kvm/include/aarch64/vgic.h @@ -17,4 +17,16 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint64_t gicd_base_gpa, uint64_t gicr_base_gpa); -#endif /* SELFTEST_KVM_VGIC_H */ +#define VGIC_MAX_RESERVED 1023 + +void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level); +int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level); + +void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); +int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); + +/* The vcpu arg only applies to private interrupts. 
*/ +void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu); +void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu); + +#endif // SELFTEST_KVM_VGIC_H diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c index b9b271ff520d..a1f1f6c8e2e0 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c @@ -5,11 +5,14 @@ #include #include +#include #include #include "kvm_util.h" #include "../kvm_util_internal.h" #include "vgic.h" +#include "gic.h" +#include "gic_v3.h" /* * vGIC-v3 default host setup @@ -68,3 +71,93 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, return gic_fd; } + +/* should only work for level sensitive interrupts */ +int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) +{ + uint64_t attr = 32 * (intid / 32); + uint64_t index = intid % 32; + uint64_t val; + int ret; + + ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, + attr, &val, false); + if (ret != 0) + return ret; + + val |= 1U << index; + ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, + attr, &val, true); + return ret; +} + +void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) +{ + int ret = _kvm_irq_set_level_info(gic_fd, intid, level); + + TEST_ASSERT(ret == 0, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO failed, " + "rc: %i errno: %i", ret, errno); +} + +int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) +{ + uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK; + + if (INTID_IS_PPI(intid)) + irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT; + else if (INTID_IS_SPI(intid)) + irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT; + else + TEST_FAIL("KVM_IRQ_LINE can't be used with SGIs."); + + return _kvm_irq_line(vm, irq, level); +} + +void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) +{ + int ret = _kvm_arm_irq_line(vm, intid, level); + + TEST_ASSERT(ret == 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", + ret, errno); +} + +static void vgic_poke_irq(int gic_fd, uint32_t intid, + uint32_t vcpu, uint64_t reg_off) +{ + uint64_t reg = intid / 32; + uint64_t index = intid % 32; + uint64_t attr = reg_off + reg * 4; + uint64_t val; + bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid); + + /* Check that the addr part of the attr is within 32 bits. */ + assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK); + + uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS + : KVM_DEV_ARM_VGIC_GRP_DIST_REGS; + + if (intid_is_private) { + /* TODO: only vcpu 0 implemented for now. */ + assert(vcpu == 0); + attr += SZ_64K; + } + + /* All calls will succeed, even with invalid intid's, as long as the + * addr part of the attr is within 32 bits (checked above). An invalid + * intid will just make the read/writes point to above the intended + * register space (i.e., ICPENDR after ISPENDR). 
+ */ + kvm_device_access(gic_fd, group, attr, &val, false); + val |= 1ULL << index; + kvm_device_access(gic_fd, group, attr, &val, true); +} + +void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu) +{ + vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR); +} + +void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu) +{ + vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER); +} From 50b020cdb7f72077e16133f1d88c9359cf415a53 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:55 -0800 Subject: [PATCH 191/268] KVM: selftests: aarch64: Add vgic_irq to test userspace IRQ injection Add a new KVM selftest, vgic_irq, for testing userspace IRQ injection. This particular test injects an SPI using KVM_IRQ_LINE on GICv3 and verifies that the IRQ is handled in the guest. The next commits will add more types of IRQs and different modes. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-7-ricarkol@google.com --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 1 + .../testing/selftests/kvm/aarch64/vgic_irq.c | 244 ++++++++++++++++++ 3 files changed, 246 insertions(+) create mode 100644 tools/testing/selftests/kvm/aarch64/vgic_irq.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 3763105029fb..f75d4c433fc3 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -4,6 +4,7 @@ /aarch64/get-reg-list /aarch64/psci_cpu_on_test /aarch64/vgic_init +/aarch64/vgic_irq /s390x/memop /s390x/resets /s390x/sync_regs_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index c4e34717826a..758b5d0e352e 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -94,6 +94,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test TEST_GEN_PROGS_aarch64 += aarch64/vgic_init +TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq TEST_GEN_PROGS_aarch64 += demand_paging_test TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_perf_test diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c new file mode 100644 index 000000000000..e13e87427038 --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * vgic_irq.c - Test userspace injection of IRQs + * + * This test validates the injection of IRQs from userspace using various + * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the + * host to inject a specific intid via a GUEST_SYNC call, and then checks that + * it received it. + */ + +#include +#include +#include + +#include "processor.h" +#include "test_util.h" +#include "kvm_util.h" +#include "gic.h" +#include "gic_v3.h" +#include "vgic.h" + +#define GICD_BASE_GPA 0x08000000ULL +#define GICR_BASE_GPA 0x080A0000ULL +#define VCPU_ID 0 + +/* + * KVM implements 32 priority levels: + * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8 + * + * Note that these macros will still be correct in the case that KVM implements + * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2. 
+ */ +#define KVM_NUM_PRIOS 32 +#define KVM_PRIO_SHIFT 3 /* steps of 8 = 1 << 3 */ +#define LOWEST_PRIO (KVM_NUM_PRIOS - 1) +#define CPU_PRIO_MASK (LOWEST_PRIO << KVM_PRIO_SHIFT) /* 0xf8 */ +#define IRQ_DEFAULT_PRIO (LOWEST_PRIO - 1) +#define IRQ_DEFAULT_PRIO_REG (IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */ + +static void *dist = (void *)GICD_BASE_GPA; +static void *redist = (void *)GICR_BASE_GPA; + +/* + * The kvm_inject_* utilities are used by the guest to ask the host to inject + * interrupts (e.g., using the KVM_IRQ_LINE ioctl). + */ + +typedef enum { + KVM_INJECT_EDGE_IRQ_LINE = 1, +} kvm_inject_cmd; + +struct kvm_inject_args { + kvm_inject_cmd cmd; + uint32_t intid; +}; + +/* Used on the guest side to perform the hypercall. */ +static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t intid); + +/* Used on the host side to get the hypercall info. */ +static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, + struct kvm_inject_args *args); + +/* Shared between the guest main thread and the IRQ handlers. */ +volatile uint64_t irq_handled; +volatile uint32_t irqnr_received[MAX_SPI + 1]; + +static void reset_stats(void) +{ + int i; + + irq_handled = 0; + for (i = 0; i <= MAX_SPI; i++) + irqnr_received[i] = 0; +} + +static uint64_t gic_read_ap1r0(void) +{ + uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1); + + dsb(sy); + return reg; +} + +static void guest_irq_handler(struct ex_regs *regs) +{ + uint32_t intid = gic_get_and_ack_irq(); + + if (intid == IAR_SPURIOUS) + return; + + GUEST_ASSERT(gic_irq_get_active(intid)); + + GUEST_ASSERT(!gic_irq_get_pending(intid)); + + GUEST_ASSERT(intid < MAX_SPI); + irqnr_received[intid] += 1; + irq_handled += 1; + + gic_set_eoi(intid); + GUEST_ASSERT_EQ(gic_read_ap1r0(), 0); + + GUEST_ASSERT(!gic_irq_get_active(intid)); + GUEST_ASSERT(!gic_irq_get_pending(intid)); +} + +static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t intid) +{ + struct kvm_inject_args args = { + .cmd = cmd, + .intid = intid, + }; + GUEST_SYNC(&args); +} + +#define GUEST_ASSERT_IAR_EMPTY() \ +do { \ + uint32_t _intid; \ + _intid = gic_get_and_ack_irq(); \ + GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \ +} while (0) + +static void test_kvm_irq_line(uint32_t intid) +{ + reset_stats(); + + asm volatile("msr daifset, #2" : : : "memory"); + kvm_inject_call(KVM_INJECT_EDGE_IRQ_LINE, intid); + + while (irq_handled < 1) { + asm volatile("wfi\n" + "msr daifclr, #2\n" + /* handle IRQ */ + "msr daifset, #2\n" + : : : "memory"); + } + asm volatile("msr daifclr, #2" : : : "memory"); + + GUEST_ASSERT_EQ(irq_handled, 1); + GUEST_ASSERT_EQ(irqnr_received[intid], 1); + GUEST_ASSERT_IAR_EMPTY(); +} + +static void guest_code(void) +{ + uint32_t i; + uint32_t nr_irqs = 64; /* absolute minimum number of IRQs supported. 
*/ + + gic_init(GIC_V3, 1, dist, redist); + + for (i = 0; i < nr_irqs; i++) { + gic_irq_enable(i); + gic_set_priority(i, IRQ_DEFAULT_PRIO_REG); + } + + gic_set_priority_mask(CPU_PRIO_MASK); + + local_irq_enable(); + + test_kvm_irq_line(MIN_SPI); + + GUEST_DONE(); +} + +static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, + struct kvm_inject_args *inject_args) +{ + kvm_inject_cmd cmd = inject_args->cmd; + uint32_t intid = inject_args->intid; + + switch (cmd) { + case KVM_INJECT_EDGE_IRQ_LINE: + kvm_arm_irq_line(vm, intid, 1); + kvm_arm_irq_line(vm, intid, 0); + break; + default: + break; + } +} + +static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, + struct kvm_inject_args *args) +{ + struct kvm_inject_args *kvm_args_hva; + vm_vaddr_t kvm_args_gva; + + kvm_args_gva = uc->args[1]; + kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva); + memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args)); +} + + +static void test_vgic(void) +{ + struct ucall uc; + int gic_fd; + struct kvm_vm *vm; + struct kvm_inject_args inject_args; + + vm = vm_create_default(VCPU_ID, 0, guest_code); + ucall_init(vm, NULL); + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + + gic_fd = vgic_v3_setup(vm, 1, GICD_BASE_GPA, GICR_BASE_GPA); + + vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, + guest_irq_handler); + + while (1) { + vcpu_run(vm, VCPU_ID); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_SYNC: + kvm_inject_get_call(vm, &uc, &inject_args); + run_guest_cmd(vm, gic_fd, &inject_args); + break; + case UCALL_ABORT: + TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx", + (const char *)uc.args[0], + __FILE__, uc.args[1], uc.args[2], uc.args[3]); + break; + case UCALL_DONE: + goto done; + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + } + +done: + close(gic_fd); + kvm_vm_free(vm); +} + +int main(int ac, char **av) +{ + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + test_vgic(); + + return 0; +} From e1cb399eed1eda29568b17bdb75d16cee1fc3da4 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:56 -0800 Subject: [PATCH 192/268] KVM: selftests: aarch64: Abstract the injection functions in vgic_irq Build an abstraction around the injection functions, so the preparation and checking around the actual injection can be shared between tests. All functions are stored as pointers in arrays of kvm_inject_desc's which include the pointer and what kind of interrupts they can inject. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-8-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 39 +++++++++++++++++-- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index e13e87427038..f5d76fef22f0 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -57,10 +57,28 @@ struct kvm_inject_args { /* Used on the guest side to perform the hypercall. */ static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t intid); +#define KVM_INJECT(cmd, intid) \ + kvm_inject_call(cmd, intid) + /* Used on the host side to get the hypercall info. */ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, struct kvm_inject_args *args); +struct kvm_inject_desc { + kvm_inject_cmd cmd; + /* can inject PPIs, PPIs, and/or SPIs. 
*/ + bool sgi, ppi, spi; +}; + +static struct kvm_inject_desc inject_edge_fns[] = { + /* sgi ppi spi */ + { KVM_INJECT_EDGE_IRQ_LINE, false, false, true }, + { 0, }, +}; + +#define for_each_inject_fn(t, f) \ + for ((f) = (t); (f)->cmd; (f)++) + /* Shared between the guest main thread and the IRQ handlers. */ volatile uint64_t irq_handled; volatile uint32_t irqnr_received[MAX_SPI + 1]; @@ -120,12 +138,12 @@ do { \ GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \ } while (0) -static void test_kvm_irq_line(uint32_t intid) +static void guest_inject(uint32_t intid, kvm_inject_cmd cmd) { reset_stats(); asm volatile("msr daifset, #2" : : : "memory"); - kvm_inject_call(KVM_INJECT_EDGE_IRQ_LINE, intid); + KVM_INJECT(cmd, intid); while (irq_handled < 1) { asm volatile("wfi\n" @@ -141,10 +159,23 @@ static void test_kvm_irq_line(uint32_t intid) GUEST_ASSERT_IAR_EMPTY(); } +static void test_injection(struct kvm_inject_desc *f) +{ + if (f->sgi) + guest_inject(MIN_SGI, f->cmd); + + if (f->ppi) + guest_inject(MIN_PPI, f->cmd); + + if (f->spi) + guest_inject(MIN_SPI, f->cmd); +} + static void guest_code(void) { uint32_t i; uint32_t nr_irqs = 64; /* absolute minimum number of IRQs supported. */ + struct kvm_inject_desc *f; gic_init(GIC_V3, 1, dist, redist); @@ -157,7 +188,9 @@ static void guest_code(void) local_irq_enable(); - test_kvm_irq_line(MIN_SPI); + /* Start the tests. */ + for_each_inject_fn(inject_edge_fns, f) + test_injection(f); GUEST_DONE(); } From e5410ee2806d74a749fa39ca6fdb73be2f88611f Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:57 -0800 Subject: [PATCH 193/268] KVM: selftests: aarch64: Cmdline arg to set number of IRQs in vgic_irq test Add the ability to specify the number of vIRQs exposed by KVM (arg defaults to 64). Then extend the KVM_IRQ_LINE test by injecting all available SPIs at once (specified by the nr-irqs arg). As a bonus, inject all SGIs at once as well. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-9-ricarkol@google.com --- .../selftests/kvm/aarch64/arch_timer.c | 2 +- .../testing/selftests/kvm/aarch64/vgic_irq.c | 151 ++++++++++++++---- .../selftests/kvm/include/aarch64/vgic.h | 2 +- .../testing/selftests/kvm/lib/aarch64/vgic.c | 9 +- 4 files changed, 128 insertions(+), 36 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c index bf6a45b0b8dc..9ad38bd360a4 100644 --- a/tools/testing/selftests/kvm/aarch64/arch_timer.c +++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c @@ -382,7 +382,7 @@ static struct kvm_vm *test_vm_create(void) ucall_init(vm, NULL); test_init_timer_irq(vm); - vgic_v3_setup(vm, nr_vcpus, GICD_BASE_GPA, GICR_BASE_GPA); + vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA); /* Make all the test's cmdline args visible to the guest */ sync_global_to_guest(vm, test_args); diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index f5d76fef22f0..0b89a29dfe79 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -23,6 +23,14 @@ #define GICR_BASE_GPA 0x080A0000ULL #define VCPU_ID 0 +/* + * Stores the user specified args; it's passed to the guest and to every test + * function. + */ +struct test_args { + uint32_t nr_irqs; /* number of KVM supported IRQs. 
*/ +}; + /* * KVM implements 32 priority levels: * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8 @@ -51,14 +59,18 @@ typedef enum { struct kvm_inject_args { kvm_inject_cmd cmd; - uint32_t intid; + uint32_t first_intid; + uint32_t num; }; /* Used on the guest side to perform the hypercall. */ -static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t intid); +static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, uint32_t num); #define KVM_INJECT(cmd, intid) \ - kvm_inject_call(cmd, intid) + kvm_inject_call(cmd, intid, 1) + +#define KVM_INJECT_MULTI(cmd, intid, num) \ + kvm_inject_call(cmd, intid, num) /* Used on the host side to get the hypercall info. */ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, @@ -122,11 +134,12 @@ static void guest_irq_handler(struct ex_regs *regs) GUEST_ASSERT(!gic_irq_get_pending(intid)); } -static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t intid) +static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, uint32_t num) { struct kvm_inject_args args = { .cmd = cmd, - .intid = intid, + .first_intid = first_intid, + .num = num, }; GUEST_SYNC(&args); } @@ -138,14 +151,30 @@ do { \ GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \ } while (0) -static void guest_inject(uint32_t intid, kvm_inject_cmd cmd) +static void reset_priorities(struct test_args *args) { + int i; + + for (i = 0; i < args->nr_irqs; i++) + gic_set_priority(i, IRQ_DEFAULT_PRIO_REG); +} + +static void guest_inject(struct test_args *args, + uint32_t first_intid, uint32_t num, + kvm_inject_cmd cmd) +{ + uint32_t i; + reset_stats(); - asm volatile("msr daifset, #2" : : : "memory"); - KVM_INJECT(cmd, intid); + /* Cycle over all priorities to make things more interesting. */ + for (i = first_intid; i < num + first_intid; i++) + gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3); - while (irq_handled < 1) { + asm volatile("msr daifset, #2" : : : "memory"); + KVM_INJECT_MULTI(cmd, first_intid, num); + + while (irq_handled < num) { asm volatile("wfi\n" "msr daifclr, #2\n" /* handle IRQ */ @@ -154,57 +183,72 @@ static void guest_inject(uint32_t intid, kvm_inject_cmd cmd) } asm volatile("msr daifclr, #2" : : : "memory"); - GUEST_ASSERT_EQ(irq_handled, 1); - GUEST_ASSERT_EQ(irqnr_received[intid], 1); + GUEST_ASSERT_EQ(irq_handled, num); + for (i = first_intid; i < num + first_intid; i++) + GUEST_ASSERT_EQ(irqnr_received[i], 1); GUEST_ASSERT_IAR_EMPTY(); + + reset_priorities(args); } -static void test_injection(struct kvm_inject_desc *f) +static void test_injection(struct test_args *args, struct kvm_inject_desc *f) { - if (f->sgi) - guest_inject(MIN_SGI, f->cmd); + uint32_t nr_irqs = args->nr_irqs; + + if (f->sgi) { + guest_inject(args, MIN_SGI, 1, f->cmd); + guest_inject(args, 0, 16, f->cmd); + } if (f->ppi) - guest_inject(MIN_PPI, f->cmd); + guest_inject(args, MIN_PPI, 1, f->cmd); - if (f->spi) - guest_inject(MIN_SPI, f->cmd); + if (f->spi) { + guest_inject(args, MIN_SPI, 1, f->cmd); + guest_inject(args, nr_irqs - 1, 1, f->cmd); + guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd); + } } -static void guest_code(void) +static void guest_code(struct test_args args) { - uint32_t i; - uint32_t nr_irqs = 64; /* absolute minimum number of IRQs supported. 
*/ + uint32_t i, nr_irqs = args.nr_irqs; struct kvm_inject_desc *f; gic_init(GIC_V3, 1, dist, redist); - for (i = 0; i < nr_irqs; i++) { + for (i = 0; i < nr_irqs; i++) gic_irq_enable(i); - gic_set_priority(i, IRQ_DEFAULT_PRIO_REG); - } + reset_priorities(&args); gic_set_priority_mask(CPU_PRIO_MASK); local_irq_enable(); /* Start the tests. */ for_each_inject_fn(inject_edge_fns, f) - test_injection(f); + test_injection(&args, f); GUEST_DONE(); } static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, - struct kvm_inject_args *inject_args) + struct kvm_inject_args *inject_args, + struct test_args *test_args) { kvm_inject_cmd cmd = inject_args->cmd; - uint32_t intid = inject_args->intid; + uint32_t intid = inject_args->first_intid; + uint32_t num = inject_args->num; + uint32_t i; + + assert(intid < UINT_MAX - num); switch (cmd) { case KVM_INJECT_EDGE_IRQ_LINE: - kvm_arm_irq_line(vm, intid, 1); - kvm_arm_irq_line(vm, intid, 0); + for (i = intid; i < intid + num; i++) + kvm_arm_irq_line(vm, i, 1); + for (i = intid; i < intid + num; i++) + kvm_arm_irq_line(vm, i, 0); break; default: break; @@ -222,21 +266,35 @@ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args)); } +static void print_args(struct test_args *args) +{ + printf("nr-irqs=%d\n", args->nr_irqs); +} -static void test_vgic(void) +static void test_vgic(uint32_t nr_irqs) { struct ucall uc; int gic_fd; struct kvm_vm *vm; struct kvm_inject_args inject_args; + struct test_args args = { + .nr_irqs = nr_irqs, + }; + + print_args(&args); + vm = vm_create_default(VCPU_ID, 0, guest_code); ucall_init(vm, NULL); vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vm, VCPU_ID); - gic_fd = vgic_v3_setup(vm, 1, GICD_BASE_GPA, GICR_BASE_GPA); + /* Setup the guest args page (so it gets the args). 
*/ + vcpu_args_set(vm, 0, 1, args); + + gic_fd = vgic_v3_setup(vm, 1, nr_irqs, + GICD_BASE_GPA, GICR_BASE_GPA); vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler); @@ -247,7 +305,7 @@ static void test_vgic(void) switch (get_ucall(vm, VCPU_ID, &uc)) { case UCALL_SYNC: kvm_inject_get_call(vm, &uc, &inject_args); - run_guest_cmd(vm, gic_fd, &inject_args); + run_guest_cmd(vm, gic_fd, &inject_args, &args); break; case UCALL_ABORT: TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx", @@ -266,12 +324,39 @@ done: kvm_vm_free(vm); } -int main(int ac, char **av) +static void help(const char *name) { + printf( + "\n" + "usage: %s [-n num_irqs]\n", name); + printf(" -n: specify the number of IRQs to configure the vgic with.\n"); + puts(""); + exit(1); +} + +int main(int argc, char **argv) +{ + uint32_t nr_irqs = 64; + int opt; + /* Tell stdout not to buffer its content */ setbuf(stdout, NULL); - test_vgic(); + while ((opt = getopt(argc, argv, "hg:n:")) != -1) { + switch (opt) { + case 'n': + nr_irqs = atoi(optarg); + if (nr_irqs > 1024 || nr_irqs % 32) + help(argv[0]); + break; + case 'h': + default: + help(argv[0]); + break; + } + } + + test_vgic(nr_irqs); return 0; } diff --git a/tools/testing/selftests/kvm/include/aarch64/vgic.h b/tools/testing/selftests/kvm/include/aarch64/vgic.h index ec8744bb2d4b..ce6f0383c1a1 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vgic.h +++ b/tools/testing/selftests/kvm/include/aarch64/vgic.h @@ -14,7 +14,7 @@ ((uint64_t)(flags) << 12) | \ index) -int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs, uint64_t gicd_base_gpa, uint64_t gicr_base_gpa); #define VGIC_MAX_RESERVED 1023 diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c index a1f1f6c8e2e0..84206d7c92b4 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c @@ -31,7 +31,7 @@ * redistributor regions of the guest. Since it depends on the number of * vCPUs for the VM, it must be called after all the vCPUs have been created. */ -int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs, uint64_t gicd_base_gpa, uint64_t gicr_base_gpa) { int gic_fd; @@ -53,6 +53,13 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, /* Distributor setup */ gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + + kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, + 0, &nr_irqs, true); + + kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); + kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true); nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE); From 8a35b2877d9a15fa885cea744f1e578e035856fe Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:58 -0800 Subject: [PATCH 194/268] KVM: selftests: aarch64: Cmdline arg to set EOI mode in vgic_irq Add a new cmdline arg to set the EOI mode for all vgic_irq tests. This specifies whether a write to EOIR will deactivate IRQs or not. 
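Concretely, with split EOI selected (for instance by running the test with "-e 1"), EOIR only performs the priority drop and the handler must deactivate through DIR itself; schematically (sketch mirroring the handler change below):

	gic_set_eoi(intid);		/* priority drop via ICC_EOIR1_EL1 */
	if (eoi_split)
		gic_set_dir(intid);	/* explicit deactivation via ICC_DIR_EL1 */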
Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-10-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 58 ++++++++++++++++--- 1 file changed, 50 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index 0b89a29dfe79..3e18fa224280 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -29,6 +29,7 @@ */ struct test_args { uint32_t nr_irqs; /* number of KVM supported IRQs. */ + bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */ }; /* @@ -112,7 +113,7 @@ static uint64_t gic_read_ap1r0(void) return reg; } -static void guest_irq_handler(struct ex_regs *regs) +static void guest_irq_generic_handler(bool eoi_split) { uint32_t intid = gic_get_and_ack_irq(); @@ -129,6 +130,8 @@ static void guest_irq_handler(struct ex_regs *regs) gic_set_eoi(intid); GUEST_ASSERT_EQ(gic_read_ap1r0(), 0); + if (eoi_split) + gic_set_dir(intid); GUEST_ASSERT(!gic_irq_get_active(intid)); GUEST_ASSERT(!gic_irq_get_pending(intid)); @@ -151,6 +154,24 @@ do { \ GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \ } while (0) +#define CAT_HELPER(a, b) a ## b +#define CAT(a, b) CAT_HELPER(a, b) +#define PREFIX guest_irq_handler_ +#define GUEST_IRQ_HANDLER_NAME(split) CAT(PREFIX, split) +#define GENERATE_GUEST_IRQ_HANDLER(split) \ +static void CAT(PREFIX, split)(struct ex_regs *regs) \ +{ \ + guest_irq_generic_handler(split); \ +} + +GENERATE_GUEST_IRQ_HANDLER(0); +GENERATE_GUEST_IRQ_HANDLER(1); + +static void (*guest_irq_handlers[2])(struct ex_regs *) = { + GUEST_IRQ_HANDLER_NAME(0), + GUEST_IRQ_HANDLER_NAME(1), +}; + static void reset_priorities(struct test_args *args) { int i; @@ -220,6 +241,8 @@ static void guest_code(struct test_args args) for (i = 0; i < nr_irqs; i++) gic_irq_enable(i); + gic_set_eoi_split(args.eoi_split); + reset_priorities(&args); gic_set_priority_mask(CPU_PRIO_MASK); @@ -268,10 +291,11 @@ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, static void print_args(struct test_args *args) { - printf("nr-irqs=%d\n", args->nr_irqs); + printf("nr-irqs=%d eoi-split=%d\n", + args->nr_irqs, args->eoi_split); } -static void test_vgic(uint32_t nr_irqs) +static void test_vgic(uint32_t nr_irqs, bool eoi_split) { struct ucall uc; int gic_fd; @@ -280,6 +304,7 @@ static void test_vgic(uint32_t nr_irqs) struct test_args args = { .nr_irqs = nr_irqs, + .eoi_split = eoi_split, }; print_args(&args); @@ -297,7 +322,7 @@ static void test_vgic(uint32_t nr_irqs) GICD_BASE_GPA, GICR_BASE_GPA); vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, - guest_irq_handler); + guest_irq_handlers[args.eoi_split]); while (1) { vcpu_run(vm, VCPU_ID); @@ -328,8 +353,11 @@ static void help(const char *name) { printf( "\n" - "usage: %s [-n num_irqs]\n", name); - printf(" -n: specify the number of IRQs to configure the vgic with.\n"); + "usage: %s [-n num_irqs] [-e eoi_split]\n", name); + printf(" -n: specify the number of IRQs to configure the vgic with. 
" + "It has to be a multiple of 32 and between 64 and 1024.\n"); + printf(" -e: if 1 then EOI is split into a write to DIR on top " + "of writing EOI.\n"); puts(""); exit(1); } @@ -337,18 +365,24 @@ static void help(const char *name) int main(int argc, char **argv) { uint32_t nr_irqs = 64; + bool default_args = true; int opt; + bool eoi_split = false; /* Tell stdout not to buffer its content */ setbuf(stdout, NULL); - while ((opt = getopt(argc, argv, "hg:n:")) != -1) { + while ((opt = getopt(argc, argv, "hn:e:")) != -1) { switch (opt) { case 'n': nr_irqs = atoi(optarg); if (nr_irqs > 1024 || nr_irqs % 32) help(argv[0]); break; + case 'e': + eoi_split = (bool)atoi(optarg); + default_args = false; + break; case 'h': default: help(argv[0]); @@ -356,7 +390,15 @@ int main(int argc, char **argv) } } - test_vgic(nr_irqs); + /* If the user just specified nr_irqs and/or gic_version, then run all + * combinations. + */ + if (default_args) { + test_vgic(nr_irqs, false /* eoi_split */); + test_vgic(nr_irqs, true /* eoi_split */); + } else { + test_vgic(nr_irqs, eoi_split); + } return 0; } From 0ad3ff4a6adc4922808ef8b2f91880c25195f509 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:38:59 -0800 Subject: [PATCH 195/268] KVM: selftests: aarch64: Add preemption tests in vgic_irq Add tests for IRQ preemption (having more than one activated IRQ at the same time). This test injects multiple concurrent IRQs and handles them without handling the actual exceptions. This is done by masking interrupts for the whole test. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-11-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 91 ++++++++++++++++++- 1 file changed, 90 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index 3e18fa224280..b9080aa75a14 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -41,6 +41,7 @@ struct test_args { */ #define KVM_NUM_PRIOS 32 #define KVM_PRIO_SHIFT 3 /* steps of 8 = 1 << 3 */ +#define KVM_PRIO_STEPS (1 << KVM_PRIO_SHIFT) /* 8 */ #define LOWEST_PRIO (KVM_NUM_PRIOS - 1) #define CPU_PRIO_MASK (LOWEST_PRIO << KVM_PRIO_SHIFT) /* 0xf8 */ #define IRQ_DEFAULT_PRIO (LOWEST_PRIO - 1) @@ -212,6 +213,74 @@ static void guest_inject(struct test_args *args, reset_priorities(args); } +/* + * Polls the IAR until it's not a spurious interrupt. + * + * This function should only be used in test_inject_preemption (with IRQs + * masked). + */ +static uint32_t wait_for_and_activate_irq(void) +{ + uint32_t intid; + + do { + asm volatile("wfi" : : : "memory"); + intid = gic_get_and_ack_irq(); + } while (intid == IAR_SPURIOUS); + + return intid; +} + +/* + * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and + * handle them without handling the actual exceptions. This is done by masking + * interrupts for the whole test. + */ +static void test_inject_preemption(struct test_args *args, + uint32_t first_intid, int num, + kvm_inject_cmd cmd) +{ + uint32_t intid, prio, step = KVM_PRIO_STEPS; + int i; + + /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs + * in descending order, so intid+1 can preempt intid. 
+ */ + for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) { + GUEST_ASSERT(prio >= 0); + intid = i + first_intid; + gic_set_priority(intid, prio); + } + + local_irq_disable(); + + for (i = 0; i < num; i++) { + uint32_t tmp; + intid = i + first_intid; + kvm_inject_call(cmd, intid, 1); + /* Each successive IRQ will preempt the previous one. */ + tmp = wait_for_and_activate_irq(); + GUEST_ASSERT_EQ(tmp, intid); + } + + /* finish handling the IRQs starting with the highest priority one. */ + for (i = 0; i < num; i++) { + intid = num - i - 1 + first_intid; + gic_set_eoi(intid); + if (args->eoi_split) + gic_set_dir(intid); + } + + local_irq_enable(); + + for (i = 0; i < num; i++) + GUEST_ASSERT(!gic_irq_get_active(i + first_intid)); + GUEST_ASSERT_EQ(gic_read_ap1r0(), 0); + GUEST_ASSERT_IAR_EMPTY(); + + reset_priorities(args); +} + static void test_injection(struct test_args *args, struct kvm_inject_desc *f) { uint32_t nr_irqs = args->nr_irqs; @@ -231,6 +300,24 @@ static void test_injection(struct test_args *args, struct kvm_inject_desc *f) } } +static void test_preemption(struct test_args *args, struct kvm_inject_desc *f) +{ + /* + * Test up to 4 levels of preemption. The reason is that KVM doesn't + * currently implement the ability to have more than the number-of-LRs + * number of concurrently active IRQs. The number of LRs implemented is + * IMPLEMENTATION DEFINED, however, it seems that most implement 4. + */ + if (f->sgi) + test_inject_preemption(args, MIN_SGI, 4, f->cmd); + + if (f->ppi) + test_inject_preemption(args, MIN_PPI, 4, f->cmd); + + if (f->spi) + test_inject_preemption(args, MIN_SPI, 4, f->cmd); +} + static void guest_code(struct test_args args) { uint32_t i, nr_irqs = args.nr_irqs; @@ -249,8 +336,10 @@ static void guest_code(struct test_args args) local_irq_enable(); /* Start the tests. */ - for_each_inject_fn(inject_edge_fns, f) + for_each_inject_fn(inject_edge_fns, f) { test_injection(&args, f); + test_preemption(&args, f); + } GUEST_DONE(); } From 92f2cc4aa7964d4d13681eeb38582bb989b01b98 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:39:00 -0800 Subject: [PATCH 196/268] KVM: selftests: aarch64: Level-sensitive interrupts tests in vgic_irq Add a cmdline arg for using level-sensitive interrupts (vs the default edge-triggered). Then move the handler into a generic handler function that takes the type of interrupt (level vs. edge) as an arg. When handling line-sensitive interrupts it sets the line to low after acknowledging the IRQ. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-12-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 118 +++++++++++++----- 1 file changed, 86 insertions(+), 32 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index b9080aa75a14..a20d225367a1 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -30,6 +30,7 @@ struct test_args { uint32_t nr_irqs; /* number of KVM supported IRQs. 
*/ bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */ + bool level_sensitive; /* 1 is level, 0 is edge */ }; /* @@ -57,27 +58,31 @@ static void *redist = (void *)GICR_BASE_GPA; typedef enum { KVM_INJECT_EDGE_IRQ_LINE = 1, + KVM_SET_IRQ_LINE, + KVM_SET_IRQ_LINE_HIGH, } kvm_inject_cmd; struct kvm_inject_args { kvm_inject_cmd cmd; uint32_t first_intid; uint32_t num; + int level; }; /* Used on the guest side to perform the hypercall. */ -static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, uint32_t num); - -#define KVM_INJECT(cmd, intid) \ - kvm_inject_call(cmd, intid, 1) - -#define KVM_INJECT_MULTI(cmd, intid, num) \ - kvm_inject_call(cmd, intid, num) +static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, + uint32_t num, int level); /* Used on the host side to get the hypercall info. */ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, struct kvm_inject_args *args); +#define KVM_INJECT(cmd, intid) \ + kvm_inject_call(cmd, intid, 1, -1 /* not used */) + +#define KVM_INJECT_MULTI(cmd, intid, num) \ + kvm_inject_call(cmd, intid, num, -1 /* not used */) + struct kvm_inject_desc { kvm_inject_cmd cmd; /* can inject PPIs, PPIs, and/or SPIs. */ @@ -90,6 +95,12 @@ static struct kvm_inject_desc inject_edge_fns[] = { { 0, }, }; +static struct kvm_inject_desc inject_level_fns[] = { + /* sgi ppi spi */ + { KVM_SET_IRQ_LINE_HIGH, false, true, true }, + { 0, }, +}; + #define for_each_inject_fn(t, f) \ for ((f) = (t); (f)->cmd; (f)++) @@ -114,7 +125,9 @@ static uint64_t gic_read_ap1r0(void) return reg; } -static void guest_irq_generic_handler(bool eoi_split) +static void guest_set_irq_line(uint32_t intid, uint32_t level); + +static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive) { uint32_t intid = gic_get_and_ack_irq(); @@ -123,7 +136,11 @@ static void guest_irq_generic_handler(bool eoi_split) GUEST_ASSERT(gic_irq_get_active(intid)); - GUEST_ASSERT(!gic_irq_get_pending(intid)); + if (!level_sensitive) + GUEST_ASSERT(!gic_irq_get_pending(intid)); + + if (level_sensitive) + guest_set_irq_line(intid, 0); GUEST_ASSERT(intid < MAX_SPI); irqnr_received[intid] += 1; @@ -138,12 +155,14 @@ static void guest_irq_generic_handler(bool eoi_split) GUEST_ASSERT(!gic_irq_get_pending(intid)); } -static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, uint32_t num) +static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, + uint32_t num, int level) { struct kvm_inject_args args = { .cmd = cmd, .first_intid = first_intid, .num = num, + .level = level, }; GUEST_SYNC(&args); } @@ -158,19 +177,21 @@ do { \ #define CAT_HELPER(a, b) a ## b #define CAT(a, b) CAT_HELPER(a, b) #define PREFIX guest_irq_handler_ -#define GUEST_IRQ_HANDLER_NAME(split) CAT(PREFIX, split) -#define GENERATE_GUEST_IRQ_HANDLER(split) \ -static void CAT(PREFIX, split)(struct ex_regs *regs) \ +#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev)) +#define GENERATE_GUEST_IRQ_HANDLER(split, lev) \ +static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs) \ { \ - guest_irq_generic_handler(split); \ + guest_irq_generic_handler(split, lev); \ } -GENERATE_GUEST_IRQ_HANDLER(0); -GENERATE_GUEST_IRQ_HANDLER(1); +GENERATE_GUEST_IRQ_HANDLER(0, 0); +GENERATE_GUEST_IRQ_HANDLER(0, 1); +GENERATE_GUEST_IRQ_HANDLER(1, 0); +GENERATE_GUEST_IRQ_HANDLER(1, 1); -static void (*guest_irq_handlers[2])(struct ex_regs *) = { - GUEST_IRQ_HANDLER_NAME(0), - GUEST_IRQ_HANDLER_NAME(1), +static void (*guest_irq_handlers[2][2])(struct ex_regs *) = { + 
{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),}, + {GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),}, }; static void reset_priorities(struct test_args *args) @@ -181,6 +202,11 @@ static void reset_priorities(struct test_args *args) gic_set_priority(i, IRQ_DEFAULT_PRIO_REG); } +static void guest_set_irq_line(uint32_t intid, uint32_t level) +{ + kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level); +} + static void guest_inject(struct test_args *args, uint32_t first_intid, uint32_t num, kvm_inject_cmd cmd) @@ -257,10 +283,12 @@ static void test_inject_preemption(struct test_args *args, for (i = 0; i < num; i++) { uint32_t tmp; intid = i + first_intid; - kvm_inject_call(cmd, intid, 1); + KVM_INJECT(cmd, intid); /* Each successive IRQ will preempt the previous one. */ tmp = wait_for_and_activate_irq(); GUEST_ASSERT_EQ(tmp, intid); + if (args->level_sensitive) + guest_set_irq_line(intid, 0); } /* finish handling the IRQs starting with the highest priority one. */ @@ -321,22 +349,29 @@ static void test_preemption(struct test_args *args, struct kvm_inject_desc *f) static void guest_code(struct test_args args) { uint32_t i, nr_irqs = args.nr_irqs; - struct kvm_inject_desc *f; + bool level_sensitive = args.level_sensitive; + struct kvm_inject_desc *f, *inject_fns; gic_init(GIC_V3, 1, dist, redist); for (i = 0; i < nr_irqs; i++) gic_irq_enable(i); + for (i = MIN_SPI; i < nr_irqs; i++) + gic_irq_set_config(i, !args.level_sensitive); + gic_set_eoi_split(args.eoi_split); reset_priorities(&args); gic_set_priority_mask(CPU_PRIO_MASK); + inject_fns = level_sensitive ? inject_level_fns + : inject_edge_fns; + local_irq_enable(); /* Start the tests. */ - for_each_inject_fn(inject_edge_fns, f) { + for_each_inject_fn(inject_fns, f) { test_injection(&args, f); test_preemption(&args, f); } @@ -351,6 +386,7 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, kvm_inject_cmd cmd = inject_args->cmd; uint32_t intid = inject_args->first_intid; uint32_t num = inject_args->num; + int level = inject_args->level; uint32_t i; assert(intid < UINT_MAX - num); @@ -362,6 +398,14 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, for (i = intid; i < intid + num; i++) kvm_arm_irq_line(vm, i, 0); break; + case KVM_SET_IRQ_LINE: + for (i = intid; i < intid + num; i++) + kvm_arm_irq_line(vm, i, level); + break; + case KVM_SET_IRQ_LINE_HIGH: + for (i = intid; i < intid + num; i++) + kvm_arm_irq_line(vm, i, 1); + break; default: break; } @@ -380,11 +424,12 @@ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, static void print_args(struct test_args *args) { - printf("nr-irqs=%d eoi-split=%d\n", - args->nr_irqs, args->eoi_split); + printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n", + args->nr_irqs, args->level_sensitive, + args->eoi_split); } -static void test_vgic(uint32_t nr_irqs, bool eoi_split) +static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) { struct ucall uc; int gic_fd; @@ -393,6 +438,7 @@ static void test_vgic(uint32_t nr_irqs, bool eoi_split) struct test_args args = { .nr_irqs = nr_irqs, + .level_sensitive = level_sensitive, .eoi_split = eoi_split, }; @@ -411,7 +457,7 @@ static void test_vgic(uint32_t nr_irqs, bool eoi_split) GICD_BASE_GPA, GICR_BASE_GPA); vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, - guest_irq_handlers[args.eoi_split]); + guest_irq_handlers[args.eoi_split][args.level_sensitive]); while (1) { vcpu_run(vm, VCPU_ID); @@ -442,11 +488,12 @@ static void help(const char *name) { printf( "\n" - "usage: %s [-n 
num_irqs] [-e eoi_split]\n", name); - printf(" -n: specify the number of IRQs to configure the vgic with. " + "usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name); + printf(" -n: specify number of IRQs to setup the vgic with. " "It has to be a multiple of 32 and between 64 and 1024.\n"); printf(" -e: if 1 then EOI is split into a write to DIR on top " "of writing EOI.\n"); + printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0)."); puts(""); exit(1); } @@ -455,13 +502,14 @@ int main(int argc, char **argv) { uint32_t nr_irqs = 64; bool default_args = true; + bool level_sensitive = false; int opt; bool eoi_split = false; /* Tell stdout not to buffer its content */ setbuf(stdout, NULL); - while ((opt = getopt(argc, argv, "hn:e:")) != -1) { + while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) { switch (opt) { case 'n': nr_irqs = atoi(optarg); @@ -472,6 +520,10 @@ int main(int argc, char **argv) eoi_split = (bool)atoi(optarg); default_args = false; break; + case 'l': + level_sensitive = (bool)atoi(optarg); + default_args = false; + break; case 'h': default: help(argv[0]); @@ -483,10 +535,12 @@ int main(int argc, char **argv) * combinations. */ if (default_args) { - test_vgic(nr_irqs, false /* eoi_split */); - test_vgic(nr_irqs, true /* eoi_split */); + test_vgic(nr_irqs, false /* level */, false /* eoi_split */); + test_vgic(nr_irqs, false /* level */, true /* eoi_split */); + test_vgic(nr_irqs, true /* level */, false /* eoi_split */); + test_vgic(nr_irqs, true /* level */, true /* eoi_split */); } else { - test_vgic(nr_irqs, eoi_split); + test_vgic(nr_irqs, level_sensitive, eoi_split); } return 0; From 6830fa915912587a7aa304bade01b366cf0b9214 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:39:01 -0800 Subject: [PATCH 197/268] KVM: selftests: aarch64: Add tests for LEVEL_INFO in vgic_irq Add injection tests for the LEVEL_INFO ioctl (level-sensitive specific) into vgic_irq. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-13-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_irq.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index a20d225367a1..bc1b6fd684fc 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -60,6 +60,7 @@ typedef enum { KVM_INJECT_EDGE_IRQ_LINE = 1, KVM_SET_IRQ_LINE, KVM_SET_IRQ_LINE_HIGH, + KVM_SET_LEVEL_INFO_HIGH, } kvm_inject_cmd; struct kvm_inject_args { @@ -98,6 +99,7 @@ static struct kvm_inject_desc inject_edge_fns[] = { static struct kvm_inject_desc inject_level_fns[] = { /* sgi ppi spi */ { KVM_SET_IRQ_LINE_HIGH, false, true, true }, + { KVM_SET_LEVEL_INFO_HIGH, false, true, true }, { 0, }, }; @@ -406,6 +408,10 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, for (i = intid; i < intid + num; i++) kvm_arm_irq_line(vm, i, 1); break; + case KVM_SET_LEVEL_INFO_HIGH: + for (i = intid; i < intid + num; i++) + kvm_irq_set_level_info(gic_fd, i, 1); + break; default: break; } From 90f50acac9ee9f81192098c22b2cbf2491a40263 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:39:02 -0800 Subject: [PATCH 198/268] KVM: selftests: aarch64: Add test_inject_fail to vgic_irq Add tests for failed injections to vgic_irq. This tests that KVM can handle bogus IRQ numbers. 
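As a rough illustration (a sketch based on the checks added below, not a definitive description of the interface), an out-of-range intid is expected to make KVM_IRQ_LINE fail with EINVAL rather than be silently accepted:

/* Sketch: expected host-side outcome for a bogus intid. */
static void check_bogus_intid(struct kvm_vm *vm, uint32_t intid)
{
	int ret = _kvm_arm_irq_line(vm, intid, 1);

	TEST_ASSERT(ret != 0 && errno == EINVAL,
		    "Bogus intid %u was not rejected by KVM_IRQ_LINE", intid);
}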
Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-14-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 122 +++++++++++++++--- .../testing/selftests/kvm/lib/aarch64/vgic.c | 7 +- 2 files changed, 109 insertions(+), 20 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index bc1b6fd684fc..9f1674b3a45c 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -68,21 +68,28 @@ struct kvm_inject_args { uint32_t first_intid; uint32_t num; int level; + bool expect_failure; }; /* Used on the guest side to perform the hypercall. */ static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, - uint32_t num, int level); + uint32_t num, int level, bool expect_failure); /* Used on the host side to get the hypercall info. */ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, struct kvm_inject_args *args); -#define KVM_INJECT(cmd, intid) \ - kvm_inject_call(cmd, intid, 1, -1 /* not used */) +#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure) \ + kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure) #define KVM_INJECT_MULTI(cmd, intid, num) \ - kvm_inject_call(cmd, intid, num, -1 /* not used */) + _KVM_INJECT_MULTI(cmd, intid, num, false) + +#define _KVM_INJECT(cmd, intid, expect_failure) \ + _KVM_INJECT_MULTI(cmd, intid, 1, expect_failure) + +#define KVM_INJECT(cmd, intid) \ + _KVM_INJECT_MULTI(cmd, intid, 1, false) struct kvm_inject_desc { kvm_inject_cmd cmd; @@ -158,13 +165,14 @@ static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive) } static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid, - uint32_t num, int level) + uint32_t num, int level, bool expect_failure) { struct kvm_inject_args args = { .cmd = cmd, .first_intid = first_intid, .num = num, .level = level, + .expect_failure = expect_failure, }; GUEST_SYNC(&args); } @@ -206,7 +214,19 @@ static void reset_priorities(struct test_args *args) static void guest_set_irq_line(uint32_t intid, uint32_t level) { - kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level); + kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false); +} + +static void test_inject_fail(struct test_args *args, + uint32_t intid, kvm_inject_cmd cmd) +{ + reset_stats(); + + _KVM_INJECT(cmd, intid, true); + /* no IRQ to handle on entry */ + + GUEST_ASSERT_EQ(irq_handled, 0); + GUEST_ASSERT_IAR_EMPTY(); } static void guest_inject(struct test_args *args, @@ -330,6 +350,16 @@ static void test_injection(struct test_args *args, struct kvm_inject_desc *f) } } +static void test_injection_failure(struct test_args *args, + struct kvm_inject_desc *f) +{ + uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, }; + int i; + + for (i = 0; i < ARRAY_SIZE(bad_intid); i++) + test_inject_fail(args, bad_intid[i], f->cmd); +} + static void test_preemption(struct test_args *args, struct kvm_inject_desc *f) { /* @@ -376,11 +406,61 @@ static void guest_code(struct test_args args) for_each_inject_fn(inject_fns, f) { test_injection(&args, f); test_preemption(&args, f); + test_injection_failure(&args, f); } GUEST_DONE(); } +static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level, + struct test_args *test_args, bool expect_failure) +{ + int ret; + + if (!expect_failure) { + kvm_arm_irq_line(vm, intid, level); + } else { + /* The interface doesn't allow larger 
intid's. */ + if (intid > KVM_ARM_IRQ_NUM_MASK) + return; + + ret = _kvm_arm_irq_line(vm, intid, level); + TEST_ASSERT(ret != 0 && errno == EINVAL, + "Bad intid %i did not cause KVM_IRQ_LINE " + "error: rc: %i errno: %i", intid, ret, errno); + } +} + +void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level, + bool expect_failure) +{ + if (!expect_failure) { + kvm_irq_set_level_info(gic_fd, intid, level); + } else { + int ret = _kvm_irq_set_level_info(gic_fd, intid, level); + /* + * The kernel silently fails for invalid SPIs and SGIs (which + * are not level-sensitive). It only checks for intid to not + * spill over 1U << 10 (the max reserved SPI). Also, callers + * are supposed to mask the intid with 0x3ff (1023). + */ + if (intid > VGIC_MAX_RESERVED) + TEST_ASSERT(ret != 0 && errno == EINVAL, + "Bad intid %i did not cause VGIC_GRP_LEVEL_INFO " + "error: rc: %i errno: %i", intid, ret, errno); + else + TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO " + "for intid %i failed, rc: %i errno: %i", + intid, ret, errno); + } +} + +/* handles the valid case: intid=0xffffffff num=1 */ +#define for_each_intid(first, num, tmp, i) \ + for ((tmp) = (i) = (first); \ + (tmp) < (uint64_t)(first) + (uint64_t)(num); \ + (tmp)++, (i)++) + static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, struct kvm_inject_args *inject_args, struct test_args *test_args) @@ -389,28 +469,36 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, uint32_t intid = inject_args->first_intid; uint32_t num = inject_args->num; int level = inject_args->level; + bool expect_failure = inject_args->expect_failure; + uint64_t tmp; uint32_t i; - assert(intid < UINT_MAX - num); + /* handles the valid case: intid=0xffffffff num=1 */ + assert(intid < UINT_MAX - num || num == 1); switch (cmd) { case KVM_INJECT_EDGE_IRQ_LINE: - for (i = intid; i < intid + num; i++) - kvm_arm_irq_line(vm, i, 1); - for (i = intid; i < intid + num; i++) - kvm_arm_irq_line(vm, i, 0); + for_each_intid(intid, num, tmp, i) + kvm_irq_line_check(vm, i, 1, test_args, + expect_failure); + for_each_intid(intid, num, tmp, i) + kvm_irq_line_check(vm, i, 0, test_args, + expect_failure); break; case KVM_SET_IRQ_LINE: - for (i = intid; i < intid + num; i++) - kvm_arm_irq_line(vm, i, level); + for_each_intid(intid, num, tmp, i) + kvm_irq_line_check(vm, i, level, test_args, + expect_failure); break; case KVM_SET_IRQ_LINE_HIGH: - for (i = intid; i < intid + num; i++) - kvm_arm_irq_line(vm, i, 1); + for_each_intid(intid, num, tmp, i) + kvm_irq_line_check(vm, i, 1, test_args, + expect_failure); break; case KVM_SET_LEVEL_INFO_HIGH: - for (i = intid; i < intid + num; i++) - kvm_irq_set_level_info(gic_fd, i, 1); + for_each_intid(intid, num, tmp, i) + kvm_irq_set_level_info_check(gic_fd, i, 1, + expect_failure); break; default: break; diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c index 84206d7c92b4..b3a0fca0d780 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c @@ -110,12 +110,13 @@ int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) { uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK; + TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself " + "doesn't allow injecting SGIs. 
There's no mask for it."); + if (INTID_IS_PPI(intid)) irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT; - else if (INTID_IS_SPI(intid)) - irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT; else - TEST_FAIL("KVM_IRQ_LINE can't be used with SGIs."); + irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT; return _kvm_irq_line(vm, irq, level); } From 88209c104e9b3e95502c0e924fb1cd8bd5a01d82 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:39:03 -0800 Subject: [PATCH 199/268] KVM: selftests: Add IRQ GSI routing library functions Add an architecture independent wrapper function for creating and writing IRQ GSI routing tables. Also add a function to add irqchip entries. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-15-ricarkol@google.com --- .../testing/selftests/kvm/include/kvm_util.h | 8 +++ tools/testing/selftests/kvm/lib/kvm_util.c | 51 +++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 4ba8fd44cf95..db36e6c49983 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -252,6 +252,14 @@ int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, uint64_t attr, void *val, bool write); +#define KVM_MAX_IRQ_ROUTES 4096 + +struct kvm_irq_routing *kvm_gsi_routing_create(void); +void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, + uint32_t gsi, uint32_t pin); +int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); +void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); + const char *exit_reason_str(unsigned int exit_reason); void virt_pgd_alloc(struct kvm_vm *vm); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 9f7210c2d75e..68b56e7e6808 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -2107,6 +2107,57 @@ void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno); } +struct kvm_irq_routing *kvm_gsi_routing_create(void) +{ + struct kvm_irq_routing *routing; + size_t size; + + size = sizeof(struct kvm_irq_routing); + /* Allocate space for the max number of entries: this wastes 196 KBs. 
*/ + size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry); + routing = calloc(1, size); + assert(routing); + + return routing; +} + +void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, + uint32_t gsi, uint32_t pin) +{ + int i; + + assert(routing); + assert(routing->nr < KVM_MAX_IRQ_ROUTES); + + i = routing->nr; + routing->entries[i].gsi = gsi; + routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; + routing->entries[i].flags = 0; + routing->entries[i].u.irqchip.irqchip = 0; + routing->entries[i].u.irqchip.pin = pin; + routing->nr++; +} + +int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) +{ + int ret; + + assert(routing); + ret = ioctl(vm_get_fd(vm), KVM_SET_GSI_ROUTING, routing); + free(routing); + + return ret; +} + +void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) +{ + int ret; + + ret = _kvm_gsi_routing_write(vm, routing); + TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING failed, rc: %i errno: %i", + ret, errno); +} + /* * VM Dump * From 6a5a47188caca7be4bbe28cdb31d5df09868ed5c Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:39:04 -0800 Subject: [PATCH 200/268] KVM: selftests: aarch64: Add tests for IRQFD in vgic_irq Add injection tests for the KVM_IRQFD ioctl into vgic_irq. Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-16-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 101 +++++++++++++++++- .../selftests/kvm/include/aarch64/vgic.h | 2 + 2 files changed, 102 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index 9f1674b3a45c..121113f24ed3 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -10,6 +10,7 @@ #include #include +#include #include #include "processor.h" @@ -31,6 +32,8 @@ struct test_args { uint32_t nr_irqs; /* number of KVM supported IRQs. */ bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */ bool level_sensitive; /* 1 is level, 0 is edge */ + int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */ + bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */ }; /* @@ -61,6 +64,7 @@ typedef enum { KVM_SET_IRQ_LINE, KVM_SET_IRQ_LINE_HIGH, KVM_SET_LEVEL_INFO_HIGH, + KVM_INJECT_IRQFD, } kvm_inject_cmd; struct kvm_inject_args { @@ -100,6 +104,7 @@ struct kvm_inject_desc { static struct kvm_inject_desc inject_edge_fns[] = { /* sgi ppi spi */ { KVM_INJECT_EDGE_IRQ_LINE, false, false, true }, + { KVM_INJECT_IRQFD, false, false, true }, { 0, }, }; @@ -107,12 +112,17 @@ static struct kvm_inject_desc inject_level_fns[] = { /* sgi ppi spi */ { KVM_SET_IRQ_LINE_HIGH, false, true, true }, { KVM_SET_LEVEL_INFO_HIGH, false, true, true }, + { KVM_INJECT_IRQFD, false, false, true }, { 0, }, }; #define for_each_inject_fn(t, f) \ for ((f) = (t); (f)->cmd; (f)++) +#define for_each_supported_inject_fn(args, t, f) \ + for_each_inject_fn(t, f) \ + if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD) + /* Shared between the guest main thread and the IRQ handlers. */ volatile uint64_t irq_handled; volatile uint32_t irqnr_received[MAX_SPI + 1]; @@ -403,7 +413,7 @@ static void guest_code(struct test_args args) local_irq_enable(); /* Start the tests. 
*/ - for_each_inject_fn(inject_fns, f) { + for_each_supported_inject_fn(&args, inject_fns, f) { test_injection(&args, f); test_preemption(&args, f); test_injection_failure(&args, f); @@ -455,6 +465,88 @@ void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level, } } +static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, + uint32_t intid, uint32_t num, uint32_t kvm_max_routes, + bool expect_failure) +{ + struct kvm_irq_routing *routing; + int ret; + uint64_t i; + + assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES); + + routing = kvm_gsi_routing_create(); + for (i = intid; i < (uint64_t)intid + num; i++) + kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI); + + if (!expect_failure) { + kvm_gsi_routing_write(vm, routing); + } else { + ret = _kvm_gsi_routing_write(vm, routing); + /* The kernel only checks for KVM_IRQCHIP_NUM_PINS. */ + if (intid >= KVM_IRQCHIP_NUM_PINS) + TEST_ASSERT(ret != 0 && errno == EINVAL, + "Bad intid %u did not cause KVM_SET_GSI_ROUTING " + "error: rc: %i errno: %i", intid, ret, errno); + else + TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING " + "for intid %i failed, rc: %i errno: %i", + intid, ret, errno); + } +} + +static void kvm_routing_and_irqfd_check(struct kvm_vm *vm, + uint32_t intid, uint32_t num, uint32_t kvm_max_routes, + bool expect_failure) +{ + int fd[MAX_SPI]; + uint64_t val; + int ret, f; + uint64_t i; + + /* + * There is no way to try injecting an SGI or PPI as the interface + * starts counting from the first SPI (above the private ones), so just + * exit. + */ + if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid)) + return; + + kvm_set_gsi_routing_irqchip_check(vm, intid, num, + kvm_max_routes, expect_failure); + + /* + * If expect_failure, then just to inject anyway. These + * will silently fail. And in any case, the guest will check + * that no actual interrupt was injected for those cases. 
+ */ + + for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { + fd[f] = eventfd(0, 0); + TEST_ASSERT(fd[f] != -1, + "eventfd failed, errno: %i\n", errno); + } + + for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { + struct kvm_irqfd irqfd = { + .fd = fd[f], + .gsi = i - MIN_SPI, + }; + assert(i <= (uint64_t)UINT_MAX); + vm_ioctl(vm, KVM_IRQFD, &irqfd); + } + + for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) { + val = 1; + ret = write(fd[f], &val, sizeof(uint64_t)); + TEST_ASSERT(ret == sizeof(uint64_t), + "Write to KVM_IRQFD failed with ret: %d\n", ret); + } + + for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) + close(fd[f]); +} + /* handles the valid case: intid=0xffffffff num=1 */ #define for_each_intid(first, num, tmp, i) \ for ((tmp) = (i) = (first); \ @@ -500,6 +592,11 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, kvm_irq_set_level_info_check(gic_fd, i, 1, expect_failure); break; + case KVM_INJECT_IRQFD: + kvm_routing_and_irqfd_check(vm, intid, num, + test_args->kvm_max_routes, + expect_failure); + break; default: break; } @@ -534,6 +631,8 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) .nr_irqs = nr_irqs, .level_sensitive = level_sensitive, .eoi_split = eoi_split, + .kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING), + .kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD), }; print_args(&args); diff --git a/tools/testing/selftests/kvm/include/aarch64/vgic.h b/tools/testing/selftests/kvm/include/aarch64/vgic.h index ce6f0383c1a1..4442081221a0 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vgic.h +++ b/tools/testing/selftests/kvm/include/aarch64/vgic.h @@ -29,4 +29,6 @@ int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level); void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu); void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu); +#define KVM_IRQCHIP_NUM_PINS (1020 - 32) + #endif // SELFTEST_KVM_VGIC_H From bebd8f3f869361e0249efe423ba76a0d991ce3e6 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:39:05 -0800 Subject: [PATCH 201/268] KVM: selftests: aarch64: Add ISPENDR write tests in vgic_irq Add injection tests that use writing into the ISPENDR register (to mark IRQs as pending). This is typically used by migration code. 
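For illustration, the host-side path boils down to one distributor register write per interrupt; a minimal sketch using the kvm_irq_write_ispendr() helper from the selftest vgic library (an assumption here, not part of this patch):

/* Sketch: mark a range of SPIs pending the way a migration restore would. */
static void restore_pending(int gic_fd, uint32_t first_intid, uint32_t num)
{
	uint32_t intid;

	for (intid = first_intid; intid < first_intid + num; intid++)
		kvm_irq_write_ispendr(gic_fd, intid, VCPU_ID);
}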
Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-17-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index 121113f24ed3..ab39f0bf18e7 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -65,6 +65,7 @@ typedef enum { KVM_SET_IRQ_LINE_HIGH, KVM_SET_LEVEL_INFO_HIGH, KVM_INJECT_IRQFD, + KVM_WRITE_ISPENDR, } kvm_inject_cmd; struct kvm_inject_args { @@ -105,6 +106,7 @@ static struct kvm_inject_desc inject_edge_fns[] = { /* sgi ppi spi */ { KVM_INJECT_EDGE_IRQ_LINE, false, false, true }, { KVM_INJECT_IRQFD, false, false, true }, + { KVM_WRITE_ISPENDR, true, false, true }, { 0, }, }; @@ -113,6 +115,7 @@ static struct kvm_inject_desc inject_level_fns[] = { { KVM_SET_IRQ_LINE_HIGH, false, true, true }, { KVM_SET_LEVEL_INFO_HIGH, false, true, true }, { KVM_INJECT_IRQFD, false, false, true }, + { KVM_WRITE_ISPENDR, false, true, true }, { 0, }, }; @@ -495,6 +498,20 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, } } +static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid, + uint32_t vcpu, bool expect_failure) +{ + /* + * Ignore this when expecting failure as invalid intids will lead to + * either trying to inject SGIs when we configured the test to be + * level_sensitive (or the reverse), or inject large intids which + * will lead to writing above the ISPENDR register space (and we + * don't want to do that either). + */ + if (!expect_failure) + kvm_irq_write_ispendr(gic_fd, intid, vcpu); +} + static void kvm_routing_and_irqfd_check(struct kvm_vm *vm, uint32_t intid, uint32_t num, uint32_t kvm_max_routes, bool expect_failure) @@ -597,6 +614,11 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, test_args->kvm_max_routes, expect_failure); break; + case KVM_WRITE_ISPENDR: + for (i = intid; i < intid + num; i++) + kvm_irq_write_ispendr_check(gic_fd, i, + VCPU_ID, expect_failure); + break; default: break; } From 728fcc46d2c2292d1ac73f3491b8f4332066fdad Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Mon, 8 Nov 2021 18:39:06 -0800 Subject: [PATCH 202/268] KVM: selftests: aarch64: Add test for restoring active IRQs Add a test that restores multiple IRQs in active state, it does it by writing into ISACTIVER from the guest and using KVM ioctls. This test tries to emulate what would happen during a live migration: restore active IRQs. 
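In short, restoring one active IRQ takes a host-side ISACTIVER write plus a guest-side update of the active-priority register; a minimal sketch assuming the helpers introduced below, not the patch itself:

/* Sketch: restore a single IRQ in active state at preemption level idx. */
static void restore_one_active(uint32_t intid, int idx)
{
	/* Host side (via hypercall): mark the IRQ active in the distributor. */
	KVM_ACTIVATE(KVM_WRITE_ISACTIVER, intid);

	/* Guest side: reinstate the matching bit in ICV_AP1R0_EL1. */
	gic_write_ap1r0(gic_read_ap1r0() | (1U << idx));
}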
Signed-off-by: Ricardo Koller Acked-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211109023906.1091208-18-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index ab39f0bf18e7..e6c7d7f8fbd1 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -66,6 +66,7 @@ typedef enum { KVM_SET_LEVEL_INFO_HIGH, KVM_INJECT_IRQFD, KVM_WRITE_ISPENDR, + KVM_WRITE_ISACTIVER, } kvm_inject_cmd; struct kvm_inject_args { @@ -96,6 +97,9 @@ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc, #define KVM_INJECT(cmd, intid) \ _KVM_INJECT_MULTI(cmd, intid, 1, false) +#define KVM_ACTIVATE(cmd, intid) \ + kvm_inject_call(cmd, intid, 1, 1, false); + struct kvm_inject_desc { kvm_inject_cmd cmd; /* can inject PPIs, PPIs, and/or SPIs. */ @@ -119,6 +123,12 @@ static struct kvm_inject_desc inject_level_fns[] = { { 0, }, }; +static struct kvm_inject_desc set_active_fns[] = { + /* sgi ppi spi */ + { KVM_WRITE_ISACTIVER, true, true, true }, + { 0, }, +}; + #define for_each_inject_fn(t, f) \ for ((f) = (t); (f)->cmd; (f)++) @@ -126,6 +136,9 @@ static struct kvm_inject_desc inject_level_fns[] = { for_each_inject_fn(t, f) \ if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD) +#define for_each_supported_activate_fn(args, t, f) \ + for_each_supported_inject_fn((args), (t), (f)) + /* Shared between the guest main thread and the IRQ handlers. */ volatile uint64_t irq_handled; volatile uint32_t irqnr_received[MAX_SPI + 1]; @@ -147,6 +160,12 @@ static uint64_t gic_read_ap1r0(void) return reg; } +static void gic_write_ap1r0(uint64_t val) +{ + write_sysreg_s(val, SYS_ICV_AP1R0_EL1); + isb(); +} + static void guest_set_irq_line(uint32_t intid, uint32_t level); static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive) @@ -274,6 +293,55 @@ static void guest_inject(struct test_args *args, reset_priorities(args); } +/* + * Restore the active state of multiple concurrent IRQs (given by + * concurrent_irqs). This does what a live-migration would do on the + * destination side assuming there are some active IRQs that were not + * deactivated yet. + */ +static void guest_restore_active(struct test_args *args, + uint32_t first_intid, uint32_t num, + kvm_inject_cmd cmd) +{ + uint32_t prio, intid, ap1r; + int i; + + /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs + * in descending order, so intid+1 can preempt intid. + */ + for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) { + GUEST_ASSERT(prio >= 0); + intid = i + first_intid; + gic_set_priority(intid, prio); + } + + /* In a real migration, KVM would restore all GIC state before running + * guest code. + */ + for (i = 0; i < num; i++) { + intid = i + first_intid; + KVM_ACTIVATE(cmd, intid); + ap1r = gic_read_ap1r0(); + ap1r |= 1U << i; + gic_write_ap1r0(ap1r); + } + + /* This is where the "migration" would occur. */ + + /* finish handling the IRQs starting with the highest priority one. */ + for (i = 0; i < num; i++) { + intid = num - i - 1 + first_intid; + gic_set_eoi(intid); + if (args->eoi_split) + gic_set_dir(intid); + } + + for (i = 0; i < num; i++) + GUEST_ASSERT(!gic_irq_get_active(i + first_intid)); + GUEST_ASSERT_EQ(gic_read_ap1r0(), 0); + GUEST_ASSERT_IAR_EMPTY(); +} + /* * Polls the IAR until it's not a spurious interrupt. 
* @@ -391,6 +459,19 @@ static void test_preemption(struct test_args *args, struct kvm_inject_desc *f) test_inject_preemption(args, MIN_SPI, 4, f->cmd); } +static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f) +{ + /* Test up to 4 active IRQs. Same reason as in test_preemption. */ + if (f->sgi) + guest_restore_active(args, MIN_SGI, 4, f->cmd); + + if (f->ppi) + guest_restore_active(args, MIN_PPI, 4, f->cmd); + + if (f->spi) + guest_restore_active(args, MIN_SPI, 4, f->cmd); +} + static void guest_code(struct test_args args) { uint32_t i, nr_irqs = args.nr_irqs; @@ -422,6 +503,12 @@ static void guest_code(struct test_args args) test_injection_failure(&args, f); } + /* Restore the active state of IRQs. This would happen when live + * migrating IRQs in the middle of being handled. + */ + for_each_supported_activate_fn(&args, set_active_fns, f) + test_restore_active(&args, f); + GUEST_DONE(); } @@ -619,6 +706,10 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd, kvm_irq_write_ispendr_check(gic_fd, i, VCPU_ID, expect_failure); break; + case KVM_WRITE_ISACTIVER: + for (i = intid; i < intid + num; i++) + kvm_irq_write_isactiver(gic_fd, i, VCPU_ID); + break; default: break; } From f15dcf1b5853e770ed614e23ab2e90f0ca1b7b0b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 28 Dec 2021 12:14:14 +0000 Subject: [PATCH 203/268] KVM: arm64: selftests: get-reg-list: Add pauth configuration The get-reg-list test ignores the Pointer Authentication features, which is a shame now that we have relatively common HW with this feature. Define two new configurations (with and without PMU) that exercise the KVM capabilities. Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20211228121414.1013250-1-maz@kernel.org --- .../selftests/kvm/aarch64/get-reg-list.c | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index cc898181faab..f769fc6cd927 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -1014,6 +1014,22 @@ static __u64 sve_rejects_set[] = { KVM_REG_ARM64_SVE_VLS, }; +static __u64 pauth_addr_regs[] = { + ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */ + ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */ + ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */ + ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */ + ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */ + ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */ + ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */ + ARM64_SYS_REG(3, 0, 2, 2, 3) /* APDBKEYHI_EL1 */ +}; + +static __u64 pauth_generic_regs[] = { + ARM64_SYS_REG(3, 0, 2, 3, 0), /* APGAKEYLO_EL1 */ + ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */ +}; + #define BASE_SUBLIST \ { "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), } #define VREGS_SUBLIST \ @@ -1025,6 +1041,21 @@ static __u64 sve_rejects_set[] = { { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \ .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \ .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), } +#define PAUTH_SUBLIST \ + { \ + .name = "pauth_address", \ + .capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \ + .feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \ + .regs = pauth_addr_regs, \ + .regs_n = ARRAY_SIZE(pauth_addr_regs), \ + }, \ + { \ + .name = "pauth_generic", \ + .capability = 
KVM_CAP_ARM_PTRAUTH_GENERIC, \ + .feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \ + .regs = pauth_generic_regs, \ + .regs_n = ARRAY_SIZE(pauth_generic_regs), \ + } static struct vcpu_config vregs_config = { .sublists = { @@ -1056,11 +1087,30 @@ static struct vcpu_config sve_pmu_config = { {0}, }, }; +static struct vcpu_config pauth_config = { + .sublists = { + BASE_SUBLIST, + VREGS_SUBLIST, + PAUTH_SUBLIST, + {0}, + }, +}; +static struct vcpu_config pauth_pmu_config = { + .sublists = { + BASE_SUBLIST, + VREGS_SUBLIST, + PAUTH_SUBLIST, + PMU_SUBLIST, + {0}, + }, +}; static struct vcpu_config *vcpu_configs[] = { &vregs_config, &vregs_pmu_config, &sve_config, &sve_pmu_config, + &pauth_config, + &pauth_pmu_config, }; static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs); From e938eddbeb85f4c0c47e56cd9e09ee196ea1bc1a Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Thu, 30 Dec 2021 22:15:35 +0800 Subject: [PATCH 204/268] KVM: arm64: Fix comment typo in kvm_vcpu_finalize_sve() kvm_arm_init_arch_resources() was renamed to kvm_arm_init_sve() in commit a3be836df7cb ("KVM: arm/arm64: Demote kvm_arm_init_arch_resources() to just set up SVE"). Fix the function name in comment of kvm_vcpu_finalize_sve(). Signed-off-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211230141535.1389-1-yuzenghui@huawei.com --- arch/arm64/kvm/reset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 2bbfeadeb6ae..34d7c5d54e5b 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -99,7 +99,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) /* * Responsibility for these properties is shared between - * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and + * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and * set_sve_vls(). Double-check here just to be sure: */ if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() || From 6c9eeb5f4a9bb2b11a40fd0f15efde7bd33ee908 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 4 Jan 2022 17:19:40 +0200 Subject: [PATCH 205/268] KVM: arm64: vgic: Replace kernel.h with the necessary inclusions arm_vgic.h does not require all the stuff that kernel.h provides. Replace kernel.h inclusion with the list of what is really being used. Signed-off-by: Andy Shevchenko Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220104151940.55399-1-andriy.shevchenko@linux.intel.com --- include/kvm/arm_vgic.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index e602d848fc1a..bb30a6803d9f 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -5,9 +5,11 @@ #ifndef __KVM_ARM_VGIC_H #define __KVM_ARM_VGIC_H -#include +#include #include #include +#include +#include #include #include #include From cc4f602bc4365d9a8665803a49dddc70eb56f7f1 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 4 Nov 2021 16:41:07 +0000 Subject: [PATCH 206/268] KVM: RISC-V: Use common KVM implementation of MMU memory caches Use common KVM's implementation of the MMU memory caches, which for all intents and purposes is semantically identical to RISC-V's version, the only difference being that the common implementation will fall back to an atomic allocation if there's a KVM bug that triggers a cache underflow. 
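Concretely, the conversion means the stage-2 code tops the cache up where it may sleep and pulls pages from it under the lock; a minimal sketch of that pattern, derived from the hunks below rather than a new interface:

/* Sketch: common-cache usage pattern after the conversion. */
static int stage2_alloc_table_page(struct kvm_mmu_memory_cache *pcache,
				   void **out_page)
{
	/* Top up in sleepable context, before taking the MMU lock. */
	int ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);

	if (ret)
		return ret;

	/* Under the lock, allocations are served from the cache. */
	*out_page = kvm_mmu_memory_cache_alloc(pcache);
	return *out_page ? 0 : -ENOMEM;
}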
RISC-V appears to have based its MMU code on arm64 before the conversion to the common caches in commit c1a33aebe91d ("KVM: arm64: Use common KVM implementation of MMU memory caches"), despite having also copy-pasted the definition of KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE in kvm_types.h. Opportunistically drop the superfluous wrapper kvm_riscv_stage2_flush_cache(), whose name is very, very confusing as "cache flush" in the context of MMU code almost always refers to flushing hardware caches, not freeing unused software objects. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Anup Patel --- arch/riscv/include/asm/kvm_host.h | 10 +---- arch/riscv/include/asm/kvm_types.h | 2 +- arch/riscv/kvm/mmu.c | 66 ++++++------------------------ arch/riscv/kvm/vcpu.c | 5 ++- 4 files changed, 18 insertions(+), 65 deletions(-) diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 907fafea787e..52e19888ce43 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -77,13 +77,6 @@ struct kvm_sbi_context { int return_handled; }; -#define KVM_MMU_PAGE_CACHE_NR_OBJS 32 - -struct kvm_mmu_page_cache { - int nobjs; - void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS]; -}; - struct kvm_cpu_trap { unsigned long sepc; unsigned long scause; @@ -193,7 +186,7 @@ struct kvm_vcpu_arch { struct kvm_sbi_context sbi_context; /* Cache pages needed to program page tables with spinlock held */ - struct kvm_mmu_page_cache mmu_page_cache; + struct kvm_mmu_memory_cache mmu_page_cache; /* VCPU power-off state */ bool power_off; @@ -220,7 +213,6 @@ void __kvm_riscv_hfence_gvma_all(void); int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write); -void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu); int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm); void kvm_riscv_stage2_free_pgd(struct kvm *kvm); void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu); diff --git a/arch/riscv/include/asm/kvm_types.h b/arch/riscv/include/asm/kvm_types.h index e476b404eb67..e15765f98d7a 100644 --- a/arch/riscv/include/asm/kvm_types.h +++ b/arch/riscv/include/asm/kvm_types.h @@ -2,6 +2,6 @@ #ifndef _ASM_RISCV_KVM_TYPES_H #define _ASM_RISCV_KVM_TYPES_H -#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40 +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 32 #endif /* _ASM_RISCV_KVM_TYPES_H */ diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index 7d884b15cf5e..5f2736c2e773 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -83,43 +83,6 @@ static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize) return 0; } -static int stage2_cache_topup(struct kvm_mmu_page_cache *pcache, - int min, int max) -{ - void *page; - - BUG_ON(max > KVM_MMU_PAGE_CACHE_NR_OBJS); - if (pcache->nobjs >= min) - return 0; - while (pcache->nobjs < max) { - page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - return -ENOMEM; - pcache->objects[pcache->nobjs++] = page; - } - - return 0; -} - -static void stage2_cache_flush(struct kvm_mmu_page_cache *pcache) -{ - while (pcache && pcache->nobjs) - free_page((unsigned long)pcache->objects[--pcache->nobjs]); -} - -static void *stage2_cache_alloc(struct kvm_mmu_page_cache *pcache) -{ - void *p; - - if (!pcache) - return NULL; - - BUG_ON(!pcache->nobjs); - p = pcache->objects[--pcache->nobjs]; - - return p; -} - static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr, pte_t **ptepp, u32 *ptep_level) { @@ -171,7 +134,7 @@ static void 
stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr) } static int stage2_set_pte(struct kvm *kvm, u32 level, - struct kvm_mmu_page_cache *pcache, + struct kvm_mmu_memory_cache *pcache, gpa_t addr, const pte_t *new_pte) { u32 current_level = stage2_pgd_levels - 1; @@ -186,7 +149,9 @@ static int stage2_set_pte(struct kvm *kvm, u32 level, return -EEXIST; if (!pte_val(*ptep)) { - next_ptep = stage2_cache_alloc(pcache); + if (!pcache) + return -ENOMEM; + next_ptep = kvm_mmu_memory_cache_alloc(pcache); if (!next_ptep) return -ENOMEM; *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)), @@ -209,7 +174,7 @@ static int stage2_set_pte(struct kvm *kvm, u32 level, } static int stage2_map_page(struct kvm *kvm, - struct kvm_mmu_page_cache *pcache, + struct kvm_mmu_memory_cache *pcache, gpa_t gpa, phys_addr_t hpa, unsigned long page_size, bool page_rdonly, bool page_exec) @@ -384,7 +349,10 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa, int ret = 0; unsigned long pfn; phys_addr_t addr, end; - struct kvm_mmu_page_cache pcache = { 0, }; + struct kvm_mmu_memory_cache pcache; + + memset(&pcache, 0, sizeof(pcache)); + pcache.gfp_zero = __GFP_ZERO; end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK; pfn = __phys_to_pfn(hpa); @@ -395,9 +363,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa, if (!writable) pte = pte_wrprotect(pte); - ret = stage2_cache_topup(&pcache, - stage2_pgd_levels, - KVM_MMU_PAGE_CACHE_NR_OBJS); + ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels); if (ret) goto out; @@ -411,7 +377,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa, } out: - stage2_cache_flush(&pcache); + kvm_mmu_free_memory_cache(&pcache); return ret; } @@ -649,7 +615,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, gfn_t gfn = gpa >> PAGE_SHIFT; struct vm_area_struct *vma; struct kvm *kvm = vcpu->kvm; - struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache; + struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache; bool logging = (memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY)) ? 
true : false; unsigned long vma_pagesize, mmu_seq; @@ -684,8 +650,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, } /* We need minimum second+third level pages */ - ret = stage2_cache_topup(pcache, stage2_pgd_levels, - KVM_MMU_PAGE_CACHE_NR_OBJS); + ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels); if (ret) { kvm_err("Failed to topup stage2 cache\n"); return ret; @@ -734,11 +699,6 @@ out_unlock: return ret; } -void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu) -{ - stage2_cache_flush(&vcpu->arch.mmu_page_cache); -} - int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm) { struct page *pgd_page; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index fb84619df012..2ae11b4e93d1 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -77,6 +77,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Mark this VCPU never ran */ vcpu->arch.ran_atleast_once = false; + vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; /* Setup ISA features available to VCPU */ vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED; @@ -107,8 +108,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) /* Cleanup VCPU timer */ kvm_riscv_vcpu_timer_deinit(vcpu); - /* Flush the pages pre-allocated for Stage2 page table mappings */ - kvm_riscv_stage2_flush_cache(vcpu); + /* Free unused pages pre-allocated for Stage2 page table mappings */ + kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) From cf70be9d214c3ba8dd228cb373f7dc0edfa8da6b Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 18 Nov 2021 00:39:08 -0800 Subject: [PATCH 207/268] RISC-V: KVM: Mark the existing SBI implementation as v0.1 The existing SBI specification impelementation follows v0.1 specification. The latest specification allows more scalability and performance improvements. Rename the existing implementation as v0.1 and provide a way to allow future extensions. Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/include/asm/kvm_vcpu_sbi.h | 29 +++++ arch/riscv/kvm/vcpu_sbi.c | 149 ++++++++++++++++++++------ 2 files changed, 145 insertions(+), 33 deletions(-) create mode 100644 arch/riscv/include/asm/kvm_vcpu_sbi.h diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h new file mode 100644 index 000000000000..1a4cb0db2d0b --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/** + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * Authors: + * Atish Patra + */ + +#ifndef __RISCV_KVM_VCPU_SBI_H__ +#define __RISCV_KVM_VCPU_SBI_H__ + +#define KVM_SBI_VERSION_MAJOR 0 +#define KVM_SBI_VERSION_MINOR 2 + +struct kvm_vcpu_sbi_extension { + unsigned long extid_start; + unsigned long extid_end; + /** + * SBI extension handler. It can be defined for a given extension or group of + * extension. But it should always return linux error codes rather than SBI + * specific error codes. 
+ */ + int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, struct kvm_cpu_trap *utrap, + bool *exit); +}; + +const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid); +#endif /* __RISCV_KVM_VCPU_SBI_H__ */ diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index d0d2bcab2f7b..8c5b50d2b27e 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -12,9 +12,25 @@ #include #include #include +#include -#define SBI_VERSION_MAJOR 0 -#define SBI_VERSION_MINOR 1 +static int kvm_linux_err_map_sbi(int err) +{ + switch (err) { + case 0: + return SBI_SUCCESS; + case -EPERM: + return SBI_ERR_DENIED; + case -EINVAL: + return SBI_ERR_INVALID_PARAM; + case -EFAULT: + return SBI_ERR_INVALID_ADDRESS; + case -EOPNOTSUPP: + return SBI_ERR_NOT_SUPPORTED; + default: + return SBI_ERR_FAILURE; + }; +} static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) @@ -72,21 +88,19 @@ static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, run->exit_reason = KVM_EXIT_SYSTEM_EVENT; } -int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) +static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *utrap, + bool *exit) { ulong hmask; - int i, ret = 1; + int i, ret = 0; u64 next_cycle; struct kvm_vcpu *rvcpu; - bool next_sepc = true; struct cpumask cm, hm; struct kvm *kvm = vcpu->kvm; - struct kvm_cpu_trap utrap = { 0 }; struct kvm_cpu_context *cp = &vcpu->arch.guest_context; - if (!cp) - return -EINVAL; - switch (cp->a7) { case SBI_EXT_0_1_CONSOLE_GETCHAR: case SBI_EXT_0_1_CONSOLE_PUTCHAR: @@ -95,8 +109,7 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) * handled in kernel so we forward these to user-space */ kvm_riscv_vcpu_sbi_forward(vcpu, run); - next_sepc = false; - ret = 0; + *exit = true; break; case SBI_EXT_0_1_SET_TIMER: #if __riscv_xlen == 32 @@ -104,47 +117,42 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) #else next_cycle = (u64)cp->a0; #endif - kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle); + ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle); break; case SBI_EXT_0_1_CLEAR_IPI: - kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT); + ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT); break; case SBI_EXT_0_1_SEND_IPI: if (cp->a0) hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, - &utrap); + utrap); else hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; - if (utrap.scause) { - utrap.sepc = cp->sepc; - kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); - next_sepc = false; + if (utrap->scause) break; - } + for_each_set_bit(i, &hmask, BITS_PER_LONG) { rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i); - kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT); + ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT); + if (ret < 0) + break; } break; case SBI_EXT_0_1_SHUTDOWN: kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN); - next_sepc = false; - ret = 0; + *exit = true; break; case SBI_EXT_0_1_REMOTE_FENCE_I: case SBI_EXT_0_1_REMOTE_SFENCE_VMA: case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID: if (cp->a0) hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, - &utrap); + utrap); else hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; - if (utrap.scause) { - utrap.sepc = cp->sepc; - kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); - next_sepc = false; + if (utrap->scause) break; - } + cpumask_clear(&cm); for_each_set_bit(i, &hmask, BITS_PER_LONG) { rvcpu 
= kvm_get_vcpu_by_id(vcpu->kvm, i); @@ -154,22 +162,97 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) } riscv_cpuid_to_hartid_mask(&cm, &hm); if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I) - sbi_remote_fence_i(cpumask_bits(&hm)); + ret = sbi_remote_fence_i(cpumask_bits(&hm)); else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) - sbi_remote_hfence_vvma(cpumask_bits(&hm), + ret = sbi_remote_hfence_vvma(cpumask_bits(&hm), cp->a1, cp->a2); else - sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), + ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), cp->a1, cp->a2, cp->a3); break; default: - /* Return error for unsupported SBI calls */ - cp->a0 = SBI_ERR_NOT_SUPPORTED; + ret = -EINVAL; break; } + return ret; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = { + .extid_start = SBI_EXT_0_1_SET_TIMER, + .extid_end = SBI_EXT_0_1_SHUTDOWN, + .handler = kvm_sbi_ext_v01_handler, +}; + +static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { + &vcpu_sbi_ext_v01, +}; + +const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { + if (sbi_ext[i]->extid_start <= extid && + sbi_ext[i]->extid_end >= extid) + return sbi_ext[i]; + } + + return NULL; +} + +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + int ret = 1; + bool next_sepc = true; + bool userspace_exit = false; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + const struct kvm_vcpu_sbi_extension *sbi_ext; + struct kvm_cpu_trap utrap = { 0 }; + unsigned long out_val = 0; + bool ext_is_v01 = false; + + sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7); + if (sbi_ext && sbi_ext->handler) { + if (cp->a7 >= SBI_EXT_0_1_SET_TIMER && + cp->a7 <= SBI_EXT_0_1_SHUTDOWN) + ext_is_v01 = true; + ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit); + } else { + /* Return error for unsupported SBI calls */ + cp->a0 = SBI_ERR_NOT_SUPPORTED; + goto ecall_done; + } + + /* Handle special error cases i.e trap, exit or userspace forward */ + if (utrap.scause) { + /* No need to increment sepc or exit ioctl loop */ + ret = 1; + utrap.sepc = cp->sepc; + kvm_riscv_vcpu_trap_redirect(vcpu, &utrap); + next_sepc = false; + goto ecall_done; + } + + /* Exit ioctl loop or Propagate the error code the guest */ + if (userspace_exit) { + next_sepc = false; + ret = 0; + } else { + /** + * SBI extension handler always returns an Linux error code. Convert + * it to the SBI specific error code that can be propagated the SBI + * caller. + */ + ret = kvm_linux_err_map_sbi(ret); + cp->a0 = ret; + ret = 1; + } +ecall_done: if (next_sepc) cp->sepc += 4; + if (!ext_is_v01) + cp->a1 = out_val; return ret; } From a046c2d8578c93b85ab9272a818c821c254931d0 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 18 Nov 2021 00:39:09 -0800 Subject: [PATCH 208/268] RISC-V: KVM: Reorganize SBI code by moving SBI v0.1 to its own file With SBI v0.2, there may be more SBI extensions in future. It makes more sense to group related extensions in separate files. Guest kernel will choose appropriate SBI version dynamically. Move the existing implementation to a separate file so that it can be removed in future without much conflict. 
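With the ecall path split this way, adding a new SBI extension becomes mostly mechanical: provide a handler with the (vcpu, run, out_val, utrap, exit) signature, wrap it in a const struct kvm_vcpu_sbi_extension, and list it in the sbi_ext[] array in vcpu_sbi.c. A minimal sketch of a hypothetical extension follows; the "foo" name and the extension ID are purely illustrative and not part of this series:

  /* arch/riscv/kvm/vcpu_sbi_foo.c: hypothetical example, not part of this series */
  #include <linux/errno.h>
  #include <linux/kvm_host.h>
  #include <asm/kvm_vcpu_sbi.h>

  static int kvm_sbi_ext_foo_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                     unsigned long *out_val,
                                     struct kvm_cpu_trap *utrap, bool *exit)
  {
          /*
           * Return a Linux errno; kvm_riscv_vcpu_sbi_ecall() converts it via
           * kvm_linux_err_map_sbi() into the guest's a0, while *out_val is
           * written back to the guest's a1 for non-v0.1 extensions.
           */
          *out_val = 0;
          return 0;
  }

  const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_foo = {
          .extid_start = 0x0A000000,      /* illustrative extension ID */
          .extid_end   = 0x0A000000,
          .handler     = kvm_sbi_ext_foo_handler,
  };

The descriptor is then declared extern in vcpu_sbi.c and appended to sbi_ext[], the same way the base, time, IPI, rfence and HSM extensions are wired up in the following patches.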
Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/include/asm/kvm_vcpu_sbi.h | 2 + arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/vcpu_sbi.c | 148 +++----------------------- arch/riscv/kvm/vcpu_sbi_v01.c | 126 ++++++++++++++++++++++ 4 files changed, 146 insertions(+), 131 deletions(-) create mode 100644 arch/riscv/kvm/vcpu_sbi_v01.c diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index 1a4cb0db2d0b..704151969ceb 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -25,5 +25,7 @@ struct kvm_vcpu_sbi_extension { bool *exit); }; +void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run); const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid); + #endif /* __RISCV_KVM_VCPU_SBI_H__ */ diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 300590225348..892c60b07823 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -19,4 +19,5 @@ kvm-y += vcpu_exit.o kvm-y += vcpu_fp.o kvm-y += vcpu_switch.o kvm-y += vcpu_sbi.o +kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o kvm-y += vcpu_timer.o diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 8c5b50d2b27e..a8e0191cd9fc 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -9,9 +9,7 @@ #include #include #include -#include #include -#include #include static int kvm_linux_err_map_sbi(int err) @@ -32,8 +30,21 @@ static int kvm_linux_err_map_sbi(int err) }; } -static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, - struct kvm_run *run) +#ifdef CONFIG_RISCV_SBI_V01 +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01; +#else +static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = { + .extid_start = -1UL, + .extid_end = -1UL, + .handler = NULL, +}; +#endif + +static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { + &vcpu_sbi_ext_v01, +}; + +void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct kvm_cpu_context *cp = &vcpu->arch.guest_context; @@ -71,123 +82,6 @@ int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run) return 0; } -#ifdef CONFIG_RISCV_SBI_V01 - -static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, - struct kvm_run *run, u32 type) -{ - unsigned long i; - struct kvm_vcpu *tmp; - - kvm_for_each_vcpu(i, tmp, vcpu->kvm) - tmp->arch.power_off = true; - kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP); - - memset(&run->system_event, 0, sizeof(run->system_event)); - run->system_event.type = type; - run->exit_reason = KVM_EXIT_SYSTEM_EVENT; -} - -static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, - unsigned long *out_val, - struct kvm_cpu_trap *utrap, - bool *exit) -{ - ulong hmask; - int i, ret = 0; - u64 next_cycle; - struct kvm_vcpu *rvcpu; - struct cpumask cm, hm; - struct kvm *kvm = vcpu->kvm; - struct kvm_cpu_context *cp = &vcpu->arch.guest_context; - - switch (cp->a7) { - case SBI_EXT_0_1_CONSOLE_GETCHAR: - case SBI_EXT_0_1_CONSOLE_PUTCHAR: - /* - * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be - * handled in kernel so we forward these to user-space - */ - kvm_riscv_vcpu_sbi_forward(vcpu, run); - *exit = true; - break; - case SBI_EXT_0_1_SET_TIMER: -#if __riscv_xlen == 32 - next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0; -#else - next_cycle = (u64)cp->a0; -#endif - ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle); - break; - case SBI_EXT_0_1_CLEAR_IPI: - ret = 
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT); - break; - case SBI_EXT_0_1_SEND_IPI: - if (cp->a0) - hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, - utrap); - else - hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; - if (utrap->scause) - break; - - for_each_set_bit(i, &hmask, BITS_PER_LONG) { - rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i); - ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT); - if (ret < 0) - break; - } - break; - case SBI_EXT_0_1_SHUTDOWN: - kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN); - *exit = true; - break; - case SBI_EXT_0_1_REMOTE_FENCE_I: - case SBI_EXT_0_1_REMOTE_SFENCE_VMA: - case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID: - if (cp->a0) - hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, - utrap); - else - hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; - if (utrap->scause) - break; - - cpumask_clear(&cm); - for_each_set_bit(i, &hmask, BITS_PER_LONG) { - rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i); - if (rvcpu->cpu < 0) - continue; - cpumask_set_cpu(rvcpu->cpu, &cm); - } - riscv_cpuid_to_hartid_mask(&cm, &hm); - if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I) - ret = sbi_remote_fence_i(cpumask_bits(&hm)); - else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) - ret = sbi_remote_hfence_vvma(cpumask_bits(&hm), - cp->a1, cp->a2); - else - ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), - cp->a1, cp->a2, cp->a3); - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - -const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = { - .extid_start = SBI_EXT_0_1_SET_TIMER, - .extid_end = SBI_EXT_0_1_SHUTDOWN, - .handler = kvm_sbi_ext_v01_handler, -}; - -static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { - &vcpu_sbi_ext_v01, -}; - const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid) { int i = 0; @@ -214,9 +108,11 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7); if (sbi_ext && sbi_ext->handler) { +#ifdef CONFIG_RISCV_SBI_V01 if (cp->a7 >= SBI_EXT_0_1_SET_TIMER && cp->a7 <= SBI_EXT_0_1_SHUTDOWN) ext_is_v01 = true; +#endif ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit); } else { /* Return error for unsupported SBI calls */ @@ -256,13 +152,3 @@ ecall_done: return ret; } - -#else - -int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_riscv_vcpu_sbi_forward(vcpu, run); - return 0; -} - -#endif diff --git a/arch/riscv/kvm/vcpu_sbi_v01.c b/arch/riscv/kvm/vcpu_sbi_v01.c new file mode 100644 index 000000000000..4c7e13ec9ccc --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_v01.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Atish Patra + */ + +#include +#include +#include +#include +#include +#include +#include + +static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, + struct kvm_run *run, u32 type) +{ + unsigned long i; + struct kvm_vcpu *tmp; + + kvm_for_each_vcpu(i, tmp, vcpu->kvm) + tmp->arch.power_off = true; + kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP); + + memset(&run->system_event, 0, sizeof(run->system_event)); + run->system_event.type = type; + run->exit_reason = KVM_EXIT_SYSTEM_EVENT; +} + +static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *utrap, + bool *exit) +{ + ulong hmask; + int i, ret = 0; + u64 next_cycle; + struct kvm_vcpu *rvcpu; + struct cpumask cm, hm; + struct kvm *kvm = vcpu->kvm; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + + switch (cp->a7) { + case SBI_EXT_0_1_CONSOLE_GETCHAR: + case SBI_EXT_0_1_CONSOLE_PUTCHAR: + /* + * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be + * handled in kernel so we forward these to user-space + */ + kvm_riscv_vcpu_sbi_forward(vcpu, run); + *exit = true; + break; + case SBI_EXT_0_1_SET_TIMER: +#if __riscv_xlen == 32 + next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0; +#else + next_cycle = (u64)cp->a0; +#endif + ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle); + break; + case SBI_EXT_0_1_CLEAR_IPI: + ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT); + break; + case SBI_EXT_0_1_SEND_IPI: + if (cp->a0) + hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, + utrap); + else + hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; + if (utrap->scause) + break; + + for_each_set_bit(i, &hmask, BITS_PER_LONG) { + rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i); + ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT); + if (ret < 0) + break; + } + break; + case SBI_EXT_0_1_SHUTDOWN: + kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN); + *exit = true; + break; + case SBI_EXT_0_1_REMOTE_FENCE_I: + case SBI_EXT_0_1_REMOTE_SFENCE_VMA: + case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID: + if (cp->a0) + hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, + utrap); + else + hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; + if (utrap->scause) + break; + + cpumask_clear(&cm); + for_each_set_bit(i, &hmask, BITS_PER_LONG) { + rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i); + if (rvcpu->cpu < 0) + continue; + cpumask_set_cpu(rvcpu->cpu, &cm); + } + riscv_cpuid_to_hartid_mask(&cm, &hm); + if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I) + ret = sbi_remote_fence_i(cpumask_bits(&hm)); + else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) + ret = sbi_remote_hfence_vvma(cpumask_bits(&hm), + cp->a1, cp->a2); + else + ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), + cp->a1, cp->a2, cp->a3); + break; + default: + ret = -EINVAL; + break; + }; + + return ret; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = { + .extid_start = SBI_EXT_0_1_SET_TIMER, + .extid_end = SBI_EXT_0_1_SHUTDOWN, + .handler = kvm_sbi_ext_v01_handler, +}; From c62a76859723fb732bfeda847f22192e24e121a7 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 18 Nov 2021 00:39:10 -0800 Subject: [PATCH 209/268] RISC-V: KVM: Add SBI v0.2 base extension SBI v0.2 base extension defined to allow backward compatibility and probing of future extensions. This is also the only mandatory SBI extension that must be implemented by SBI implementors. 
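From the guest's point of view, the base extension is what makes feature discovery possible: the guest probes an extension ID before relying on it. A rough guest-side sketch using the kernel's sbi_ecall() helper is below (illustrative only; the in-tree guest code wraps this in sbi_probe_extension()):

  /*
   * Guest-side sketch: ask the SBI implementation (here, KVM) whether an
   * extension is present.  a0 carries the SBI error, a1 the probe result.
   */
  #include <asm/sbi.h>

  static bool guest_has_sbi_ext(unsigned long extid)
  {
          struct sbiret ret;

          ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT,
                          extid, 0, 0, 0, 0, 0);
          return !ret.error && ret.value;
  }

On the KVM side, the SBI_EXT_BASE_PROBE_EXT case in the handler below answers this by walking the same sbi_ext[] array through kvm_vcpu_sbi_find_ext(), except for experimental/vendor IDs, which are forwarded to userspace.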
Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/include/asm/kvm_vcpu_sbi.h | 2 + arch/riscv/include/asm/sbi.h | 8 +++ arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/vcpu_sbi.c | 3 +- arch/riscv/kvm/vcpu_sbi_base.c | 72 +++++++++++++++++++++++++++ 5 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 arch/riscv/kvm/vcpu_sbi_base.c diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index 704151969ceb..76e4e17a3e00 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -9,6 +9,8 @@ #ifndef __RISCV_KVM_VCPU_SBI_H__ #define __RISCV_KVM_VCPU_SBI_H__ +#define KVM_SBI_IMPID 3 + #define KVM_SBI_VERSION_MAJOR 0 #define KVM_SBI_VERSION_MINOR 2 diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 0d42693cb65e..9e4c79fd49d7 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -27,6 +27,14 @@ enum sbi_ext_id { SBI_EXT_IPI = 0x735049, SBI_EXT_RFENCE = 0x52464E43, SBI_EXT_HSM = 0x48534D, + + /* Experimentals extensions must lie within this range */ + SBI_EXT_EXPERIMENTAL_START = 0x08000000, + SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF, + + /* Vendor extensions must lie within this range */ + SBI_EXT_VENDOR_START = 0x09000000, + SBI_EXT_VENDOR_END = 0x09FFFFFF, }; enum sbi_ext_base_fid { diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 892c60b07823..c90b3802ee6d 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -20,4 +20,5 @@ kvm-y += vcpu_fp.o kvm-y += vcpu_switch.o kvm-y += vcpu_sbi.o kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o +kvm-y += vcpu_sbi_base.o kvm-y += vcpu_timer.o diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index a8e0191cd9fc..915a044a0b4f 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -39,9 +39,10 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = { .handler = NULL, }; #endif - +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base; static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_v01, + &vcpu_sbi_ext_base, }; void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) diff --git a/arch/riscv/kvm/vcpu_sbi_base.c b/arch/riscv/kvm/vcpu_sbi_base.c new file mode 100644 index 000000000000..d1ec08fe7289 --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_base.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Atish Patra + */ + +#include +#include +#include +#include +#include +#include +#include + +static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *trap, bool *exit) +{ + int ret = 0; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + struct sbiret ecall_ret; + + switch (cp->a6) { + case SBI_EXT_BASE_GET_SPEC_VERSION: + *out_val = (KVM_SBI_VERSION_MAJOR << + SBI_SPEC_VERSION_MAJOR_SHIFT) | + KVM_SBI_VERSION_MINOR; + break; + case SBI_EXT_BASE_GET_IMP_ID: + *out_val = KVM_SBI_IMPID; + break; + case SBI_EXT_BASE_GET_IMP_VERSION: + *out_val = 0; + break; + case SBI_EXT_BASE_PROBE_EXT: + if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START && + cp->a0 <= SBI_EXT_EXPERIMENTAL_END) || + (cp->a0 >= SBI_EXT_VENDOR_START && + cp->a0 <= SBI_EXT_VENDOR_END)) { + /* + * For experimental/vendor extensions + * forward it to the userspace + */ + kvm_riscv_vcpu_sbi_forward(vcpu, run); + *exit = true; + } else + *out_val = kvm_vcpu_sbi_find_ext(cp->a0) ? 1 : 0; + break; + case SBI_EXT_BASE_GET_MVENDORID: + case SBI_EXT_BASE_GET_MARCHID: + case SBI_EXT_BASE_GET_MIMPID: + ecall_ret = sbi_ecall(SBI_EXT_BASE, cp->a6, 0, 0, 0, 0, 0, 0); + if (!ecall_ret.error) + *out_val = ecall_ret.value; + /*TODO: We are unnecessarily converting the error twice */ + ret = sbi_err_map_linux_errno(ecall_ret.error); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = { + .extid_start = SBI_EXT_BASE, + .extid_end = SBI_EXT_BASE, + .handler = kvm_sbi_ext_base_handler, +}; From 5f862df5585cf9d74b97ecaf3078243591f1009f Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 18 Nov 2021 00:39:11 -0800 Subject: [PATCH 210/268] RISC-V: KVM: Add v0.1 replacement SBI extensions defined in v0.2 The SBI v0.2 contains some of the improved versions of required v0.1 extensions such as remote fence, timer and IPI. This patch implements those extensions. 
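The main ABI difference from v0.1 handled here is how the hart mask is passed: v0.1 put it behind a guest pointer that had to be read with kvm_riscv_vcpu_unpriv_read(), while the v0.2 calls carry it directly in registers, with a0 holding the mask and a1 the base hart ID (or -1UL to mean all harts). A guest-side sketch of a v0.2 IPI, purely illustrative:

  /*
   * Guest-side sketch: send an IPI to the harts selected by hmask relative
   * to hbase; no guest memory access is needed on either side.
   */
  #include <asm/sbi.h>

  static void guest_send_ipi(unsigned long hmask, unsigned long hbase)
  {
          sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
                    hmask, hbase, 0, 0, 0, 0);
  }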
Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/vcpu_sbi.c | 7 ++ arch/riscv/kvm/vcpu_sbi_replace.c | 135 ++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+) create mode 100644 arch/riscv/kvm/vcpu_sbi_replace.c diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index c90b3802ee6d..7fd9fc184ae9 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -21,4 +21,5 @@ kvm-y += vcpu_switch.o kvm-y += vcpu_sbi.o kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o kvm-y += vcpu_sbi_base.o +kvm-y += vcpu_sbi_replace.o kvm-y += vcpu_timer.o diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 915a044a0b4f..cf284e080f3e 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -40,9 +40,16 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = { }; #endif extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence; + static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_v01, &vcpu_sbi_ext_base, + &vcpu_sbi_ext_time, + &vcpu_sbi_ext_ipi, + &vcpu_sbi_ext_rfence, }; void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c new file mode 100644 index 000000000000..00036b7f83b9 --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_replace.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * Authors: + * Atish Patra + */ + +#include +#include +#include +#include +#include +#include +#include + +static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *utrap, bool *exit) +{ + int ret = 0; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + u64 next_cycle; + + if (cp->a6 != SBI_EXT_TIME_SET_TIMER) + return -EINVAL; + +#if __riscv_xlen == 32 + next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0; +#else + next_cycle = (u64)cp->a0; +#endif + kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle); + + return ret; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = { + .extid_start = SBI_EXT_TIME, + .extid_end = SBI_EXT_TIME, + .handler = kvm_sbi_ext_time_handler, +}; + +static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *utrap, bool *exit) +{ + int ret = 0; + unsigned long i; + struct kvm_vcpu *tmp; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + unsigned long hmask = cp->a0; + unsigned long hbase = cp->a1; + + if (cp->a6 != SBI_EXT_IPI_SEND_IPI) + return -EINVAL; + + kvm_for_each_vcpu(i, tmp, vcpu->kvm) { + if (hbase != -1UL) { + if (tmp->vcpu_id < hbase) + continue; + if (!(hmask & (1UL << (tmp->vcpu_id - hbase)))) + continue; + } + ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT); + if (ret < 0) + break; + } + + return ret; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = { + .extid_start = SBI_EXT_IPI, + .extid_end = SBI_EXT_IPI, + .handler = kvm_sbi_ext_ipi_handler, +}; + +static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *utrap, bool *exit) +{ + int ret = 0; + unsigned long i; + struct cpumask cm, hm; + struct 
kvm_vcpu *tmp; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + unsigned long hmask = cp->a0; + unsigned long hbase = cp->a1; + unsigned long funcid = cp->a6; + + cpumask_clear(&cm); + cpumask_clear(&hm); + kvm_for_each_vcpu(i, tmp, vcpu->kvm) { + if (hbase != -1UL) { + if (tmp->vcpu_id < hbase) + continue; + if (!(hmask & (1UL << (tmp->vcpu_id - hbase)))) + continue; + } + if (tmp->cpu < 0) + continue; + cpumask_set_cpu(tmp->cpu, &cm); + } + + riscv_cpuid_to_hartid_mask(&cm, &hm); + + switch (funcid) { + case SBI_EXT_RFENCE_REMOTE_FENCE_I: + ret = sbi_remote_fence_i(cpumask_bits(&hm)); + break; + case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA: + ret = sbi_remote_hfence_vvma(cpumask_bits(&hm), cp->a2, cp->a3); + break; + case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID: + ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), cp->a2, + cp->a3, cp->a4); + break; + case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA: + case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID: + case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA: + case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID: + /* TODO: implement for nested hypervisor case */ + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = { + .extid_start = SBI_EXT_RFENCE, + .extid_end = SBI_EXT_RFENCE, + .handler = kvm_sbi_ext_rfence_handler, +}; From 3e1d86569c210ec64398091bd035e539f0e26e81 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 18 Nov 2021 00:39:12 -0800 Subject: [PATCH 211/268] RISC-V: KVM: Add SBI HSM extension in KVM SBI HSM extension allows OS to start/stop harts any time. It also allows ordered booting of harts instead of random booting. Implement SBI HSM exntesion and designate the vcpu 0 as the boot vcpu id. All other non-zero non-booting vcpus should be brought up by the OS implementing HSM extension. If the guest OS doesn't implement HSM extension, only single vcpu will be available to OS. 
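The register convention the handler relies on is the standard SBI HSM one: for HART_START, a0 is the hart (vcpu) ID to start, a1 the address the hart should begin executing at (loaded into the target's sepc), and a2 an opaque value handed back to the started hart in its a1. A guest-side sketch of bringing a secondary hart online (illustrative only):

  /*
   * Guest-side sketch: start hart `hartid` at `start_addr`, passing `priv`
   * through to the new hart's a1.
   */
  #include <asm/sbi.h>

  static long guest_hart_start(unsigned long hartid, unsigned long start_addr,
                               unsigned long priv)
  {
          struct sbiret ret;

          ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
                          hartid, start_addr, priv, 0, 0, 0);
          return ret.error;
  }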
Signed-off-by: Atish Patra Signed-off-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/include/asm/sbi.h | 1 + arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/vcpu.c | 23 ++++++++ arch/riscv/kvm/vcpu_sbi.c | 4 ++ arch/riscv/kvm/vcpu_sbi_hsm.c | 105 ++++++++++++++++++++++++++++++++++ 5 files changed, 134 insertions(+) create mode 100644 arch/riscv/kvm/vcpu_sbi_hsm.c diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 9e4c79fd49d7..9c46dd3ff4a2 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -90,6 +90,7 @@ enum sbi_hsm_hart_status { #define SBI_ERR_INVALID_PARAM -3 #define SBI_ERR_DENIED -4 #define SBI_ERR_INVALID_ADDRESS -5 +#define SBI_ERR_ALREADY_AVAILABLE -6 extern unsigned long sbi_spec_version; struct sbiret { diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 7fd9fc184ae9..e5c56182f48f 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -22,4 +22,5 @@ kvm-y += vcpu_sbi.o kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o kvm-y += vcpu_sbi_base.o kvm-y += vcpu_sbi_replace.o +kvm-y += vcpu_sbi_hsm.o kvm-y += vcpu_timer.o diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 2ae11b4e93d1..0c5239e05721 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -53,6 +53,17 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context; + bool loaded; + + /** + * The preemption should be disabled here because it races with + * kvm_sched_out/kvm_sched_in(called from preempt notifiers) which + * also calls vcpu_load/put. + */ + get_cpu(); + loaded = (vcpu->cpu != -1); + if (loaded) + kvm_arch_vcpu_put(vcpu); memcpy(csr, reset_csr, sizeof(*csr)); @@ -64,6 +75,11 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) WRITE_ONCE(vcpu->arch.irqs_pending, 0); WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0); + + /* Reset the guest CSRs for hotplug usecase */ + if (loaded) + kvm_arch_vcpu_load(vcpu, smp_processor_id()); + put_cpu(); } int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) @@ -101,6 +117,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { + /** + * vcpu with id 0 is the designated boot cpu. + * Keep all vcpus with non-zero id in power-off state so that + * they can be brought up using SBI HSM extension. 
+ */ + if (vcpu->vcpu_idx != 0) + kvm_riscv_vcpu_power_off(vcpu); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index cf284e080f3e..f62d25bc9733 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -25,6 +25,8 @@ static int kvm_linux_err_map_sbi(int err) return SBI_ERR_INVALID_ADDRESS; case -EOPNOTSUPP: return SBI_ERR_NOT_SUPPORTED; + case -EALREADY: + return SBI_ERR_ALREADY_AVAILABLE; default: return SBI_ERR_FAILURE; }; @@ -43,6 +45,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm; static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_v01, @@ -50,6 +53,7 @@ static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_time, &vcpu_sbi_ext_ipi, &vcpu_sbi_ext_rfence, + &vcpu_sbi_ext_hsm, }; void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c new file mode 100644 index 000000000000..2e383687fa48 --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_hsm.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * Authors: + * Atish Patra + */ + +#include +#include +#include +#include +#include +#include + +static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *reset_cntx; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + struct kvm_vcpu *target_vcpu; + unsigned long target_vcpuid = cp->a0; + + target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid); + if (!target_vcpu) + return -EINVAL; + if (!target_vcpu->arch.power_off) + return -EALREADY; + + reset_cntx = &target_vcpu->arch.guest_reset_context; + /* start address */ + reset_cntx->sepc = cp->a1; + /* target vcpu id to start */ + reset_cntx->a0 = target_vcpuid; + /* private data passed from kernel */ + reset_cntx->a1 = cp->a2; + kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu); + + kvm_riscv_vcpu_power_on(target_vcpu); + + return 0; +} + +static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.power_off) + return -EINVAL; + + kvm_riscv_vcpu_power_off(vcpu); + + return 0; +} + +static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + unsigned long target_vcpuid = cp->a0; + struct kvm_vcpu *target_vcpu; + + target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid); + if (!target_vcpu) + return -EINVAL; + if (!target_vcpu->arch.power_off) + return SBI_HSM_HART_STATUS_STARTED; + else + return SBI_HSM_HART_STATUS_STOPPED; +} + +static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *utrap, + bool *exit) +{ + int ret = 0; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + struct kvm *kvm = vcpu->kvm; + unsigned long funcid = cp->a6; + + switch (funcid) { + case SBI_EXT_HSM_HART_START: + mutex_lock(&kvm->lock); + ret = kvm_sbi_hsm_vcpu_start(vcpu); + mutex_unlock(&kvm->lock); + break; + case SBI_EXT_HSM_HART_STOP: + ret = kvm_sbi_hsm_vcpu_stop(vcpu); + break; + case SBI_EXT_HSM_HART_STATUS: + ret = kvm_sbi_hsm_vcpu_get_status(vcpu); + if (ret >= 0) { + *out_val = ret; + ret = 0; + } + break; + 
default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = { + .extid_start = SBI_EXT_HSM, + .extid_end = SBI_EXT_HSM, + .handler = kvm_sbi_ext_hsm_handler, +}; From 4abed558b2cedebde400856e52800115d92e994a Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 2 Dec 2021 15:58:23 -0800 Subject: [PATCH 212/268] MAINTAINERS: Update Atish's email address I am no longer employed by western digital. Update my email address to personal one and add entries to .mailmap as well. Signed-off-by: Atish Patra Signed-off-by: Anup Patel --- .mailmap | 1 + MAINTAINERS | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 6277bb27b4bf..29eedc1fab89 100644 --- a/.mailmap +++ b/.mailmap @@ -50,6 +50,7 @@ Archit Taneja Ard Biesheuvel Arnaud Patard Arnd Bergmann +Atish Patra Axel Dyks Axel Lin Bart Van Assche diff --git a/MAINTAINERS b/MAINTAINERS index 43007f2d29e0..d8972ed1f8d9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10445,7 +10445,7 @@ F: arch/powerpc/kvm/ KERNEL VIRTUAL MACHINE FOR RISC-V (KVM/riscv) M: Anup Patel -R: Atish Patra +R: Atish Patra L: kvm@vger.kernel.org L: kvm-riscv@lists.infradead.org L: linux-riscv@lists.infradead.org From 637ad6551b2801cdf9c76046cffc1abe1f5c2243 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 29 Nov 2021 00:07:39 +0800 Subject: [PATCH 213/268] RISC-V: KVM: make kvm_riscv_vcpu_fp_clean() static There are no users outside vcpu_fp.c so make kvm_riscv_vcpu_fp_clean() static. Signed-off-by: Jisheng Zhang Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_fp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c index 1b070152578f..4449a976e5a6 100644 --- a/arch/riscv/kvm/vcpu_fp.c +++ b/arch/riscv/kvm/vcpu_fp.c @@ -26,7 +26,7 @@ void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) cntx->sstatus |= SR_FS_OFF; } -void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx) +static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx) { cntx->sstatus &= ~SR_FS; cntx->sstatus |= SR_FS_CLEAN; From ef8949a986f0e325b1d535389101541849d611d3 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Fri, 26 Nov 2021 10:48:41 +0530 Subject: [PATCH 214/268] RISC-V: KVM: Forward SBI experimental and vendor extensions The SBI experimental extension space is for temporary (or experimental) stuff whereas SBI vendor extension space is for hardware vendor specific stuff. Both these SBI extension spaces won't be standardized by the SBI specification so let's blindly forward such SBI calls to the userspace. 
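What userspace receives for such a forwarded call is a KVM_EXIT_RISCV_SBI exit with the extension ID, function ID and arguments filled in by kvm_riscv_vcpu_sbi_forward(); the selftests' ucall implementation later in this series consumes exactly this. A rough VMM-side sketch (illustrative; error handling and policy are entirely up to the VMM, and the SBI constants are mirrored locally since they are not part of the uapi):

  /*
   * Userspace sketch: handle an SBI call that KVM forwarded to the VMM.
   * Values written to ret[0]/ret[1] land in the guest's a0/a1 on resume.
   */
  #include <linux/kvm.h>

  #define SBI_EXT_VENDOR_START    0x09000000UL    /* mirrors asm/sbi.h */
  #define SBI_EXT_VENDOR_END      0x09FFFFFFUL
  #define SBI_ERR_NOT_SUPPORTED   (-2L)

  static void handle_riscv_sbi_exit(struct kvm_run *run)
  {
          unsigned long ext = run->riscv_sbi.extension_id;

          if (ext >= SBI_EXT_VENDOR_START && ext <= SBI_EXT_VENDOR_END) {
                  /* Vendor-specific handling would go here. */
                  run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
                  run->riscv_sbi.ret[1] = 0;
          }
  }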
Signed-off-by: Anup Patel Reviewed-and-tested-by: Atish Patra --- arch/riscv/kvm/vcpu_sbi.c | 4 ++++ arch/riscv/kvm/vcpu_sbi_base.c | 27 +++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index f62d25bc9733..78aa3db76225 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -46,6 +46,8 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor; static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_v01, @@ -54,6 +56,8 @@ static const struct kvm_vcpu_sbi_extension *sbi_ext[] = { &vcpu_sbi_ext_ipi, &vcpu_sbi_ext_rfence, &vcpu_sbi_ext_hsm, + &vcpu_sbi_ext_experimental, + &vcpu_sbi_ext_vendor, }; void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) diff --git a/arch/riscv/kvm/vcpu_sbi_base.c b/arch/riscv/kvm/vcpu_sbi_base.c index d1ec08fe7289..4ecf377f483b 100644 --- a/arch/riscv/kvm/vcpu_sbi_base.c +++ b/arch/riscv/kvm/vcpu_sbi_base.c @@ -70,3 +70,30 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = { .extid_end = SBI_EXT_BASE, .handler = kvm_sbi_ext_base_handler, }; + +static int kvm_sbi_ext_forward_handler(struct kvm_vcpu *vcpu, + struct kvm_run *run, + unsigned long *out_val, + struct kvm_cpu_trap *utrap, + bool *exit) +{ + /* + * Both SBI experimental and vendor extensions are + * unconditionally forwarded to userspace. + */ + kvm_riscv_vcpu_sbi_forward(vcpu, run); + *exit = true; + return 0; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental = { + .extid_start = SBI_EXT_EXPERIMENTAL_START, + .extid_end = SBI_EXT_EXPERIMENTAL_END, + .handler = kvm_sbi_ext_forward_handler, +}; + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor = { + .extid_start = SBI_EXT_VENDOR_START, + .extid_end = SBI_EXT_VENDOR_END, + .handler = kvm_sbi_ext_forward_handler, +}; From a457fd5660efa9cf960d2461156a1025bfdb13fa Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Fri, 26 Nov 2021 17:05:51 +0530 Subject: [PATCH 215/268] RISC-V: KVM: Add VM capability to allow userspace get GPA bits The number of GPA bits supported for a RISC-V Guest/VM is based on the MMU mode used by the G-stage translation. The KVM RISC-V will detect and use the best possible MMU mode for the G-stage in kvm_arch_init(). We add a generic VM capability KVM_CAP_VM_GPA_BITS which can be used by the KVM userspace to get the number of GPA (guest physical address) bits supported for a Guest/VM. 
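Userspace queries the new capability with an ordinary KVM_CHECK_EXTENSION ioctl; the selftests change later in this series does the same through kvm_check_cap(KVM_CAP_VM_GPA_BITS) when selecting guest modes. A bare-ioctl sketch (illustrative):

  /* Userspace sketch: number of guest physical address bits supported. */
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int vm_gpa_bits(int kvm_fd)
  {
          return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VM_GPA_BITS);
  }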
Signed-off-by: Anup Patel Reviewed-and-tested-by: Atish Patra --- arch/riscv/include/asm/kvm_host.h | 1 + arch/riscv/kvm/mmu.c | 5 +++++ arch/riscv/kvm/vm.c | 3 +++ include/uapi/linux/kvm.h | 1 + 4 files changed, 10 insertions(+) diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 52e19888ce43..99ef6a120617 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -218,6 +218,7 @@ void kvm_riscv_stage2_free_pgd(struct kvm *kvm); void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu); void kvm_riscv_stage2_mode_detect(void); unsigned long kvm_riscv_stage2_mode(void); +int kvm_riscv_stage2_gpa_bits(void); void kvm_riscv_stage2_vmid_detect(void); unsigned long kvm_riscv_stage2_vmid_bits(void); diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index 5f2736c2e773..9af67dbdc66a 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -769,3 +769,8 @@ unsigned long kvm_riscv_stage2_mode(void) { return stage2_mode >> HGATP_MODE_SHIFT; } + +int kvm_riscv_stage2_gpa_bits(void) +{ + return stage2_gpa_bits; +} diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index 7619691d8953..c768f75279ef 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -74,6 +74,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; + case KVM_CAP_VM_GPA_BITS: + r = kvm_riscv_stage2_gpa_bits(); + break; default: r = 0; break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 1daa45268de2..469f05d69c8d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1131,6 +1131,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204 #define KVM_CAP_ARM_MTE 205 #define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 +#define KVM_CAP_VM_GPA_BITS 207 #ifdef KVM_CAP_IRQ_ROUTING From 788490e798a707cea75d80448f5c6c5f322ada6e Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Fri, 26 Nov 2021 18:33:45 +0530 Subject: [PATCH 216/268] KVM: selftests: Add EXTRA_CFLAGS in top-level Makefile We add EXTRA_CFLAGS to the common CFLAGS of top-level Makefile which will allow users to pass additional compile-time flags such as "-static". Signed-off-by: Anup Patel Reviewed-and-tested-by: Atish Patra Reviewed-and-tested-by: Sean Christopherson --- tools/testing/selftests/kvm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 17342b575e85..869edc2a5a34 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -133,7 +133,7 @@ endif CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \ -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \ - -I$( Date: Tue, 5 Oct 2021 18:09:56 +0530 Subject: [PATCH 217/268] KVM: selftests: Add initial support for RISC-V 64-bit We add initial support for RISC-V 64-bit in KVM selftests using which we can cross-compile and run arch independent tests such as: demand_paging_test dirty_log_test kvm_create_max_vcpus, kvm_page_table_test set_memory_region_test kvm_binary_stats_test All VM guest modes defined in kvm_util.h require at least 48-bit guest virtual address so to use KVM RISC-V selftests hardware need to support at least Sv48 MMU for guest (i.e. VS-mode). 
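With this in place, an arch-independent test needs no RISC-V specific code at all: the library below sets up the Sv48 page tables, the guest stack, gp and the stvec fallback, and the GUEST_* ucalls are delivered through the SBI experimental extension added in the new ucall.c. A minimal test would look roughly like this (illustrative; it assumes the generic selftest API of this series, i.e. vm_create_default(), vcpu_run() and GUEST_DONE()):

  /* Sketch of a minimal selftest riding on the new RISC-V support. */
  #include "kvm_util.h"

  static void guest_code(void)
  {
          GUEST_DONE();   /* SBI call that KVM forwards to the harness as a ucall */
  }

  int main(void)
  {
          struct kvm_vm *vm = vm_create_default(0, 0, guest_code);

          vcpu_run(vm, 0);
          kvm_vm_free(vm);
          return 0;
  }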
Signed-off-by: Anup Patel Reviewed-and-tested-by: Atish Patra --- tools/testing/selftests/kvm/Makefile | 12 + .../testing/selftests/kvm/include/kvm_util.h | 10 + .../selftests/kvm/include/riscv/processor.h | 135 +++++++ tools/testing/selftests/kvm/lib/guest_modes.c | 10 + .../selftests/kvm/lib/riscv/processor.c | 362 ++++++++++++++++++ tools/testing/selftests/kvm/lib/riscv/ucall.c | 87 +++++ 6 files changed, 616 insertions(+) create mode 100644 tools/testing/selftests/kvm/include/riscv/processor.h create mode 100644 tools/testing/selftests/kvm/lib/riscv/processor.c create mode 100644 tools/testing/selftests/kvm/lib/riscv/ucall.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 869edc2a5a34..a2d65f218795 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -32,11 +32,16 @@ endif ifeq ($(ARCH),s390) UNAME_M := s390x endif +# Set UNAME_M riscv compile/install to work +ifeq ($(ARCH),riscv) + UNAME_M := riscv +endif LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c +LIBKVM_riscv = lib/riscv/processor.c lib/riscv/ucall.c TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features @@ -119,6 +124,13 @@ TEST_GEN_PROGS_s390x += rseq_test TEST_GEN_PROGS_s390x += set_memory_region_test TEST_GEN_PROGS_s390x += kvm_binary_stats_test +TEST_GEN_PROGS_riscv += demand_paging_test +TEST_GEN_PROGS_riscv += dirty_log_test +TEST_GEN_PROGS_riscv += kvm_create_max_vcpus +TEST_GEN_PROGS_riscv += kvm_page_table_test +TEST_GEN_PROGS_riscv += set_memory_region_test +TEST_GEN_PROGS_riscv += kvm_binary_stats_test + TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) LIBKVM += $(LIBKVM_$(UNAME_M)) diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 2d62edc49d67..da854f997aeb 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -69,6 +69,16 @@ enum vm_guest_mode { #define MIN_PAGE_SHIFT 12U #define ptes_per_page(page_size) ((page_size) / 16) +#elif defined(__riscv) + +#if __riscv_xlen == 32 +#error "RISC-V 32-bit kvm selftests not supported" +#endif + +#define VM_MODE_DEFAULT VM_MODE_P40V48_4K +#define MIN_PAGE_SHIFT 12U +#define ptes_per_page(page_size) ((page_size) / 8) + #endif #define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT) diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h new file mode 100644 index 000000000000..dc284c6bdbc3 --- /dev/null +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * RISC-V processor specific defines + * + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
+ */ +#ifndef SELFTEST_KVM_PROCESSOR_H +#define SELFTEST_KVM_PROCESSOR_H + +#include "kvm_util.h" +#include + +static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx, + uint64_t size) +{ + return KVM_REG_RISCV | type | idx | size; +} + +#if __riscv_xlen == 64 +#define KVM_REG_SIZE_ULONG KVM_REG_SIZE_U64 +#else +#define KVM_REG_SIZE_ULONG KVM_REG_SIZE_U32 +#endif + +#define RISCV_CONFIG_REG(name) __kvm_reg_id(KVM_REG_RISCV_CONFIG, \ + KVM_REG_RISCV_CONFIG_REG(name), \ + KVM_REG_SIZE_ULONG) + +#define RISCV_CORE_REG(name) __kvm_reg_id(KVM_REG_RISCV_CORE, \ + KVM_REG_RISCV_CORE_REG(name), \ + KVM_REG_SIZE_ULONG) + +#define RISCV_CSR_REG(name) __kvm_reg_id(KVM_REG_RISCV_CSR, \ + KVM_REG_RISCV_CSR_REG(name), \ + KVM_REG_SIZE_ULONG) + +#define RISCV_TIMER_REG(name) __kvm_reg_id(KVM_REG_RISCV_TIMER, \ + KVM_REG_RISCV_TIMER_REG(name), \ + KVM_REG_SIZE_U64) + +static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, + unsigned long *addr) +{ + struct kvm_one_reg reg; + + reg.id = id; + reg.addr = (unsigned long)addr; + vcpu_get_reg(vm, vcpuid, ®); +} + +static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, + unsigned long val) +{ + struct kvm_one_reg reg; + + reg.id = id; + reg.addr = (unsigned long)&val; + vcpu_set_reg(vm, vcpuid, ®); +} + +/* L3 index Bit[47:39] */ +#define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL +#define PGTBL_L3_INDEX_SHIFT 39 +#define PGTBL_L3_BLOCK_SHIFT 39 +#define PGTBL_L3_BLOCK_SIZE 0x0000008000000000ULL +#define PGTBL_L3_MAP_MASK (~(PGTBL_L3_BLOCK_SIZE - 1)) +/* L2 index Bit[38:30] */ +#define PGTBL_L2_INDEX_MASK 0x0000007FC0000000ULL +#define PGTBL_L2_INDEX_SHIFT 30 +#define PGTBL_L2_BLOCK_SHIFT 30 +#define PGTBL_L2_BLOCK_SIZE 0x0000000040000000ULL +#define PGTBL_L2_MAP_MASK (~(PGTBL_L2_BLOCK_SIZE - 1)) +/* L1 index Bit[29:21] */ +#define PGTBL_L1_INDEX_MASK 0x000000003FE00000ULL +#define PGTBL_L1_INDEX_SHIFT 21 +#define PGTBL_L1_BLOCK_SHIFT 21 +#define PGTBL_L1_BLOCK_SIZE 0x0000000000200000ULL +#define PGTBL_L1_MAP_MASK (~(PGTBL_L1_BLOCK_SIZE - 1)) +/* L0 index Bit[20:12] */ +#define PGTBL_L0_INDEX_MASK 0x00000000001FF000ULL +#define PGTBL_L0_INDEX_SHIFT 12 +#define PGTBL_L0_BLOCK_SHIFT 12 +#define PGTBL_L0_BLOCK_SIZE 0x0000000000001000ULL +#define PGTBL_L0_MAP_MASK (~(PGTBL_L0_BLOCK_SIZE - 1)) + +#define PGTBL_PTE_ADDR_MASK 0x003FFFFFFFFFFC00ULL +#define PGTBL_PTE_ADDR_SHIFT 10 +#define PGTBL_PTE_RSW_MASK 0x0000000000000300ULL +#define PGTBL_PTE_RSW_SHIFT 8 +#define PGTBL_PTE_DIRTY_MASK 0x0000000000000080ULL +#define PGTBL_PTE_DIRTY_SHIFT 7 +#define PGTBL_PTE_ACCESSED_MASK 0x0000000000000040ULL +#define PGTBL_PTE_ACCESSED_SHIFT 6 +#define PGTBL_PTE_GLOBAL_MASK 0x0000000000000020ULL +#define PGTBL_PTE_GLOBAL_SHIFT 5 +#define PGTBL_PTE_USER_MASK 0x0000000000000010ULL +#define PGTBL_PTE_USER_SHIFT 4 +#define PGTBL_PTE_EXECUTE_MASK 0x0000000000000008ULL +#define PGTBL_PTE_EXECUTE_SHIFT 3 +#define PGTBL_PTE_WRITE_MASK 0x0000000000000004ULL +#define PGTBL_PTE_WRITE_SHIFT 2 +#define PGTBL_PTE_READ_MASK 0x0000000000000002ULL +#define PGTBL_PTE_READ_SHIFT 1 +#define PGTBL_PTE_PERM_MASK (PGTBL_PTE_EXECUTE_MASK | \ + PGTBL_PTE_WRITE_MASK | \ + PGTBL_PTE_READ_MASK) +#define PGTBL_PTE_VALID_MASK 0x0000000000000001ULL +#define PGTBL_PTE_VALID_SHIFT 0 + +#define PGTBL_PAGE_SIZE PGTBL_L0_BLOCK_SIZE +#define PGTBL_PAGE_SIZE_SHIFT PGTBL_L0_BLOCK_SHIFT + +#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL) +#define SATP_MODE_39 _AC(0x8000000000000000, UL) +#define SATP_MODE_48 _AC(0x9000000000000000, UL) +#define SATP_ASID_BITS 16 +#define 
SATP_ASID_SHIFT 44 +#define SATP_ASID_MASK _AC(0xFFFF, UL) + +#define SBI_EXT_EXPERIMENTAL_START 0x08000000 +#define SBI_EXT_EXPERIMENTAL_END 0x08FFFFFF + +#define KVM_RISCV_SELFTESTS_SBI_EXT SBI_EXT_EXPERIMENTAL_END + +struct sbiret { + long error; + long value; +}; + +struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5); + +#endif /* SELFTEST_KVM_PROCESSOR_H */ diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c index c330f414ef96..57839a12e472 100644 --- a/tools/testing/selftests/kvm/lib/guest_modes.c +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -38,6 +38,16 @@ void guest_modes_append_default(void) guest_mode_append(VM_MODE_P47V64_4K, true, true); } #endif +#ifdef __riscv + { + unsigned int sz = kvm_check_cap(KVM_CAP_VM_GPA_BITS); + + if (sz >= 52) + guest_mode_append(VM_MODE_P52V48_4K, true, true); + if (sz >= 48) + guest_mode_append(VM_MODE_P48V48_4K, true, true); + } +#endif } void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg) diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c new file mode 100644 index 000000000000..d377f2603d98 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V code + * + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + */ + +#include +#include + +#include "kvm_util.h" +#include "../kvm_util_internal.h" +#include "processor.h" + +#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 + +static uint64_t page_align(struct kvm_vm *vm, uint64_t v) +{ + return (v + vm->page_size) & ~(vm->page_size - 1); +} + +static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) +{ + return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) << + PGTBL_PAGE_SIZE_SHIFT; +} + +static uint64_t ptrs_per_pte(struct kvm_vm *vm) +{ + return PGTBL_PAGE_SIZE / sizeof(uint64_t); +} + +static uint64_t pte_index_mask[] = { + PGTBL_L0_INDEX_MASK, + PGTBL_L1_INDEX_MASK, + PGTBL_L2_INDEX_MASK, + PGTBL_L3_INDEX_MASK, +}; + +static uint32_t pte_index_shift[] = { + PGTBL_L0_INDEX_SHIFT, + PGTBL_L1_INDEX_SHIFT, + PGTBL_L2_INDEX_SHIFT, + PGTBL_L3_INDEX_SHIFT, +}; + +static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) +{ + TEST_ASSERT(level > -1, + "Negative page table level (%d) not possible", level); + TEST_ASSERT(level < vm->pgtable_levels, + "Invalid page table level (%d)", level); + + return (gva & pte_index_mask[level]) >> pte_index_shift[level]; +} + +void virt_pgd_alloc(struct kvm_vm *vm) +{ + if (!vm->pgd_created) { + vm_paddr_t paddr = vm_phy_pages_alloc(vm, + page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size, + KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0); + vm->pgd = paddr; + vm->pgd_created = true; + } +} + +void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +{ + uint64_t *ptep, next_ppn; + int level = vm->pgtable_levels - 1; + + TEST_ASSERT((vaddr % vm->page_size) == 0, + "Virtual address not on page boundary,\n" + " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, + (vaddr >> vm->page_shift)), + "Invalid virtual address, vaddr: 0x%lx", vaddr); + TEST_ASSERT((paddr % vm->page_size) == 0, + "Physical address not on page boundary,\n" + " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); + TEST_ASSERT((paddr >> 
vm->page_shift) <= vm->max_gfn, + "Physical address beyond maximum supported,\n" + " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + + ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8; + if (!*ptep) { + next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT; + *ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) | + PGTBL_PTE_VALID_MASK; + } + level--; + + while (level > -1) { + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + + pte_index(vm, vaddr, level) * 8; + if (!*ptep && level > 0) { + next_ppn = vm_alloc_page_table(vm) >> + PGTBL_PAGE_SIZE_SHIFT; + *ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) | + PGTBL_PTE_VALID_MASK; + } + level--; + } + + paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT; + *ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) | + PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK; +} + +vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +{ + uint64_t *ptep; + int level = vm->pgtable_levels - 1; + + if (!vm->pgd_created) + goto unmapped_gva; + + ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8; + if (!ptep) + goto unmapped_gva; + level--; + + while (level > -1) { + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + + pte_index(vm, gva, level) * 8; + if (!ptep) + goto unmapped_gva; + level--; + } + + return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); + +unmapped_gva: + TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d", + gva, level); + exit(1); +} + +static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, + uint64_t page, int level) +{ +#ifdef DEBUG + static const char *const type[] = { "pte", "pmd", "pud", "p4d"}; + uint64_t pte, *ptep; + + if (level < 0) + return; + + for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) { + ptep = addr_gpa2hva(vm, pte); + if (!*ptep) + continue; + fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", + type[level], pte, *ptep, ptep); + pte_dump(stream, vm, indent + 1, + pte_addr(vm, *ptep), level - 1); + } +#endif +} + +void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +{ + int level = vm->pgtable_levels - 1; + uint64_t pgd, *ptep; + + if (!vm->pgd_created) + return; + + for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) { + ptep = addr_gpa2hva(vm, pgd); + if (!*ptep) + continue; + fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", + pgd, *ptep, ptep); + pte_dump(stream, vm, indent + 1, + pte_addr(vm, *ptep), level - 1); + } +} + +void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid) +{ + unsigned long satp; + + /* + * The RISC-V Sv48 MMU mode supports 56-bit physical address + * for 48-bit virtual address with 4KB last level page size. 
+ */ + switch (vm->mode) { + case VM_MODE_P52V48_4K: + case VM_MODE_P48V48_4K: + case VM_MODE_P40V48_4K: + break; + default: + TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); + } + + satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN; + satp |= SATP_MODE_48; + + set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp); +} + +void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) +{ + struct kvm_riscv_core core; + + get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5); + get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6); + + fprintf(stream, + " MODE: 0x%lx\n", core.mode); + fprintf(stream, + " PC: 0x%016lx RA: 0x%016lx SP: 0x%016lx GP: 0x%016lx\n", + core.regs.pc, core.regs.ra, core.regs.sp, core.regs.gp); + fprintf(stream, + " TP: 0x%016lx T0: 0x%016lx T1: 0x%016lx T2: 0x%016lx\n", + core.regs.tp, core.regs.t0, core.regs.t1, core.regs.t2); + fprintf(stream, + " S0: 0x%016lx S1: 0x%016lx A0: 0x%016lx A1: 0x%016lx\n", + core.regs.s0, core.regs.s1, core.regs.a0, core.regs.a1); + fprintf(stream, + " A2: 0x%016lx A3: 0x%016lx A4: 0x%016lx A5: 0x%016lx\n", + core.regs.a2, core.regs.a3, core.regs.a4, core.regs.a5); + fprintf(stream, + " A6: 0x%016lx A7: 0x%016lx S2: 0x%016lx S3: 0x%016lx\n", + core.regs.a6, core.regs.a7, core.regs.s2, core.regs.s3); + fprintf(stream, + " S4: 0x%016lx S5: 0x%016lx S6: 0x%016lx S7: 0x%016lx\n", + core.regs.s4, core.regs.s5, core.regs.s6, core.regs.s7); + fprintf(stream, + " S8: 0x%016lx S9: 0x%016lx S10: 0x%016lx S11: 0x%016lx\n", + core.regs.s8, core.regs.s9, core.regs.s10, core.regs.s11); + fprintf(stream, + 
" T3: 0x%016lx T4: 0x%016lx T5: 0x%016lx T6: 0x%016lx\n", + core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6); +} + +static void guest_hang(void) +{ + while (1) + ; +} + +void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code) +{ + int r; + size_t stack_size = vm->page_size == 4096 ? + DEFAULT_STACK_PGS * vm->page_size : + vm->page_size; + unsigned long stack_vaddr = vm_vaddr_alloc(vm, stack_size, + DEFAULT_RISCV_GUEST_STACK_VADDR_MIN); + unsigned long current_gp = 0; + struct kvm_mp_state mps; + + vm_vcpu_add(vm, vcpuid); + riscv_vcpu_mmu_setup(vm, vcpuid); + + /* + * With SBI HSM support in KVM RISC-V, all secondary VCPUs are + * powered-off by default so we ensure that all secondary VCPUs + * are powered-on using KVM_SET_MP_STATE ioctl(). + */ + mps.mp_state = KVM_MP_STATE_RUNNABLE; + r = _vcpu_ioctl(vm, vcpuid, KVM_SET_MP_STATE, &mps); + TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r); + + /* Setup global pointer of guest to be same as the host */ + asm volatile ( + "add %0, gp, zero" : "=r" (current_gp) : : "memory"); + set_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), current_gp); + + /* Setup stack pointer and program counter of guest */ + set_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), + stack_vaddr + stack_size); + set_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), + (unsigned long)guest_code); + + /* Setup default exception vector of guest */ + set_reg(vm, vcpuid, RISCV_CSR_REG(stvec), + (unsigned long)guest_hang); +} + +void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...) +{ + va_list ap; + uint64_t id = RISCV_CORE_REG(regs.a0); + int i; + + TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n" + " num: %u\n", num); + + va_start(ap, num); + + for (i = 0; i < num; i++) { + switch (i) { + case 0: + id = RISCV_CORE_REG(regs.a0); + break; + case 1: + id = RISCV_CORE_REG(regs.a1); + break; + case 2: + id = RISCV_CORE_REG(regs.a2); + break; + case 3: + id = RISCV_CORE_REG(regs.a3); + break; + case 4: + id = RISCV_CORE_REG(regs.a4); + break; + case 5: + id = RISCV_CORE_REG(regs.a5); + break; + case 6: + id = RISCV_CORE_REG(regs.a6); + break; + case 7: + id = RISCV_CORE_REG(regs.a7); + break; + }; + set_reg(vm, vcpuid, id, va_arg(ap, uint64_t)); + } + + va_end(ap); +} + +void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid) +{ +} diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c new file mode 100644 index 000000000000..9e42d8248fa6 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ucall support. A ucall is a "hypercall to userspace". + * + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
+ */ + +#include + +#include "kvm_util.h" +#include "../kvm_util_internal.h" +#include "processor.h" + +void ucall_init(struct kvm_vm *vm, void *arg) +{ +} + +void ucall_uninit(struct kvm_vm *vm) +{ +} + +struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5) +{ + register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); + register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); + register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); + register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); + register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4); + register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5); + register uintptr_t a6 asm ("a6") = (uintptr_t)(fid); + register uintptr_t a7 asm ("a7") = (uintptr_t)(ext); + struct sbiret ret; + + asm volatile ( + "ecall" + : "+r" (a0), "+r" (a1) + : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7) + : "memory"); + ret.error = a0; + ret.value = a1; + + return ret; +} + +void ucall(uint64_t cmd, int nargs, ...) +{ + struct ucall uc = { + .cmd = cmd, + }; + va_list va; + int i; + + nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS; + + va_start(va, nargs); + for (i = 0; i < nargs; ++i) + uc.args[i] = va_arg(va, uint64_t); + va_end(va); + + sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, 0, (vm_vaddr_t)&uc, + 0, 0, 0, 0, 0); +} + +uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc) +{ + struct kvm_run *run = vcpu_state(vm, vcpu_id); + struct ucall ucall = {}; + + if (uc) + memset(uc, 0, sizeof(*uc)); + + if (run->exit_reason == KVM_EXIT_RISCV_SBI && + run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT && + run->riscv_sbi.function_id == 0) { + memcpy(&ucall, addr_gva2hva(vm, run->riscv_sbi.args[0]), + sizeof(ucall)); + + vcpu_run_complete_io(vm, vcpu_id); + if (uc) + memcpy(uc, &ucall, sizeof(ucall)); + } + + return ucall.cmd; +} From 33e5b5746cc2336660c8710ba109d9a3923627b5 Mon Sep 17 00:00:00 2001 From: Vincent Chen Date: Mon, 27 Dec 2021 11:05:14 +0800 Subject: [PATCH 218/268] KVM: RISC-V: Avoid spurious virtual interrupts after clearing hideleg CSR When the last VM is terminated, the host kernel will invoke function hardware_disable_nolock() on each CPU to disable the related virtualization functions. Here, RISC-V currently only clears hideleg CSR and hedeleg CSR. This behavior will cause the host kernel to receive spurious interrupts if hvip CSR has pending interrupts and the corresponding enable bits in vsie CSR are asserted. To avoid it, hvip CSR and vsie CSR must be cleared before clearing hideleg CSR. Fixes: 99cdc6c18c2d ("RISC-V: Add initial skeletal KVM support") Signed-off-by: Vincent Chen Reviewed-by: Anup Patel Signed-off-by: Anup Patel --- arch/riscv/kvm/main.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 421ecf4e6360..2e5ca43c8c49 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -58,6 +58,14 @@ int kvm_arch_hardware_enable(void) void kvm_arch_hardware_disable(void) { + /* + * After clearing the hideleg CSR, the host kernel will receive + * spurious interrupts if hvip CSR has pending interrupts and the + * corresponding enable bits in vsie CSR are asserted. To avoid it, + * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR. 
+ */ + csr_write(CSR_VSIE, 0); + csr_write(CSR_HVIP, 0); csr_write(CSR_HEDELEG, 0); csr_write(CSR_HIDELEG, 0); } From 497685f2c743f552ec5626d60fc12e7c00faaf06 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 3 Jan 2022 18:54:58 +0530 Subject: [PATCH 219/268] MAINTAINERS: Update Anup's email address I am no longer work at Western Digital so update my email address to personal one and add entries to .mailmap as well. Signed-off-by: Anup Patel Acked-by: Atish Patra --- .mailmap | 1 + MAINTAINERS | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index 29eedc1fab89..c7a7cfc6d2fe 100644 --- a/.mailmap +++ b/.mailmap @@ -46,6 +46,7 @@ Andy Adamson Antoine Tenart Antoine Tenart Antonio Ospite +Anup Patel Archit Taneja Ard Biesheuvel Arnaud Patard diff --git a/MAINTAINERS b/MAINTAINERS index d8972ed1f8d9..14efdaa0616f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10444,7 +10444,7 @@ F: arch/powerpc/kernel/kvm* F: arch/powerpc/kvm/ KERNEL VIRTUAL MACHINE FOR RISC-V (KVM/riscv) -M: Anup Patel +M: Anup Patel R: Atish Patra L: kvm@vger.kernel.org L: kvm-riscv@lists.infradead.org From 4c66b56781eb114d478d8488e614a4f832a44a2c Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Wed, 8 Dec 2021 11:16:40 -0800 Subject: [PATCH 220/268] selftests: KVM: sev_migrate_tests: Fix test_sev_mirror() Mirrors should not be able to call LAUNCH_START. Remove the call on the mirror to correct the test before fixing sev_ioctl() to correctly assert on this failed ioctl. Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Marc Orr Signed-off-by: Peter Gonda Message-Id: <20211208191642.3792819-2-pgonda@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c index 29b18d565cf4..fbc742b42145 100644 --- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c +++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c @@ -228,9 +228,6 @@ static void sev_mirror_create(int dst_fd, int src_fd) static void test_sev_mirror(bool es) { struct kvm_vm *src_vm, *dst_vm; - struct kvm_sev_launch_start start = { - .policy = es ? SEV_POLICY_ES : 0 - }; int i; src_vm = sev_vm_create(es); @@ -241,7 +238,7 @@ static void test_sev_mirror(bool es) /* Check that we can complete creation of the mirror VM. */ for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) vm_vcpu_add(dst_vm, i); - sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start); + if (es) sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); From 427d046a41bbad295552fa0a7496e15d98eae389 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Wed, 8 Dec 2021 11:16:41 -0800 Subject: [PATCH 221/268] selftests: KVM: sev_migrate_tests: Fix sev_ioctl() TEST_ASSERT in SEV ioctl was allowing errors because it checked return value was good OR the FW error code was OK. This TEST_ASSERT should require both (aka. AND) values are OK. Removes the LAUNCH_START from the mirror VM because this call correctly fails because mirror VMs cannot call this command. Currently issues with the PSP driver functions mean the firmware error is not always reset to SEV_RET_SUCCESS when a call is successful. Mainly sev_platform_init() doesn't correctly set the fw error if the platform has already been initialized. 
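To make the intent of the assertion change concrete, here is a minimal sketch of the corrected wrapper, condensed from the hunk below. It assumes the selftest's existing kvm_sev_cmd setup; the sev_fd initialisation is deliberately elided and is not part of the change being illustrated.

  static void sev_ioctl(int vm_fd, int cmd_id, void *data)
  {
      struct kvm_sev_cmd cmd = {
          .id = cmd_id,
          .data = (uint64_t)data,
          /* .sev_fd initialised as in the existing helper (elided here) */
      };
      int ret;

      ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

      /*
       * The old check accepted "ret == 0 || cmd.error == SEV_RET_SUCCESS",
       * so a failing ioctl slipped through whenever the (possibly stale)
       * firmware error still read SEV_RET_SUCCESS.  Success now requires
       * both conditions.
       */
      TEST_ASSERT(ret == 0 && cmd.error == SEV_RET_SUCCESS,
                  "%d failed: return code: %d, errno: %d, fw error: %d",
                  cmd_id, ret, errno, cmd.error);
  }
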
Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Marc Orr Signed-off-by: Peter Gonda Message-Id: <20211208191642.3792819-3-pgonda@google.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c index fbc742b42145..4bb960ca6486 100644 --- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c +++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c @@ -30,8 +30,9 @@ static void sev_ioctl(int vm_fd, int cmd_id, void *data) }; int ret; + ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd); - TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS), + TEST_ASSERT(ret == 0 && cmd.error == SEV_RET_SUCCESS, "%d failed: return code: %d, errno: %d, fw error: %d", cmd_id, ret, errno, cmd.error); } From a6fec53947cf48e14cc41e2a69dd7d2aa7a00e65 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Wed, 8 Dec 2021 11:16:42 -0800 Subject: [PATCH 222/268] selftests: KVM: sev_migrate_tests: Add mirror command tests Add tests to confirm mirror vms can only run correct subset of commands. Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Marc Orr Signed-off-by: Peter Gonda Message-Id: <20211208191642.3792819-4-pgonda@google.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/x86_64/sev_migrate_tests.c | 55 +++++++++++++++++-- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c index 4bb960ca6486..80056bbbb003 100644 --- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c +++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c @@ -21,7 +21,7 @@ #define NR_LOCK_TESTING_THREADS 3 #define NR_LOCK_TESTING_ITERATIONS 10000 -static void sev_ioctl(int vm_fd, int cmd_id, void *data) +static int __sev_ioctl(int vm_fd, int cmd_id, void *data, __u32 *fw_error) { struct kvm_sev_cmd cmd = { .id = cmd_id, @@ -30,11 +30,20 @@ static void sev_ioctl(int vm_fd, int cmd_id, void *data) }; int ret; - ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd); - TEST_ASSERT(ret == 0 && cmd.error == SEV_RET_SUCCESS, + *fw_error = cmd.error; + return ret; +} + +static void sev_ioctl(int vm_fd, int cmd_id, void *data) +{ + int ret; + __u32 fw_error; + + ret = __sev_ioctl(vm_fd, cmd_id, data, &fw_error); + TEST_ASSERT(ret == 0 && fw_error == SEV_RET_SUCCESS, "%d failed: return code: %d, errno: %d, fw error: %d", - cmd_id, ret, errno, cmd.error); + cmd_id, ret, errno, fw_error); } static struct kvm_vm *sev_vm_create(bool es) @@ -226,6 +235,42 @@ static void sev_mirror_create(int dst_fd, int src_fd) TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno); } +static void verify_mirror_allowed_cmds(int vm_fd) +{ + struct kvm_sev_guest_status status; + + for (int cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) { + int ret; + __u32 fw_error; + + /* + * These commands are allowed for mirror VMs, all others are + * not. + */ + switch (cmd_id) { + case KVM_SEV_LAUNCH_UPDATE_VMSA: + case KVM_SEV_GUEST_STATUS: + case KVM_SEV_DBG_DECRYPT: + case KVM_SEV_DBG_ENCRYPT: + continue; + default: + break; + } + + /* + * These commands should be disallowed before the data + * parameter is examined so NULL is OK here. + */ + ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able call command: %d. 
ret: %d, errno: %d\n", + cmd_id, ret, errno); + } + + sev_ioctl(vm_fd, KVM_SEV_GUEST_STATUS, &status); +} + static void test_sev_mirror(bool es) { struct kvm_vm *src_vm, *dst_vm; @@ -243,6 +288,8 @@ static void test_sev_mirror(bool es) if (es) sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); + verify_mirror_allowed_cmds(dst_vm->fd); + kvm_vm_free(src_vm); kvm_vm_free(dst_vm); } From 46cbc0400f85987954f6e2c110409f8f60725232 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 10 Dec 2021 18:13:37 -0500 Subject: [PATCH 223/268] Revert "KVM: X86: Update mmu->pdptrs only when it is changed" This reverts commit 24cd19a28cb7174df502162641d6e1e12e7ffbd9. Sean Christopherson reports: "Commit 24cd19a28cb7 ('KVM: X86: Update mmu->pdptrs only when it is changed') breaks nested VMs with EPT in L0 and PAE shadow paging in L2. Reproducing is trivial, just disable EPT in L1 and run a VM. I haven't investigating how it breaks things." Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8912f44c640d..074a0578979f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -841,12 +841,9 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) } } - kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR); - if (memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs))) { - memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); - kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); - kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); - } + memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); + kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); + kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); vcpu->arch.pdptrs_from_userspace = false; return 1; From a9f2705ec84449e3b8d70c804766f8e97e23080d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Dec 2021 10:19:36 +0800 Subject: [PATCH 224/268] KVM: VMX: Save HOST_CR3 in vmx_set_host_fs_gs() The host CR3 in the vcpu thread can only be changed when scheduling, so commit 15ad9762d69f ("KVM: VMX: Save HOST_CR3 in vmx_prepare_switch_to_guest()") changed vmx.c to only save it in vmx_prepare_switch_to_guest(). However, it also has to be synced in vmx_sync_vmcs_host_state() when switching VMCS. vmx_set_host_fs_gs() is called in both places, so rename it to vmx_set_vmcs_host_state() and make it update HOST_CR3. 
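The pattern behind this change is worth spelling out: each HOST_* field that can differ between VMCSes is shadowed in vmcs_host_state, and the VMCS is only written when the shadowed value goes stale. A minimal sketch of that idea for CR3 alone, extracted from the hunk below (not a complete function):

  /* Write HOST_CR3 only when the cached copy no longer matches. */
  static void sync_host_cr3(struct vmcs_host_state *host)
  {
      unsigned long cr3 = __get_current_cr3_fast();

      if (unlikely(cr3 != host->cr3)) {
          vmcs_writel(HOST_CR3, cr3);    /* VMWRITE only when stale */
          host->cr3 = cr3;
      }
  }

Folding this into vmx_set_vmcs_host_state() means the same check-before-write caching is applied both when preparing to enter the guest and when vmx_sync_vmcs_host_state() switches between loaded VMCSes.
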
Fixes: 15ad9762d69f ("KVM: VMX: Save HOST_CR3 in vmx_prepare_switch_to_guest()") Signed-off-by: Lai Jiangshan Message-Id: <20211216021938.11752-2-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 3 ++- arch/x86/kvm/vmx/vmx.c | 20 +++++++++----------- arch/x86/kvm/vmx/vmx.h | 5 +++-- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 2f6f465e575f..d07a7fa75783 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -245,7 +245,8 @@ static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, src = &prev->host_state; dest = &vmx->loaded_vmcs->host_state; - vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base); + vmx_set_vmcs_host_state(dest, src->cr3, src->fs_sel, src->gs_sel, + src->fs_base, src->gs_base); dest->ldt_sel = src->ldt_sel; #ifdef CONFIG_X86_64 dest->ds_sel = src->ds_sel; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9bf65e5e4840..ebf511f83903 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1069,9 +1069,14 @@ static void pt_guest_exit(struct vcpu_vmx *vmx) wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); } -void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, - unsigned long fs_base, unsigned long gs_base) +void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3, + u16 fs_sel, u16 gs_sel, + unsigned long fs_base, unsigned long gs_base) { + if (unlikely(cr3 != host->cr3)) { + vmcs_writel(HOST_CR3, cr3); + host->cr3 = cr3; + } if (unlikely(fs_sel != host->fs_sel)) { if (!(fs_sel & 7)) vmcs_write16(HOST_FS_SELECTOR, fs_sel); @@ -1103,7 +1108,6 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) #ifdef CONFIG_X86_64 int cpu = raw_smp_processor_id(); #endif - unsigned long cr3; unsigned long fs_base, gs_base; u16 fs_sel, gs_sel; int i; @@ -1167,14 +1171,8 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) gs_base = segment_base(gs_sel); #endif - vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base); - - /* Host CR3 including its PCID is stable when guest state is loaded. 
*/ - cr3 = __get_current_cr3_fast(); - if (unlikely(cr3 != host_state->cr3)) { - vmcs_writel(HOST_CR3, cr3); - host_state->cr3 = cr3; - } + vmx_set_vmcs_host_state(host_state, __get_current_cr3_fast(), + fs_sel, gs_sel, fs_base, gs_base); vmx->guest_state_loaded = true; } diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 6c2c1aff1c3d..18111368cf85 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -371,8 +371,9 @@ int allocate_vpid(void); void free_vpid(int vpid); void vmx_set_constant_host_state(struct vcpu_vmx *vmx); void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu); -void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, - unsigned long fs_base, unsigned long gs_base); +void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3, + u16 fs_sel, u16 gs_sel, + unsigned long fs_base, unsigned long gs_base); int vmx_get_cpl(struct kvm_vcpu *vcpu); bool vmx_emulation_required(struct kvm_vcpu *vcpu); unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu); From 6b123c3a89a90ac6418e4d64b1e23f09d458a77d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Dec 2021 10:19:37 +0800 Subject: [PATCH 225/268] KVM: x86/mmu: Reconstruct shadow page root if the guest PDPTEs is changed For shadow paging, the page table needs to be reconstructed before the coming VMENTER if the guest PDPTEs is changed. But not all paths that call load_pdptrs() will cause the page tables to be reconstructed. Normally, kvm_mmu_reset_context() and kvm_mmu_free_roots() are used to launch later reconstruction. The commit d81135a57aa6("KVM: x86: do not reset mmu if CR0.CD and CR0.NW are changed") skips kvm_mmu_reset_context() after load_pdptrs() when changing CR0.CD and CR0.NW. The commit 21823fbda552("KVM: x86: Invalidate all PGDs for the current PCID on MOV CR3 w/ flush") skips kvm_mmu_free_roots() after load_pdptrs() when rewriting the CR3 with the same value. The commit a91a7c709600("KVM: X86: Don't reset mmu context when toggling X86_CR4_PGE") skips kvm_mmu_reset_context() after load_pdptrs() when changing CR4.PGE. Guests like linux would keep the PDPTEs unchanged for every instance of pagetable, so this missing reconstruction has no problem for linux guests. Fixes: d81135a57aa6("KVM: x86: do not reset mmu if CR0.CD and CR0.NW are changed") Fixes: 21823fbda552("KVM: x86: Invalidate all PGDs for the current PCID on MOV CR3 w/ flush") Fixes: a91a7c709600("KVM: X86: Don't reset mmu context when toggling X86_CR4_PGE") Suggested-by: Sean Christopherson Signed-off-by: Lai Jiangshan Message-Id: <20211216021938.11752-3-jiangshanlai@gmail.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 074a0578979f..9ddad9493cb8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -841,6 +841,13 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) } } + /* + * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled. + * Shadow page roots need to be reconstructed instead. 
+ */ + if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs))) + kvm_mmu_free_roots(vcpu, mmu, KVM_MMU_ROOT_CURRENT); + memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); From 5b61178cd2fd67890a70ae9febbd4df20bbd8c40 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Dec 2021 10:19:38 +0800 Subject: [PATCH 226/268] KVM: VMX: Mark VCPU_EXREG_CR3 dirty when !CR0_PG -> CR0_PG if EPT + !URG When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG. So VCPU_EXREG_CR3 is considered to be dirty and GUEST_CR3 needs to be updated in this case. Reported-by: Maxim Levitsky Suggested-by: Sean Christopherson Signed-off-by: Lai Jiangshan Message-Id: <20211216021938.11752-4-jiangshanlai@gmail.com> Fixes: c62c7bd4f95b ("KVM: VMX: Update vmcs.GUEST_CR3 only when the guest CR3 is dirty") Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ebf511f83903..1d53b8144f83 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3071,6 +3071,13 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */ if ((old_cr0_pg ^ cr0) & X86_CR0_PG) vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); + + /* + * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but + * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG. + */ + if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG)) + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); } /* depends on vcpu->arch.cr0 to be set to a new value */ From 006a0f0607e1504950dd8fa3b6ca8e438ec6c9d2 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 9 Dec 2021 14:10:04 -0500 Subject: [PATCH 227/268] KVM: x86: avoid out of bounds indices for fixed performance counters Because IceLake has 4 fixed performance counters but KVM only supports 3, it is possible for reprogram_fixed_counters to pass to reprogram_fixed_counter an index that is out of bounds for the fixed_pmc_events array. Ultimately intel_find_fixed_event, which is the only place that uses fixed_pmc_events, handles this correctly because it checks against the size of fixed_pmc_events anyway. Every other place operates on the fixed_counters[] array which is sized according to INTEL_PMC_MAX_FIXED. However, it is cleaner if the unsupported performance counters are culled early on in reprogram_fixed_counters. 
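As a concrete illustration of the clamp (a fragment of the intel_pmu_refresh() hunk below, shown in isolation): with Ice Lake reporting four fixed counters while KVM's fixed_pmc_events[] only describes three, the number of architectural fixed counters has to be limited by the table size as well as by host support:

  pmu->nr_arch_fixed_counters =
      min3(ARRAY_SIZE(fixed_pmc_events),          /* what KVM can map */
           (size_t)edx.split.num_counters_fixed,  /* what guest CPUID reports */
           (size_t)x86_pmu.num_counters_fixed);   /* what the host PMU has */

With the count culled here, later indices derived from it stay within fixed_pmc_events[], and the bounds check in intel_find_fixed_event() becomes a true "should never happen" warning.
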
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/pmu_intel.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 1b7456b2177b..d33e9799276e 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -91,7 +91,7 @@ static unsigned intel_find_fixed_event(int idx) u32 event; size_t size = ARRAY_SIZE(fixed_pmc_events); - if (idx >= size) + if (WARN_ON_ONCE(idx >= size)) return PERF_COUNT_HW_MAX; event = fixed_pmc_events[array_index_nospec(idx, size)]; @@ -500,8 +500,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->nr_arch_fixed_counters = 0; } else { pmu->nr_arch_fixed_counters = - min_t(int, edx.split.num_counters_fixed, - x86_pmu.num_counters_fixed); + min3(ARRAY_SIZE(fixed_pmc_events), + (size_t) edx.split.num_counters_fixed, + (size_t) x86_pmu.num_counters_fixed); edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed, x86_pmu.bit_width_fixed); pmu->counter_bitmask[KVM_PMC_FIXED] = From 761875634a5e2c3fed36c439fc4acac6f85a96eb Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 30 Nov 2021 15:42:16 +0800 Subject: [PATCH 228/268] KVM: x86/pmu: Setup pmc->eventsel for fixed PMCs The current pmc->eventsel for fixed counter is underutilised. The pmc->eventsel can be setup for all known available fixed counters since we have mapping between fixed pmc index and the intel_arch_events array. Either gp or fixed counter, it will simplify the later checks for consistency between eventsel and perf_hw_id. Signed-off-by: Like Xu Message-Id: <20211130074221.93635-2-likexu@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/pmu_intel.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index d33e9799276e..965fefbad56d 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -459,6 +459,21 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; } +static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu) +{ + size_t size = ARRAY_SIZE(fixed_pmc_events); + struct kvm_pmc *pmc; + u32 event; + int i; + + for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { + pmc = &pmu->fixed_counters[i]; + event = fixed_pmc_events[array_index_nospec(i, size)]; + pmc->eventsel = (intel_arch_events[event].unit_mask << 8) | + intel_arch_events[event].eventsel; + } +} + static void intel_pmu_refresh(struct kvm_vcpu *vcpu) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); @@ -507,6 +522,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) edx.split.bit_width_fixed, x86_pmu.bit_width_fixed); pmu->counter_bitmask[KVM_PMC_FIXED] = ((u64)1 << edx.split.bit_width_fixed) - 1; + setup_fixed_pmc_eventsel(pmu); } pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | From 7c174f305cbee6bdba5018aae02b84369e7ab995 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 30 Nov 2021 15:42:17 +0800 Subject: [PATCH 229/268] KVM: x86/pmu: Refactoring find_arch_event() to pmc_perf_hw_id() The find_arch_event() returns a "unsigned int" value, which is used by the pmc_reprogram_counter() to program a PERF_TYPE_HARDWARE type perf_event. The returned value is actually the kernel defined generic perf_hw_id, let's rename it to pmc_perf_hw_id() with simpler incoming parameters for better self-explanation. 
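The shape of the new hook, sketched here for illustration: the event select and unit mask are pulled straight out of pmc->eventsel (bits 7:0 and 15:8 of the architectural PERFEVTSELx layout) and looked up in the vendor's kvm_event_hw_type_mapping table to obtain a generic perf_hw_id. The table is named intel_arch_events or amd_event_mapping in the real code; "events" below is a placeholder, and the per-vendor details are in the hunks that follow.

  static unsigned int pmc_perf_hw_id_sketch(struct kvm_pmc *pmc)
  {
      u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
      u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
      int i;

      for (i = 0; i < ARRAY_SIZE(events); i++)    /* vendor mapping table */
          if (events[i].eventsel == event_select &&
              events[i].unit_mask == unit_mask)
              return events[i].event_type;

      return PERF_COUNT_HW_MAX;                   /* no architectural match */
  }
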
Signed-off-by: Like Xu Message-Id: <20211130074221.93635-3-likexu@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.c | 8 +------- arch/x86/kvm/pmu.h | 3 +-- arch/x86/kvm/svm/pmu.c | 8 ++++---- arch/x86/kvm/vmx/pmu_intel.c | 9 +++++---- 4 files changed, 11 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 09873f6488f7..3b3ccf5b1106 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -174,7 +174,6 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc) void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) { unsigned config, type = PERF_TYPE_RAW; - u8 event_select, unit_mask; struct kvm *kvm = pmc->vcpu->kvm; struct kvm_pmu_event_filter *filter; int i; @@ -206,17 +205,12 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) if (!allow_event) return; - event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT; - unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; - if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | ARCH_PERFMON_EVENTSEL_INV | ARCH_PERFMON_EVENTSEL_CMASK | HSW_IN_TX | HSW_IN_TX_CHECKPOINTED))) { - config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc), - event_select, - unit_mask); + config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc); if (config != PERF_COUNT_HW_MAX) type = PERF_TYPE_HARDWARE; } diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 59d6b76203d5..dd7dbb1c5048 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping { }; struct kvm_pmu_ops { - unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select, - u8 unit_mask); + unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc); unsigned (*find_fixed_event)(int idx); bool (*pmc_is_enabled)(struct kvm_pmc *pmc); struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 0cf05e4caa4c..fb0ce8cda8a7 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -138,10 +138,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, return &pmu->gp_counters[msr_to_index(msr)]; } -static unsigned amd_find_arch_event(struct kvm_pmu *pmu, - u8 event_select, - u8 unit_mask) +static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) { + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++) @@ -323,7 +323,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu) } struct kvm_pmu_ops amd_pmu_ops = { - .find_arch_event = amd_find_arch_event, + .pmc_perf_hw_id = amd_pmc_perf_hw_id, .find_fixed_event = amd_find_fixed_event, .pmc_is_enabled = amd_pmc_is_enabled, .pmc_idx_to_pmc = amd_pmc_idx_to_pmc, diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 965fefbad56d..7b530de7ae23 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) reprogram_counter(pmu, bit); } -static unsigned intel_find_arch_event(struct kvm_pmu *pmu, - u8 event_select, - u8 unit_mask) +static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc) { + struct kvm_pmu *pmu = pmc_to_pmu(pmc); + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) @@ -720,7 +721,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) } struct 
kvm_pmu_ops intel_pmu_ops = { - .find_arch_event = intel_find_arch_event, + .pmc_perf_hw_id = intel_pmc_perf_hw_id, .find_fixed_event = intel_find_fixed_event, .pmc_is_enabled = intel_pmc_is_enabled, .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, From 6ed1298eb0bf6641b0a66c2c38369f5767a2575c Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 30 Nov 2021 15:42:18 +0800 Subject: [PATCH 230/268] KVM: x86/pmu: Reuse pmc_perf_hw_id() and drop find_fixed_event() Since we set the same semantic event value for the fixed counter in pmc->eventsel, returning the perf_hw_id for the fixed counter via find_fixed_event() can be painlessly replaced by pmc_perf_hw_id() with the help of pmc_is_fixed() check. Signed-off-by: Like Xu Message-Id: <20211130074221.93635-4-likexu@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.c | 2 +- arch/x86/kvm/pmu.h | 1 - arch/x86/kvm/svm/pmu.c | 11 ++++------- arch/x86/kvm/vmx/pmu_intel.c | 19 +++---------------- 4 files changed, 8 insertions(+), 25 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 3b3ccf5b1106..b7a1ae28ab87 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -262,7 +262,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) pmc->current_config = (u64)ctrl; pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE, - kvm_x86_ops.pmu_ops->find_fixed_event(idx), + kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc), !(en_field & 0x2), /* exclude user */ !(en_field & 0x1), /* exclude kernel */ pmi, false, false); diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index dd7dbb1c5048..c91d9725aafd 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -25,7 +25,6 @@ struct kvm_event_hw_type_mapping { struct kvm_pmu_ops { unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc); - unsigned (*find_fixed_event)(int idx); bool (*pmc_is_enabled)(struct kvm_pmc *pmc); struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index fb0ce8cda8a7..12d8b301065a 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -144,6 +144,10 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; + /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */ + if (WARN_ON(pmc_is_fixed(pmc))) + return PERF_COUNT_HW_MAX; + for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++) if (amd_event_mapping[i].eventsel == event_select && amd_event_mapping[i].unit_mask == unit_mask) @@ -155,12 +159,6 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) return amd_event_mapping[i].event_type; } -/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */ -static unsigned amd_find_fixed_event(int idx) -{ - return PERF_COUNT_HW_MAX; -} - /* check if a PMC is enabled by comparing it against global_ctrl bits. Because * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE). 
*/ @@ -324,7 +322,6 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu) struct kvm_pmu_ops amd_pmu_ops = { .pmc_perf_hw_id = amd_pmc_perf_hw_id, - .find_fixed_event = amd_find_fixed_event, .pmc_is_enabled = amd_pmc_is_enabled, .pmc_idx_to_pmc = amd_pmc_idx_to_pmc, .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc, diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 7b530de7ae23..5e0ac57d6d1b 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -76,9 +76,9 @@ static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc) int i; for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) - if (intel_arch_events[i].eventsel == event_select - && intel_arch_events[i].unit_mask == unit_mask - && (pmu->available_event_types & (1 << i))) + if (intel_arch_events[i].eventsel == event_select && + intel_arch_events[i].unit_mask == unit_mask && + (pmc_is_fixed(pmc) || pmu->available_event_types & (1 << i))) break; if (i == ARRAY_SIZE(intel_arch_events)) @@ -87,18 +87,6 @@ static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc) return intel_arch_events[i].event_type; } -static unsigned intel_find_fixed_event(int idx) -{ - u32 event; - size_t size = ARRAY_SIZE(fixed_pmc_events); - - if (WARN_ON_ONCE(idx >= size)) - return PERF_COUNT_HW_MAX; - - event = fixed_pmc_events[array_index_nospec(idx, size)]; - return intel_arch_events[event].event_type; -} - /* check if a PMC is enabled by comparing it with globl_ctrl bits. */ static bool intel_pmc_is_enabled(struct kvm_pmc *pmc) { @@ -722,7 +710,6 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) struct kvm_pmu_ops intel_pmu_ops = { .pmc_perf_hw_id = intel_pmc_perf_hw_id, - .find_fixed_event = intel_find_fixed_event, .pmc_is_enabled = intel_pmc_is_enabled, .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc, From 40ccb96d5483c7ef773f50db15f82f0ab587cf8a Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 30 Nov 2021 15:42:19 +0800 Subject: [PATCH 231/268] KVM: x86/pmu: Add pmc->intr to refactor kvm_perf_overflow{_intr}() Depending on whether intr should be triggered or not, KVM registers two different event overflow callbacks in the perf_event context. The code skeleton of these two functions is very similar, so the pmc->intr can be stored into pmc from pmc_reprogram_counter() which provides smaller instructions footprint against the u-architecture branch predictor. The __kvm_perf_overflow() can be called in non-nmi contexts and a flag is needed to distinguish the caller context and thus avoid a check on kvm_is_in_guest(), otherwise we might get warnings from suspicious RCU or check_preemption_disabled(). 
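The consolidation boils down to a single helper that takes the caller context as a flag. The sketch below condenses just the PMI-injection decision; the reprogram_pmi/global_status bookkeeping done by the real helper in the hunk below is omitted.

  static void kvm_perf_overflow_sketch(struct kvm_pmc *pmc, bool in_pmi)
  {
      if (!pmc->intr)
          return;    /* counter not configured to raise a PMI */

      /*
       * From NMI context a halted vCPU cannot be woken directly, so the
       * PMI injection is deferred to irq_work; otherwise request it now.
       */
      if (in_pmi && !kvm_is_in_guest())
          irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
      else
          kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
  }
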
Suggested-by: Paolo Bonzini Signed-off-by: Like Xu Message-Id: <20211130074221.93635-5-likexu@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/pmu.c | 62 ++++++++++++++++----------------- 2 files changed, 31 insertions(+), 32 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 344ced0b6b68..ce622b89c5d8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -495,6 +495,7 @@ struct kvm_pmc { */ u64 current_config; bool is_paused; + bool intr; }; struct kvm_pmu { diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index b7a1ae28ab87..a20207ee4014 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -55,43 +55,41 @@ static void kvm_pmi_trigger_fn(struct irq_work *irq_work) kvm_pmu_deliver_pmi(vcpu); } +static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) +{ + struct kvm_pmu *pmu = pmc_to_pmu(pmc); + + /* Ignore counters that have been reprogrammed already. */ + if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) + return; + + __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); + kvm_make_request(KVM_REQ_PMU, pmc->vcpu); + + if (!pmc->intr) + return; + + /* + * Inject PMI. If vcpu was in a guest mode during NMI PMI + * can be ejected on a guest mode re-entry. Otherwise we can't + * be sure that vcpu wasn't executing hlt instruction at the + * time of vmexit and is not going to re-enter guest mode until + * woken up. So we should wake it, but this is impossible from + * NMI context. Do it from irq work instead. + */ + if (in_pmi && !kvm_is_in_guest()) + irq_work_queue(&pmc_to_pmu(pmc)->irq_work); + else + kvm_make_request(KVM_REQ_PMI, pmc->vcpu); +} + static void kvm_perf_overflow(struct perf_event *perf_event, struct perf_sample_data *data, struct pt_regs *regs) { struct kvm_pmc *pmc = perf_event->overflow_handler_context; - struct kvm_pmu *pmu = pmc_to_pmu(pmc); - if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { - __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); - kvm_make_request(KVM_REQ_PMU, pmc->vcpu); - } -} - -static void kvm_perf_overflow_intr(struct perf_event *perf_event, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct kvm_pmc *pmc = perf_event->overflow_handler_context; - struct kvm_pmu *pmu = pmc_to_pmu(pmc); - - if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) { - __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); - kvm_make_request(KVM_REQ_PMU, pmc->vcpu); - - /* - * Inject PMI. If vcpu was in a guest mode during NMI PMI - * can be ejected on a guest mode re-entry. Otherwise we can't - * be sure that vcpu wasn't executing hlt instruction at the - * time of vmexit and is not going to re-enter guest mode until - * woken up. So we should wake it, but this is impossible from - * NMI context. Do it from irq work instead. - */ - if (!kvm_is_in_guest()) - irq_work_queue(&pmc_to_pmu(pmc)->irq_work); - else - kvm_make_request(KVM_REQ_PMI, pmc->vcpu); - } + __kvm_perf_overflow(pmc, true); } static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, @@ -126,7 +124,6 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, } event = perf_event_create_kernel_counter(&attr, -1, current, - intr ? 
kvm_perf_overflow_intr : kvm_perf_overflow, pmc); if (IS_ERR(event)) { pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", @@ -138,6 +135,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, pmc_to_pmu(pmc)->event_count++; clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); pmc->is_paused = false; + pmc->intr = intr; } static void pmc_pause_counter(struct kvm_pmc *pmc) From 9cd803d496e72cd1dd3287c9a6cb4afa636ee16a Mon Sep 17 00:00:00 2001 From: Eric Hankland Date: Tue, 30 Nov 2021 15:42:20 +0800 Subject: [PATCH 232/268] KVM: x86: Update vPMCs when retiring instructions When KVM retires a guest instruction through emulation, increment any vPMCs that are configured to monitor "instructions retired," and update the sample period of those counters so that they will overflow at the right time. Signed-off-by: Eric Hankland [jmattson: - Split the code to increment "branch instructions retired" into a separate commit. - Added 'static' to kvm_pmu_incr_counter() definition. - Modified kvm_pmu_incr_counter() to check pmc->perf_event->state == PERF_EVENT_STATE_ACTIVE. ] Fixes: f5132b01386b ("KVM: Expose a version 2 architectural PMU to a guests") Signed-off-by: Jim Mattson [likexu: - Drop checks for pmc->perf_event or event state or event type - Increase a counter once its umask bits and the first 8 select bits are matched - Rewrite kvm_pmu_incr_counter() with a less invasive approach to the host perf; - Rename kvm_pmu_record_event to kvm_pmu_trigger_event; - Add counter enable and CPL check for kvm_pmu_trigger_event(); ] Cc: Peter Zijlstra Signed-off-by: Like Xu Message-Id: <20211130074221.93635-6-likexu@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/pmu.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/pmu.h | 1 + arch/x86/kvm/x86.c | 3 +++ 3 files changed, 64 insertions(+) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index a20207ee4014..8abdadb7e22a 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -482,6 +482,66 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu) kvm_pmu_reset(vcpu); } +static void kvm_pmu_incr_counter(struct kvm_pmc *pmc) +{ + struct kvm_pmu *pmu = pmc_to_pmu(pmc); + u64 prev_count; + + prev_count = pmc->counter; + pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc); + + reprogram_counter(pmu, pmc->idx); + if (pmc->counter < prev_count) + __kvm_perf_overflow(pmc, false); +} + +static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc, + unsigned int perf_hw_id) +{ + u64 old_eventsel = pmc->eventsel; + unsigned int config; + + pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK); + config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc); + pmc->eventsel = old_eventsel; + return config == perf_hw_id; +} + +static inline bool cpl_is_matched(struct kvm_pmc *pmc) +{ + bool select_os, select_user; + u64 config = pmc->current_config; + + if (pmc_is_gp(pmc)) { + select_os = config & ARCH_PERFMON_EVENTSEL_OS; + select_user = config & ARCH_PERFMON_EVENTSEL_USR; + } else { + select_os = config & 0x1; + select_user = config & 0x2; + } + + return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? 
select_os : select_user; +} + +void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id) +{ + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + struct kvm_pmc *pmc; + int i; + + for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) { + pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i); + + if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc)) + continue; + + /* Ignore checks for edge detect, pin control, invert and CMASK bits */ + if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc)) + kvm_pmu_incr_counter(pmc); + } +} +EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event); + int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp) { struct kvm_pmu_event_filter tmp, *filter; diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index c91d9725aafd..7a7b8d5b775e 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -157,6 +157,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu); void kvm_pmu_cleanup(struct kvm_vcpu *vcpu); void kvm_pmu_destroy(struct kvm_vcpu *vcpu); int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp); +void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id); bool is_vmware_backdoor_pmc(u32 pmc_idx); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9ddad9493cb8..4fbde3a9b9d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7990,6 +7990,8 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) if (unlikely(!r)) return 0; + kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); + /* * rflags is the old, "raw" value of the flags. The new value has * not been saved yet. @@ -8252,6 +8254,7 @@ writeback: vcpu->arch.emulate_regs_need_sync_to_vcpu = false; if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) { + kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); kvm_rip_write(vcpu, ctxt->eip); if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) r = kvm_vcpu_do_singlestep(vcpu); From 018d70ffcfec8a01f77b0d840527203d337dd7f9 Mon Sep 17 00:00:00 2001 From: Eric Hankland Date: Tue, 30 Nov 2021 15:42:21 +0800 Subject: [PATCH 233/268] KVM: x86: Update vPMCs when retiring branch instructions When KVM retires a guest branch instruction through emulation, increment any vPMCs that are configured to monitor "branch instructions retired," and update the sample period of those counters so that they will overflow at the right time. Signed-off-by: Eric Hankland [jmattson: - Split the code to increment "branch instructions retired" into a separate commit. - Moved/consolidated the calls to kvm_pmu_trigger_event() in the emulation of VMLAUNCH/VMRESUME to accommodate the evolution of that code. ] Fixes: f5132b01386b ("KVM: Expose a version 2 architectural PMU to a guests") Signed-off-by: Jim Mattson Message-Id: <20211130074221.93635-7-likexu@tencent.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/emulate.c | 55 +++++++++++++++++++++----------------- arch/x86/kvm/kvm_emulate.h | 1 + arch/x86/kvm/vmx/nested.c | 7 +++-- arch/x86/kvm/x86.c | 2 ++ 4 files changed, 39 insertions(+), 26 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 28b1a4e57827..166a145fc1e6 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -175,6 +175,7 @@ #define No16 ((u64)1 << 53) /* No 16 bit operand */ #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */ #define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operand */ +#define IsBranch ((u64)1 << 56) /* Instruction is considered a branch. 
*/ #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) @@ -191,8 +192,9 @@ #define FASTOP_SIZE 8 struct opcode { - u64 flags : 56; - u64 intercept : 8; + u64 flags; + u8 intercept; + u8 pad[7]; union { int (*execute)(struct x86_emulate_ctxt *ctxt); const struct opcode *group; @@ -4364,10 +4366,10 @@ static const struct opcode group4[] = { static const struct opcode group5[] = { F(DstMem | SrcNone | Lock, em_inc), F(DstMem | SrcNone | Lock, em_dec), - I(SrcMem | NearBranch, em_call_near_abs), - I(SrcMemFAddr | ImplicitOps, em_call_far), - I(SrcMem | NearBranch, em_jmp_abs), - I(SrcMemFAddr | ImplicitOps, em_jmp_far), + I(SrcMem | NearBranch | IsBranch, em_call_near_abs), + I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far), + I(SrcMem | NearBranch | IsBranch, em_jmp_abs), + I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far), I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined), }; @@ -4577,7 +4579,7 @@ static const struct opcode opcode_table[256] = { I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ /* 0x70 - 0x7F */ - X16(D(SrcImmByte | NearBranch)), + X16(D(SrcImmByte | NearBranch | IsBranch)), /* 0x80 - 0x87 */ G(ByteOp | DstMem | SrcImm, group1), G(DstMem | SrcImm, group1), @@ -4596,7 +4598,7 @@ static const struct opcode opcode_table[256] = { DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), /* 0x98 - 0x9F */ D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), - I(SrcImmFAddr | No64, em_call_far), N, + I(SrcImmFAddr | No64 | IsBranch, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), II(ImplicitOps | Stack, em_popf, popf), I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), @@ -4616,17 +4618,19 @@ static const struct opcode opcode_table[256] = { X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), - I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm), - I(ImplicitOps | NearBranch, em_ret), + I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm), + I(ImplicitOps | NearBranch | IsBranch, em_ret), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ - I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), - I(ImplicitOps | SrcImmU16, em_ret_far_imm), - I(ImplicitOps, em_ret_far), - D(ImplicitOps), DI(SrcImmByte, intn), - D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), + I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter), + I(Stack | IsBranch, em_leave), + I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm), + I(ImplicitOps | IsBranch, em_ret_far), + D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn), + D(ImplicitOps | No64 | IsBranch), + II(ImplicitOps | IsBranch, em_iret, iret), /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), @@ -4637,14 +4641,15 @@ static const struct opcode opcode_table[256] = { /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ - X3(I(SrcImmByte | NearBranch, em_loop)), - I(SrcImmByte | NearBranch, em_jcxz), + X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)), + I(SrcImmByte | NearBranch | IsBranch, em_jcxz), I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), /* 0xE8 - 0xEF */ - I(SrcImm | NearBranch, em_call), 
D(SrcImm | ImplicitOps | NearBranch), - I(SrcImmFAddr | No64, em_jmp_far), - D(SrcImmByte | ImplicitOps | NearBranch), + I(SrcImm | NearBranch | IsBranch, em_call), + D(SrcImm | ImplicitOps | NearBranch | IsBranch), + I(SrcImmFAddr | No64 | IsBranch, em_jmp_far), + D(SrcImmByte | ImplicitOps | NearBranch | IsBranch), I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), /* 0xF0 - 0xF7 */ @@ -4660,7 +4665,7 @@ static const struct opcode opcode_table[256] = { static const struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, - N, I(ImplicitOps | EmulateOnUD, em_syscall), + N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, @@ -4691,8 +4696,8 @@ static const struct opcode twobyte_table[256] = { IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), II(ImplicitOps | Priv, em_rdmsr, rdmsr), IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), - I(ImplicitOps | EmulateOnUD, em_sysenter), - I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), + I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter), + I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ @@ -4710,7 +4715,7 @@ static const struct opcode twobyte_table[256] = { N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x80 - 0x8F */ - X16(D(SrcImm | NearBranch)), + X16(D(SrcImm | NearBranch | IsBranch)), /* 0x90 - 0x9F */ X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ @@ -5224,6 +5229,8 @@ done_prefixes: ctxt->d |= opcode.flags; } + ctxt->is_branch = opcode.flags & IsBranch; + /* Unrecognised? 
*/ if (ctxt->d == 0) return EMULATION_FAILED; diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 68b420289d7e..39eded2426ff 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -369,6 +369,7 @@ struct x86_emulate_ctxt { struct fetch_cache fetch; struct read_cache io_read; struct read_cache mem_read; + bool is_branch; }; /* Repeat String Operation Prefix */ diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d07a7fa75783..f235f77cbc03 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3526,10 +3526,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (evmptrld_status == EVMPTRLD_ERROR) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; - } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) { - return nested_vmx_failInvalid(vcpu); } + kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); + + if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) + return nested_vmx_failInvalid(vcpu); + if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) && vmx->nested.current_vmptr == INVALID_GPA)) return nested_vmx_failInvalid(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4fbde3a9b9d0..42bde45a1bc2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8255,6 +8255,8 @@ writeback: if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) { kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); + if (ctxt->is_branch) + kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); kvm_rip_write(vcpu, ctxt->eip); if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) r = kvm_vcpu_do_singlestep(vcpu); From f3f26dae05e39f0f286f588669b54d49b61dcfb8 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 9 Dec 2021 15:09:38 +0000 Subject: [PATCH 234/268] x86/kvm: Silence per-cpu pr_info noise about KVM clocks and steal time I made the actual CPU bringup go nice and fast... and then Linux spends half a minute printing stupid nonsense about clocks and steal time for each of 256 vCPUs. Don't do that. Nobody cares. 
Signed-off-by: David Woodhouse Message-Id: <20211209150938.3518-12-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kernel/kvm.c | 6 +++--- arch/x86/kernel/kvmclock.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 59abbdad7729..a438217cbfac 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -313,7 +313,7 @@ static void kvm_register_steal_time(void) return; wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED)); - pr_info("stealtime: cpu %d, msr %llx\n", cpu, + pr_debug("stealtime: cpu %d, msr %llx\n", cpu, (unsigned long long) slow_virt_to_phys(st)); } @@ -350,7 +350,7 @@ static void kvm_guest_cpu_init(void) wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); __this_cpu_write(apf_reason.enabled, 1); - pr_info("setup async PF for cpu %d\n", smp_processor_id()); + pr_debug("setup async PF for cpu %d\n", smp_processor_id()); } if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) { @@ -376,7 +376,7 @@ static void kvm_pv_disable_apf(void) wrmsrl(MSR_KVM_ASYNC_PF_EN, 0); __this_cpu_write(apf_reason.enabled, 0); - pr_info("disable async PF for cpu %d\n", smp_processor_id()); + pr_debug("disable async PF for cpu %d\n", smp_processor_id()); } static void kvm_disable_steal_time(void) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 462dd8e9b03d..a35cbf9107af 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -174,7 +174,7 @@ static void kvm_register_clock(char *txt) pa = slow_virt_to_phys(&src->pvti) | 0x01ULL; wrmsrl(msr_kvm_system_time, pa); - pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt); + pr_debug("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt); } static void kvm_save_sched_clock_state(void) From 2efd61a608b0039911924d2e5d7028eb37496e85 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 10 Dec 2021 16:36:20 +0000 Subject: [PATCH 235/268] KVM: Warn if mark_page_dirty() is called without an active vCPU The various kvm_write_guest() and mark_page_dirty() functions must only ever be called in the context of an active vCPU, because if dirty ring tracking is enabled it may simply oops when kvm_get_running_vcpu() returns NULL for the vcpu and then kvm_dirty_ring_get() dereferences it. This oops was reported by "butt3rflyh4ck" in https://lore.kernel.org/kvm/CAFcO6XOmoS7EacN_n6v4Txk7xL7iqRa2gABg3F7E3Naf5uG94g@mail.gmail.com/ That actual bug will be fixed under separate cover but this warning should help to prevent new ones from being added. 
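The guard itself is small; a sketch of the check added at the top of mark_page_dirty_in_slot() (see the hunk below for the full context):

  struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

  /*
   * With dirty ring tracking the dirty gfn is pushed to the *vCPU's*
   * ring, so there must be a running vCPU and it must belong to @kvm.
   * Warn once and bail out rather than dereference a NULL vcpu later.
   */
  if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
      return;
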
Signed-off-by: David Woodhouse Message-Id: <20211210163625.2886-2-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- include/linux/kvm_dirty_ring.h | 6 ------ virt/kvm/dirty_ring.c | 9 --------- virt/kvm/kvm_main.c | 7 ++++++- 3 files changed, 6 insertions(+), 16 deletions(-) diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h index 4da8d4a4140b..906f899813dc 100644 --- a/include/linux/kvm_dirty_ring.h +++ b/include/linux/kvm_dirty_ring.h @@ -43,11 +43,6 @@ static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, return 0; } -static inline struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm) -{ - return NULL; -} - static inline int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring) { @@ -78,7 +73,6 @@ static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring) u32 kvm_dirty_ring_get_rsvd_entries(void); int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size); -struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm); /* * called with kvm->slots_lock held, returns the number of diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index 88f4683198ea..8e9874760fb3 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -36,15 +36,6 @@ static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring) return kvm_dirty_ring_used(ring) >= ring->size; } -struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); - - WARN_ON_ONCE(vcpu->kvm != kvm); - - return &vcpu->dirty_ring; -} - static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask) { struct kvm_memory_slot *memslot; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b0f7e6eb00ff..af5b4427b139 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3155,12 +3155,17 @@ void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn) { + struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); + + if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm)) + return; + if (memslot && kvm_slot_dirty_track_enabled(memslot)) { unsigned long rel_gfn = gfn - memslot->base_gfn; u32 slot = (memslot->as_id << 16) | memslot->id; if (kvm->dirty_ring_size) - kvm_dirty_ring_push(kvm_dirty_ring_get(kvm), + kvm_dirty_ring_push(&vcpu->dirty_ring, slot, rel_gfn); else set_bit_le(rel_gfn, memslot->dirty_bitmap); From 982ed0de4753ed6e71dbd40f82a5a066baf133ed Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 10 Dec 2021 16:36:21 +0000 Subject: [PATCH 236/268] KVM: Reinstate gfn_to_pfn_cache with invalidation support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This can be used in two modes. There is an atomic mode where the cached mapping is accessed while holding the rwlock, and a mode where the physical address is used by a vCPU in guest mode. For the latter case, an invalidation will wake the vCPU with the new KVM_REQ_GPC_INVALIDATE, and the architecture will need to refresh any caches it still needs to access before entering guest mode again. Only one vCPU can be targeted by the wake requests; it's simple enough to make it wake all vCPUs or even a mask but I don't see a use case for that additional complexity right now. Invalidation happens from the invalidate_range_start MMU notifier, which needs to be able to sleep in order to wake the vCPU and wait for it. 
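Before the implementation details, a hedged sketch of how a consumer is expected to drive the cache in its atomic mode, using the kvm_gfn_to_pfn_cache_* API declared in the hunks below. The caller, gpa, src and len are hypothetical, and error handling is reduced to the bare minimum; this is not code from the patch itself.

  struct gfn_to_pfn_cache gpc;    /* typically embedded in a per-VM struct */
  int ret;

  /* One-time setup: kernel mapping, no IN_GUEST_MODE use, mark dirty. */
  ret = kvm_gfn_to_pfn_cache_init(kvm, &gpc, NULL, false, true,
                                  gpa, PAGE_SIZE, true);
  if (ret)
      return ret;

  read_lock(&gpc.lock);
  while (!kvm_gfn_to_pfn_cache_check(kvm, &gpc, gpa, PAGE_SIZE)) {
      read_unlock(&gpc.lock);

      /* An invalidation raced with us; remap and retry. */
      ret = kvm_gfn_to_pfn_cache_refresh(kvm, &gpc, gpa, PAGE_SIZE, true);
      if (ret)
          return ret;

      read_lock(&gpc.lock);
  }

  /* gpc.khva is a valid kernel mapping for as long as the lock is held. */
  memcpy(gpc.khva, src, len);
  read_unlock(&gpc.lock);

In the second mode (guest_uses_pa), the vCPU named at init time is instead woken with KVM_REQ_GPC_INVALIDATE when an invalidation hits the cached range, and it must refresh the cache before re-entering the guest.
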
This means that revalidation potentially needs to "wait" for the MMU operation to complete and the invalidate_range_end notifier to be invoked. Like the vCPU when it takes a page fault in that period, we just spin — fixing that in a future patch by implementing an actual *wait* may be another part of shaving this particularly hirsute yak. As noted in the comments in the function itself, the only case where the invalidate_range_start notifier is expected to be called *without* being able to sleep is when the OOM reaper is killing the process. In that case, we expect the vCPU threads already to have exited, and thus there will be nothing to wake, and no reason to wait. So we clear the KVM_REQUEST_WAIT bit and send the request anyway, then complain loudly if there actually *was* anything to wake up. Signed-off-by: David Woodhouse Message-Id: <20211210163625.2886-3-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Kconfig | 1 + include/linux/kvm_host.h | 103 ++++++++++++ include/linux/kvm_types.h | 18 ++ virt/kvm/Kconfig | 3 + virt/kvm/Makefile.kvm | 1 + virt/kvm/dirty_ring.c | 2 +- virt/kvm/kvm_main.c | 12 +- virt/kvm/kvm_mm.h | 44 +++++ virt/kvm/mmu_lock.h | 23 --- virt/kvm/pfncache.c | 337 ++++++++++++++++++++++++++++++++++++++ 10 files changed, 517 insertions(+), 27 deletions(-) create mode 100644 virt/kvm/kvm_mm.h delete mode 100644 virt/kvm/mmu_lock.h create mode 100644 virt/kvm/pfncache.c diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 03b2ce34e7f4..ebc8ce9ec917 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -26,6 +26,7 @@ config KVM select PREEMPT_NOTIFIERS select MMU_NOTIFIER select HAVE_KVM_IRQCHIP + select HAVE_KVM_PFNCACHE select HAVE_KVM_IRQFD select HAVE_KVM_DIRTY_RING select IRQ_BYPASS_MANAGER diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f9bbcf519280..9bbb1f1d9e48 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -155,6 +155,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 #define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ @@ -593,6 +594,10 @@ struct kvm { unsigned long mn_active_invalidate_count; struct rcuwait mn_memslots_update_rcuwait; + /* For management / invalidation of gfn_to_pfn_caches */ + spinlock_t gpc_lock; + struct list_head gpc_list; + /* * created_vcpus is protected by kvm->lock, and is incremented * at the beginning of KVM_CREATE_VCPU. online_vcpus is only @@ -1099,6 +1104,104 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len); void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); +/** + * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a + * given guest physical address. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * @vcpu: vCPU to be used for marking pages dirty and to be woken on + * invalidation. + * @guest_uses_pa: indicates that the resulting host physical PFN is used while + * @vcpu is IN_GUEST_MODE so invalidations should wake it. + * @kernel_map: requests a kernel virtual mapping (kmap / memremap). + * @gpa: guest physical address to map. + * @len: sanity check; the range being access must fit a single page. + * @dirty: mark the cache dirty immediately. + * + * @return: 0 for success. 
+ * -EINVAL for a mapping which would cross a page boundary. + * -EFAULT for an untranslatable guest physical address. + * + * This primes a gfn_to_pfn_cache and links it into the @kvm's list for + * invalidations to be processed. Invalidation callbacks to @vcpu using + * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM + * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check() + * to ensure that the cache is valid before accessing the target page. + */ +int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + struct kvm_vcpu *vcpu, bool guest_uses_pa, + bool kernel_map, gpa_t gpa, unsigned long len, + bool dirty); + +/** + * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * @gpa: current guest physical address to map. + * @len: sanity check; the range being access must fit a single page. + * @dirty: mark the cache dirty immediately. + * + * @return: %true if the cache is still valid and the address matches. + * %false if the cache is not valid. + * + * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock + * while calling this function, and then continue to hold the lock until the + * access is complete. + * + * Callers in IN_GUEST_MODE may do so without locking, although they should + * still hold a read lock on kvm->scru for the memslot checks. + */ +bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + gpa_t gpa, unsigned long len); + +/** + * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * @gpa: updated guest physical address to map. + * @len: sanity check; the range being access must fit a single page. + * @dirty: mark the cache dirty immediately. + * + * @return: 0 for success. + * -EINVAL for a mapping which would cross a page boundary. + * -EFAULT for an untranslatable guest physical address. + * + * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful + * returm from this function does not mean the page can be immediately + * accessed because it may have raced with an invalidation. Callers must + * still lock and check the cache status, as this function does not return + * with the lock still held to permit access. + */ +int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + gpa_t gpa, unsigned long len, bool dirty); + +/** + * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * + * This unmaps the referenced page and marks it dirty, if appropriate. The + * cache is left in the invalid state but at least the mapping from GPA to + * userspace HVA will remain cached and can be reused on a subsequent + * refresh. + */ +void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); + +/** + * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache. + * + * @kvm: pointer to kvm instance. + * @gpc: struct gfn_to_pfn_cache object. + * + * This removes a cache from the @kvm's list to be processed on MMU notifier + * invocation. 
+ */ +void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); + void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 888ef12862c9..dceac12c1ce5 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -19,6 +19,7 @@ struct kvm_memslots; enum kvm_mr_change; #include +#include #include @@ -53,6 +54,23 @@ struct gfn_to_hva_cache { struct kvm_memory_slot *memslot; }; +struct gfn_to_pfn_cache { + u64 generation; + gpa_t gpa; + unsigned long uhva; + struct kvm_memory_slot *memslot; + struct kvm_vcpu *vcpu; + struct list_head list; + rwlock_t lock; + void *khva; + kvm_pfn_t pfn; + bool active; + bool valid; + bool dirty; + bool kernel_map; + bool guest_uses_pa; +}; + #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE /* * Memory caches are used to preallocate memory ahead of various MMU flows, diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 97cf5413ac25..f4834c20e4a6 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -4,6 +4,9 @@ config HAVE_KVM bool +config HAVE_KVM_PFNCACHE + bool + config HAVE_KVM_IRQCHIP bool diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm index ffdcad3cc97a..2c27d5d0c367 100644 --- a/virt/kvm/Makefile.kvm +++ b/virt/kvm/Makefile.kvm @@ -11,3 +11,4 @@ kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o +kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index 8e9874760fb3..222ecc81d7df 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -9,7 +9,7 @@ #include #include #include -#include "mmu_lock.h" +#include "kvm_mm.h" int __weak kvm_cpu_dirty_log_size(void) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index af5b4427b139..6e8e9d36f382 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -59,7 +59,7 @@ #include "coalesced_mmio.h" #include "async_pf.h" -#include "mmu_lock.h" +#include "kvm_mm.h" #include "vfio.h" #define CREATE_TRACE_POINTS @@ -711,6 +711,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm->mn_active_invalidate_count++; spin_unlock(&kvm->mn_invalidate_lock); + gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end, + hva_range.may_block); + __kvm_handle_hva_range(kvm, &hva_range); return 0; @@ -1071,6 +1074,9 @@ static struct kvm *kvm_create_vm(unsigned long type) rcuwait_init(&kvm->mn_memslots_update_rcuwait); xa_init(&kvm->vcpu_array); + INIT_LIST_HEAD(&kvm->gpc_list); + spin_lock_init(&kvm->gpc_lock); + INIT_LIST_HEAD(&kvm->devices); BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); @@ -2539,8 +2545,8 @@ out: * 2): @write_fault = false && @writable, @writable will tell the caller * whether the mapping is writable. 
*/ -static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, - bool write_fault, bool *writable) +kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, + bool write_fault, bool *writable) { struct vm_area_struct *vma; kvm_pfn_t pfn = 0; diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h new file mode 100644 index 000000000000..34ca40823260 --- /dev/null +++ b/virt/kvm/kvm_mm.h @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#ifndef __KVM_MM_H__ +#define __KVM_MM_H__ 1 + +/* + * Architectures can choose whether to use an rwlock or spinlock + * for the mmu_lock. These macros, for use in common code + * only, avoids using #ifdefs in places that must deal with + * multiple architectures. + */ + +#ifdef KVM_HAVE_MMU_RWLOCK +#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) +#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) +#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) +#define KVM_MMU_READ_LOCK(kvm) read_lock(&(kvm)->mmu_lock) +#define KVM_MMU_READ_UNLOCK(kvm) read_unlock(&(kvm)->mmu_lock) +#else +#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) +#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) +#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) +#define KVM_MMU_READ_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) +#define KVM_MMU_READ_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) +#endif /* KVM_HAVE_MMU_RWLOCK */ + +kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async, + bool write_fault, bool *writable); + +#ifdef CONFIG_HAVE_KVM_PFNCACHE +void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, + unsigned long start, + unsigned long end, + bool may_block); +#else +static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, + unsigned long start, + unsigned long end, + bool may_block) +{ +} +#endif /* HAVE_KVM_PFNCACHE */ + +#endif /* __KVM_MM_H__ */ diff --git a/virt/kvm/mmu_lock.h b/virt/kvm/mmu_lock.h deleted file mode 100644 index 9e1308f9734c..000000000000 --- a/virt/kvm/mmu_lock.h +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - -#ifndef KVM_MMU_LOCK_H -#define KVM_MMU_LOCK_H 1 - -/* - * Architectures can choose whether to use an rwlock or spinlock - * for the mmu_lock. These macros, for use in common code - * only, avoids using #ifdefs in places that must deal with - * multiple architectures. - */ - -#ifdef KVM_HAVE_MMU_RWLOCK -#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) -#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) -#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) -#else -#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) -#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) -#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) -#endif /* KVM_HAVE_MMU_RWLOCK */ - -#endif diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c new file mode 100644 index 000000000000..ce878f4be4da --- /dev/null +++ b/virt/kvm/pfncache.c @@ -0,0 +1,337 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Kernel-based Virtual Machine driver for Linux + * + * This module enables kernel and guest-mode vCPU access to guest physical + * memory with suitable invalidation mechanisms. + * + * Copyright © 2021 Amazon.com, Inc. or its affiliates. + * + * Authors: + * David Woodhouse + */ + +#include +#include +#include +#include +#include + +#include "kvm_mm.h" + +/* + * MMU notifier 'invalidate_range_start' hook. 
+ */ +void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, + unsigned long end, bool may_block) +{ + DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); + struct gfn_to_pfn_cache *gpc; + bool wake_vcpus = false; + + spin_lock(&kvm->gpc_lock); + list_for_each_entry(gpc, &kvm->gpc_list, list) { + write_lock_irq(&gpc->lock); + + /* Only a single page so no need to care about length */ + if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && + gpc->uhva >= start && gpc->uhva < end) { + gpc->valid = false; + + /* + * If a guest vCPU could be using the physical address, + * it needs to be woken. + */ + if (gpc->guest_uses_pa) { + if (!wake_vcpus) { + wake_vcpus = true; + bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); + } + __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap); + } + + /* + * We cannot call mark_page_dirty() from here because + * this physical CPU might not have an active vCPU + * with which to do the KVM dirty tracking. + * + * Neither is there any point in telling the kernel MM + * that the underlying page is dirty. A vCPU in guest + * mode might still be writing to it up to the point + * where we wake them a few lines further down anyway. + * + * So all the dirty marking happens on the unmap. + */ + } + write_unlock_irq(&gpc->lock); + } + spin_unlock(&kvm->gpc_lock); + + if (wake_vcpus) { + unsigned int req = KVM_REQ_GPC_INVALIDATE; + bool called; + + /* + * If the OOM reaper is active, then all vCPUs should have + * been stopped already, so perform the request without + * KVM_REQUEST_WAIT and be sad if any needed to be woken. + */ + if (!may_block) + req &= ~KVM_REQUEST_WAIT; + + called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap); + + WARN_ON_ONCE(called && !may_block); + } +} + +bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + gpa_t gpa, unsigned long len) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + + if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE) + return false; + + if (gpc->gpa != gpa || gpc->generation != slots->generation || + kvm_is_error_hva(gpc->uhva)) + return false; + + if (!gpc->valid) + return false; + + return true; +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check); + +static void __release_gpc(struct kvm *kvm, kvm_pfn_t pfn, void *khva, + gpa_t gpa, bool dirty) +{ + /* Unmap the old page if it was mapped before, and release it */ + if (!is_error_noslot_pfn(pfn)) { + if (khva) { + if (pfn_valid(pfn)) + kunmap(pfn_to_page(pfn)); +#ifdef CONFIG_HAS_IOMEM + else + memunmap(khva); +#endif + } + + kvm_release_pfn(pfn, dirty); + if (dirty) + mark_page_dirty(kvm, gpa); + } +} + +static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, unsigned long uhva) +{ + unsigned long mmu_seq; + kvm_pfn_t new_pfn; + int retry; + + do { + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + /* We always request a writeable mapping */ + new_pfn = hva_to_pfn(uhva, false, NULL, true, NULL); + if (is_error_noslot_pfn(new_pfn)) + break; + + KVM_MMU_READ_LOCK(kvm); + retry = mmu_notifier_retry_hva(kvm, mmu_seq, uhva); + KVM_MMU_READ_UNLOCK(kvm); + if (!retry) + break; + + cond_resched(); + } while (1); + + return new_pfn; +} + +int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + gpa_t gpa, unsigned long len, bool dirty) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + unsigned long page_offset = gpa & ~PAGE_MASK; + kvm_pfn_t old_pfn, new_pfn; + unsigned long old_uhva; + gpa_t old_gpa; + void *old_khva; + bool old_valid, old_dirty; + int ret = 0; + + /* + * If must fit within a single page. 
The 'len' argument is + * only to enforce that. + */ + if (page_offset + len > PAGE_SIZE) + return -EINVAL; + + write_lock_irq(&gpc->lock); + + old_gpa = gpc->gpa; + old_pfn = gpc->pfn; + old_khva = gpc->khva - offset_in_page(gpc->khva); + old_uhva = gpc->uhva; + old_valid = gpc->valid; + old_dirty = gpc->dirty; + + /* If the userspace HVA is invalid, refresh that first */ + if (gpc->gpa != gpa || gpc->generation != slots->generation || + kvm_is_error_hva(gpc->uhva)) { + gfn_t gfn = gpa_to_gfn(gpa); + + gpc->dirty = false; + gpc->gpa = gpa; + gpc->generation = slots->generation; + gpc->memslot = __gfn_to_memslot(slots, gfn); + gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); + + if (kvm_is_error_hva(gpc->uhva)) { + ret = -EFAULT; + goto out; + } + + gpc->uhva += page_offset; + } + + /* + * If the userspace HVA changed or the PFN was already invalid, + * drop the lock and do the HVA to PFN lookup again. + */ + if (!old_valid || old_uhva != gpc->uhva) { + unsigned long uhva = gpc->uhva; + void *new_khva = NULL; + + /* Placeholders for "hva is valid but not yet mapped" */ + gpc->pfn = KVM_PFN_ERR_FAULT; + gpc->khva = NULL; + gpc->valid = true; + + write_unlock_irq(&gpc->lock); + + new_pfn = hva_to_pfn_retry(kvm, uhva); + if (is_error_noslot_pfn(new_pfn)) { + ret = -EFAULT; + goto map_done; + } + + if (gpc->kernel_map) { + if (new_pfn == old_pfn) { + new_khva = old_khva; + old_pfn = KVM_PFN_ERR_FAULT; + old_khva = NULL; + } else if (pfn_valid(new_pfn)) { + new_khva = kmap(pfn_to_page(new_pfn)); +#ifdef CONFIG_HAS_IOMEM + } else { + new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB); +#endif + } + if (new_khva) + new_khva += page_offset; + else + ret = -EFAULT; + } + + map_done: + write_lock_irq(&gpc->lock); + if (ret) { + gpc->valid = false; + gpc->pfn = KVM_PFN_ERR_FAULT; + gpc->khva = NULL; + } else { + /* At this point, gpc->valid may already have been cleared */ + gpc->pfn = new_pfn; + gpc->khva = new_khva; + } + } else { + /* If the HVA→PFN mapping was already valid, don't unmap it. */ + old_pfn = KVM_PFN_ERR_FAULT; + old_khva = NULL; + } + + out: + if (ret) + gpc->dirty = false; + else + gpc->dirty = dirty; + + write_unlock_irq(&gpc->lock); + + __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty); + + return ret; +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh); + +void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) +{ + void *old_khva; + kvm_pfn_t old_pfn; + bool old_dirty; + gpa_t old_gpa; + + write_lock_irq(&gpc->lock); + + gpc->valid = false; + + old_khva = gpc->khva - offset_in_page(gpc->khva); + old_dirty = gpc->dirty; + old_gpa = gpc->gpa; + old_pfn = gpc->pfn; + + /* + * We can leave the GPA → uHVA map cache intact but the PFN + * lookup will need to be redone even for the same page. 
+ */ + gpc->khva = NULL; + gpc->pfn = KVM_PFN_ERR_FAULT; + + write_unlock_irq(&gpc->lock); + + __release_gpc(kvm, old_pfn, old_khva, old_gpa, old_dirty); +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap); + + +int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, + struct kvm_vcpu *vcpu, bool guest_uses_pa, + bool kernel_map, gpa_t gpa, unsigned long len, + bool dirty) +{ + if (!gpc->active) { + rwlock_init(&gpc->lock); + + gpc->khva = NULL; + gpc->pfn = KVM_PFN_ERR_FAULT; + gpc->uhva = KVM_HVA_ERR_BAD; + gpc->vcpu = vcpu; + gpc->kernel_map = kernel_map; + gpc->guest_uses_pa = guest_uses_pa; + gpc->valid = false; + gpc->active = true; + + spin_lock(&kvm->gpc_lock); + list_add(&gpc->list, &kvm->gpc_list); + spin_unlock(&kvm->gpc_lock); + } + return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, dirty); +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_init); + +void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc) +{ + if (gpc->active) { + spin_lock(&kvm->gpc_lock); + list_del(&gpc->list); + spin_unlock(&kvm->gpc_lock); + + kvm_gfn_to_pfn_cache_unmap(kvm, gpc); + gpc->active = false; + } +} +EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_destroy); From 1cfc9c4b9d4606a1e90e7dbc50058b9f0c1d43a6 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 10 Dec 2021 16:36:22 +0000 Subject: [PATCH 237/268] KVM: x86/xen: Maintain valid mapping of Xen shared_info page Use the newly reinstated gfn_to_pfn_cache to maintain a kernel mapping of the Xen shared_info page so that it can be accessed in atomic context. Note that we do not participate in dirty tracking for the shared info page and we do not explicitly mark it dirty every single tim we deliver an event channel interrupts. We wouldn't want to do that even if we *did* have a valid vCPU context with which to do so. Signed-off-by: David Woodhouse Message-Id: <20211210163625.2886-4-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 12 ++++++++++++ arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/xen.c | 25 ++++++++++++++----------- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index b86c7edae888..c168be764707 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -371,6 +371,9 @@ The bits in the dirty bitmap are cleared before the ioctl returns, unless KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information, see the description of the capability. +Note that the Xen shared info page, if configured, shall always be assumed +to be dirty. KVM will not explicitly mark it such. + 4.9 KVM_SET_MEMORY_ALIAS ------------------------ @@ -5134,6 +5137,15 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO not aware of the Xen CPU id which is used as the index into the vcpu_info[] array, so cannot know the correct default location. + Note that the shared info page may be constantly written to by KVM; + it contains the event channel bitmap used to deliver interrupts to + a Xen guest, amongst other things. It is exempt from dirty tracking + mechanisms — KVM will not explicitly mark the page as dirty each + time an event channel interrupt is delivered to the guest! Thus, + userspace should always assume that the designated GFN is dirty if + any vCPU has been running or any event channel interrupts can be + routed to the guest. + KVM_XEN_ATTR_TYPE_UPCALL_VECTOR Sets the exception vector used to deliver Xen event channel upcalls. 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ce622b89c5d8..6e61e11e750f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1020,7 +1020,7 @@ struct msr_bitmap_range { struct kvm_xen { bool long_mode; u8 upcall_vector; - gfn_t shinfo_gfn; + struct gfn_to_pfn_cache shinfo_cache; }; enum kvm_irqchip_mode { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index dff2bdf9507a..da4bf2c6407f 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -23,16 +23,21 @@ DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) { + struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; gpa_t gpa = gfn_to_gpa(gfn); int wc_ofs, sec_hi_ofs; int ret = 0; int idx = srcu_read_lock(&kvm->srcu); - if (kvm_is_error_hva(gfn_to_hva(kvm, gfn))) { - ret = -EFAULT; + if (gfn == GPA_INVALID) { + kvm_gfn_to_pfn_cache_destroy(kvm, gpc); goto out; } - kvm->arch.xen.shinfo_gfn = gfn; + + ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true, gpa, + PAGE_SIZE, false); + if (ret) + goto out; /* Paranoia checks on the 32-bit struct layout */ BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900); @@ -260,15 +265,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) break; case KVM_XEN_ATTR_TYPE_SHARED_INFO: - if (data->u.shared_info.gfn == GPA_INVALID) { - kvm->arch.xen.shinfo_gfn = GPA_INVALID; - r = 0; - break; - } r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); break; - case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: if (data->u.vector && data->u.vector < 0x10) r = -EINVAL; @@ -299,7 +298,10 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) break; case KVM_XEN_ATTR_TYPE_SHARED_INFO: - data->u.shared_info.gfn = kvm->arch.xen.shinfo_gfn; + if (kvm->arch.xen.shinfo_cache.active) + data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); + else + data->u.shared_info.gfn = GPA_INVALID; r = 0; break; @@ -661,11 +663,12 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc) void kvm_xen_init_vm(struct kvm *kvm) { - kvm->arch.xen.shinfo_gfn = GPA_INVALID; } void kvm_xen_destroy_vm(struct kvm *kvm) { + kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache); + if (kvm->arch.xen_hvm_config.msr) static_branch_slow_dec_deferred(&kvm_xen_enabled); } From 14243b387137a4afbe1df5d9dc15182d6657bb79 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 10 Dec 2021 16:36:23 +0000 Subject: [PATCH 238/268] KVM: x86/xen: Add KVM_IRQ_ROUTING_XEN_EVTCHN and event channel delivery This adds basic support for delivering 2 level event channels to a guest. Initially, it only supports delivery via the IRQ routing table, triggered by an eventfd. In order to do so, it has a kvm_xen_set_evtchn_fast() function which will use the pre-mapped shared_info page if it already exists and is still valid, while the slow path through the irqfd_inject workqueue will remap the shared_info page if necessary. It sets the bits in the shared_info page but not the vcpu_info; that is deferred to __kvm_xen_has_interrupt() which raises the vector to the appropriate vCPU. Add a 'verbose' mode to xen_shinfo_test while adding test cases for this. 
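As a rough illustration of the userspace side (mirroring what the selftest hunks below do, with placeholder GSI/port numbers and assuming uapi headers that already contain the new definitions), a routing entry of the new type can be installed and then bound to an eventfd obtained from eventfd(0, 0):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Placeholder GSI/port/vCPU numbers, analogous to the selftest below. */
#define XEN_EVTCHN_GSI	32
#define XEN_EVTCHN_PORT	15
#define XEN_VCPU_ID	5

static int bind_xen_evtchn(int vm_fd, int evt_fd)
{
	struct {
		struct kvm_irq_routing info;
		struct kvm_irq_routing_entry entries[1];
	} r = {};
	struct kvm_irqfd ifd = {};

	/* One GSI routed to a 2-level Xen event channel on the target vCPU. */
	r.info.nr = 1;
	r.entries[0].gsi = XEN_EVTCHN_GSI;
	r.entries[0].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
	r.entries[0].u.xen_evtchn.port = XEN_EVTCHN_PORT;
	r.entries[0].u.xen_evtchn.vcpu = XEN_VCPU_ID;
	r.entries[0].u.xen_evtchn.priority =
		KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;

	if (ioctl(vm_fd, KVM_SET_GSI_ROUTING, &r) < 0)
		return -1;

	/* Any write to evt_fd now raises event channel XEN_EVTCHN_PORT. */
	ifd.fd = evt_fd;
	ifd.gsi = XEN_EVTCHN_GSI;
	return ioctl(vm_fd, KVM_IRQFD, &ifd);
}

A write to the bound eventfd is then delivered via kvm_xen_set_evtchn_fast() when the cached shared_info mapping is still valid, and falls back to the workqueue slow path described above otherwise.
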
Signed-off-by: David Woodhouse Message-Id: <20211210163625.2886-5-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 21 ++ arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/irq_comm.c | 12 + arch/x86/kvm/x86.c | 3 +- arch/x86/kvm/xen.c | 262 +++++++++++++++++- arch/x86/kvm/xen.h | 9 + include/linux/kvm_host.h | 7 + include/uapi/linux/kvm.h | 11 + .../selftests/kvm/x86_64/xen_shinfo_test.c | 184 +++++++++++- 9 files changed, 503 insertions(+), 7 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index c168be764707..6b683dfea8f2 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -1799,6 +1799,7 @@ No flags are specified so far, the corresponding field must be set to zero. struct kvm_irq_routing_msi msi; struct kvm_irq_routing_s390_adapter adapter; struct kvm_irq_routing_hv_sint hv_sint; + struct kvm_irq_routing_xen_evtchn xen_evtchn; __u32 pad[8]; } u; }; @@ -1808,6 +1809,7 @@ No flags are specified so far, the corresponding field must be set to zero. #define KVM_IRQ_ROUTING_MSI 2 #define KVM_IRQ_ROUTING_S390_ADAPTER 3 #define KVM_IRQ_ROUTING_HV_SINT 4 + #define KVM_IRQ_ROUTING_XEN_EVTCHN 5 flags: @@ -1859,6 +1861,20 @@ address_hi must be zero. __u32 sint; }; + struct kvm_irq_routing_xen_evtchn { + __u32 port; + __u32 vcpu; + __u32 priority; + }; + + +When KVM_CAP_XEN_HVM includes the KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL bit +in its indication of supported features, routing to Xen event channels +is supported. Although the priority field is present, only the value +KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL is supported, which means delivery by +2 level event channels. FIFO event channel support may be added in +the future. + 4.55 KVM_SET_TSC_KHZ -------------------- @@ -7413,6 +7429,7 @@ PVHVM guests. Valid flags are:: #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 2) + #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 3) The KVM_XEN_HVM_CONFIG_HYPERCALL_MSR flag indicates that the KVM_XEN_HVM_CONFIG ioctl is available, for the guest to set its hypercall page. @@ -7432,6 +7449,10 @@ The KVM_XEN_HVM_CONFIG_RUNSTATE flag indicates that the runstate-related features KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR/_CURRENT/_DATA/_ADJUST are supported by the KVM_XEN_VCPU_SET_ATTR/KVM_XEN_VCPU_GET_ATTR ioctls. +The KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL flag indicates that IRQ routing entries +of the type KVM_IRQ_ROUTING_XEN_EVTCHN are supported, with the priority +field set to indicate 2 level event channel delivery. 
+ 8.31 KVM_CAP_PPC_MULTITCE ------------------------- diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6e61e11e750f..623fb7c4992c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -610,6 +610,7 @@ struct kvm_vcpu_xen { u64 last_steal; u64 runstate_entry_time; u64 runstate_times[4]; + unsigned long evtchn_pending_sel; }; struct kvm_vcpu_arch { diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 39ad02d6dc63..6e0dab04320e 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -24,6 +24,7 @@ #include "hyperv.h" #include "x86.h" +#include "xen.h" static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, @@ -175,6 +176,13 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, return r; break; +#ifdef CONFIG_KVM_XEN + case KVM_IRQ_ROUTING_XEN_EVTCHN: + if (!level) + return -1; + + return kvm_xen_set_evtchn_fast(e, kvm); +#endif default: break; } @@ -310,6 +318,10 @@ int kvm_set_routing_entry(struct kvm *kvm, e->hv_sint.vcpu = ue->u.hv_sint.vcpu; e->hv_sint.sint = ue->u.hv_sint.sint; break; +#ifdef CONFIG_KVM_XEN + case KVM_IRQ_ROUTING_XEN_EVTCHN: + return kvm_xen_setup_evtchn(kvm, e, ue); +#endif default: return -EINVAL; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 42bde45a1bc2..3050601d5d73 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4188,7 +4188,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_XEN_HVM: r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | - KVM_XEN_HVM_CONFIG_SHARED_INFO; + KVM_XEN_HVM_CONFIG_SHARED_INFO | + KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL; if (sched_info_on()) r |= KVM_XEN_HVM_CONFIG_RUNSTATE; break; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index da4bf2c6407f..ceddabd1f5c6 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "trace.h" @@ -195,6 +196,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) int __kvm_xen_has_interrupt(struct kvm_vcpu *v) { + unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel); + bool atomic = in_atomic() || !task_is_running(current); int err; u8 rc = 0; @@ -204,6 +207,9 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v) */ struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache; struct kvm_memslots *slots = kvm_memslots(v->kvm); + bool ghc_valid = slots->generation == ghc->generation && + !kvm_is_error_hva(ghc->hva) && ghc->memslot; + unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending); /* No need for compat handling here */ @@ -219,8 +225,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v) * cache in kvm_read_guest_offset_cached(), but just uses * __get_user() instead. And falls back to the slow path. */ - if (likely(slots->generation == ghc->generation && - !kvm_is_error_hva(ghc->hva) && ghc->memslot)) { + if (!evtchn_pending_sel && ghc_valid) { /* Fast path */ pagefault_disable(); err = __get_user(rc, (u8 __user *)ghc->hva + offset); @@ -239,11 +244,82 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v) * and we'll end up getting called again from a context where we *can* * fault in the page and wait for it. 
*/ - if (in_atomic() || !task_is_running(current)) + if (atomic) return 1; - kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset, - sizeof(rc)); + if (!ghc_valid) { + err = kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len); + if (err || !ghc->memslot) { + /* + * If this failed, userspace has screwed up the + * vcpu_info mapping. No interrupts for you. + */ + return 0; + } + } + + /* + * Now we have a valid (protected by srcu) userspace HVA in + * ghc->hva which points to the struct vcpu_info. If there + * are any bits in the in-kernel evtchn_pending_sel then + * we need to write those to the guest vcpu_info and set + * its evtchn_upcall_pending flag. If there aren't any bits + * to add, we only want to *check* evtchn_upcall_pending. + */ + if (evtchn_pending_sel) { + bool long_mode = v->kvm->arch.xen.long_mode; + + if (!user_access_begin((void __user *)ghc->hva, sizeof(struct vcpu_info))) + return 0; + + if (IS_ENABLED(CONFIG_64BIT) && long_mode) { + struct vcpu_info __user *vi = (void __user *)ghc->hva; + + /* Attempt to set the evtchn_pending_sel bits in the + * guest, and if that succeeds then clear the same + * bits in the in-kernel version. */ + asm volatile("1:\t" LOCK_PREFIX "orq %0, %1\n" + "\tnotq %0\n" + "\t" LOCK_PREFIX "andq %0, %2\n" + "2:\n" + "\t.section .fixup,\"ax\"\n" + "3:\tjmp\t2b\n" + "\t.previous\n" + _ASM_EXTABLE_UA(1b, 3b) + : "=r" (evtchn_pending_sel), + "+m" (vi->evtchn_pending_sel), + "+m" (v->arch.xen.evtchn_pending_sel) + : "0" (evtchn_pending_sel)); + } else { + struct compat_vcpu_info __user *vi = (void __user *)ghc->hva; + u32 evtchn_pending_sel32 = evtchn_pending_sel; + + /* Attempt to set the evtchn_pending_sel bits in the + * guest, and if that succeeds then clear the same + * bits in the in-kernel version. 
*/ + asm volatile("1:\t" LOCK_PREFIX "orl %0, %1\n" + "\tnotl %0\n" + "\t" LOCK_PREFIX "andl %0, %2\n" + "2:\n" + "\t.section .fixup,\"ax\"\n" + "3:\tjmp\t2b\n" + "\t.previous\n" + _ASM_EXTABLE_UA(1b, 3b) + : "=r" (evtchn_pending_sel32), + "+m" (vi->evtchn_pending_sel), + "+m" (v->arch.xen.evtchn_pending_sel) + : "0" (evtchn_pending_sel32)); + } + rc = 1; + unsafe_put_user(rc, (u8 __user *)ghc->hva + offset, err); + + err: + user_access_end(); + + mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); + } else { + __get_user(rc, (u8 __user *)ghc->hva + offset); + } return rc; } @@ -740,3 +816,179 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) return 0; } + +static inline int max_evtchn_port(struct kvm *kvm) +{ + if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) + return EVTCHN_2L_NR_CHANNELS; + else + return COMPAT_EVTCHN_2L_NR_CHANNELS; +} + +/* + * This follows the kvm_set_irq() API, so it returns: + * < 0 Interrupt was ignored (masked or not delivered for other reasons) + * = 0 Interrupt was coalesced (previous irq is still pending) + * > 0 Number of CPUs interrupt was delivered to + */ +int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm) +{ + struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; + struct kvm_vcpu *vcpu; + unsigned long *pending_bits, *mask_bits; + unsigned long flags; + int port_word_bit; + bool kick_vcpu = false; + int idx; + int rc; + + vcpu = kvm_get_vcpu_by_id(kvm, e->xen_evtchn.vcpu); + if (!vcpu) + return -1; + + if (!vcpu->arch.xen.vcpu_info_set) + return -1; + + if (e->xen_evtchn.port >= max_evtchn_port(kvm)) + return -1; + + rc = -EWOULDBLOCK; + read_lock_irqsave(&gpc->lock, flags); + + idx = srcu_read_lock(&kvm->srcu); + if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) + goto out_rcu; + + if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { + struct shared_info *shinfo = gpc->khva; + pending_bits = (unsigned long *)&shinfo->evtchn_pending; + mask_bits = (unsigned long *)&shinfo->evtchn_mask; + port_word_bit = e->xen_evtchn.port / 64; + } else { + struct compat_shared_info *shinfo = gpc->khva; + pending_bits = (unsigned long *)&shinfo->evtchn_pending; + mask_bits = (unsigned long *)&shinfo->evtchn_mask; + port_word_bit = e->xen_evtchn.port / 32; + } + + /* + * If this port wasn't already set, and if it isn't masked, then + * we try to set the corresponding bit in the in-kernel shadow of + * evtchn_pending_sel for the target vCPU. And if *that* wasn't + * already set, then we kick the vCPU in question to write to the + * *real* evtchn_pending_sel in its own guest vcpu_info struct. + */ + if (test_and_set_bit(e->xen_evtchn.port, pending_bits)) { + rc = 0; /* It was already raised */ + } else if (test_bit(e->xen_evtchn.port, mask_bits)) { + rc = -1; /* Masked */ + } else { + rc = 1; /* Delivered. But was the vCPU waking already? 
*/ + if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel)) + kick_vcpu = true; + } + + out_rcu: + srcu_read_unlock(&kvm->srcu, idx); + read_unlock_irqrestore(&gpc->lock, flags); + + if (kick_vcpu) { + kvm_make_request(KVM_REQ_EVENT, vcpu); + kvm_vcpu_kick(vcpu); + } + + return rc; +} + +/* This is the version called from kvm_set_irq() as the .set function */ +static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, + int irq_source_id, int level, bool line_status) +{ + bool mm_borrowed = false; + int rc; + + if (!level) + return -1; + + rc = kvm_xen_set_evtchn_fast(e, kvm); + if (rc != -EWOULDBLOCK) + return rc; + + if (current->mm != kvm->mm) { + /* + * If not on a thread which already belongs to this KVM, + * we'd better be in the irqfd workqueue. + */ + if (WARN_ON_ONCE(current->mm)) + return -EINVAL; + + kthread_use_mm(kvm->mm); + mm_borrowed = true; + } + + /* + * For the irqfd workqueue, using the main kvm->lock mutex is + * fine since this function is invoked from kvm_set_irq() with + * no other lock held, no srcu. In future if it will be called + * directly from a vCPU thread (e.g. on hypercall for an IPI) + * then it may need to switch to using a leaf-node mutex for + * serializing the shared_info mapping. + */ + mutex_lock(&kvm->lock); + + /* + * It is theoretically possible for the page to be unmapped + * and the MMU notifier to invalidate the shared_info before + * we even get to use it. In that case, this looks like an + * infinite loop. It was tempting to do it via the userspace + * HVA instead... but that just *hides* the fact that it's + * an infinite loop, because if a fault occurs and it waits + * for the page to come back, it can *still* immediately + * fault and have to wait again, repeatedly. + * + * Conversely, the page could also have been reinstated by + * another thread before we even obtain the mutex above, so + * check again *first* before remapping it. 
+ */ + do { + struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; + int idx; + + rc = kvm_xen_set_evtchn_fast(e, kvm); + if (rc != -EWOULDBLOCK) + break; + + idx = srcu_read_lock(&kvm->srcu); + rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, + PAGE_SIZE, false); + srcu_read_unlock(&kvm->srcu, idx); + } while(!rc); + + mutex_unlock(&kvm->lock); + + if (mm_borrowed) + kthread_unuse_mm(kvm->mm); + + return rc; +} + +int kvm_xen_setup_evtchn(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) + +{ + if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm)) + return -EINVAL; + + /* We only support 2 level event channels for now */ + if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) + return -EINVAL; + + e->xen_evtchn.port = ue->u.xen_evtchn.port; + e->xen_evtchn.vcpu = ue->u.xen_evtchn.vcpu; + e->xen_evtchn.priority = ue->u.xen_evtchn.priority; + e->set = evtchn_set_fn; + + return 0; +} diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index cc0cf5f37450..adbcc9ed59db 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -24,6 +24,12 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc); void kvm_xen_init_vm(struct kvm *kvm); void kvm_xen_destroy_vm(struct kvm *kvm); +int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm); +int kvm_xen_setup_evtchn(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue); + static inline bool kvm_xen_msr_enabled(struct kvm *kvm) { return static_branch_unlikely(&kvm_xen_enabled.key) && @@ -134,6 +140,9 @@ struct compat_shared_info { struct compat_arch_shared_info arch; }; +#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \ + sizeof_field(struct compat_shared_info, \ + evtchn_pending)) struct compat_vcpu_runstate_info { int state; uint64_t state_entry_time; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9bbb1f1d9e48..3c47b146851a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -497,6 +497,12 @@ struct kvm_hv_sint { u32 sint; }; +struct kvm_xen_evtchn { + u32 port; + u32 vcpu; + u32 priority; +}; + struct kvm_kernel_irq_routing_entry { u32 gsi; u32 type; @@ -517,6 +523,7 @@ struct kvm_kernel_irq_routing_entry { } msi; struct kvm_s390_adapter_int adapter; struct kvm_hv_sint hv_sint; + struct kvm_xen_evtchn xen_evtchn; }; struct hlist_node link; }; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 469f05d69c8d..fbfd70d965c6 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1163,11 +1163,20 @@ struct kvm_irq_routing_hv_sint { __u32 sint; }; +struct kvm_irq_routing_xen_evtchn { + __u32 port; + __u32 vcpu; + __u32 priority; +}; + +#define KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL ((__u32)(-1)) + /* gsi routing entry types */ #define KVM_IRQ_ROUTING_IRQCHIP 1 #define KVM_IRQ_ROUTING_MSI 2 #define KVM_IRQ_ROUTING_S390_ADAPTER 3 #define KVM_IRQ_ROUTING_HV_SINT 4 +#define KVM_IRQ_ROUTING_XEN_EVTCHN 5 struct kvm_irq_routing_entry { __u32 gsi; @@ -1179,6 +1188,7 @@ struct kvm_irq_routing_entry { struct kvm_irq_routing_msi msi; struct kvm_irq_routing_s390_adapter adapter; struct kvm_irq_routing_hv_sint hv_sint; + struct kvm_irq_routing_xen_evtchn xen_evtchn; __u32 pad[8]; } u; }; @@ -1209,6 +1219,7 @@ struct kvm_x86_mce { #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) +#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) 
struct kvm_xen_hvm_config { __u32 flags; diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index a0699f00b3d6..478e0ae8b93e 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -14,6 +14,9 @@ #include #include #include +#include + +#include #define VCPU_ID 5 @@ -22,10 +25,15 @@ #define SHINFO_REGION_SLOT 10 #define PAGE_SIZE 4096 +#define DUMMY_REGION_GPA (SHINFO_REGION_GPA + (2 * PAGE_SIZE)) +#define DUMMY_REGION_SLOT 11 + +#define SHINFO_ADDR (SHINFO_REGION_GPA) #define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE) #define RUNSTATE_ADDR (SHINFO_REGION_GPA + PAGE_SIZE + 0x20) #define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40) +#define SHINFO_VADDR (SHINFO_REGION_GVA) #define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + 0x20) #define VCPU_INFO_VADDR (SHINFO_REGION_GVA + 0x40) @@ -73,15 +81,37 @@ struct vcpu_info { struct pvclock_vcpu_time_info time; }; /* 64 bytes (x86) */ +struct shared_info { + struct vcpu_info vcpu_info[32]; + unsigned long evtchn_pending[64]; + unsigned long evtchn_mask[64]; + struct pvclock_wall_clock wc; + uint32_t wc_sec_hi; + /* arch_shared_info here */ +}; + #define RUNSTATE_running 0 #define RUNSTATE_runnable 1 #define RUNSTATE_blocked 2 #define RUNSTATE_offline 3 +static const char *runstate_names[] = { + "running", + "runnable", + "blocked", + "offline" +}; + +struct { + struct kvm_irq_routing info; + struct kvm_irq_routing_entry entries[2]; +} irq_routes; + static void evtchn_handler(struct ex_regs *regs) { struct vcpu_info *vi = (void *)VCPU_INFO_VADDR; vi->evtchn_upcall_pending = 0; + vi->evtchn_pending_sel = 0; GUEST_SYNC(0x20); } @@ -127,7 +157,25 @@ static void guest_code(void) GUEST_SYNC(6); GUEST_ASSERT(rs->time[RUNSTATE_runnable] >= MIN_STEAL_TIME); - GUEST_DONE(); + /* Attempt to deliver a *masked* interrupt */ + GUEST_SYNC(7); + + /* Wait until we see the bit set */ + struct shared_info *si = (void *)SHINFO_VADDR; + while (!si->evtchn_pending[0]) + __asm__ __volatile__ ("rep nop" : : : "memory"); + + /* Now deliver an *unmasked* interrupt */ + GUEST_SYNC(8); + + while (!si->evtchn_pending[1]) + __asm__ __volatile__ ("rep nop" : : : "memory"); + + /* Change memslots and deliver an interrupt */ + GUEST_SYNC(9); + + for (;;) + __asm__ __volatile__ ("rep nop" : : : "memory"); } static int cmp_timespec(struct timespec *a, struct timespec *b) @@ -144,9 +192,18 @@ static int cmp_timespec(struct timespec *a, struct timespec *b) return 0; } +static void handle_alrm(int sig) +{ + TEST_FAIL("IRQ delivery timed out"); +} + int main(int argc, char *argv[]) { struct timespec min_ts, max_ts, vm_ts; + bool verbose; + + verbose = argc > 1 && (!strncmp(argv[1], "-v", 3) || + !strncmp(argv[1], "--verbose", 10)); int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM); if (!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO) ) { @@ -155,6 +212,7 @@ int main(int argc, char *argv[]) } bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE); + bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL); clock_gettime(CLOCK_REALTIME, &min_ts); @@ -166,6 +224,11 @@ int main(int argc, char *argv[]) SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 2, 0); virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 2); + struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR); + + int zero_fd = open("/dev/zero", O_RDONLY); + TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero"); + struct kvm_xen_hvm_config hvmc = { .flags = 
KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, .msr = XEN_HYPERCALL_MSR, @@ -184,6 +247,16 @@ int main(int argc, char *argv[]) }; vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha); + /* + * Test what happens when the HVA of the shinfo page is remapped after + * the kernel has a reference to it. But make sure we copy the clock + * info over since that's only set at setup time, and we test it later. + */ + struct pvclock_wall_clock wc_copy = shinfo->wc; + void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0); + TEST_ASSERT(m == shinfo, "Failed to map /dev/zero over shared info"); + shinfo->wc = wc_copy; + struct kvm_xen_vcpu_attr vi = { .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, .u.gpa = VCPU_INFO_ADDR, @@ -214,6 +287,49 @@ int main(int argc, char *argv[]) vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st); } + int irq_fd[2] = { -1, -1 }; + + if (do_eventfd_tests) { + irq_fd[0] = eventfd(0, 0); + irq_fd[1] = eventfd(0, 0); + + /* Unexpected, but not a KVM failure */ + if (irq_fd[0] == -1 || irq_fd[1] == -1) + do_eventfd_tests = false; + } + + if (do_eventfd_tests) { + irq_routes.info.nr = 2; + + irq_routes.entries[0].gsi = 32; + irq_routes.entries[0].type = KVM_IRQ_ROUTING_XEN_EVTCHN; + irq_routes.entries[0].u.xen_evtchn.port = 15; + irq_routes.entries[0].u.xen_evtchn.vcpu = VCPU_ID; + irq_routes.entries[0].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; + + irq_routes.entries[1].gsi = 33; + irq_routes.entries[1].type = KVM_IRQ_ROUTING_XEN_EVTCHN; + irq_routes.entries[1].u.xen_evtchn.port = 66; + irq_routes.entries[1].u.xen_evtchn.vcpu = VCPU_ID; + irq_routes.entries[1].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; + + vm_ioctl(vm, KVM_SET_GSI_ROUTING, &irq_routes); + + struct kvm_irqfd ifd = { }; + + ifd.fd = irq_fd[0]; + ifd.gsi = 32; + vm_ioctl(vm, KVM_IRQFD, &ifd); + + ifd.fd = irq_fd[1]; + ifd.gsi = 33; + vm_ioctl(vm, KVM_IRQFD, &ifd); + + struct sigaction sa = { }; + sa.sa_handler = handle_alrm; + sigaction(SIGALRM, &sa, NULL); + } + struct vcpu_info *vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR); vinfo->evtchn_upcall_pending = 0; @@ -248,6 +364,8 @@ int main(int argc, char *argv[]) switch (uc.args[1]) { case 0: + if (verbose) + printf("Delivering evtchn upcall\n"); evtchn_irq_expected = true; vinfo->evtchn_upcall_pending = 1; break; @@ -256,11 +374,16 @@ int main(int argc, char *argv[]) TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen"); if (!do_runstate_tests) goto done; + if (verbose) + printf("Testing runstate %s\n", runstate_names[uc.args[1]]); rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT; rst.u.runstate.state = uc.args[1]; vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst); break; + case 4: + if (verbose) + printf("Testing RUNSTATE_ADJUST\n"); rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST; memset(&rst.u, 0, sizeof(rst.u)); rst.u.runstate.state = (uint64_t)-1; @@ -274,6 +397,8 @@ int main(int argc, char *argv[]) break; case 5: + if (verbose) + printf("Testing RUNSTATE_DATA\n"); rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA; memset(&rst.u, 0, sizeof(rst.u)); rst.u.runstate.state = RUNSTATE_running; @@ -282,16 +407,54 @@ int main(int argc, char *argv[]) rst.u.runstate.time_offline = 0x5a; vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst); break; + case 6: + if (verbose) + printf("Testing steal time\n"); /* Yield until scheduler delay exceeds target */ rundelay = get_run_delay() + MIN_STEAL_TIME; do { sched_yield(); } while (get_run_delay() < rundelay); break; + + case 7: + if (!do_eventfd_tests) + goto done; 
+ if (verbose) + printf("Testing masked event channel\n"); + shinfo->evtchn_mask[0] = 0x8000; + eventfd_write(irq_fd[0], 1UL); + alarm(1); + break; + + case 8: + if (verbose) + printf("Testing unmasked event channel\n"); + /* Unmask that, but deliver the other one */ + shinfo->evtchn_pending[0] = 0; + shinfo->evtchn_mask[0] = 0; + eventfd_write(irq_fd[1], 1UL); + evtchn_irq_expected = true; + alarm(1); + break; + + case 9: + if (verbose) + printf("Testing event channel after memslot change\n"); + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, + DUMMY_REGION_GPA, DUMMY_REGION_SLOT, 1, 0); + eventfd_write(irq_fd[0], 1UL); + evtchn_irq_expected = true; + alarm(1); + break; + case 0x20: TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ"); evtchn_irq_expected = false; + if (shinfo->evtchn_pending[1] && + shinfo->evtchn_pending[0]) + goto done; break; } break; @@ -318,6 +481,16 @@ int main(int argc, char *argv[]) ti = addr_gpa2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20); ti2 = addr_gpa2hva(vm, PVTIME_ADDR); + if (verbose) { + printf("Wall clock (v %d) %d.%09d\n", wc->version, wc->sec, wc->nsec); + printf("Time info 1: v %u tsc %" PRIu64 " time %" PRIu64 " mul %u shift %u flags %x\n", + ti->version, ti->tsc_timestamp, ti->system_time, ti->tsc_to_system_mul, + ti->tsc_shift, ti->flags); + printf("Time info 2: v %u tsc %" PRIu64 " time %" PRIu64 " mul %u shift %u flags %x\n", + ti2->version, ti2->tsc_timestamp, ti2->system_time, ti2->tsc_to_system_mul, + ti2->tsc_shift, ti2->flags); + } + vm_ts.tv_sec = wc->sec; vm_ts.tv_nsec = wc->nsec; TEST_ASSERT(wc->version && !(wc->version & 1), @@ -341,6 +514,15 @@ int main(int argc, char *argv[]) }; vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_GET_ATTR, &rst); + if (verbose) { + printf("Runstate: %s(%d), entry %" PRIu64 " ns\n", + rs->state <= RUNSTATE_offline ? runstate_names[rs->state] : "unknown", + rs->state, rs->state_entry_time); + for (int i = RUNSTATE_running; i <= RUNSTATE_offline; i++) { + printf("State %s: %" PRIu64 " ns\n", + runstate_names[i], rs->time[i]); + } + } TEST_ASSERT(rs->state == rst.u.runstate.state, "Runstate mismatch"); TEST_ASSERT(rs->state_entry_time == rst.u.runstate.state_entry_time, "State entry time mismatch"); From 55749769fe608fa3f4a075e42e89d237c8e37637 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 10 Dec 2021 16:36:24 +0000 Subject: [PATCH 239/268] KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty When dirty ring logging is enabled, any dirty logging without an active vCPU context will cause a kernel oops. But we've already declared that the shared_info page doesn't get dirty tracking anyway, since it would be kind of insane to mark it dirty every time we deliver an event channel interrupt. Userspace is supposed to just assume it's always dirty any time a vCPU can run or event channels are routed. So stop using the generic kvm_write_wall_clock() and just write directly through the gfn_to_pfn_cache that we already have set up. We can make kvm_write_wall_clock() static in x86.c again now, but let's not remove the 'sec_hi_ofs' argument even though it's not used yet. At some point we *will* want to use that for KVM guests too. 
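For reference, the resulting access pattern for a gfn_to_pfn_cache in this kind of context is sketched below. This is only an illustration under stated assumptions (a hypothetical helper writing a u32 at the start of a page-sized cache, with the caller already inside srcu_read_lock(&kvm->srcu)), not code from the patch:

static int gpc_write_u32(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, u32 val)
{
	read_lock_irq(&gpc->lock);

	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
		read_unlock_irq(&gpc->lock);

		/* Refresh must be called without gpc->lock held. */
		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa,
						 PAGE_SIZE, false))
			return -EFAULT;

		read_lock_irq(&gpc->lock);
	}

	/* Holding the read lock keeps the mapping from being invalidated. */
	*(u32 *)gpc->khva = val;

	read_unlock_irq(&gpc->lock);
	return 0;
}

Since the write goes through the kernel mapping and nothing calls mark_page_dirty(), no vCPU context is required, which is exactly what the wall clock update below relies on.
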
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region") Reported-by: butt3rflyh4ck Signed-off-by: David Woodhouse Message-Id: <20211210163625.2886-6-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 2 +- arch/x86/kvm/x86.h | 1 - arch/x86/kvm/xen.c | 64 +++++++++++++++++++++++++++++++++++----------- 3 files changed, 50 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3050601d5d73..6492329f2e9a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2135,7 +2135,7 @@ static s64 get_kvmclock_base_ns(void) } #endif -void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) +static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) { int version; int r; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 4abcd8d9836d..da7031e80f23 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -301,7 +301,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu) return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu); } -void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs); void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); u64 get_kvmclock_ns(struct kvm *kvm); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index ceddabd1f5c6..0e3f7d6e9fd7 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -25,8 +25,11 @@ DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) { struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; + struct pvclock_wall_clock *wc; gpa_t gpa = gfn_to_gpa(gfn); - int wc_ofs, sec_hi_ofs; + u32 *wc_sec_hi; + u32 wc_version; + u64 wall_nsec; int ret = 0; int idx = srcu_read_lock(&kvm->srcu); @@ -35,32 +38,63 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) goto out; } - ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true, gpa, - PAGE_SIZE, false); - if (ret) - goto out; + do { + ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true, + gpa, PAGE_SIZE, false); + if (ret) + goto out; + + /* + * This code mirrors kvm_write_wall_clock() except that it writes + * directly through the pfn cache and doesn't mark the page dirty. 
+ */ + wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); + + /* It could be invalid again already, so we need to check */ + read_lock_irq(&gpc->lock); + + if (gpc->valid) + break; + + read_unlock_irq(&gpc->lock); + } while (1); /* Paranoia checks on the 32-bit struct layout */ BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900); BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924); BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); - /* 32-bit location by default */ - wc_ofs = offsetof(struct compat_shared_info, wc); - sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi); - #ifdef CONFIG_X86_64 /* Paranoia checks on the 64-bit struct layout */ BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00); BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c); - if (kvm->arch.xen.long_mode) { - wc_ofs = offsetof(struct shared_info, wc); - sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi); - } -#endif + if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { + struct shared_info *shinfo = gpc->khva; + + wc_sec_hi = &shinfo->wc_sec_hi; + wc = &shinfo->wc; + } else +#endif + { + struct compat_shared_info *shinfo = gpc->khva; + + wc_sec_hi = &shinfo->arch.wc_sec_hi; + wc = &shinfo->wc; + } + + /* Increment and ensure an odd value */ + wc_version = wc->version = (wc->version + 1) | 1; + smp_wmb(); + + wc->nsec = do_div(wall_nsec, 1000000000); + wc->sec = (u32)wall_nsec; + *wc_sec_hi = wall_nsec >> 32; + smp_wmb(); + + wc->version = wc_version + 1; + read_unlock_irq(&gpc->lock); - kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs); kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE); out: From 907d139318b5109e5b676b32b0f4a2c666a8d9ac Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 13 Dec 2021 11:07:40 +0100 Subject: [PATCH 240/268] KVM: VMX: Provide vmread version using asm-goto-with-outputs Use asm-goto-output for smaller fast path code. 
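For context, with a compiler that provides CONFIG_CC_HAS_ASM_GOTO_OUTPUT, an asm goto statement may combine output operands with goto labels, so the failure path can branch out of line without a separate status flag. A toy sketch of the construct (not kernel code; x86 only, hypothetical helper name):

static inline int add_or_fail(int a, int b)
{
	int sum = a;

	/* Falls through with the result in 'sum', or jumps on overflow. */
	asm goto("addl %1, %0\n\t"
		 "jo %l[overflow]"
		 : "+r" (sum)
		 : "r" (b)
		 : "cc"
		 : overflow);
	return sum;

overflow:
	return -1;
}

__vmcs_readl() below uses the same shape: the VMREAD result is the output operand, and the failure and exception cases jump to the do_fail and do_exception labels.
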
Message-Id: Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx_ops.h | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h index 9e9ef47e988c..67f745250e50 100644 --- a/arch/x86/kvm/vmx/vmx_ops.h +++ b/arch/x86/kvm/vmx/vmx_ops.h @@ -71,6 +71,31 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) { unsigned long value; +#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT + + asm_volatile_goto("1: vmread %[field], %[output]\n\t" + "jna %l[do_fail]\n\t" + + _ASM_EXTABLE(1b, %l[do_exception]) + + : [output] "=r" (value) + : [field] "r" (field) + : "cc" + : do_fail, do_exception); + + return value; + +do_fail: + WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field); + pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field); + return 0; + +do_exception: + kvm_spurious_fault(); + return 0; + +#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ + asm volatile("1: vmread %2, %1\n\t" ".byte 0x3e\n\t" /* branch taken hint */ "ja 3f\n\t" @@ -101,6 +126,8 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) _ASM_EXTABLE(1b, 4b) : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc"); return value; + +#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */ } static __always_inline u16 vmcs_read16(unsigned long field) From 405329fc9aeef1e3e2eccaadf32b539ad6c7120f Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Thu, 16 Dec 2021 11:13:54 -0600 Subject: [PATCH 241/268] KVM: SVM: include CR3 in initial VMSA state for SEV-ES guests Normally guests will set up CR3 themselves, but some guests, such as kselftests, and potentially CONFIG_PVH guests, rely on being booted with paging enabled and CR3 initialized to a pre-allocated page table. Currently CR3 updates via KVM_SET_SREGS* are not loaded into the guest VMCB until just prior to entering the guest. For SEV-ES/SEV-SNP, this is too late, since it will have switched over to using the VMSA page prior to that point, with the VMSA CR3 copied from the VMCB initial CR3 value: 0. Address this by sync'ing the CR3 value into the VMCB save area immediately when KVM_SET_SREGS* is issued so it will find it's way into the initial VMSA. Suggested-by: Tom Lendacky Signed-off-by: Michael Roth Message-Id: <20211216171358.61140-10-michael.roth@amd.com> [Remove vmx_post_set_cr3; add a remark about kvm_set_cr3 not calling the new hook. 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm/svm.c | 19 +++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 1 + arch/x86/kvm/x86.c | 2 ++ 5 files changed, 24 insertions(+) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 9e50da3ed01a..f658bb4dbb74 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -35,6 +35,7 @@ KVM_X86_OP(get_cpl) KVM_X86_OP(set_segment) KVM_X86_OP_NULL(get_cs_db_l_bits) KVM_X86_OP(set_cr0) +KVM_X86_OP_NULL(post_set_cr3) KVM_X86_OP(is_valid_cr4) KVM_X86_OP(set_cr4) KVM_X86_OP(set_efer) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 623fb7c4992c..6cbf97a2ebc4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1344,6 +1344,7 @@ struct kvm_x86_ops { struct kvm_segment *var, int seg); void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); + void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0); void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5557867dcb69..c3d9006478a4 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1801,6 +1801,24 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) vmcb_mark_dirty(svm->vmcb, VMCB_DT); } +static void svm_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + /* + * For guests that don't set guest_state_protected, the cr3 update is + * handled via kvm_mmu_load() while entering the guest. For guests + * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to + * VMCB save area now, since the save area will become the initial + * contents of the VMSA, and future VMCB save area updates won't be + * seen. + */ + if (sev_es_guest(vcpu->kvm)) { + svm->vmcb->save.cr3 = cr3; + vmcb_mark_dirty(svm->vmcb, VMCB_CR); + } +} + void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4624,6 +4642,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .get_cpl = svm_get_cpl, .get_cs_db_l_bits = kvm_get_cs_db_l_bits, .set_cr0 = svm_set_cr0, + .post_set_cr3 = svm_post_set_cr3, .is_valid_cr4 = svm_is_valid_cr4, .set_cr4 = svm_set_cr4, .set_efer = svm_set_efer, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 1d53b8144f83..7b5abe25e1e5 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3134,6 +3134,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, vmcs_writel(GUEST_CR3, guest_cr3); } + static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6492329f2e9a..a9f1044dd6b2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1179,6 +1179,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) vcpu->arch.cr3 = cr3; kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); + /* Do not call post_set_cr3, we do not get here for confidential guests. 
*/ handle_tlb_flush: /* @@ -10618,6 +10619,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; vcpu->arch.cr3 = sregs->cr3; kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); + static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); kvm_set_cr8(vcpu, sregs->cr8); From 7d9a662ed9f0403e7b94940dceb81552b8edb931 Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Fri, 10 Dec 2021 10:46:11 -0600 Subject: [PATCH 242/268] kvm: selftests: move base kvm_util.h declarations to kvm_util_base.h Between helper macros and interfaces that will be introduced in subsequent patches, much of kvm_util.h would end up being declarations specific to ucall. Ideally these could be separated out into a separate header since they are not strictly required for writing guest tests and are mostly self-contained interfaces other than a reliance on a few core declarations like struct kvm_vm. This doesn't make a big difference as far as how tests will be compiled/written since all these interfaces will still be packaged up into a single/common libkvm.a used by all tests, but it is still nice to be able to compartmentalize to improve readabilty and reduce merge conflicts in the future for common tasks like adding new interfaces to kvm_util.h. Furthermore, some of the ucall declarations will be arch-specific, requiring various #ifdef'ery in kvm_util.h. Ideally these declarations could live in separate arch-specific headers, e.g. include//ucall.h, which would handle arch-specific declarations as well as pulling in common ucall-related declarations shared by all archs. One simple way to do this would be to #include ucall.h at the bottom of kvm_util.h, after declarations it relies upon like struct kvm_vm. This is brittle however, and doesn't scale easily to other sets of interfaces that may be added in the future. Instead, move all declarations currently in kvm_util.h into kvm_util_base.h, then have kvm_util.h #include it. With this change, non-base declarations can be selectively moved/introduced into separate headers, which can then be included in kvm_util.h so that individual tests don't need to be touched. Subsequent patches will then move ucall-related declarations into a separate header to meet the above goals. Signed-off-by: Michael Roth Message-Id: <20211210164620.11636-2-michael.roth@amd.com> Signed-off-by: Paolo Bonzini --- .../testing/selftests/kvm/include/kvm_util.h | 436 +---------------- .../selftests/kvm/include/kvm_util_base.h | 446 ++++++++++++++++++ 2 files changed, 447 insertions(+), 435 deletions(-) create mode 100644 tools/testing/selftests/kvm/include/kvm_util_base.h diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 4ba3f47a41d2..c860ced3888d 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -7,440 +7,6 @@ #ifndef SELFTEST_KVM_UTIL_H #define SELFTEST_KVM_UTIL_H -#include "test_util.h" - -#include "asm/kvm.h" -#include "linux/list.h" -#include "linux/kvm.h" -#include - -#include "sparsebit.h" - -#define KVM_DEV_PATH "/dev/kvm" -#define KVM_MAX_VCPUS 512 - -#define NSEC_PER_SEC 1000000000L - -/* - * Callers of kvm_util only have an incomplete/opaque description of the - * structure kvm_util is using to maintain the state of a VM. 
- */ -struct kvm_vm; - -typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ -typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ - -/* Minimum allocated guest virtual and physical addresses */ -#define KVM_UTIL_MIN_VADDR 0x2000 -#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 - -#define DEFAULT_GUEST_PHY_PAGES 512 -#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000 -#define DEFAULT_STACK_PGS 5 - -enum vm_guest_mode { - VM_MODE_P52V48_4K, - VM_MODE_P52V48_64K, - VM_MODE_P48V48_4K, - VM_MODE_P48V48_16K, - VM_MODE_P48V48_64K, - VM_MODE_P40V48_4K, - VM_MODE_P40V48_16K, - VM_MODE_P40V48_64K, - VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */ - VM_MODE_P47V64_4K, - VM_MODE_P44V64_4K, - VM_MODE_P36V48_4K, - VM_MODE_P36V48_16K, - VM_MODE_P36V48_64K, - VM_MODE_P36V47_16K, - NUM_VM_MODES, -}; - -#if defined(__aarch64__) - -extern enum vm_guest_mode vm_mode_default; - -#define VM_MODE_DEFAULT vm_mode_default -#define MIN_PAGE_SHIFT 12U -#define ptes_per_page(page_size) ((page_size) / 8) - -#elif defined(__x86_64__) - -#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K -#define MIN_PAGE_SHIFT 12U -#define ptes_per_page(page_size) ((page_size) / 8) - -#elif defined(__s390x__) - -#define VM_MODE_DEFAULT VM_MODE_P44V64_4K -#define MIN_PAGE_SHIFT 12U -#define ptes_per_page(page_size) ((page_size) / 16) - -#elif defined(__riscv) - -#if __riscv_xlen == 32 -#error "RISC-V 32-bit kvm selftests not supported" -#endif - -#define VM_MODE_DEFAULT VM_MODE_P40V48_4K -#define MIN_PAGE_SHIFT 12U -#define ptes_per_page(page_size) ((page_size) / 8) - -#endif - -#define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT) -#define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE) - -struct vm_guest_mode_params { - unsigned int pa_bits; - unsigned int va_bits; - unsigned int page_size; - unsigned int page_shift; -}; -extern const struct vm_guest_mode_params vm_guest_mode_params[]; - -int open_path_or_exit(const char *path, int flags); -int open_kvm_dev_path_or_exit(void); -int kvm_check_cap(long cap); -int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); -int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, - struct kvm_enable_cap *cap); -void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size); -const char *vm_guest_mode_string(uint32_t i); - -struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); -void kvm_vm_free(struct kvm_vm *vmp); -void kvm_vm_restart(struct kvm_vm *vmp, int perm); -void kvm_vm_release(struct kvm_vm *vmp); -void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log); -void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, - uint64_t first_page, uint32_t num_pages); -uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm); - -int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva, - size_t len); - -void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename); - -void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); - -/* - * VM VCPU Dump - * - * Input Args: - * stream - Output FILE stream - * vm - Virtual Machine - * vcpuid - VCPU ID - * indent - Left margin indent amount - * - * Output Args: None - * - * Return: None - * - * Dumps the current state of the VCPU specified by @vcpuid, within the VM - * given by @vm, to the FILE stream given by @stream. 
- */ -void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, - uint8_t indent); - -void vm_create_irqchip(struct kvm_vm *vm); - -void vm_userspace_mem_region_add(struct kvm_vm *vm, - enum vm_mem_backing_src_type src_type, - uint64_t guest_paddr, uint32_t slot, uint64_t npages, - uint32_t flags); - -void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl, - void *arg); -int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl, - void *arg); -void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); -int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg); -void kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); -int _kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); -void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); -void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); -void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); -void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid); -vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); -vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); -vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); - -void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, - unsigned int npages); -void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); -void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); -vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); -void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); - -/* - * Address Guest Virtual to Guest Physical - * - * Input Args: - * vm - Virtual Machine - * gva - VM virtual address - * - * Output Args: None - * - * Return: - * Equivalent VM physical address - * - * Returns the VM physical address of the translated VM virtual - * address given by @gva. - */ -vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); - -struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); -void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); -int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); -int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid); -void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid); -void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_guest_debug *debug); -void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_mp_state *mp_state); -struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid); -void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); -void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); - -/* - * VM VCPU Args Set - * - * Input Args: - * vm - Virtual Machine - * vcpuid - VCPU ID - * num - number of arguments - * ... - arguments, each of type uint64_t - * - * Output Args: None - * - * Return: None - * - * Sets the first @num function input registers of the VCPU with @vcpuid, - * per the C calling convention of the architecture, to the values given - * as variable args. Each of the variable args is expected to be of type - * uint64_t. The maximum @num can be is specific to the architecture. 
- */ -void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...); - -void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_sregs *sregs); -void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_sregs *sregs); -int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_sregs *sregs); -void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_fpu *fpu); -void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_fpu *fpu); -void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg); -void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg); -#ifdef __KVM_HAVE_VCPU_EVENTS -void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_vcpu_events *events); -void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_vcpu_events *events); -#endif -#ifdef __x86_64__ -void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_nested_state *state); -int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, - struct kvm_nested_state *state, bool ignore_error); -#endif -void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid); - -int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr); -int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr); -int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd); -int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test); -int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, - void *val, bool write); -int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, - void *val, bool write); -void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); -int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); - -int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, - uint64_t attr); -int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, - uint64_t attr); -int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, - uint64_t attr, void *val, bool write); -int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, - uint64_t attr, void *val, bool write); - -#define KVM_MAX_IRQ_ROUTES 4096 - -struct kvm_irq_routing *kvm_gsi_routing_create(void); -void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, - uint32_t gsi, uint32_t pin); -int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); -void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); - -const char *exit_reason_str(unsigned int exit_reason); - -void virt_pgd_alloc(struct kvm_vm *vm); - -/* - * VM Virtual Page Map - * - * Input Args: - * vm - Virtual Machine - * vaddr - VM Virtual Address - * paddr - VM Physical Address - * memslot - Memory region slot for new virtual translation tables - * - * Output Args: None - * - * Return: None - * - * Within @vm, creates a virtual translation for the page starting - * at @vaddr to the page starting at @paddr. - */ -void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr); - -vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, - uint32_t memslot); -vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, - vm_paddr_t paddr_min, uint32_t memslot); -vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); - -/* - * Create a VM with reasonable defaults - * - * Input Args: - * vcpuid - The id of the single VCPU to add to the VM. 
- * extra_mem_pages - The number of extra pages to add (this will - * decide how much extra space we will need to - * setup the page tables using memslot 0) - * guest_code - The vCPU's entry point - * - * Output Args: None - * - * Return: - * Pointer to opaque structure that describes the created VM. - */ -struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, - void *guest_code); - -/* Same as vm_create_default, but can be used for more than one vcpu */ -struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages, - uint32_t num_percpu_pages, void *guest_code, - uint32_t vcpuids[]); - -/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as a parameter */ -struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, - uint64_t slot0_mem_pages, uint64_t extra_mem_pages, - uint32_t num_percpu_pages, void *guest_code, - uint32_t vcpuids[]); - -/* - * Adds a vCPU with reasonable defaults (e.g. a stack) - * - * Input Args: - * vm - Virtual Machine - * vcpuid - The id of the VCPU to add to the VM. - * guest_code - The vCPU's entry point - */ -void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); - -bool vm_is_unrestricted_guest(struct kvm_vm *vm); - -unsigned int vm_get_page_size(struct kvm_vm *vm); -unsigned int vm_get_page_shift(struct kvm_vm *vm); -unsigned long vm_compute_max_gfn(struct kvm_vm *vm); -uint64_t vm_get_max_gfn(struct kvm_vm *vm); -int vm_get_fd(struct kvm_vm *vm); - -unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size); -unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages); -unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages); -static inline unsigned int -vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) -{ - unsigned int n; - n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages)); -#ifdef __s390x__ - /* s390 requires 1M aligned guest sizes */ - n = (n + 255) & ~255; -#endif - return n; -} - -struct kvm_userspace_memory_region * -kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, - uint64_t end); - -struct kvm_dirty_log * -allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region); - -int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd); - -#define sync_global_to_guest(vm, g) ({ \ - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ - memcpy(_p, &(g), sizeof(g)); \ -}) - -#define sync_global_from_guest(vm, g) ({ \ - typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ - memcpy(&(g), _p, sizeof(g)); \ -}) - -void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid); - -/* Common ucalls */ -enum { - UCALL_NONE, - UCALL_SYNC, - UCALL_ABORT, - UCALL_DONE, - UCALL_UNHANDLED, -}; - -#define UCALL_MAX_ARGS 6 - -struct ucall { - uint64_t cmd; - uint64_t args[UCALL_MAX_ARGS]; -}; - -void ucall_init(struct kvm_vm *vm, void *arg); -void ucall_uninit(struct kvm_vm *vm); -void ucall(uint64_t cmd, int nargs, ...); -uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc); - -#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ - ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4) -#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage) -#define GUEST_DONE() ucall(UCALL_DONE, 0) -#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) 
do { \ - if (!(_condition)) \ - ucall(UCALL_ABORT, 2 + _nargs, \ - "Failed guest assert: " \ - _condstr, __LINE__, _args); \ -} while (0) - -#define GUEST_ASSERT(_condition) \ - __GUEST_ASSERT(_condition, #_condition, 0, 0) - -#define GUEST_ASSERT_1(_condition, arg1) \ - __GUEST_ASSERT(_condition, #_condition, 1, (arg1)) - -#define GUEST_ASSERT_2(_condition, arg1, arg2) \ - __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2)) - -#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \ - __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3)) - -#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \ - __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4)) - -#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b) - -int vm_get_stats_fd(struct kvm_vm *vm); -int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid); - -uint32_t guest_get_vcpuid(void); +#include "kvm_util_base.h" #endif /* SELFTEST_KVM_UTIL_H */ diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h new file mode 100644 index 000000000000..749da2c4bab0 --- /dev/null +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -0,0 +1,446 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * tools/testing/selftests/kvm/include/kvm_util_base.h + * + * Copyright (C) 2018, Google LLC. + */ +#ifndef SELFTEST_KVM_UTIL_BASE_H +#define SELFTEST_KVM_UTIL_BASE_H + +#include "test_util.h" + +#include "asm/kvm.h" +#include "linux/list.h" +#include "linux/kvm.h" +#include + +#include "sparsebit.h" + +#define KVM_DEV_PATH "/dev/kvm" +#define KVM_MAX_VCPUS 512 + +#define NSEC_PER_SEC 1000000000L + +/* + * Callers of kvm_util only have an incomplete/opaque description of the + * structure kvm_util is using to maintain the state of a VM. 
+ */ +struct kvm_vm; + +typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */ +typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */ + +/* Minimum allocated guest virtual and physical addresses */ +#define KVM_UTIL_MIN_VADDR 0x2000 +#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000 + +#define DEFAULT_GUEST_PHY_PAGES 512 +#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000 +#define DEFAULT_STACK_PGS 5 + +enum vm_guest_mode { + VM_MODE_P52V48_4K, + VM_MODE_P52V48_64K, + VM_MODE_P48V48_4K, + VM_MODE_P48V48_16K, + VM_MODE_P48V48_64K, + VM_MODE_P40V48_4K, + VM_MODE_P40V48_16K, + VM_MODE_P40V48_64K, + VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */ + VM_MODE_P47V64_4K, + VM_MODE_P44V64_4K, + VM_MODE_P36V48_4K, + VM_MODE_P36V48_16K, + VM_MODE_P36V48_64K, + VM_MODE_P36V47_16K, + NUM_VM_MODES, +}; + +#if defined(__aarch64__) + +extern enum vm_guest_mode vm_mode_default; + +#define VM_MODE_DEFAULT vm_mode_default +#define MIN_PAGE_SHIFT 12U +#define ptes_per_page(page_size) ((page_size) / 8) + +#elif defined(__x86_64__) + +#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K +#define MIN_PAGE_SHIFT 12U +#define ptes_per_page(page_size) ((page_size) / 8) + +#elif defined(__s390x__) + +#define VM_MODE_DEFAULT VM_MODE_P44V64_4K +#define MIN_PAGE_SHIFT 12U +#define ptes_per_page(page_size) ((page_size) / 16) + +#elif defined(__riscv) + +#if __riscv_xlen == 32 +#error "RISC-V 32-bit kvm selftests not supported" +#endif + +#define VM_MODE_DEFAULT VM_MODE_P40V48_4K +#define MIN_PAGE_SHIFT 12U +#define ptes_per_page(page_size) ((page_size) / 8) + +#endif + +#define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT) +#define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE) + +struct vm_guest_mode_params { + unsigned int pa_bits; + unsigned int va_bits; + unsigned int page_size; + unsigned int page_shift; +}; +extern const struct vm_guest_mode_params vm_guest_mode_params[]; + +int open_path_or_exit(const char *path, int flags); +int open_kvm_dev_path_or_exit(void); +int kvm_check_cap(long cap); +int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); +int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, + struct kvm_enable_cap *cap); +void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size); +const char *vm_guest_mode_string(uint32_t i); + +struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); +void kvm_vm_free(struct kvm_vm *vmp); +void kvm_vm_restart(struct kvm_vm *vmp, int perm); +void kvm_vm_release(struct kvm_vm *vmp); +void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log); +void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, + uint64_t first_page, uint32_t num_pages); +uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm); + +int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva, + size_t len); + +void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename); + +void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); + +/* + * VM VCPU Dump + * + * Input Args: + * stream - Output FILE stream + * vm - Virtual Machine + * vcpuid - VCPU ID + * indent - Left margin indent amount + * + * Output Args: None + * + * Return: None + * + * Dumps the current state of the VCPU specified by @vcpuid, within the VM + * given by @vm, to the FILE stream given by @stream. 
+ */ +void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, + uint8_t indent); + +void vm_create_irqchip(struct kvm_vm *vm); + +void vm_userspace_mem_region_add(struct kvm_vm *vm, + enum vm_mem_backing_src_type src_type, + uint64_t guest_paddr, uint32_t slot, uint64_t npages, + uint32_t flags); + +void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl, + void *arg); +int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl, + void *arg); +void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); +int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg); +void kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); +int _kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg); +void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags); +void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa); +void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot); +void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid); +vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min); +vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages); +vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm); + +void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + unsigned int npages); +void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); +void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); +vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); +void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); + +/* + * Address Guest Virtual to Guest Physical + * + * Input Args: + * vm - Virtual Machine + * gva - VM virtual address + * + * Output Args: None + * + * Return: + * Equivalent VM physical address + * + * Returns the VM physical address of the translated VM virtual + * address given by @gva. + */ +vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva); + +struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); +void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); +int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); +int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid); +void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid); +void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_guest_debug *debug); +void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_mp_state *mp_state); +struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid); +void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); +void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); + +/* + * VM VCPU Args Set + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * num - number of arguments + * ... - arguments, each of type uint64_t + * + * Output Args: None + * + * Return: None + * + * Sets the first @num function input registers of the VCPU with @vcpuid, + * per the C calling convention of the architecture, to the values given + * as variable args. Each of the variable args is expected to be of type + * uint64_t. The maximum @num can be is specific to the architecture. 
+ */ +void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...); + +void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_sregs *sregs); +void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_sregs *sregs); +int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_sregs *sregs); +void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_fpu *fpu); +void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_fpu *fpu); +void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg); +void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg); +#ifdef __KVM_HAVE_VCPU_EVENTS +void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_vcpu_events *events); +void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_vcpu_events *events); +#endif +#ifdef __x86_64__ +void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_nested_state *state); +int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, + struct kvm_nested_state *state, bool ignore_error); +#endif +void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid); + +int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr); +int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr); +int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd); +int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test); +int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, + void *val, bool write); +int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, + void *val, bool write); +void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); +int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level); + +int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr); +int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr); +int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr, void *val, bool write); +int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, + uint64_t attr, void *val, bool write); + +#define KVM_MAX_IRQ_ROUTES 4096 + +struct kvm_irq_routing *kvm_gsi_routing_create(void); +void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, + uint32_t gsi, uint32_t pin); +int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); +void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing); + +const char *exit_reason_str(unsigned int exit_reason); + +void virt_pgd_alloc(struct kvm_vm *vm); + +/* + * VM Virtual Page Map + * + * Input Args: + * vm - Virtual Machine + * vaddr - VM Virtual Address + * paddr - VM Physical Address + * memslot - Memory region slot for new virtual translation tables + * + * Output Args: None + * + * Return: None + * + * Within @vm, creates a virtual translation for the page starting + * at @vaddr to the page starting at @paddr. + */ +void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr); + +vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, + uint32_t memslot); +vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + vm_paddr_t paddr_min, uint32_t memslot); +vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); + +/* + * Create a VM with reasonable defaults + * + * Input Args: + * vcpuid - The id of the single VCPU to add to the VM. 
+ * extra_mem_pages - The number of extra pages to add (this will + * decide how much extra space we will need to + * setup the page tables using memslot 0) + * guest_code - The vCPU's entry point + * + * Output Args: None + * + * Return: + * Pointer to opaque structure that describes the created VM. + */ +struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, + void *guest_code); + +/* Same as vm_create_default, but can be used for more than one vcpu */ +struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages, + uint32_t num_percpu_pages, void *guest_code, + uint32_t vcpuids[]); + +/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as a parameter */ +struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, + uint64_t slot0_mem_pages, uint64_t extra_mem_pages, + uint32_t num_percpu_pages, void *guest_code, + uint32_t vcpuids[]); + +/* + * Adds a vCPU with reasonable defaults (e.g. a stack) + * + * Input Args: + * vm - Virtual Machine + * vcpuid - The id of the VCPU to add to the VM. + * guest_code - The vCPU's entry point + */ +void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); + +bool vm_is_unrestricted_guest(struct kvm_vm *vm); + +unsigned int vm_get_page_size(struct kvm_vm *vm); +unsigned int vm_get_page_shift(struct kvm_vm *vm); +unsigned long vm_compute_max_gfn(struct kvm_vm *vm); +uint64_t vm_get_max_gfn(struct kvm_vm *vm); +int vm_get_fd(struct kvm_vm *vm); + +unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size); +unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages); +unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages); +static inline unsigned int +vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) +{ + unsigned int n; + n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages)); +#ifdef __s390x__ + /* s390 requires 1M aligned guest sizes */ + n = (n + 255) & ~255; +#endif + return n; +} + +struct kvm_userspace_memory_region * +kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, + uint64_t end); + +struct kvm_dirty_log * +allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region); + +int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd); + +#define sync_global_to_guest(vm, g) ({ \ + typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + memcpy(_p, &(g), sizeof(g)); \ +}) + +#define sync_global_from_guest(vm, g) ({ \ + typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \ + memcpy(&(g), _p, sizeof(g)); \ +}) + +void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid); + +/* Common ucalls */ +enum { + UCALL_NONE, + UCALL_SYNC, + UCALL_ABORT, + UCALL_DONE, + UCALL_UNHANDLED, +}; + +#define UCALL_MAX_ARGS 6 + +struct ucall { + uint64_t cmd; + uint64_t args[UCALL_MAX_ARGS]; +}; + +void ucall_init(struct kvm_vm *vm, void *arg); +void ucall_uninit(struct kvm_vm *vm); +void ucall(uint64_t cmd, int nargs, ...); +uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc); + +#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ + ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4) +#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage) +#define GUEST_DONE() ucall(UCALL_DONE, 0) +#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) 
do { \ + if (!(_condition)) \ + ucall(UCALL_ABORT, 2 + _nargs, \ + "Failed guest assert: " \ + _condstr, __LINE__, _args); \ +} while (0) + +#define GUEST_ASSERT(_condition) \ + __GUEST_ASSERT(_condition, #_condition, 0, 0) + +#define GUEST_ASSERT_1(_condition, arg1) \ + __GUEST_ASSERT(_condition, #_condition, 1, (arg1)) + +#define GUEST_ASSERT_2(_condition, arg1, arg2) \ + __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2)) + +#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \ + __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3)) + +#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \ + __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4)) + +#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b) + +int vm_get_stats_fd(struct kvm_vm *vm); +int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid); + +uint32_t guest_get_vcpuid(void); + +#endif /* SELFTEST_KVM_UTIL_BASE_H */ From 96c1a6285568d31a8125c36974a140fccbe548af Mon Sep 17 00:00:00 2001 From: Michael Roth Date: Fri, 10 Dec 2021 10:46:12 -0600 Subject: [PATCH 243/268] kvm: selftests: move ucall declarations into ucall_common.h Now that core kvm_util declarations have special home in kvm_util_base.h, move ucall-related declarations out into a separate header. Signed-off-by: Michael Roth Message-Id: <20211210164620.11636-3-michael.roth@amd.com> Signed-off-by: Paolo Bonzini --- .../testing/selftests/kvm/include/kvm_util.h | 1 + .../selftests/kvm/include/kvm_util_base.h | 49 --------------- .../selftests/kvm/include/ucall_common.h | 59 +++++++++++++++++++ 3 files changed, 60 insertions(+), 49 deletions(-) create mode 100644 tools/testing/selftests/kvm/include/ucall_common.h diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index c860ced3888d..c9286811a4cb 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -8,5 +8,6 @@ #define SELFTEST_KVM_UTIL_H #include "kvm_util_base.h" +#include "ucall_common.h" #endif /* SELFTEST_KVM_UTIL_H */ diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 749da2c4bab0..1e5ab6a92848 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -389,55 +389,6 @@ int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd); void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid); -/* Common ucalls */ -enum { - UCALL_NONE, - UCALL_SYNC, - UCALL_ABORT, - UCALL_DONE, - UCALL_UNHANDLED, -}; - -#define UCALL_MAX_ARGS 6 - -struct ucall { - uint64_t cmd; - uint64_t args[UCALL_MAX_ARGS]; -}; - -void ucall_init(struct kvm_vm *vm, void *arg); -void ucall_uninit(struct kvm_vm *vm); -void ucall(uint64_t cmd, int nargs, ...); -uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc); - -#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ - ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4) -#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage) -#define GUEST_DONE() ucall(UCALL_DONE, 0) -#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) 
do { \ - if (!(_condition)) \ - ucall(UCALL_ABORT, 2 + _nargs, \ - "Failed guest assert: " \ - _condstr, __LINE__, _args); \ -} while (0) - -#define GUEST_ASSERT(_condition) \ - __GUEST_ASSERT(_condition, #_condition, 0, 0) - -#define GUEST_ASSERT_1(_condition, arg1) \ - __GUEST_ASSERT(_condition, #_condition, 1, (arg1)) - -#define GUEST_ASSERT_2(_condition, arg1, arg2) \ - __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2)) - -#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \ - __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3)) - -#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \ - __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4)) - -#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b) - int vm_get_stats_fd(struct kvm_vm *vm); int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid); diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h new file mode 100644 index 000000000000..9eecc9d40b79 --- /dev/null +++ b/tools/testing/selftests/kvm/include/ucall_common.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * tools/testing/selftests/kvm/include/kvm_util.h + * + * Copyright (C) 2018, Google LLC. + */ +#ifndef SELFTEST_KVM_UCALL_COMMON_H +#define SELFTEST_KVM_UCALL_COMMON_H + +/* Common ucalls */ +enum { + UCALL_NONE, + UCALL_SYNC, + UCALL_ABORT, + UCALL_DONE, + UCALL_UNHANDLED, +}; + +#define UCALL_MAX_ARGS 6 + +struct ucall { + uint64_t cmd; + uint64_t args[UCALL_MAX_ARGS]; +}; + +void ucall_init(struct kvm_vm *vm, void *arg); +void ucall_uninit(struct kvm_vm *vm); +void ucall(uint64_t cmd, int nargs, ...); +uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc); + +#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ + ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4) +#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage) +#define GUEST_DONE() ucall(UCALL_DONE, 0) +#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) do { \ + if (!(_condition)) \ + ucall(UCALL_ABORT, 2 + _nargs, \ + "Failed guest assert: " \ + _condstr, __LINE__, _args); \ +} while (0) + +#define GUEST_ASSERT(_condition) \ + __GUEST_ASSERT(_condition, #_condition, 0, 0) + +#define GUEST_ASSERT_1(_condition, arg1) \ + __GUEST_ASSERT(_condition, #_condition, 1, (arg1)) + +#define GUEST_ASSERT_2(_condition, arg1, arg2) \ + __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2)) + +#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \ + __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3)) + +#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \ + __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4)) + +#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b) + +#endif /* SELFTEST_KVM_UCALL_COMMON_H */ From 980fe2fddcff21937c93532b4597c8ea450346c1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Jan 2022 04:35:12 -0800 Subject: [PATCH 244/268] x86/fpu: Extend fpu_xstate_prctl() with guest permissions KVM requires a clear separation of host user space and guest permissions for dynamic XSTATE components. Add a guest permissions member to struct fpu and a separate set of prctl() arguments: ARCH_GET_XCOMP_GUEST_PERM and ARCH_REQ_XCOMP_GUEST_PERM. 
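A minimal userspace sketch of the intended flow, assuming the request is routed through raw syscall(2) and using XFEATURE_XTILE_DATA (xfeature index 18) purely as the example dynamic component; the ARCH_* option values below mirror the uapi hunk in this patch:

#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

#define ARCH_GET_XCOMP_GUEST_PERM	0x1024
#define ARCH_REQ_XCOMP_GUEST_PERM	0x1025
#define XFEATURE_XTILE_DATA		18	/* example: AMX tile data */

static int request_guest_amx_perm(void)
{
	uint64_t permitted = 0;

	/* Must be issued before the process creates its first vCPU. */
	if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
		    XFEATURE_XTILE_DATA))
		return -1;

	/* Read back the guest permission bitmap as a sanity check. */
	if (syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &permitted))
		return -1;

	return (permitted & (1ULL << XFEATURE_XTILE_DATA)) ? 0 : -1;
}

Keeping this separate from ARCH_REQ_XCOMP_PERM lets a VMM widen what its guests may use without changing what its own threads are permitted to use.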
The semantics are equivalent to the host user space permission control except for the following constraints: 1) Permissions have to be requested before the first vCPU is created 2) Permissions are frozen when the first vCPU is created to ensure consistency. Any attempt to expand permissions via the prctl() after that point is rejected. Signed-off-by: Thomas Gleixner Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-2-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/fpu/api.h | 2 ++ arch/x86/include/asm/fpu/types.h | 9 ++++++ arch/x86/include/uapi/asm/prctl.h | 26 ++++++++------- arch/x86/kernel/fpu/core.c | 3 ++ arch/x86/kernel/fpu/xstate.c | 53 +++++++++++++++++++++++-------- arch/x86/kernel/fpu/xstate.h | 13 ++++++-- arch/x86/kernel/process.c | 2 ++ 7 files changed, 80 insertions(+), 28 deletions(-) diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index c2767a6a387e..d8c222290e68 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -132,6 +132,8 @@ static inline void fpstate_free(struct fpu *fpu) { } /* fpstate-related functions which are exported to KVM */ extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature); +extern inline u64 xstate_get_guest_group_perm(void); + /* KVM specific functions */ extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu); extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu); diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 3c06c82ab355..6ddf80637697 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -387,6 +387,8 @@ struct fpstate { /* @regs is dynamically sized! Don't add anything after @regs! 
*/ } __aligned(64); +#define FPU_GUEST_PERM_LOCKED BIT_ULL(63) + struct fpu_state_perm { /* * @__state_perm: @@ -476,6 +478,13 @@ struct fpu { */ struct fpu_state_perm perm; + /* + * @guest_perm: + * + * Permission related information for guest pseudo FPUs + */ + struct fpu_state_perm guest_perm; + /* * @__fpstate: * diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h index 754a07856817..500b96e71f18 100644 --- a/arch/x86/include/uapi/asm/prctl.h +++ b/arch/x86/include/uapi/asm/prctl.h @@ -2,20 +2,22 @@ #ifndef _ASM_X86_PRCTL_H #define _ASM_X86_PRCTL_H -#define ARCH_SET_GS 0x1001 -#define ARCH_SET_FS 0x1002 -#define ARCH_GET_FS 0x1003 -#define ARCH_GET_GS 0x1004 +#define ARCH_SET_GS 0x1001 +#define ARCH_SET_FS 0x1002 +#define ARCH_GET_FS 0x1003 +#define ARCH_GET_GS 0x1004 -#define ARCH_GET_CPUID 0x1011 -#define ARCH_SET_CPUID 0x1012 +#define ARCH_GET_CPUID 0x1011 +#define ARCH_SET_CPUID 0x1012 -#define ARCH_GET_XCOMP_SUPP 0x1021 -#define ARCH_GET_XCOMP_PERM 0x1022 -#define ARCH_REQ_XCOMP_PERM 0x1023 +#define ARCH_GET_XCOMP_SUPP 0x1021 +#define ARCH_GET_XCOMP_PERM 0x1022 +#define ARCH_REQ_XCOMP_PERM 0x1023 +#define ARCH_GET_XCOMP_GUEST_PERM 0x1024 +#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025 -#define ARCH_MAP_VDSO_X32 0x2001 -#define ARCH_MAP_VDSO_32 0x2002 -#define ARCH_MAP_VDSO_64 0x2003 +#define ARCH_MAP_VDSO_X32 0x2001 +#define ARCH_MAP_VDSO_32 0x2002 +#define ARCH_MAP_VDSO_64 0x2003 #endif /* _ASM_X86_PRCTL_H */ diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 8ea306b1bf8e..ab19b3d8b2f7 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -450,6 +450,8 @@ void fpstate_reset(struct fpu *fpu) fpu->perm.__state_perm = fpu_kernel_cfg.default_features; fpu->perm.__state_size = fpu_kernel_cfg.default_size; fpu->perm.__user_state_size = fpu_user_cfg.default_size; + /* Same defaults for guests */ + fpu->guest_perm = fpu->perm; } static inline void fpu_inherit_perms(struct fpu *dst_fpu) @@ -460,6 +462,7 @@ static inline void fpu_inherit_perms(struct fpu *dst_fpu) spin_lock_irq(¤t->sighand->siglock); /* Fork also inherits the permissions of the parent */ dst_fpu->perm = src_fpu->perm; + dst_fpu->guest_perm = src_fpu->guest_perm; spin_unlock_irq(¤t->sighand->siglock); } } diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index d28829403ed0..5f01d463859d 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1595,7 +1595,7 @@ static int validate_sigaltstack(unsigned int usize) return 0; } -static int __xstate_request_perm(u64 permitted, u64 requested) +static int __xstate_request_perm(u64 permitted, u64 requested, bool guest) { /* * This deliberately does not exclude !XSAVES as we still might @@ -1605,9 +1605,10 @@ static int __xstate_request_perm(u64 permitted, u64 requested) */ bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES); struct fpu *fpu = ¤t->group_leader->thread.fpu; + struct fpu_state_perm *perm; unsigned int ksize, usize; u64 mask; - int ret; + int ret = 0; /* Check whether fully enabled */ if ((permitted & requested) == requested) @@ -1621,15 +1622,18 @@ static int __xstate_request_perm(u64 permitted, u64 requested) mask &= XFEATURE_MASK_USER_SUPPORTED; usize = xstate_calculate_size(mask, false); - ret = validate_sigaltstack(usize); - if (ret) - return ret; + if (!guest) { + ret = validate_sigaltstack(usize); + if (ret) + return ret; + } + perm = guest ? 
&fpu->guest_perm : &fpu->perm; /* Pairs with the READ_ONCE() in xstate_get_group_perm() */ - WRITE_ONCE(fpu->perm.__state_perm, requested); + WRITE_ONCE(perm->__state_perm, requested); /* Protected by sighand lock */ - fpu->perm.__state_size = ksize; - fpu->perm.__user_state_size = usize; + perm->__state_size = ksize; + perm->__user_state_size = usize; return ret; } @@ -1640,7 +1644,7 @@ static const u64 xstate_prctl_req[XFEATURE_MAX] = { [XFEATURE_XTILE_DATA] = XFEATURE_MASK_XTILE_DATA, }; -static int xstate_request_perm(unsigned long idx) +static int xstate_request_perm(unsigned long idx, bool guest) { u64 permitted, requested; int ret; @@ -1661,14 +1665,19 @@ static int xstate_request_perm(unsigned long idx) return -EOPNOTSUPP; /* Lockless quick check */ - permitted = xstate_get_host_group_perm(); + permitted = xstate_get_group_perm(guest); if ((permitted & requested) == requested) return 0; /* Protect against concurrent modifications */ spin_lock_irq(¤t->sighand->siglock); - permitted = xstate_get_host_group_perm(); - ret = __xstate_request_perm(permitted, requested); + permitted = xstate_get_group_perm(guest); + + /* First vCPU allocation locks the permissions. */ + if (guest && (permitted & FPU_GUEST_PERM_LOCKED)) + ret = -EBUSY; + else + ret = __xstate_request_perm(permitted, requested, guest); spin_unlock_irq(¤t->sighand->siglock); return ret; } @@ -1713,12 +1722,18 @@ int xfd_enable_feature(u64 xfd_err) return 0; } #else /* CONFIG_X86_64 */ -static inline int xstate_request_perm(unsigned long idx) +static inline int xstate_request_perm(unsigned long idx, bool guest) { return -EPERM; } #endif /* !CONFIG_X86_64 */ +inline u64 xstate_get_guest_group_perm(void) +{ + return xstate_get_group_perm(true); +} +EXPORT_SYMBOL_GPL(xstate_get_guest_group_perm); + /** * fpu_xstate_prctl - xstate permission operations * @tsk: Redundant pointer to current @@ -1742,6 +1757,7 @@ long fpu_xstate_prctl(struct task_struct *tsk, int option, unsigned long arg2) u64 __user *uptr = (u64 __user *)arg2; u64 permitted, supported; unsigned long idx = arg2; + bool guest = false; if (tsk != current) return -EPERM; @@ -1760,11 +1776,20 @@ long fpu_xstate_prctl(struct task_struct *tsk, int option, unsigned long arg2) permitted &= XFEATURE_MASK_USER_SUPPORTED; return put_user(permitted, uptr); + case ARCH_GET_XCOMP_GUEST_PERM: + permitted = xstate_get_guest_group_perm(); + permitted &= XFEATURE_MASK_USER_SUPPORTED; + return put_user(permitted, uptr); + + case ARCH_REQ_XCOMP_GUEST_PERM: + guest = true; + fallthrough; + case ARCH_REQ_XCOMP_PERM: if (!IS_ENABLED(CONFIG_X86_64)) return -EOPNOTSUPP; - return xstate_request_perm(idx); + return xstate_request_perm(idx, guest); default: return -EINVAL; diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h index 86ea7c0fa2f6..98a472775c97 100644 --- a/arch/x86/kernel/fpu/xstate.h +++ b/arch/x86/kernel/fpu/xstate.h @@ -20,10 +20,19 @@ static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask) xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT; } +static inline u64 xstate_get_group_perm(bool guest) +{ + struct fpu *fpu = ¤t->group_leader->thread.fpu; + struct fpu_state_perm *perm; + + /* Pairs with WRITE_ONCE() in xstate_request_perm() */ + perm = guest ? 
&fpu->guest_perm : &fpu->perm; + return READ_ONCE(perm->__state_perm); +} + static inline u64 xstate_get_host_group_perm(void) { - /* Pairs with WRITE_ONCE() in xstate_request_perm() */ - return READ_ONCE(current->group_leader->thread.fpu.perm.__state_perm); + return xstate_get_group_perm(false); } enum xstate_copy_mode { diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 04143a653a8a..d7bc23589062 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -993,6 +993,8 @@ long do_arch_prctl_common(struct task_struct *task, int option, case ARCH_GET_XCOMP_SUPP: case ARCH_GET_XCOMP_PERM: case ARCH_REQ_XCOMP_PERM: + case ARCH_GET_XCOMP_GUEST_PERM: + case ARCH_REQ_XCOMP_GUEST_PERM: return fpu_xstate_prctl(task, option, arg2); } From 36487e6228c4cb04257c92266a04078a384bc4ec Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Jan 2022 04:35:13 -0800 Subject: [PATCH 245/268] x86/fpu: Prepare guest FPU for dynamically enabled FPU features To support dynamically enabled FPU features for guests prepare the guest pseudo FPU container to keep track of the currently enabled xfeatures and the guest permissions. Signed-off-by: Thomas Gleixner Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-3-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/fpu/types.h | 13 +++++++++++++ arch/x86/kernel/fpu/core.c | 26 +++++++++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 6ddf80637697..c752d0aa23a4 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -504,6 +504,19 @@ struct fpu { * Guest pseudo FPU container */ struct fpu_guest { + /* + * @xfeatures: xfeature bitmap of features which are + * currently enabled for the guest vCPU. + */ + u64 xfeatures; + + /* + * @perm: xfeature bitmap of features which are + * permitted to be enabled for the guest + * vCPU. + */ + u64 perm; + /* * @fpstate: Pointer to the allocated guest fpstate */ diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index ab19b3d8b2f7..eddeeb4ed2f5 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -201,6 +201,26 @@ void fpu_reset_from_exception_fixup(void) #if IS_ENABLED(CONFIG_KVM) static void __fpstate_reset(struct fpstate *fpstate); +static void fpu_init_guest_permissions(struct fpu_guest *gfpu) +{ + struct fpu_state_perm *fpuperm; + u64 perm; + + if (!IS_ENABLED(CONFIG_X86_64)) + return; + + spin_lock_irq(¤t->sighand->siglock); + fpuperm = ¤t->group_leader->thread.fpu.guest_perm; + perm = fpuperm->__state_perm; + + /* First fpstate allocation locks down permissions. 
*/ + WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED); + + spin_unlock_irq(¤t->sighand->siglock); + + gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED; +} + bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) { struct fpstate *fpstate; @@ -216,7 +236,11 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) fpstate->is_valloc = true; fpstate->is_guest = true; - gfpu->fpstate = fpstate; + gfpu->fpstate = fpstate; + gfpu->xfeatures = fpu_user_cfg.default_features; + gfpu->perm = fpu_user_cfg.default_features; + fpu_init_guest_permissions(gfpu); + return true; } EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate); From cc04b6a21d431359eceeec0d812b492088b04af5 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:14 -0800 Subject: [PATCH 246/268] kvm: x86: Fix xstate_required_size() to follow XSTATE alignment rule CPUID.0xD.1.EBX enumerates the size of the XSAVE area (in compacted format) required by XSAVES. If CPUID.0xD.i.ECX[1] is set for a state component (i), this state component should be located on the next 64-bytes boundary following the preceding state component in the compacted layout. Fix xstate_required_size() to follow the alignment rule. AMX is the first state component with 64-bytes alignment to catch this bug. Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-4-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 0b920e12bb6d..f3e6fda6b858 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -42,7 +42,11 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted) if (xstate_bv & 0x1) { u32 eax, ebx, ecx, edx, offset; cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx); - offset = compacted ? ret : ebx; + /* ECX[1]: 64B alignment in compacted form */ + if (compacted) + offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret; + else + offset = ebx; ret = max(ret, offset + eax); } From 445ecdf79be0c71ca248f7611aeefceaea3ec59f Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:15 -0800 Subject: [PATCH 247/268] kvm: x86: Exclude unpermitted xfeatures at KVM_GET_SUPPORTED_CPUID KVM_GET_SUPPORTED_CPUID should not include any dynamic xstates in CPUID[0xD] if they have not been requested with prctl. Otherwise a process which directly passes KVM_GET_SUPPORTED_CPUID to KVM_SET_CPUID2 would now fail even if it doesn't intend to use a dynamically enabled feature. Userspace must know that prctl is required and allocate >4K xstate buffer before setting any dynamic bit. Suggested-by: Paolo Bonzini Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-5-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 4 ++++ arch/x86/kvm/cpuid.c | 9 ++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 6b683dfea8f2..f4ea5e41a4d0 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -1687,6 +1687,10 @@ userspace capabilities, and with user requirements (for example, the user may wish to constrain cpuid to emulate older hardware, or for feature consistency across a cluster). +Dynamically-enabled feature bits need to be requested with +``arch_prctl()`` before calling this ioctl. Feature bits that have not +been requested are excluded from the result. 
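A sketch of that ordering (assuming the ARCH_REQ_XCOMP_GUEST_PERM value and XFEATURE_XTILE_DATA index carried over from the x86/fpu patches earlier in this series, 128 CPUID entries being sufficient, and error handling trimmed to the essentials):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

#define ARCH_REQ_XCOMP_GUEST_PERM	0x1025
#define XFEATURE_XTILE_DATA		18

int main(void)
{
	int nent = 128;		/* KVM returns E2BIG if this is too small */
	struct kvm_cpuid2 *cpuid;
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;

	/* Request the dynamic component first... */
	syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, XFEATURE_XTILE_DATA);

	/* ...so that leaf 0xD of the result includes the requested bits,
	 * where hardware and KVM support them. */
	cpuid = calloc(1, sizeof(*cpuid) +
			  nent * sizeof(struct kvm_cpuid_entry2));
	cpuid->nent = nent;
	if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0)
		perror("KVM_GET_SUPPORTED_CPUID");

	/* These filtered entries are what would be fed to KVM_SET_CPUID2. */
	free(cpuid);
	close(kvm);
	return 0;
}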
+ Note that certain capabilities, such as KVM_CAP_X86_DISABLE_EXITS, may expose cpuid features (e.g. MONITOR) which are not supported by kvm in its default configuration. If userspace enables such capabilities, it diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index f3e6fda6b858..eb52dde5deec 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -815,11 +815,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) goto out; } break; - case 0xd: - entry->eax &= supported_xcr0; + case 0xd: { + u64 guest_perm = xstate_get_guest_group_perm(); + + entry->eax &= supported_xcr0 & guest_perm; entry->ebx = xstate_required_size(supported_xcr0, false); entry->ecx = entry->ebx; - entry->edx &= supported_xcr0 >> 32; + entry->edx &= (supported_xcr0 & guest_perm) >> 32; if (!supported_xcr0) break; @@ -866,6 +868,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->edx = 0; } break; + } case 0x12: /* Intel SGX */ if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) { From b0237dad2d7f8820b5b415291431d8259e787470 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:16 -0800 Subject: [PATCH 248/268] x86/fpu: Make XFD initialization in __fpstate_reset() a function argument vCPU threads are different from native tasks regarding to the initial XFD value. While all native tasks follow a fixed value (init_fpstate::xfd) established by the FPU core at boot, vCPU threads need to obey the reset value (i.e. ZERO) defined by the specification, to meet the expectation of the guest. Let the caller supply an argument and adjust the host and guest related invocations accordingly. Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Signed-off-by: Thomas Gleixner Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-6-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kernel/fpu/core.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index eddeeb4ed2f5..a78bc547fc03 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -199,7 +199,7 @@ void fpu_reset_from_exception_fixup(void) } #if IS_ENABLED(CONFIG_KVM) -static void __fpstate_reset(struct fpstate *fpstate); +static void __fpstate_reset(struct fpstate *fpstate, u64 xfd); static void fpu_init_guest_permissions(struct fpu_guest *gfpu) { @@ -231,7 +231,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) if (!fpstate) return false; - __fpstate_reset(fpstate); + /* Leave xfd to 0 (the reset value defined by spec) */ + __fpstate_reset(fpstate, 0); fpstate_init_user(fpstate); fpstate->is_valloc = true; fpstate->is_guest = true; @@ -454,21 +455,21 @@ void fpstate_init_user(struct fpstate *fpstate) fpstate_init_fstate(fpstate); } -static void __fpstate_reset(struct fpstate *fpstate) +static void __fpstate_reset(struct fpstate *fpstate, u64 xfd) { /* Initialize sizes and feature masks */ fpstate->size = fpu_kernel_cfg.default_size; fpstate->user_size = fpu_user_cfg.default_size; fpstate->xfeatures = fpu_kernel_cfg.default_features; fpstate->user_xfeatures = fpu_user_cfg.default_features; - fpstate->xfd = init_fpstate.xfd; + fpstate->xfd = xfd; } void fpstate_reset(struct fpu *fpu) { /* Set the fpstate pointer to the default fpstate */ fpu->fpstate = &fpu->__fpstate; - __fpstate_reset(fpu->fpstate); + __fpstate_reset(fpu->fpstate, init_fpstate.xfd); /* Initialize the permission related info in fpu */ fpu->perm.__state_perm = 
fpu_kernel_cfg.default_features; From c270ce393dfd700e7510a4579568deeefba954fd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Jan 2022 04:35:17 -0800 Subject: [PATCH 249/268] x86/fpu: Add guest support to xfd_enable_feature() Guest support for dynamically enabled FPU features requires a few modifications to the enablement function which is currently invoked from the #NM handler: 1) Use guest permissions and sizes for the update 2) Update fpu_guest state accordingly 3) Take into account that the enabling can be triggered either from a running guest via XSETBV and MSR_IA32_XFD write emulation or from a guest restore. In the latter case the guests fpstate is not the current tasks active fpstate. Split the function and implement the guest mechanics throughout the callchain. Signed-off-by: Thomas Gleixner Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-7-yang.zhong@intel.com> [Add 32-bit stub for __xfd_enable_feature. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kernel/fpu/xstate.c | 93 +++++++++++++++++++++--------------- arch/x86/kernel/fpu/xstate.h | 6 +++ 2 files changed, 60 insertions(+), 39 deletions(-) diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 5f01d463859d..0c0b2323cdec 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1499,29 +1499,6 @@ void fpstate_free(struct fpu *fpu) vfree(fpu->fpstate); } -/** - * fpu_install_fpstate - Update the active fpstate in the FPU - * - * @fpu: A struct fpu * pointer - * @newfps: A struct fpstate * pointer - * - * Returns: A null pointer if the last active fpstate is the embedded - * one or the new fpstate is already installed; - * otherwise, a pointer to the old fpstate which has to - * be freed by the caller. - */ -static struct fpstate *fpu_install_fpstate(struct fpu *fpu, - struct fpstate *newfps) -{ - struct fpstate *oldfps = fpu->fpstate; - - if (fpu->fpstate == newfps) - return NULL; - - fpu->fpstate = newfps; - return oldfps != &fpu->__fpstate ? oldfps : NULL; -} - /** * fpstate_realloc - Reallocate struct fpstate for the requested new features * @@ -1529,6 +1506,7 @@ static struct fpstate *fpu_install_fpstate(struct fpu *fpu, * of that task * @ksize: The required size for the kernel buffer * @usize: The required size for user space buffers + * @guest_fpu: Pointer to a guest FPU container. NULL for host allocations * * Note vs. vmalloc(): If the task with a vzalloc()-allocated buffer * terminates quickly, vfree()-induced IPIs may be a concern, but tasks @@ -1537,13 +1515,13 @@ static struct fpstate *fpu_install_fpstate(struct fpu *fpu, * Returns: 0 on success, -ENOMEM on allocation error. */ static int fpstate_realloc(u64 xfeatures, unsigned int ksize, - unsigned int usize) + unsigned int usize, struct fpu_guest *guest_fpu) { struct fpu *fpu = ¤t->thread.fpu; struct fpstate *curfps, *newfps = NULL; unsigned int fpsize; + bool in_use; - curfps = fpu->fpstate; fpsize = ksize + ALIGN(offsetof(struct fpstate, regs), 64); newfps = vzalloc(fpsize); @@ -1553,28 +1531,55 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize, newfps->user_size = usize; newfps->is_valloc = true; + /* + * When a guest FPU is supplied, use @guest_fpu->fpstate + * as reference independent whether it is in use or not. + */ + curfps = guest_fpu ? 
guest_fpu->fpstate : fpu->fpstate; + + /* Determine whether @curfps is the active fpstate */ + in_use = fpu->fpstate == curfps; + + if (guest_fpu) { + newfps->is_guest = true; + newfps->is_confidential = curfps->is_confidential; + newfps->in_use = curfps->in_use; + guest_fpu->xfeatures |= xfeatures; + } + fpregs_lock(); /* - * Ensure that the current state is in the registers before - * swapping fpstate as that might invalidate it due to layout - * changes. + * If @curfps is in use, ensure that the current state is in the + * registers before swapping fpstate as that might invalidate it + * due to layout changes. */ - if (test_thread_flag(TIF_NEED_FPU_LOAD)) + if (in_use && test_thread_flag(TIF_NEED_FPU_LOAD)) fpregs_restore_userregs(); newfps->xfeatures = curfps->xfeatures | xfeatures; newfps->user_xfeatures = curfps->user_xfeatures | xfeatures; newfps->xfd = curfps->xfd & ~xfeatures; - curfps = fpu_install_fpstate(fpu, newfps); - /* Do the final updates within the locked region */ xstate_init_xcomp_bv(&newfps->regs.xsave, newfps->xfeatures); - xfd_update_state(newfps); + if (guest_fpu) { + guest_fpu->fpstate = newfps; + /* If curfps is active, update the FPU fpstate pointer */ + if (in_use) + fpu->fpstate = newfps; + } else { + fpu->fpstate = newfps; + } + + if (in_use) + xfd_update_state(fpu->fpstate); fpregs_unlock(); - vfree(curfps); + /* Only free valloc'ed state */ + if (curfps && curfps->is_valloc) + vfree(curfps); + return 0; } @@ -1682,14 +1687,16 @@ static int xstate_request_perm(unsigned long idx, bool guest) return ret; } -int xfd_enable_feature(u64 xfd_err) +int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu) { u64 xfd_event = xfd_err & XFEATURE_MASK_USER_DYNAMIC; + struct fpu_state_perm *perm; unsigned int ksize, usize; struct fpu *fpu; if (!xfd_event) { - pr_err_once("XFD: Invalid xfd error: %016llx\n", xfd_err); + if (!guest_fpu) + pr_err_once("XFD: Invalid xfd error: %016llx\n", xfd_err); return 0; } @@ -1697,14 +1704,16 @@ int xfd_enable_feature(u64 xfd_err) spin_lock_irq(¤t->sighand->siglock); /* If not permitted let it die */ - if ((xstate_get_host_group_perm() & xfd_event) != xfd_event) { + if ((xstate_get_group_perm(!!guest_fpu) & xfd_event) != xfd_event) { spin_unlock_irq(¤t->sighand->siglock); return -EPERM; } fpu = ¤t->group_leader->thread.fpu; - ksize = fpu->perm.__state_size; - usize = fpu->perm.__user_state_size; + perm = guest_fpu ? &fpu->guest_perm : &fpu->perm; + ksize = perm->__state_size; + usize = perm->__user_state_size; + /* * The feature is permitted. State size is sufficient. Dropping * the lock is safe here even if more features are added from @@ -1717,10 +1726,16 @@ int xfd_enable_feature(u64 xfd_err) * Try to allocate a new fpstate. If that fails there is no way * out. 
*/ - if (fpstate_realloc(xfd_event, ksize, usize)) + if (fpstate_realloc(xfd_event, ksize, usize, guest_fpu)) return -EFAULT; return 0; } + +int xfd_enable_feature(u64 xfd_err) +{ + return __xfd_enable_feature(xfd_err, NULL); +} + #else /* CONFIG_X86_64 */ static inline int xstate_request_perm(unsigned long idx, bool guest) { diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h index 98a472775c97..67ed6bbc19b8 100644 --- a/arch/x86/kernel/fpu/xstate.h +++ b/arch/x86/kernel/fpu/xstate.h @@ -158,8 +158,14 @@ static inline void xfd_update_state(struct fpstate *fpstate) } } } + +extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu); #else static inline void xfd_update_state(struct fpstate *fpstate) { } + +static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) { + return -EPERM; +} #endif /* From 0781d60f658e25fbad3b6e4261f54eb1cd3dc302 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 5 Jan 2022 04:35:18 -0800 Subject: [PATCH 250/268] x86/fpu: Provide fpu_enable_guest_xfd_features() for KVM Provide a wrapper for expanding the guest fpstate buffer according to requested xfeatures. KVM wants to call this wrapper to manage any dynamic xstate used by the guest. Suggested-by: Sean Christopherson Signed-off-by: Sean Christopherson Signed-off-by: Kevin Tian Signed-off-by: Yang Zhong Reviewed-by: Paolo Bonzini Message-Id: <20220105123532.12586-8-yang.zhong@intel.com> [Remove unnecessary 32-bit check. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/fpu/api.h | 1 + arch/x86/kernel/fpu/core.c | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index d8c222290e68..1ed2a247a84e 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -138,6 +138,7 @@ extern inline u64 xstate_get_guest_group_perm(void); extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu); extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu); extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest); +extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures); extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru); extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index a78bc547fc03..64b2ee39bece 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -261,6 +261,28 @@ void fpu_free_guest_fpstate(struct fpu_guest *gfpu) } EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate); +/* + * fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable + * @guest_fpu: Pointer to the guest FPU container + * @xfeatures: Features requested by guest CPUID + * + * Enable all dynamic xfeatures according to guest perm and requested CPUID. + * + * Return: 0 on success, error code otherwise + */ +int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures) +{ + lockdep_assert_preemption_enabled(); + + /* Nothing to do if all requested features are already enabled. 
*/ + xfeatures &= ~guest_fpu->xfeatures; + if (!xfeatures) + return 0; + + return __xfd_enable_feature(xfeatures, guest_fpu); +} +EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features); + int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) { struct fpstate *guest_fps = guest_fpu->fpstate; From 5ab2f45bba4894a0db4af8567da3efd6228dd010 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:19 -0800 Subject: [PATCH 251/268] kvm: x86: Enable dynamic xfeatures at KVM_SET_CPUID2 KVM can request fpstate expansion in two approaches: 1) When intercepting guest updates to XCR0 and XFD MSR; 2) Before vcpu runs (e.g. at KVM_SET_CPUID2); The first option doesn't waste memory for legacy guest if it doesn't support XFD. However doing so introduces more complexity and also imposes an order requirement in the restoring path, i.e. XCR0/XFD must be restored before XSTATE. Given that the agreement is to do the static approach. This is considered a better tradeoff though it does waste 8K memory for legacy guest if its CPUID includes dynamically-enabled xfeatures. Successful fpstate expansion requires userspace VMM to acquire guest xstate permissions before calling KVM_SET_CPUID2. Also take the chance to adjust the indent in kvm_set_cpuid(). Signed-off-by: Jing Liu Signed-off-by: Sean Christopherson Signed-off-by: Kevin Tian Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-9-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 42 +++++++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index eb52dde5deec..a0fedf1514ab 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -84,9 +84,12 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find( return NULL; } -static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent) +static int kvm_check_cpuid(struct kvm_vcpu *vcpu, + struct kvm_cpuid_entry2 *entries, + int nent) { struct kvm_cpuid_entry2 *best; + u64 xfeatures; /* * The existing code assumes virtual address is 48-bit or 57-bit in the @@ -100,7 +103,20 @@ static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent) return -EINVAL; } - return 0; + /* + * Exposing dynamic xfeatures to the guest requires additional + * enabling in the FPU, e.g. to expand the guest XSAVE state size. 
+ */ + best = cpuid_entry2_find(entries, nent, 0xd, 0); + if (!best) + return 0; + + xfeatures = best->eax | ((u64)best->edx << 32); + xfeatures &= XFEATURE_MASK_USER_DYNAMIC; + if (!xfeatures) + return 0; + + return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures); } static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu) @@ -280,21 +296,21 @@ u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu) static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, int nent) { - int r; + int r; - r = kvm_check_cpuid(e2, nent); - if (r) - return r; + r = kvm_check_cpuid(vcpu, e2, nent); + if (r) + return r; - kvfree(vcpu->arch.cpuid_entries); - vcpu->arch.cpuid_entries = e2; - vcpu->arch.cpuid_nent = nent; + kvfree(vcpu->arch.cpuid_entries); + vcpu->arch.cpuid_entries = e2; + vcpu->arch.cpuid_nent = nent; - kvm_update_kvm_cpuid_base(vcpu); - kvm_update_cpuid_runtime(vcpu); - kvm_vcpu_after_set_cpuid(vcpu); + kvm_update_kvm_cpuid_base(vcpu); + kvm_update_cpuid_runtime(vcpu); + kvm_vcpu_after_set_cpuid(vcpu); - return 0; + return 0; } /* when an old userspace process fills a new kernel module */ From 8eb9a48ac1e86a8a59f7123b529d6e498fb1f163 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Wed, 5 Jan 2022 04:35:20 -0800 Subject: [PATCH 252/268] x86/fpu: Provide fpu_update_guest_xfd() for IA32_XFD emulation Guest XFD can be updated either in the emulation path or in the restore path. Provide a wrapper to update guest_fpu::fpstate::xfd. If the guest fpstate is currently in-use, also update the per-cpu xfd cache and the actual MSR. Signed-off-by: Kevin Tian Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-10-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/fpu/api.h | 6 ++++++ arch/x86/kernel/fpu/core.c | 12 ++++++++++++ 2 files changed, 18 insertions(+) diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index 1ed2a247a84e..e4d10155290b 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -140,6 +140,12 @@ extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu); extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest); extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures); +#ifdef CONFIG_X86_64 +extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd); +#else +static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { } +#endif + extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru); extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 64b2ee39bece..271fd5bc043b 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -283,6 +283,18 @@ int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures) } EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features); +#ifdef CONFIG_X86_64 +void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) +{ + fpregs_lock(); + guest_fpu->fpstate->xfd = xfd; + if (guest_fpu->fpstate->in_use) + xfd_update_state(guest_fpu->fpstate); + fpregs_unlock(); +} +EXPORT_SYMBOL_GPL(fpu_update_guest_xfd); +#endif /* CONFIG_X86_64 */ + int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) { struct fpstate *guest_fps = guest_fpu->fpstate; From 820a6ee944e74e57255ac2e90916ecdaade57b95 Mon Sep 17 00:00:00 2001 From: Jing Liu 
Date: Wed, 5 Jan 2022 04:35:21 -0800 Subject: [PATCH 253/268] kvm: x86: Add emulation for IA32_XFD Intel's eXtended Feature Disable (XFD) feature allows the software to dynamically adjust fpstate buffer size for XSAVE features which have large state. Because guest fpstate has been expanded for all possible dynamic xstates at KVM_SET_CPUID2, emulation of the IA32_XFD MSR is straightforward. For write just call fpu_update_guest_xfd() to update the guest fpu container once all the sanity checks are passed. For read simply return the cached value in the container. Signed-off-by: Jing Liu Signed-off-by: Zeng Guang Signed-off-by: Wei Wang Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-11-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a9f1044dd6b2..b18d2838606f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1377,6 +1377,7 @@ static const u32 msrs_to_save_all[] = { MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, + MSR_IA32_XFD, }; static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; @@ -3686,6 +3687,19 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; vcpu->arch.msr_misc_features_enables = data; break; +#ifdef CONFIG_X86_64 + case MSR_IA32_XFD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) + return 1; + + if (data & ~(XFEATURE_MASK_USER_DYNAMIC & + vcpu->arch.guest_supported_xcr0)) + return 1; + + fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); + break; +#endif default: if (kvm_pmu_is_valid_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); @@ -4006,6 +4020,15 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_K7_HWCR: msr_info->data = vcpu->arch.msr_hwcr; break; +#ifdef CONFIG_X86_64 + case MSR_IA32_XFD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) + return 1; + + msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; + break; +#endif default: if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) return kvm_pmu_get_msr(vcpu, msr_info); @@ -6441,6 +6464,10 @@ static void kvm_init_msr_list(void) min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) continue; break; + case MSR_IA32_XFD: + if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) + continue; + break; default: break; } From 1df4fd834e8e2c00973ac2003ad0e6feb8750b31 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:22 -0800 Subject: [PATCH 254/268] x86/fpu: Prepare xfd_err in struct fpu_guest When XFD causes an instruction to generate #NM, IA32_XFD_ERR contains information about which disabled state components are being accessed. The #NM handler is expected to check this information and then enable the state components by clearing IA32_XFD for the faulting task (if having permission). If the XFD_ERR value generated in guest is consumed/clobbered by the host before the guest itself doing so, it may lead to non-XFD-related #NM treated as XFD #NM in host (due to non-zero value in XFD_ERR), or XFD-related #NM treated as non-XFD #NM in guest (XFD_ERR cleared by the host #NM handler). Introduce a new field in fpu_guest to save the guest xfd_err value. 
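To make the hazard concrete, the guest-side consumption of IA32_XFD_ERR can be pictured as the simplified sketch below (illustrative pseudo-handler, not code from this series); if the host clobbers or zeroes the MSR first, the guest either mis-attributes an unrelated #NM or misses a genuine XFD fault:

/* Simplified guest-side #NM logic, for illustration only. */
typedef unsigned long long u64;

/*
 * Returns the IA32_XFD value the guest would write back, given the
 * current XFD and the XFD_ERR snapshot taken in its #NM handler.
 */
static u64 guest_handle_nm(u64 xfd, u64 xfd_err)
{
	if (!(xfd_err & xfd))
		return xfd;		/* not an XFD-induced #NM */

	return xfd & ~xfd_err;		/* re-enable the faulting state component */
}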
KVM is expected to save guest xfd_err before interrupt is enabled and restore it right before entering the guest (with interrupt disabled). Signed-off-by: Jing Liu Signed-off-by: Kevin Tian Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-12-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/fpu/types.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index c752d0aa23a4..3795d0573773 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -517,6 +517,11 @@ struct fpu_guest { */ u64 perm; + /* + * @xfd_err: Save the guest value. + */ + u64 xfd_err; + /* * @fpstate: Pointer to the allocated guest fpstate */ From ec5be88ab29fd9145c7ced20b58fb96f7c6b6890 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:23 -0800 Subject: [PATCH 255/268] kvm: x86: Intercept #NM for saving IA32_XFD_ERR Guest IA32_XFD_ERR is generally modified in two places: - Set by CPU when #NM is triggered; - Cleared by guest in its #NM handler; Intercept #NM for the first case when a nonzero value is written to IA32_XFD. Nonzero indicates that the guest is willing to do dynamic fpstate expansion for certain xfeatures, thus KVM needs to manage and virtualize guest XFD_ERR properly. The vcpu exception bitmap is updated in XFD write emulation according to guest_fpu::xfd. Save the current XFD_ERR value to the guest_fpu container in the #NM VM-exit handler. This must be done with interrupt disabled, otherwise the unsaved MSR value may be clobbered by host activity. The saving operation is conducted conditionally only when guest_fpu:xfd includes a non-zero value. Doing so also avoids misread on a platform which doesn't support XFD but #NM is triggered due to L1 interception. Queueing #NM to the guest is postponed to handle_exception_nmi(). This goes through the nested_vmx check so a virtual vmexit is queued instead when #NM is triggered in L2 but L1 wants to intercept it. Restore the host value (always ZERO outside of the host #NM handler) before enabling interrupt. Restore the guest value from the guest_fpu container right before entering the guest (with interrupt disabled). Suggested-by: Thomas Gleixner Signed-off-by: Jing Liu Signed-off-by: Kevin Tian Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-13-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmcs.h | 5 +++++ arch/x86/kvm/vmx/vmx.c | 48 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 6 ++++++ 3 files changed, 59 insertions(+) diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 6e5de2e2b0da..e325c290a816 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -129,6 +129,11 @@ static inline bool is_machine_check(u32 intr_info) return is_exception_n(intr_info, MC_VECTOR); } +static inline bool is_nm_fault(u32 intr_info) +{ + return is_exception_n(intr_info, NM_VECTOR); +} + /* Undocumented: icebp/int1 */ static inline bool is_icebp(u32 intr_info) { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 7b5abe25e1e5..84f6904cdb6e 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -761,6 +762,13 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match); } + /* + * Trap #NM if guest xfd contains a non-zero value so guest XFD_ERR + * can be saved timely. 
+ */ + if (vcpu->arch.guest_fpu.fpstate->xfd) + eb |= (1u << NM_VECTOR); + vmcs_write32(EXCEPTION_BITMAP, eb); } @@ -1967,6 +1975,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_KERNEL_GS_BASE: vmx_write_guest_kernel_gs_base(vmx, data); break; + case MSR_IA32_XFD: + ret = kvm_set_msr_common(vcpu, msr_info); + /* Update #NM interception according to guest xfd */ + if (!ret) + vmx_update_exception_bitmap(vcpu); + break; #endif case MSR_IA32_SYSENTER_CS: if (is_guest_mode(vcpu)) @@ -4798,6 +4812,17 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) if (is_machine_check(intr_info) || is_nmi(intr_info)) return 1; /* handled by handle_exception_nmi_irqoff() */ + /* + * Queue the exception here instead of in handle_nm_fault_irqoff(). + * This ensures the nested_vmx check is not skipped so vmexit can + * be reflected to L1 (when it intercepts #NM) before reaching this + * point. + */ + if (is_nm_fault(intr_info)) { + kvm_queue_exception(vcpu, NM_VECTOR); + return 1; + } + if (is_invalid_opcode(intr_info)) return handle_ud(vcpu); @@ -6399,6 +6424,26 @@ static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, kvm_after_interrupt(vcpu); } +static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu) +{ + /* + * Save xfd_err to guest_fpu before interrupt is enabled, so the + * MSR value is not clobbered by the host activity before the guest + * has chance to consume it. + * + * Do not blindly read xfd_err here, since this exception might + * be caused by L1 interception on a platform which doesn't + * support xfd at all. + * + * Do it conditionally upon guest_fpu::xfd. xfd_err matters + * only when xfd contains a non-zero value. + * + * Queuing exception is done in vmx_handle_exit. See comment there. + */ + if (vcpu->arch.guest_fpu.fpstate->xfd) + rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); +} + static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) { const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist; @@ -6407,6 +6452,9 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) /* if exit due to PF check for async PF */ if (is_page_fault(intr_info)) vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); + /* if exit due to NM, handle before interrupts are enabled */ + else if (is_nm_fault(intr_info)) + handle_nm_fault_irqoff(&vmx->vcpu); /* Handle machine checks before interrupts are enabled */ else if (is_machine_check(intr_info)) kvm_machine_check(); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b18d2838606f..bb9534590a3a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9952,6 +9952,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_return(); + if (vcpu->arch.guest_fpu.xfd_err) + wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); + if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); set_debugreg(vcpu->arch.eff_db[0], 0); @@ -10015,6 +10018,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) static_call(kvm_x86_handle_exit_irqoff)(vcpu); + if (vcpu->arch.guest_fpu.xfd_err) + wrmsrl(MSR_IA32_XFD_ERR, 0); + /* * Consume any pending interrupts, including the possible source of * VM-Exit on SVM and any ticks that occur between VM-Exit and now. From 548e83650a51dce0d188b9e41b1e2ca5d63597cf Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:24 -0800 Subject: [PATCH 256/268] kvm: x86: Emulate IA32_XFD_ERR for guest Emulate read/write to IA32_XFD_ERR MSR. 
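For both IA32_XFD and IA32_XFD_ERR the write emulation only accepts bits that are both dynamically enabled and supported by the guest's XCR0; a stand-alone restatement of that filter (constants copied here for illustration, with XFEATURE_MASK_USER_DYNAMIC currently covering only XTILEDATA):

#include <stdbool.h>
#include <stdint.h>

#define XFEATURE_MASK_XTILE_DATA	(1ULL << 18)
/* Currently the only user-dynamic xfeature; mirrored from this series. */
#define XFEATURE_MASK_USER_DYNAMIC	XFEATURE_MASK_XTILE_DATA

static bool xfd_msr_write_allowed(uint64_t data, uint64_t guest_supported_xcr0)
{
	/* Reject any bit that is not both dynamic and supported for this guest. */
	return !(data & ~(XFEATURE_MASK_USER_DYNAMIC & guest_supported_xcr0));
}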
Only the saved value in the guest_fpu container is touched in the emulation handler. Actual MSR update is handled right before entering the guest (with preemption disabled) Signed-off-by: Jing Liu Signed-off-by: Zeng Guang Signed-off-by: Wei Wang Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-14-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bb9534590a3a..2475b64cb762 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1377,7 +1377,7 @@ static const u32 msrs_to_save_all[] = { MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, - MSR_IA32_XFD, + MSR_IA32_XFD, MSR_IA32_XFD_ERR, }; static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; @@ -3699,6 +3699,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); break; + case MSR_IA32_XFD_ERR: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) + return 1; + + if (data & ~(XFEATURE_MASK_USER_DYNAMIC & + vcpu->arch.guest_supported_xcr0)) + return 1; + + vcpu->arch.guest_fpu.xfd_err = data; + break; #endif default: if (kvm_pmu_is_valid_msr(vcpu, msr)) @@ -4028,6 +4039,13 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; break; + case MSR_IA32_XFD_ERR: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) + return 1; + + msr_info->data = vcpu->arch.guest_fpu.xfd_err; + break; #endif default: if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) @@ -6465,6 +6483,7 @@ static void kvm_init_msr_list(void) continue; break; case MSR_IA32_XFD: + case MSR_IA32_XFD_ERR: if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) continue; break; From 61f208134a871047f1d642ed3b813f4f71e30b0e Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:25 -0800 Subject: [PATCH 257/268] kvm: x86: Disable RDMSR interception of IA32_XFD_ERR This saves one unnecessary VM-exit in guest #NM handler, given that the MSR is already restored with the guest value before the guest is resumed. 
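Since both MSRs are now in the save list reported to userspace, a VMM migrates them like any other MSR; a minimal sketch, with the MSR indices spelled out as an assumption for headers that do not define them:

#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#ifndef MSR_IA32_XFD
#define MSR_IA32_XFD		0x000001c4	/* assumed index */
#define MSR_IA32_XFD_ERR	0x000001c5	/* assumed index */
#endif

struct xfd_msrs {
	struct kvm_msrs hdr;
	struct kvm_msr_entry entries[2];
};

static int save_xfd_msrs(int vcpu_fd, uint64_t out[2])
{
	struct xfd_msrs m;

	memset(&m, 0, sizeof(m));
	m.hdr.nmsrs = 2;
	m.entries[0].index = MSR_IA32_XFD;
	m.entries[1].index = MSR_IA32_XFD_ERR;

	/* KVM_GET_MSRS returns the number of MSRs successfully read. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &m.hdr) != 2)
		return -1;

	out[0] = m.entries[0].data;
	out[1] = m.entries[1].data;
	return 0;
}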
Suggested-by: Paolo Bonzini Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-15-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 6 ++++++ arch/x86/kvm/vmx/vmx.h | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 84f6904cdb6e..b8b7f5c7b3df 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -162,6 +162,7 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = { MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE, + MSR_IA32_XFD_ERR, #endif MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, @@ -7288,6 +7289,11 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) } } + if (kvm_cpu_cap_has(X86_FEATURE_XFD)) + vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R, + !guest_cpuid_has(vcpu, X86_FEATURE_XFD)); + + set_cr4_guest_host_mask(vmx); vmx_write_encls_bitmap(vcpu, NULL); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 18111368cf85..69dd2f85abdc 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -349,7 +349,7 @@ struct vcpu_vmx { struct lbr_desc lbr_desc; /* Save desired MSR intercept (read: pass-through) state */ -#define MAX_POSSIBLE_PASSTHROUGH_MSRS 13 +#define MAX_POSSIBLE_PASSTHROUGH_MSRS 14 struct { DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS); DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS); From 86aff7a4799286635efd94dab17b513544703cad Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:26 -0800 Subject: [PATCH 258/268] kvm: x86: Add XCR0 support for Intel AMX Two XCR0 bits are defined for AMX to support XSAVE mechanism. Bit 17 is for tilecfg and bit 18 is for tiledata. The value of XCR0[17:18] is always either 00b or 11b. Also, SDM recommends that only 64-bit operating systems enable Intel AMX by setting XCR0[18:17]. 32-bit host kernel never sets the tile bits in vcpu->arch.guest_supported_xcr0. Signed-off-by: Jing Liu Signed-off-by: Kevin Tian Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-16-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2475b64cb762..993eee6451ea 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -211,7 +211,7 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs; #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \ - | XFEATURE_MASK_PKRU) + | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE) u64 __read_mostly host_efer; EXPORT_SYMBOL_GPL(host_efer); @@ -1010,6 +1010,11 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512) return 1; } + + if ((xcr0 & XFEATURE_MASK_XTILE) && + ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE)) + return 1; + vcpu->arch.xcr0 = xcr0; if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND) From 690a757d610e50c2c3acd2e4bc3992cfc63feff2 Mon Sep 17 00:00:00 2001 From: Jing Liu Date: Wed, 5 Jan 2022 04:35:27 -0800 Subject: [PATCH 259/268] kvm: x86: Add CPUID support for Intel AMX Extend CPUID emulation to support XFD, AMX_TILE, AMX_INT8 and AMX_BF16. Adding those bits into kvm_cpu_caps finally activates all previous logics in this series. Hide XFD on 32bit host kernels. 
Otherwise it leads to a weird situation where KVM tells userspace to migrate MSR_IA32_XFD and then rejects attempts to read/write the MSR. Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-17-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/kvm/cpuid.c | 27 +++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index d5b5f2ab87a0..da872b6f8d8b 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -299,7 +299,9 @@ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ +#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */ #define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */ +#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index a0fedf1514ab..ba4c3d5d2386 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -442,9 +442,11 @@ void kvm_set_cpu_caps(void) #ifdef CONFIG_X86_64 unsigned int f_gbpages = F(GBPAGES); unsigned int f_lm = F(LM); + unsigned int f_xfd = F(XFD); #else unsigned int f_gbpages = 0; unsigned int f_lm = 0; + unsigned int f_xfd = 0; #endif memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); @@ -512,7 +514,8 @@ void kvm_set_cpu_caps(void) F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) | - F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) + F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) | + F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) ); /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */ @@ -531,7 +534,7 @@ void kvm_set_cpu_caps(void) ); kvm_cpu_cap_mask(CPUID_D_1_EAX, - F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) + F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd ); kvm_cpu_cap_init_scattered(CPUID_12_EAX, @@ -657,6 +660,8 @@ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, case 0x14: case 0x17: case 0x18: + case 0x1d: + case 0x1e: case 0x1f: case 0x8000001d: entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; @@ -929,6 +934,24 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) goto out; } break; + /* Intel AMX TILE */ + case 0x1d: + if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; + } + + for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { + if (!do_host_cpuid(array, function, i)) + goto out; + } + break; + case 0x1e: /* TMUL information */ + if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; + } + break; case KVM_CPUID_SIGNATURE: { const u32 *sigptr = (const u32 *)KVM_SIGNATURE; entry->eax = KVM_CPUID_FEATURES; From c60427dd50ba9b20063ccaed0e98d62e886d7a3b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Jan 2022 04:35:28 -0800 Subject: [PATCH 260/268] x86/fpu: Add uabi_size to guest_fpu Userspace needs to inquire KVM about the buffer size to work with the new KVM_SET_XSAVE and KVM_GET_XSAVE2. Add the size info to guest_fpu for KVM to access. 
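In userspace the expected flow is to size the buffer from the capability check and then use it for both directions; a minimal sketch, assuming headers that already carry the KVM_CAP_XSAVE2 and KVM_GET_XSAVE2 definitions added later in this series:

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Illustrative only: query the size on the VM fd, use it on the vCPU fd. */
static struct kvm_xsave *get_xsave2(int vm_fd, int vcpu_fd)
{
	struct kvm_xsave *xsave;
	int size;

	size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
	if (size < (int)sizeof(struct kvm_xsave))
		size = sizeof(struct kvm_xsave);	/* never below 4096 */

	xsave = calloc(1, size);
	if (!xsave)
		return NULL;

	if (ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave)) {
		free(xsave);
		return NULL;
	}
	/* KVM_SET_XSAVE accepts the same, possibly larger than 4K, buffer. */
	return xsave;
}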
Signed-off-by: Thomas Gleixner Signed-off-by: Wei Wang Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-18-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/fpu/types.h | 5 +++++ arch/x86/kernel/fpu/core.c | 1 + arch/x86/kernel/fpu/xstate.c | 1 + 3 files changed, 7 insertions(+) diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 3795d0573773..eb7cd1139d97 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -522,6 +522,11 @@ struct fpu_guest { */ u64 xfd_err; + /* + * @uabi_size: Size required for save/restore + */ + unsigned int uabi_size; + /* * @fpstate: Pointer to the allocated guest fpstate */ diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 271fd5bc043b..de8e8c21f355 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -240,6 +240,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) gfpu->fpstate = fpstate; gfpu->xfeatures = fpu_user_cfg.default_features; gfpu->perm = fpu_user_cfg.default_features; + gfpu->uabi_size = fpu_user_cfg.default_size; fpu_init_guest_permissions(gfpu); return true; diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 0c0b2323cdec..10fe072f1c92 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1545,6 +1545,7 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize, newfps->is_confidential = curfps->is_confidential; newfps->in_use = curfps->in_use; guest_fpu->xfeatures |= xfeatures; + guest_fpu->uabi_size = usize; } fpregs_lock(); From be50b2065dfa3d88428fdfdc340d154d96bf6848 Mon Sep 17 00:00:00 2001 From: Guang Zeng Date: Wed, 5 Jan 2022 04:35:29 -0800 Subject: [PATCH 261/268] kvm: x86: Add support for getting/setting expanded xstate buffer With KVM_CAP_XSAVE, userspace uses a hardcoded 4KB buffer to get/set xstate data from/to KVM. This doesn't work when dynamic xfeatures (e.g. AMX) are exposed to the guest as they require a larger buffer size. Introduce a new capability (KVM_CAP_XSAVE2). Userspace VMM gets the required xstate buffer size via KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2). KVM_SET_XSAVE is extended to work with both legacy and new capabilities by doing properly-sized memdup_user() based on the guest fpu container. KVM_GET_XSAVE is kept for backward-compatible reason. Instead, KVM_GET_XSAVE2 is introduced under KVM_CAP_XSAVE2 as the preferred interface for getting xstate buffer (4KB or larger size) from KVM (Link: https://lkml.org/lkml/2021/12/15/510) Also, update the api doc with the new KVM_GET_XSAVE2 ioctl. Signed-off-by: Guang Zeng Signed-off-by: Wei Wang Signed-off-by: Jing Liu Signed-off-by: Kevin Tian Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-19-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 42 ++++++++++++++++++++++++++++-- arch/x86/include/uapi/asm/kvm.h | 16 +++++++++++- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/cpuid.h | 2 ++ arch/x86/kvm/x86.c | 45 ++++++++++++++++++++++++++++++++- include/uapi/linux/kvm.h | 4 +++ 6 files changed, 106 insertions(+), 5 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index f4ea5e41a4d0..d3791a14eb9a 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -1569,6 +1569,7 @@ otherwise it will return EBUSY error. struct kvm_xsave { __u32 region[1024]; + __u32 extra[0]; }; This ioctl would copy current vcpu's xsave struct to the userspace. 
@@ -1577,7 +1578,7 @@ This ioctl would copy current vcpu's xsave struct to the userspace. 4.43 KVM_SET_XSAVE ------------------ -:Capability: KVM_CAP_XSAVE +:Capability: KVM_CAP_XSAVE and KVM_CAP_XSAVE2 :Architectures: x86 :Type: vcpu ioctl :Parameters: struct kvm_xsave (in) @@ -1588,9 +1589,18 @@ This ioctl would copy current vcpu's xsave struct to the userspace. struct kvm_xsave { __u32 region[1024]; + __u32 extra[0]; }; -This ioctl would copy userspace's xsave struct to the kernel. +This ioctl would copy userspace's xsave struct to the kernel. It copies +as many bytes as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2), +when invoked on the vm file descriptor. The size value returned by +KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) will always be at least 4096. +Currently, it is only greater than 4096 if a dynamic feature has been +enabled with ``arch_prctl()``, but this may change in the future. + +The offsets of the state save areas in struct kvm_xsave follow the +contents of CPUID leaf 0xD on the host. 4.44 KVM_GET_XCRS @@ -5535,6 +5545,34 @@ the trailing ``'\0'``, is indicated by ``name_size`` in the header. The Stats Data block contains an array of 64-bit values in the same order as the descriptors in Descriptors block. +4.42 KVM_GET_XSAVE2 +------------------ + +:Capability: KVM_CAP_XSAVE2 +:Architectures: x86 +:Type: vcpu ioctl +:Parameters: struct kvm_xsave (out) +:Returns: 0 on success, -1 on error + + +:: + + struct kvm_xsave { + __u32 region[1024]; + __u32 extra[0]; + }; + +This ioctl would copy current vcpu's xsave struct to the userspace. It +copies as many bytes as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) +when invoked on the vm file descriptor. The size value returned by +KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) will always be at least 4096. +Currently, it is only greater than 4096 if a dynamic feature has been +enabled with ``arch_prctl()``, but this may change in the future. + +The offsets of the state save areas in struct kvm_xsave follow the contents +of CPUID leaf 0xD on the host. + + 5. The kvm_run structure ======================== diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 5a776a08f78c..2da3316bb559 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -373,9 +373,23 @@ struct kvm_debugregs { __u64 reserved[9]; }; -/* for KVM_CAP_XSAVE */ +/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */ struct kvm_xsave { + /* + * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes + * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) + * respectively, when invoked on the vm file descriptor. + * + * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) + * will always be at least 4096. Currently, it is only greater + * than 4096 if a dynamic feature has been enabled with + * ``arch_prctl()``, but this may change in the future. + * + * The offsets of the state save areas in struct kvm_xsave follow + * the contents of CPUID leaf 0xD on the host. 
+ */ __u32 region[1024]; + __u32 extra[0]; }; #define KVM_MAX_XCRS 16 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index ba4c3d5d2386..c55e57b30e81 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -32,7 +32,7 @@ u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; EXPORT_SYMBOL_GPL(kvm_cpu_caps); -static u32 xstate_required_size(u64 xstate_bv, bool compacted) +u32 xstate_required_size(u64 xstate_bv, bool compacted) { int feature_bit = 0; u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index c99edfff7f82..8a770b481d9d 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -30,6 +30,8 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool exact_only); +u32 xstate_required_size(u64 xstate_bv, bool compacted); + int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu); u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 993eee6451ea..bde18ca657db 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4314,6 +4314,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) else r = 0; break; + case KVM_CAP_XSAVE2: { + u64 guest_perm = xstate_get_guest_group_perm(); + + r = xstate_required_size(supported_xcr0 & guest_perm, false); + if (r < sizeof(struct kvm_xsave)) + r = sizeof(struct kvm_xsave); + break; + } default: break; } @@ -4917,6 +4925,16 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, vcpu->arch.pkru); } +static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, + u8 *state, unsigned int size) +{ + if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) + return; + + fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, + state, size, vcpu->arch.pkru); +} + static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { @@ -5370,6 +5388,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, break; } case KVM_GET_XSAVE: { + r = -EINVAL; + if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) + break; + u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT); r = -ENOMEM; if (!u.xsave) @@ -5384,7 +5406,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, break; } case KVM_SET_XSAVE: { - u.xsave = memdup_user(argp, sizeof(*u.xsave)); + int size = vcpu->arch.guest_fpu.uabi_size; + + u.xsave = memdup_user(argp, size); if (IS_ERR(u.xsave)) { r = PTR_ERR(u.xsave); goto out_nofree; @@ -5393,6 +5417,25 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; } + + case KVM_GET_XSAVE2: { + int size = vcpu->arch.guest_fpu.uabi_size; + + u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT); + r = -ENOMEM; + if (!u.xsave) + break; + + kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); + + r = -EFAULT; + if (copy_to_user(argp, u.xsave, size)) + break; + + r = 0; + break; + } + case KVM_GET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT); r = -ENOMEM; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index fbfd70d965c6..9563d294f181 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1132,6 +1132,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_MTE 205 #define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 #define KVM_CAP_VM_GPA_BITS 207 +#define KVM_CAP_XSAVE2 208 #ifdef KVM_CAP_IRQ_ROUTING @@ -1622,6 +1623,9 @@ struct kvm_enc_region { #define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3) #define KVM_S390_CLEAR_RESET 
_IO(KVMIO, 0xc4) +/* Available with KVM_CAP_XSAVE2 */ +#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) + struct kvm_s390_pv_sec_parm { __u64 origin; __u64 length; From 415a3c33e847349c0f76575b3ebfdfae2f5a681a Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Wed, 5 Jan 2022 04:35:30 -0800 Subject: [PATCH 262/268] kvm: selftests: Add support for KVM_CAP_XSAVE2 When KVM_CAP_XSAVE2 is supported, userspace is expected to allocate buffer for KVM_GET_XSAVE2 and KVM_SET_XSAVE using the size returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2). Signed-off-by: Wei Wang Signed-off-by: Guang Zeng Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-20-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- tools/arch/x86/include/uapi/asm/kvm.h | 16 ++++- tools/include/uapi/linux/kvm.h | 3 + .../selftests/kvm/include/kvm_util_base.h | 2 + .../selftests/kvm/include/x86_64/processor.h | 10 +++ tools/testing/selftests/kvm/lib/kvm_util.c | 32 +++++++++ .../selftests/kvm/lib/x86_64/processor.c | 67 ++++++++++++++++++- .../testing/selftests/kvm/x86_64/evmcs_test.c | 2 +- tools/testing/selftests/kvm/x86_64/smm_test.c | 2 +- .../testing/selftests/kvm/x86_64/state_test.c | 2 +- .../kvm/x86_64/vmx_preemption_timer_test.c | 2 +- 10 files changed, 130 insertions(+), 8 deletions(-) diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 5a776a08f78c..2da3316bb559 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -373,9 +373,23 @@ struct kvm_debugregs { __u64 reserved[9]; }; -/* for KVM_CAP_XSAVE */ +/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */ struct kvm_xsave { + /* + * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes + * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) + * respectively, when invoked on the vm file descriptor. + * + * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) + * will always be at least 4096. Currently, it is only greater + * than 4096 if a dynamic feature has been enabled with + * ``arch_prctl()``, but this may change in the future. + * + * The offsets of the state save areas in struct kvm_xsave follow + * the contents of CPUID leaf 0xD on the host. 
+ */ __u32 region[1024]; + __u32 extra[0]; }; #define KVM_MAX_XCRS 16 diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 1daa45268de2..f066637ee206 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -1131,6 +1131,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204 #define KVM_CAP_ARM_MTE 205 #define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 +#define KVM_CAP_XSAVE2 207 #ifdef KVM_CAP_IRQ_ROUTING @@ -1551,6 +1552,8 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_XSAVE */ #define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave) #define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave) +/* Available with KVM_CAP_XSAVE2 */ +#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) /* Available with KVM_CAP_XCRS */ #define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs) #define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs) diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 1e5ab6a92848..66775de26952 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -103,6 +103,7 @@ extern const struct vm_guest_mode_params vm_guest_mode_params[]; int open_path_or_exit(const char *path, int flags); int open_kvm_dev_path_or_exit(void); int kvm_check_cap(long cap); +int vm_check_cap(struct kvm_vm *vm, long cap); int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, struct kvm_enable_cap *cap); @@ -344,6 +345,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, * guest_code - The vCPU's entry point */ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); +void vm_xsave_req_perm(void); bool vm_is_unrestricted_guest(struct kvm_vm *vm); diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 05e65ca1c30c..58633e51960f 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -10,8 +10,10 @@ #include #include +#include #include +#include #include "../kvm_util.h" @@ -352,6 +354,7 @@ struct kvm_x86_state; struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid); void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state); +void kvm_x86_state_cleanup(struct kvm_x86_state *state); struct kvm_msr_list *kvm_get_msr_index_list(void); uint64_t kvm_get_feature_msr(uint64_t msr_index); @@ -443,4 +446,11 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, /* VMX_EPT_VPID_CAP bits */ #define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21) +#define XSTATE_XTILE_CFG_BIT 17 +#define XSTATE_XTILE_DATA_BIT 18 + +#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT) +#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT) +#define XFEATURE_XTILE_MASK (XSTATE_XTILE_CFG_MASK | \ + XSTATE_XTILE_DATA_MASK) #endif /* SELFTEST_KVM_PROCESSOR_H */ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index ecc53d108ad8..4a645dc77f34 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -85,6 +85,33 @@ int kvm_check_cap(long cap) return ret; } +/* VM Check Capability + * + * Input Args: + * vm - Virtual Machine + * cap - Capability + * + * Output Args: None + * + 
* Return: + * On success, the Value corresponding to the capability (KVM_CAP_*) + * specified by the value of cap. On failure a TEST_ASSERT failure + * is produced. + * + * Looks up and returns the value corresponding to the capability + * (KVM_CAP_*) given by cap. + */ +int vm_check_cap(struct kvm_vm *vm, long cap) +{ + int ret; + + ret = ioctl(vm->fd, KVM_CHECK_EXTENSION, cap); + TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION VM IOCTL failed,\n" + " rc: %i errno: %i", ret, errno); + + return ret; +} + /* VM Enable Capability * * Input Args: @@ -366,6 +393,11 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, struct kvm_vm *vm; int i; + /* + * Permission needs to be requested before KVM_SET_CPUID2. + */ + vm_xsave_req_perm(); + /* Force slot0 memory size not small than DEFAULT_GUEST_PHY_PAGES */ if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES) slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES; diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index eef7b34756d5..f19d6d201977 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -650,6 +650,45 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid) vcpu_sregs_set(vm, vcpuid, &sregs); } +#define CPUID_XFD_BIT (1 << 4) +static bool is_xfd_supported(void) +{ + int eax, ebx, ecx, edx; + const int leaf = 0xd, subleaf = 0x1; + + __asm__ __volatile__( + "cpuid" + : /* output */ "=a"(eax), "=b"(ebx), + "=c"(ecx), "=d"(edx) + : /* input */ "0"(leaf), "2"(subleaf)); + + return !!(eax & CPUID_XFD_BIT); +} + +void vm_xsave_req_perm(void) +{ + unsigned long bitmask; + long rc; + + if (!is_xfd_supported()) + return; + + rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, + XSTATE_XTILE_DATA_BIT); + /* + * The older kernel version(<5.15) can't support + * ARCH_REQ_XCOMP_GUEST_PERM and directly return. 
+ */ + if (rc) + return; + + rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask); + TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc); + TEST_ASSERT(bitmask & XFEATURE_XTILE_MASK, + "prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure bitmask=0x%lx", + bitmask); +} + void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code) { struct kvm_mp_state mp_state; @@ -1018,10 +1057,10 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) } struct kvm_x86_state { + struct kvm_xsave *xsave; struct kvm_vcpu_events events; struct kvm_mp_state mp_state; struct kvm_regs regs; - struct kvm_xsave xsave; struct kvm_xcrs xcrs; struct kvm_sregs sregs; struct kvm_debugregs debugregs; @@ -1069,6 +1108,22 @@ struct kvm_msr_list *kvm_get_msr_index_list(void) return list; } +static int vcpu_save_xsave_state(struct kvm_vm *vm, struct vcpu *vcpu, + struct kvm_x86_state *state) +{ + int size; + + size = vm_check_cap(vm, KVM_CAP_XSAVE2); + if (!size) + size = sizeof(struct kvm_xsave); + + state->xsave = malloc(size); + if (size == sizeof(struct kvm_xsave)) + return ioctl(vcpu->fd, KVM_GET_XSAVE, state->xsave); + else + return ioctl(vcpu->fd, KVM_GET_XSAVE2, state->xsave); +} + struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) { struct vcpu *vcpu = vcpu_find(vm, vcpuid); @@ -1112,7 +1167,7 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i", r); - r = ioctl(vcpu->fd, KVM_GET_XSAVE, &state->xsave); + r = vcpu_save_xsave_state(vm, vcpu, state); TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i", r); @@ -1157,7 +1212,7 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s struct vcpu *vcpu = vcpu_find(vm, vcpuid); int r; - r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave); + r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave); TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", r); @@ -1198,6 +1253,12 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s } } +void kvm_x86_state_cleanup(struct kvm_x86_state *state) +{ + free(state->xsave); + free(state); +} + bool is_intel_cpu(void) { int eax, ebx, ecx, edx; diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index 2b46dcca86a8..4c7841dfd481 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c @@ -129,7 +129,7 @@ static void save_restore_vm(struct kvm_vm *vm) vcpu_set_hv_cpuid(vm, VCPU_ID); vcpu_enable_evmcs(vm, VCPU_ID); vcpu_load_state(vm, VCPU_ID, state); - free(state); + kvm_x86_state_cleanup(state); memset(®s2, 0, sizeof(regs2)); vcpu_regs_get(vm, VCPU_ID, ®s2); diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c index d0fe2fdce58c..2da8eb8e2d96 100644 --- a/tools/testing/selftests/kvm/x86_64/smm_test.c +++ b/tools/testing/selftests/kvm/x86_64/smm_test.c @@ -212,7 +212,7 @@ int main(int argc, char *argv[]) vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); vcpu_load_state(vm, VCPU_ID, state); run = vcpu_state(vm, VCPU_ID); - free(state); + kvm_x86_state_cleanup(state); } done: diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c index 32854c1462ad..2e0a92da8ff5 100644 --- a/tools/testing/selftests/kvm/x86_64/state_test.c +++ 
b/tools/testing/selftests/kvm/x86_64/state_test.c @@ -218,7 +218,7 @@ int main(int argc, char *argv[]) vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); vcpu_load_state(vm, VCPU_ID, state); run = vcpu_state(vm, VCPU_ID); - free(state); + kvm_x86_state_cleanup(state); memset(®s2, 0, sizeof(regs2)); vcpu_regs_get(vm, VCPU_ID, ®s2); diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c index a07480aed397..ff92e25b6f1e 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c @@ -244,7 +244,7 @@ int main(int argc, char *argv[]) vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); vcpu_load_state(vm, VCPU_ID, state); run = vcpu_state(vm, VCPU_ID); - free(state); + kvm_x86_state_cleanup(state); memset(®s2, 0, sizeof(regs2)); vcpu_regs_get(vm, VCPU_ID, ®s2); From 5429cead01192ff4019ea0b13316268d14fd1ec2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Jan 2022 04:35:31 -0800 Subject: [PATCH 263/268] x86/fpu: Provide fpu_sync_guest_vmexit_xfd_state() KVM can disable the write emulation for the XFD MSR when the vCPU's fpstate is already correctly sized to reduce the overhead. When write emulation is disabled the XFD MSR state after a VMEXIT is unknown and therefore not in sync with the software states in fpstate and the per CPU XFD cache. Provide fpu_sync_guest_vmexit_xfd_state() which has to be invoked after a VMEXIT before enabling interrupts when write emulation is disabled for the XFD MSR. It could be invoked unconditionally even when write emulation is enabled for the price of a pointless MSR read. Signed-off-by: Thomas Gleixner Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-21-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/fpu/api.h | 2 ++ arch/x86/kernel/fpu/core.c | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index e4d10155290b..a467eb80f9ed 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -142,8 +142,10 @@ extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatu #ifdef CONFIG_X86_64 extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd); +extern void fpu_sync_guest_vmexit_xfd_state(void); #else static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { } +static inline void fpu_sync_guest_vmexit_xfd_state(void) { } #endif extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index de8e8c21f355..da51381cb64b 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -294,6 +294,30 @@ void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) fpregs_unlock(); } EXPORT_SYMBOL_GPL(fpu_update_guest_xfd); + +/** + * fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state + * + * Must be invoked from KVM after a VMEXIT before enabling interrupts when + * XFD write emulation is disabled. This is required because the guest can + * freely modify XFD and the state at VMEXIT is not guaranteed to be the + * same as the state on VMENTER. So software state has to be udpated before + * any operation which depends on it can take place. 
+ * + * Note: It can be invoked unconditionally even when write emulation is + * enabled for the price of a then pointless MSR read. + */ +void fpu_sync_guest_vmexit_xfd_state(void) +{ + struct fpstate *fps = current->thread.fpu.fpstate; + + lockdep_assert_irqs_disabled(); + if (fpu_state_size_dynamic()) { + rdmsrl(MSR_IA32_XFD, fps->xfd); + __this_cpu_write(xfd_state, fps->xfd); + } +} +EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state); #endif /* CONFIG_X86_64 */ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) From b5274b1b7ba89fe8ed38cc470041cd6ba0dfb79b Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Wed, 5 Jan 2022 04:35:32 -0800 Subject: [PATCH 264/268] kvm: x86: Disable interception for IA32_XFD on demand Always intercepting IA32_XFD causes non-negligible overhead when this register is updated frequently in the guest. Disable r/w emulation after intercepting the first WRMSR(IA32_XFD) with a non-zero value. Disable WRMSR emulation implies that IA32_XFD becomes out-of-sync with the software states in fpstate and the per-cpu xfd cache. This leads to two additional changes accordingly: - Call fpu_sync_guest_vmexit_xfd_state() after vm-exit to bring software states back in-sync with the MSR, before handle_exit_irqoff() is called. - Always trap #NM once write interception is disabled for IA32_XFD. The #NM exception is rare if the guest doesn't use dynamic features. Otherwise, there is at most one exception per guest task given a dynamic feature. p.s. We have confirmed that SDM is being revised to say that when setting IA32_XFD[18] the AMX register state is not guaranteed to be preserved. This clarification avoids adding mess for a creative guest which sets IA32_XFD[18]=1 before saving active AMX state to its own storage. Signed-off-by: Kevin Tian Signed-off-by: Jing Liu Signed-off-by: Yang Zhong Message-Id: <20220105123532.12586-22-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/vmx/vmx.c | 24 +++++++++++++++++++----- arch/x86/kvm/vmx/vmx.h | 2 +- arch/x86/kvm/x86.c | 8 ++++++++ 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6cbf97a2ebc4..89d1fdb39c46 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -647,6 +647,7 @@ struct kvm_vcpu_arch { u64 smi_count; bool tpr_access_reporting; bool xsaves_enabled; + bool xfd_no_write_intercept; u64 ia32_xss; u64 microcode_version; u64 arch_capabilities; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b8b7f5c7b3df..15e30602782b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -162,6 +162,7 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = { MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE, + MSR_IA32_XFD, MSR_IA32_XFD_ERR, #endif MSR_IA32_SYSENTER_CS, @@ -764,10 +765,11 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) } /* - * Trap #NM if guest xfd contains a non-zero value so guest XFD_ERR - * can be saved timely. + * Disabling xfd interception indicates that dynamic xfeatures + * might be used in the guest. Always trap #NM in this case + * to save guest xfd_err timely. 
*/ - if (vcpu->arch.guest_fpu.fpstate->xfd) + if (vcpu->arch.xfd_no_write_intercept) eb |= (1u << NM_VECTOR); vmcs_write32(EXCEPTION_BITMAP, eb); @@ -1978,9 +1980,21 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_XFD: ret = kvm_set_msr_common(vcpu, msr_info); - /* Update #NM interception according to guest xfd */ - if (!ret) + /* + * Always intercepting WRMSR could incur non-negligible + * overhead given xfd might be changed frequently in + * guest context switch. Disable write interception + * upon the first write with a non-zero value (indicating + * potential usage on dynamic xfeatures). Also update + * exception bitmap to trap #NM for proper virtualization + * of guest xfd_err. + */ + if (!ret && data) { + vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD, + MSR_TYPE_RW); + vcpu->arch.xfd_no_write_intercept = true; vmx_update_exception_bitmap(vcpu); + } break; #endif case MSR_IA32_SYSENTER_CS: diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 69dd2f85abdc..f8fc7441baea 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -349,7 +349,7 @@ struct vcpu_vmx { struct lbr_desc lbr_desc; /* Save desired MSR intercept (read: pass-through) state */ -#define MAX_POSSIBLE_PASSTHROUGH_MSRS 14 +#define MAX_POSSIBLE_PASSTHROUGH_MSRS 15 struct { DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS); DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bde18ca657db..60da2331ec32 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10083,6 +10083,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); + /* + * Sync xfd before calling handle_exit_irqoff() which may + * rely on the fact that guest_fpu::xfd is up-to-date (e.g. + * in #NM irqoff handler). + */ + if (vcpu->arch.xfd_no_write_intercept) + fpu_sync_guest_vmexit_xfd_state(); + static_call(kvm_x86_handle_exit_irqoff)(vcpu); if (vcpu->arch.guest_fpu.xfd_err) From 551447cfa5dc208b7fba7aa98391d5cc8149fa5a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 23 Dec 2021 09:53:20 -0500 Subject: [PATCH 265/268] selftest: kvm: Reorder vcpu_load_state steps for AMX For AMX support it is recommended to load XCR0 after XFD, so that KVM does not see XFD=0, XCR=1 for a save state that will eventually be disabled (which would lead to premature allocation of the space required for that save state). It is also required to load XSAVE data after XCR0 and XFD, so that KVM can trigger allocation of the extra space required to store AMX state. Adjust vcpu_load_state to obey these new requirements. 
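As a hedged illustration of that ordering (a sketch only, with illustrative names and error handling, not code from this patch), a host-side restore path would issue the ioctls in this sequence:

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: restore FPU-related state for an AMX-capable vCPU in the order
 * described above. Assumes vcpu_fd and the saved buffers already exist. */
static void restore_xstate_ordered(int vcpu_fd, struct kvm_msrs *msrs,
				   struct kvm_xcrs *xcrs, struct kvm_xsave *xsave)
{
	/* 1. MSRs first, so IA32_XFD is known to KVM before XCR0 arrives.
	 * KVM_SET_MSRS returns the number of MSRs it processed. */
	if (ioctl(vcpu_fd, KVM_SET_MSRS, msrs) != (int)msrs->nmsrs)
		err(1, "KVM_SET_MSRS");

	/* 2. XCR0 next: with XFD already loaded, KVM never observes the
	 * XFD=0/XCR0-bit-set combination that would trigger a premature
	 * allocation of the corresponding save area. */
	if (ioctl(vcpu_fd, KVM_SET_XCRS, xcrs))
		err(1, "KVM_SET_XCRS");

	/* 3. XSAVE data last, so KVM can enlarge the guest fpstate if the
	 * image contains dynamically enabled features such as tile data. */
	if (ioctl(vcpu_fd, KVM_SET_XSAVE, xsave))
		err(1, "KVM_SET_XSAVE");
}

The selftest change below applies the same ordering, with KVM_SET_SREGS before and KVM_SET_VCPU_EVENTS after this sequence.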
Signed-off-by: Paolo Bonzini Signed-off-by: Yang Zhong Message-Id: <20211223145322.2914028-2-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/lib/x86_64/processor.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index f19d6d201977..93264424aee5 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1212,24 +1212,25 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s struct vcpu *vcpu = vcpu_find(vm, vcpuid); int r; - r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave); - TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", + r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); + TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i", r); + r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs); + TEST_ASSERT(r == state->msrs.nmsrs, + "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)", + r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index); + if (kvm_check_cap(KVM_CAP_XCRS)) { r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", r); } - r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); - TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i", + r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave); + TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", r); - r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs); - TEST_ASSERT(r == state->msrs.nmsrs, "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)", - r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index); - r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events); TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i", r); From 6559b4a523cd65f6005b4592833b16ba970abdf5 Mon Sep 17 00:00:00 2001 From: Yang Zhong Date: Thu, 23 Dec 2021 09:53:21 -0500 Subject: [PATCH 266/268] selftest: kvm: Move struct kvm_x86_state to header Move the struct kvm_x86_state definition to processor.h so that amx_test.c can dereference state->xsave without tripping over an incomplete type at compile time. 
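To make the compile issue concrete, a hedged toy example (none of these names are from the selftest headers): code that only sees a forward declaration can pass the pointer around, but dereferencing a member needs the full definition, which is why the structure moves into the shared header.

/* Before the move: only a forward declaration is visible to the test. */
struct kvm_x86_state;
struct kvm_x86_state *save_state_stub(void);	/* hypothetical helper */

void broken_user(void)
{
	struct kvm_x86_state *state = save_state_stub();	/* fine: pointer only */
	/* Accessing state->xsave here fails with "dereferencing pointer to
	 * incomplete type"; once the full struct definition lives in
	 * processor.h, the same access compiles. */
	(void)state;
}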
Signed-off-by: Yang Zhong Message-Id: <20211223145322.2914028-3-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- .../selftests/kvm/include/x86_64/processor.h | 16 +++++++++++++++- .../testing/selftests/kvm/lib/x86_64/processor.c | 15 --------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 58633e51960f..e94ba0fc67d8 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -94,6 +94,21 @@ struct desc_ptr { uint64_t address; } __attribute__((packed)); +struct kvm_x86_state { + struct kvm_xsave *xsave; + struct kvm_vcpu_events events; + struct kvm_mp_state mp_state; + struct kvm_regs regs; + struct kvm_xcrs xcrs; + struct kvm_sregs sregs; + struct kvm_debugregs debugregs; + union { + struct kvm_nested_state nested; + char nested_[16384]; + }; + struct kvm_msrs msrs; +}; + static inline uint64_t get_desc64_base(const struct desc64 *desc) { return ((uint64_t)desc->base3 << 32) | @@ -350,7 +365,6 @@ static inline unsigned long get_xmm(int n) bool is_intel_cpu(void); -struct kvm_x86_state; struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid); void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state); diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 93264424aee5..babb0f28575c 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1056,21 +1056,6 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent) sregs_dump(stream, &sregs, indent + 4); } -struct kvm_x86_state { - struct kvm_xsave *xsave; - struct kvm_vcpu_events events; - struct kvm_mp_state mp_state; - struct kvm_regs regs; - struct kvm_xcrs xcrs; - struct kvm_sregs sregs; - struct kvm_debugregs debugregs; - union { - struct kvm_nested_state nested; - char nested_[16384]; - }; - struct kvm_msrs msrs; -}; - static int kvm_get_num_msrs_fd(int kvm_fd) { struct kvm_msr_list nmsrs; From bf70636d9443c9e0718fd98765ba634e631ed079 Mon Sep 17 00:00:00 2001 From: Yang Zhong Date: Thu, 23 Dec 2021 09:53:22 -0500 Subject: [PATCH 267/268] selftest: kvm: Add amx selftest This selftest covers two aspects of AMX. The first is triggering #NM exception and checking the MSR XFD_ERR value. The second case is loading tile config and tile data into guest registers and trapping to the host side for a complete save/load of the guest state. TMM0 is also checked against memory data after save/restore. 
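One prerequisite worth noting: before a test like this can expose AMX to a guest, the host process has to request guest permission for the dynamically enabled tile state via arch_prctl(). A hedged sketch follows; the ARCH_REQ_XCOMP_GUEST_PERM value is an assumption taken from asm/prctl.h, so prefer including that header where available.

#include <sys/syscall.h>
#include <unistd.h>

#ifndef ARCH_REQ_XCOMP_GUEST_PERM
#define ARCH_REQ_XCOMP_GUEST_PERM	0x1025	/* assumed value; see <asm/prctl.h> */
#endif
#define XFEATURE_XTILEDATA		18	/* XSTATE component number for tile data */

/* Sketch: ask the kernel to permit AMX tile data in this process's guests.
 * Returns 0 on success, -1 with errno set otherwise. */
static long request_amx_guest_perm(void)
{
	return syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
		       XFEATURE_XTILEDATA);
}

The library helper whose tail appears earlier in this series issues the same request and then re-reads the permission bitmap with ARCH_GET_XCOMP_GUEST_PERM to confirm the XTILE bits were granted.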
Signed-off-by: Yang Zhong Message-Id: <20211223145322.2914028-4-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/x86_64/amx_test.c | 448 ++++++++++++++++++ 2 files changed, 449 insertions(+) create mode 100644 tools/testing/selftests/kvm/x86_64/amx_test.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index c407ebbec2c1..ee8cf2149824 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -82,6 +82,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests +TEST_GEN_PROGS_x86_64 += x86_64/amx_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_perf_test diff --git a/tools/testing/selftests/kvm/x86_64/amx_test.c b/tools/testing/selftests/kvm/x86_64/amx_test.c new file mode 100644 index 000000000000..523c1e99ed64 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/amx_test.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * amx tests + * + * Copyright (C) 2021, Intel, Inc. + * + * Tests for amx #NM exception and save/restore. + */ + +#define _GNU_SOURCE /* for program_invocation_short_name */ +#include +#include +#include +#include +#include +#include + +#include "test_util.h" + +#include "kvm_util.h" +#include "processor.h" +#include "vmx.h" + +#ifndef __x86_64__ +# error This test is 64-bit only +#endif + +#define VCPU_ID 0 +#define X86_FEATURE_XSAVE (1 << 26) +#define X86_FEATURE_OSXSAVE (1 << 27) + +#define PAGE_SIZE (1 << 12) +#define NUM_TILES 8 +#define TILE_SIZE 1024 +#define XSAVE_SIZE ((NUM_TILES * TILE_SIZE) + PAGE_SIZE) + +/* Tile configuration associated: */ +#define MAX_TILES 16 +#define RESERVED_BYTES 14 + +#define XFEATURE_XTILECFG 17 +#define XFEATURE_XTILEDATA 18 +#define XFEATURE_MASK_XTILECFG (1 << XFEATURE_XTILECFG) +#define XFEATURE_MASK_XTILEDATA (1 << XFEATURE_XTILEDATA) +#define XFEATURE_MASK_XTILE (XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA) + +#define TILE_CPUID 0x1d +#define XSTATE_CPUID 0xd +#define TILE_PALETTE_CPUID_SUBLEAVE 0x1 +#define XSTATE_USER_STATE_SUBLEAVE 0x0 + +#define XSAVE_HDR_OFFSET 512 + +struct xsave_data { + u8 area[XSAVE_SIZE]; +} __aligned(64); + +struct tile_config { + u8 palette_id; + u8 start_row; + u8 reserved[RESERVED_BYTES]; + u16 colsb[MAX_TILES]; + u8 rows[MAX_TILES]; +}; + +struct tile_data { + u8 data[NUM_TILES * TILE_SIZE]; +}; + +struct xtile_info { + u16 bytes_per_tile; + u16 bytes_per_row; + u16 max_names; + u16 max_rows; + u32 xsave_offset; + u32 xsave_size; +}; + +static struct xtile_info xtile; + +static inline u64 __xgetbv(u32 index) +{ + u32 eax, edx; + + asm volatile("xgetbv;" + : "=a" (eax), "=d" (edx) + : "c" (index)); + return eax + ((u64)edx << 32); +} + +static inline void __xsetbv(u32 index, u64 value) +{ + u32 eax = value; + u32 edx = value >> 32; + + asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index)); +} + +static inline void __ldtilecfg(void *cfg) +{ + asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00" + : : "a"(cfg)); +} + +static inline void __tileloadd(void *tile) +{ + asm volatile(".byte 0xc4,0xe2,0x7b,0x4b,0x04,0x10" + : : "a"(tile), "d"(0)); +} + +static inline void __tilerelease(void) +{ + asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0" ::); +} + +static inline void __xsavec(struct xsave_data *data, uint64_t 
rfbm) +{ + uint32_t rfbm_lo = rfbm; + uint32_t rfbm_hi = rfbm >> 32; + + asm volatile("xsavec (%%rdi)" + : : "D" (data), "a" (rfbm_lo), "d" (rfbm_hi) + : "memory"); +} + +static inline void check_cpuid_xsave(void) +{ + uint32_t eax, ebx, ecx, edx; + + eax = 1; + ecx = 0; + cpuid(&eax, &ebx, &ecx, &edx); + if (!(ecx & X86_FEATURE_XSAVE)) + GUEST_ASSERT(!"cpuid: no CPU xsave support!"); + if (!(ecx & X86_FEATURE_OSXSAVE)) + GUEST_ASSERT(!"cpuid: no OS xsave support!"); +} + +static bool check_xsave_supports_xtile(void) +{ + return __xgetbv(0) & XFEATURE_MASK_XTILE; +} + +static bool enum_xtile_config(void) +{ + u32 eax, ebx, ecx, edx; + + eax = TILE_CPUID; + ecx = TILE_PALETTE_CPUID_SUBLEAVE; + + cpuid(&eax, &ebx, &ecx, &edx); + if (!eax || !ebx || !ecx) + return false; + + xtile.max_names = ebx >> 16; + if (xtile.max_names < NUM_TILES) + return false; + + xtile.bytes_per_tile = eax >> 16; + if (xtile.bytes_per_tile < TILE_SIZE) + return false; + + xtile.bytes_per_row = ebx; + xtile.max_rows = ecx; + + return true; +} + +static bool enum_xsave_tile(void) +{ + u32 eax, ebx, ecx, edx; + + eax = XSTATE_CPUID; + ecx = XFEATURE_XTILEDATA; + + cpuid(&eax, &ebx, &ecx, &edx); + if (!eax || !ebx) + return false; + + xtile.xsave_offset = ebx; + xtile.xsave_size = eax; + + return true; +} + +static bool check_xsave_size(void) +{ + u32 eax, ebx, ecx, edx; + bool valid = false; + + eax = XSTATE_CPUID; + ecx = XSTATE_USER_STATE_SUBLEAVE; + + cpuid(&eax, &ebx, &ecx, &edx); + if (ebx && ebx <= XSAVE_SIZE) + valid = true; + + return valid; +} + +static bool check_xtile_info(void) +{ + bool ret = false; + + if (!check_xsave_size()) + return ret; + + if (!enum_xsave_tile()) + return ret; + + if (!enum_xtile_config()) + return ret; + + if (sizeof(struct tile_data) >= xtile.xsave_size) + ret = true; + + return ret; +} + +static void set_tilecfg(struct tile_config *cfg) +{ + int i; + + /* Only palette id 1 */ + cfg->palette_id = 1; + for (i = 0; i < xtile.max_names; i++) { + cfg->colsb[i] = xtile.bytes_per_row; + cfg->rows[i] = xtile.max_rows; + } +} + +static void set_xstatebv(void *data, uint64_t bv) +{ + *(uint64_t *)(data + XSAVE_HDR_OFFSET) = bv; +} + +static u64 get_xstatebv(void *data) +{ + return *(u64 *)(data + XSAVE_HDR_OFFSET); +} + +static void init_regs(void) +{ + uint64_t cr4, xcr0; + + /* turn on CR4.OSXSAVE */ + cr4 = get_cr4(); + cr4 |= X86_CR4_OSXSAVE; + set_cr4(cr4); + + xcr0 = __xgetbv(0); + xcr0 |= XFEATURE_MASK_XTILE; + __xsetbv(0x0, xcr0); +} + +static void __attribute__((__flatten__)) guest_code(struct tile_config *amx_cfg, + struct tile_data *tiledata, + struct xsave_data *xsave_data) +{ + init_regs(); + check_cpuid_xsave(); + GUEST_ASSERT(check_xsave_supports_xtile()); + GUEST_ASSERT(check_xtile_info()); + + /* check xtile configs */ + GUEST_ASSERT(xtile.xsave_offset == 2816); + GUEST_ASSERT(xtile.xsave_size == 8192); + GUEST_ASSERT(xtile.max_names == 8); + GUEST_ASSERT(xtile.bytes_per_tile == 1024); + GUEST_ASSERT(xtile.bytes_per_row == 64); + GUEST_ASSERT(xtile.max_rows == 16); + GUEST_SYNC(1); + + /* xfd=0, enable amx */ + wrmsr(MSR_IA32_XFD, 0); + GUEST_SYNC(2); + GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == 0); + set_tilecfg(amx_cfg); + __ldtilecfg(amx_cfg); + GUEST_SYNC(3); + /* Check save/restore when trap to userspace */ + __tileloadd(tiledata); + GUEST_SYNC(4); + __tilerelease(); + GUEST_SYNC(5); + /* bit 18 not in the XCOMP_BV after xsavec() */ + set_xstatebv(xsave_data, XFEATURE_MASK_XTILEDATA); + __xsavec(xsave_data, XFEATURE_MASK_XTILEDATA); + GUEST_ASSERT((get_xstatebv(xsave_data) & 
XFEATURE_MASK_XTILEDATA) == 0); + + /* xfd=0x40000, disable amx tiledata */ + wrmsr(MSR_IA32_XFD, XFEATURE_MASK_XTILEDATA); + GUEST_SYNC(6); + GUEST_ASSERT(rdmsr(MSR_IA32_XFD) == XFEATURE_MASK_XTILEDATA); + set_tilecfg(amx_cfg); + __ldtilecfg(amx_cfg); + /* Trigger #NM exception */ + __tileloadd(tiledata); + GUEST_SYNC(10); + + GUEST_DONE(); +} + +void guest_nm_handler(struct ex_regs *regs) +{ + /* Check if #NM is triggered by XFEATURE_MASK_XTILEDATA */ + GUEST_SYNC(7); + GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILEDATA); + GUEST_SYNC(8); + GUEST_ASSERT(rdmsr(MSR_IA32_XFD_ERR) == XFEATURE_MASK_XTILEDATA); + /* Clear xfd_err */ + wrmsr(MSR_IA32_XFD_ERR, 0); + /* xfd=0, enable amx */ + wrmsr(MSR_IA32_XFD, 0); + GUEST_SYNC(9); +} + +int main(int argc, char *argv[]) +{ + struct kvm_cpuid_entry2 *entry; + struct kvm_regs regs1, regs2; + bool amx_supported = false; + struct kvm_vm *vm; + struct kvm_run *run; + struct kvm_x86_state *state; + int xsave_restore_size = 0; + vm_vaddr_t amx_cfg, tiledata, xsavedata; + struct ucall uc; + u32 amx_offset; + int stage, ret; + + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, guest_code); + + entry = kvm_get_supported_cpuid_entry(1); + if (!(entry->ecx & X86_FEATURE_XSAVE)) { + print_skip("XSAVE feature not supported"); + exit(KSFT_SKIP); + } + + if (kvm_get_cpuid_max_basic() >= 0xd) { + entry = kvm_get_supported_cpuid_index(0xd, 0); + amx_supported = entry && !!(entry->eax & XFEATURE_MASK_XTILE); + if (!amx_supported) { + print_skip("AMX is not supported by the vCPU (eax=0x%x)", entry->eax); + exit(KSFT_SKIP); + } + /* Get xsave/restore max size */ + xsave_restore_size = entry->ecx; + } + + run = vcpu_state(vm, VCPU_ID); + vcpu_regs_get(vm, VCPU_ID, ®s1); + + /* Register #NM handler */ + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler); + + /* amx cfg for guest_code */ + amx_cfg = vm_vaddr_alloc_page(vm); + memset(addr_gva2hva(vm, amx_cfg), 0x0, getpagesize()); + + /* amx tiledata for guest_code */ + tiledata = vm_vaddr_alloc_pages(vm, 2); + memset(addr_gva2hva(vm, tiledata), rand() | 1, 2 * getpagesize()); + + /* xsave data for guest_code */ + xsavedata = vm_vaddr_alloc_pages(vm, 3); + memset(addr_gva2hva(vm, xsavedata), 0, 3 * getpagesize()); + vcpu_args_set(vm, VCPU_ID, 3, amx_cfg, tiledata, xsavedata); + + for (stage = 1; ; stage++) { + _vcpu_run(vm, VCPU_ID); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Stage %d: unexpected exit reason: %u (%s),\n", + stage, run->exit_reason, + exit_reason_str(run->exit_reason)); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], + __FILE__, uc.args[1]); + /* NOT REACHED */ + case UCALL_SYNC: + switch (uc.args[1]) { + case 1: + case 2: + case 3: + case 5: + case 6: + case 7: + case 8: + fprintf(stderr, "GUEST_SYNC(%ld)\n", uc.args[1]); + break; + case 4: + case 10: + fprintf(stderr, + "GUEST_SYNC(%ld), check save/restore status\n", uc.args[1]); + + /* Compacted mode, get amx offset by xsave area + * size subtract 8K amx size. 
+ */ + amx_offset = xsave_restore_size - NUM_TILES*TILE_SIZE; + state = vcpu_save_state(vm, VCPU_ID); + void *amx_start = (void *)state->xsave + amx_offset; + void *tiles_data = (void *)addr_gva2hva(vm, tiledata); + /* Only check TMM0 register, 1 tile */ + ret = memcmp(amx_start, tiles_data, TILE_SIZE); + TEST_ASSERT(ret == 0, "memcmp failed, ret=%d\n", ret); + kvm_x86_state_cleanup(state); + break; + case 9: + fprintf(stderr, + "GUEST_SYNC(%ld), #NM exception and enable amx\n", uc.args[1]); + break; + } + break; + case UCALL_DONE: + fprintf(stderr, "UCALL_DONE\n"); + goto done; + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + + state = vcpu_save_state(vm, VCPU_ID); + memset(®s1, 0, sizeof(regs1)); + vcpu_regs_get(vm, VCPU_ID, ®s1); + + kvm_vm_release(vm); + + /* Restore state in a new VM. */ + kvm_vm_restart(vm, O_RDWR); + vm_vcpu_add(vm, VCPU_ID); + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + vcpu_load_state(vm, VCPU_ID, state); + run = vcpu_state(vm, VCPU_ID); + kvm_x86_state_cleanup(state); + + memset(®s2, 0, sizeof(regs2)); + vcpu_regs_get(vm, VCPU_ID, ®s2); + TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)), + "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx", + (ulong) regs2.rdi, (ulong) regs2.rsi); + } +done: + kvm_vm_free(vm); +} From c862dcd199759d4a45e65dab47b03e3e8a144e3a Mon Sep 17 00:00:00 2001 From: Yang Zhong Date: Thu, 13 Jan 2022 13:08:25 -0500 Subject: [PATCH 268/268] x86/fpu: Fix inline prefix warnings Fix sparse warnings in xstate and remove inline prefix. Fixes: 980fe2fddcff ("x86/fpu: Extend fpu_xstate_prctl() with guest permissions") Signed-off-by: Yang Zhong Reported-by: kernel test robot Message-Id: <20220113180825.322333-1-yang.zhong@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/fpu/api.h | 2 +- arch/x86/kernel/fpu/xstate.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index a467eb80f9ed..c83b3020350a 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -132,7 +132,7 @@ static inline void fpstate_free(struct fpu *fpu) { } /* fpstate-related functions which are exported to KVM */ extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature); -extern inline u64 xstate_get_guest_group_perm(void); +extern u64 xstate_get_guest_group_perm(void); /* KVM specific functions */ extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 10fe072f1c92..02b3ddaf4f75 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1744,7 +1744,7 @@ static inline int xstate_request_perm(unsigned long idx, bool guest) } #endif /* !CONFIG_X86_64 */ -inline u64 xstate_get_guest_group_perm(void) +u64 xstate_get_guest_group_perm(void) { return xstate_get_group_perm(true); }
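As a closing illustration of the pattern fixed above (a hedged toy example, not the kernel sources): a function that other translation units call through a plain extern declaration needs one ordinary out-of-line definition, and keeping an inline qualifier on that definition cannot enable cross-file inlining, which is why the prefix is dropped here.

/* perm.h (toy): the declaration other files see */
extern unsigned long long get_group_perm(void);

/* perm.c (toy): an ordinary external definition. Adding "inline" here would
 * buy nothing for external callers and is the kind of mismatch static
 * checkers such as sparse report. */
unsigned long long get_group_perm(void)
{
	return 0;	/* placeholder value for the sketch */
}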