Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Two ARM fixes:

   - Ensure the guest PMU context is restored before the first KVM_RUN,
     fixing an issue where EL0 event counting is broken after vCPU
     save/restore

   - Actually initialize ID_AA64PFR0_EL1.{CSV2,CSV3} based on the
     sanitized, system-wide values for protected VMs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs
  KVM: arm64: PMU: Restore the guest's EL0 event counting after migration
Linus Torvalds, 2023-04-11 09:36:42 -07:00
commit aa4c9185f0
5 changed files with 30 additions and 10 deletions

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
@@ -1890,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
 	return ret;
 }
 
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+	/*
+	 * Track whether the system isn't affected by spectre/meltdown in the
+	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+	 * Although this is per-CPU, we make it global for simplicity, e.g., not
+	 * to have to worry about vcpu migration.
+	 *
+	 * Unlike for non-protected VMs, userspace cannot override this for
+	 * protected VMs.
+	 */
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+	return val;
+}
+
 static void kvm_hyp_init_symbols(void)
 {
-	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
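
For readers who don't have the kernel helpers in their head, here is a minimal standalone sketch of what get_hyp_id_aa64pfr0_el1() computes. It is not kernel code: FIELD_MASK stands in for ARM64_FEATURE_MASK()/FIELD_PREP(), and only the architectural field positions (CSV2 at bits [59:56], CSV3 at bits [63:60] of ID_AA64PFR0_EL1) are real.

/* Illustrative stand-in for the clear-then-FIELD_PREP sequence above. */
#include <stdint.h>
#include <stdio.h>

#define CSV2_SHIFT	56	/* ID_AA64PFR0_EL1.CSV2, bits [59:56] */
#define CSV3_SHIFT	60	/* ID_AA64PFR0_EL1.CSV3, bits [63:60] */
#define FIELD_MASK(shift)	(UINT64_C(0xf) << (shift))

int main(void)
{
	uint64_t val = UINT64_C(0x0000000000000011);	/* pretend sanitised value */
	int unaffected = 1;	/* arm64_get_*_state() == SPECTRE_UNAFFECTED */

	/* Clear both fields, then advertise 1 ("not affected") in each. */
	val &= ~(FIELD_MASK(CSV2_SHIFT) | FIELD_MASK(CSV3_SHIFT));
	val |= (uint64_t)unaffected << CSV2_SHIFT;
	val |= (uint64_t)unaffected << CSV3_SHIFT;

	printf("hyp view of ID_AA64PFR0_EL1: %#018llx\n",
	       (unsigned long long)val);
	return 0;
}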

diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h

@@ -33,11 +33,14 @@
  * Allow for protected VMs:
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
 	)
 
 /*

diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c

@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
 	u64 set_mask = 0;
 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
 	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
 		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
-	/* Spectre and Meltdown mitigation in KVM */
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-			       (u64)kvm->arch.pfr0_csv2);
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-			       (u64)kvm->arch.pfr0_csv3);
-
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
 
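
Together with the PVM_ID_AA64PFR0_ALLOW change above, this boils down to one masking expression: allowed fields pass through from the hypervisor's pre-cooked system value, restricted fields come from set_mask, and everything else reads as zero to the protected VM. A toy sketch with invented mask values (the real layouts come from ARM64_FEATURE_MASK()):

/* Toy model of the allow/set masking in get_pvm_id_aa64pfr0();
 * the numeric values here are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sys_val    = UINT64_C(0x1100000000002211);	/* hyp's pre-cooked view */
	uint64_t allow_mask = UINT64_C(0xff000000000000ff);	/* pretend allowed fields */
	uint64_t set_mask   = UINT64_C(0x0000000000000011);	/* fields forced to fixed values */

	/* Allowed fields pass through; everything else is hidden. */
	uint64_t pvm_view = (sys_val & allow_mask) | set_mask;

	printf("protected-VM ID_AA64PFR0_EL1: %#018llx\n",
	       (unsigned long long)pvm_view);
	return 0;
}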

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c

@@ -558,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
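
Why moving the call fixes the first-KVM_RUN case: access_pmcr() runs only when the guest's own PMCR_EL0 write traps, while kvm_pmu_handle_pmcr() also runs on the path taken before a restored vCPU first enters the guest, per the summary above. A mocked, compilable sketch of the new ordering (function names are suffixed _mock to stress this is not the kernel's code):

#include <stdbool.h>
#include <stdio.h>

static bool el0_counting;

static void kvm_vcpu_pmu_restore_guest_mock(void)
{
	el0_counting = true;	/* stand-in for re-creating the perf events */
}

static void kvm_pmu_handle_pmcr_mock(unsigned long pmcr)
{
	(void)pmcr;	/* PMCR.E/P/C handling elided */
	kvm_vcpu_pmu_restore_guest_mock();	/* the call this fix moves here */
}

int main(void)
{
	/* Reached both on trapped guest writes and before the first KVM_RUN. */
	kvm_pmu_handle_pmcr_mock(0);
	printf("EL0 event counting restored: %s\n", el0_counting ? "yes" : "no");
	return 0;
}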

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c

@@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		if (!kvm_supports_32bit_el0())
 			val |= ARMV8_PMU_PMCR_LC;
 		kvm_pmu_handle_pmcr(vcpu, val);
-		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
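
To make the save/restore scenario in the summary concrete, here is a hedged userspace fragment showing the migration step that used to lose EL0 event counting. Only the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, struct kvm_one_reg, and the ARM64_SYS_REG() encoding of PMCR_EL0 (op0=3, op1=3, CRn=9, CRm=12, op2=0) are real API; vCPU setup and error handling are omitted, and it builds only against arm64 kernel headers.

/* Fragment: save/restore PMCR_EL0 across migration (arm64).
 * Assumes vcpu_fd is an already-initialized KVM vCPU file descriptor. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static uint64_t pmcr_save(int vcpu_fd)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = ARM64_SYS_REG(3, 3, 9, 12, 0),	/* PMCR_EL0 */
		.addr = (uint64_t)&val,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* source side */
	return val;
}

static void pmcr_restore(int vcpu_fd, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = ARM64_SYS_REG(3, 3, 9, 12, 0),	/* PMCR_EL0 */
		.addr = (uint64_t)&val,
	};

	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* destination side */
	/* With this fix, the first KVM_RUN after the restore re-applies
	 * the guest's EL0 event-counting state. */
}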