mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-30 08:02:30 +00:00
Bugfixes for ARM, PPC and x86, plus selftest improvements.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJdst7mAAoJEL/70l94x66DZmgIAJJK+LdYja4NljoOd9gCt43g
lGlRJpmfaDUNQHrfuG1ESC+tD73ndaehFfBPSTnpUmgGyq11NCtuMVjVR6ZgIcsh
gUOzgk5PmJIUcb7bgOVkXHXTsqSmC7X8JQqrHmTESY7nEUOGO3GqVdviC/tIdM0Z
lS8F7b21OektJO7PPRgCsgOKwCXKL9SRMClBc7+7AaiShF7WJaKFHbu0iXsENv5D
8QOQDSDWAVWCdNy4Wrv40lJ2DYUydUFh579ekuKkvvus3dBdK+il0epu7kl+HCaU
OpTVQtWmLbgYs++IL4iLj0YIAxoTT19gz5pxOBtMXcPAppGrbfbgKqtpsLYne60=
=PwUK
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Bugfixes for ARM, PPC and x86, plus selftest improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: nVMX: Don't leak L1 MMIO regions to L2
  KVM: SVM: Fix potential wrong physical id in avic_handle_ldr_update
  kvm: clear kvmclock MSR on reset
  KVM: x86: fix bugon.cocci warnings
  KVM: VMX: Remove specialized handling of unexpected exit-reasons
  selftests: kvm: fix sync_regs_test with newer gccs
  selftests: kvm: vmx_dirty_log_test: skip the test when VMX is not supported
  selftests: kvm: consolidate VMX support checks
  selftests: kvm: vmx_set_nested_state_test: don't check for VMX support twice
  KVM: Don't shrink/grow vCPU halt_poll_ns if host side polling is disabled
  selftests: kvm: synchronize .gitignore to Makefile
  kvm: x86: Expose RDPID in KVM_GET_SUPPORTED_CPUID
  KVM: arm64: pmu: Reset sample period on overflow handling
  KVM: arm64: pmu: Set the CHAINED attribute before creating the in-kernel event
  arm64: KVM: Handle PMCR_EL0.LC as RES1 on pure AArch64 systems
  KVM: arm64: pmu: Fix cycle counter truncation
  KVM: PPC: Book3S HV: XIVE: Ensure VP isn't already in use
This commit is contained in: commit 8c123380b3

23 changed files with 186 additions and 127 deletions
arch/arm64/kvm/sys_regs.c

@@ -632,6 +632,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+	if (!system_supports_32bit_el0())
+		val |= ARMV8_PMU_PMCR_LC;
 	__vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
@@ -682,6 +684,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
+		if (!system_supports_32bit_el0())
+			val |= ARMV8_PMU_PMCR_LC;
 		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 		kvm_pmu_handle_pmcr(vcpu, val);
 		kvm_vcpu_pmu_restore_guest(vcpu);

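Note (not part of the diff): PMCR_EL0.LC is bit 6 and is RES1 when AArch32 is not supported at EL0, which is why both the reset and the trap-write paths above force it after applying the writable-bits mask. A minimal standalone sketch of the same masking logic, with the constants redefined locally for illustration rather than taken from the kernel headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the kernel defines the real ones in its headers. */
#define PMCR_E    (1u << 0)	/* counters enable */
#define PMCR_LC   (1u << 6)	/* long (64-bit) cycle counter */
#define PMCR_MASK 0xffu		/* guest-writable bits, simplified */

static uint32_t update_pmcr(uint32_t old, uint32_t wval, int have_aarch32_el0)
{
	uint32_t val = (old & ~PMCR_MASK) | (wval & PMCR_MASK);

	if (!have_aarch32_el0)	/* LC is RES1 on pure AArch64 systems */
		val |= PMCR_LC;
	return val;
}

int main(void)
{
	/* The guest writes only E; LC still reads back as 1 -> 0x41. */
	printf("%#x\n", update_pmcr(0, PMCR_E, 0));
	return 0;
}
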
arch/powerpc/kvm/book3s_xive.c

@@ -1217,6 +1217,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
 	struct kvmppc_xive *xive = dev->private;
 	struct kvmppc_xive_vcpu *xc;
 	int i, r = -EBUSY;
+	u32 vp_id;
 
 	pr_devel("connect_vcpu(cpu=%d)\n", cpu);
 
@@ -1228,25 +1229,32 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
 		return -EPERM;
 	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
 		return -EBUSY;
-	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
-		pr_devel("Duplicate !\n");
-		return -EEXIST;
-	}
 	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
 		pr_devel("Out of bounds !\n");
 		return -EINVAL;
 	}
-	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
-	if (!xc)
-		return -ENOMEM;
 
 	/* We need to synchronize with queue provisioning */
 	mutex_lock(&xive->lock);
 
+	vp_id = kvmppc_xive_vp(xive, cpu);
+	if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+		pr_devel("Duplicate !\n");
+		r = -EEXIST;
+		goto bail;
+	}
+
+	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+	if (!xc) {
+		r = -ENOMEM;
+		goto bail;
+	}
+
 	vcpu->arch.xive_vcpu = xc;
 	xc->xive = xive;
 	xc->vcpu = vcpu;
 	xc->server_num = cpu;
-	xc->vp_id = kvmppc_xive_vp(xive, cpu);
+	xc->vp_id = vp_id;
 	xc->mfrr = 0xff;
 	xc->valid = true;
 

arch/powerpc/kvm/book3s_xive.h

@@ -220,6 +220,18 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
 	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
 }
 
+static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
+{
+	struct kvm_vcpu *vcpu = NULL;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
+			return true;
+	}
+	return false;
+}
+
 /*
  * Mapping between guest priorities and host priorities
  * is as follow.

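Note (illustration only): kvmppc_xive_vp() packs the server number into a VP index, and two different server numbers can land on the same VP when the guest is not using every SMT thread; that colliding case is what the new helper rejects before a second vCPU gets connected. The toy packing function below is not the real kvmppc_pack_vcpu_id() (which depends on KVM_MAX_VCPUS and the emulated SMT mode); it only exists to make the collision visible:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_VPS 8	/* pretend the VP block only has 8 entries */

/* Toy packing for illustration; the real code uses kvmppc_pack_vcpu_id(). */
static uint32_t pack_vp(uint32_t vp_base, uint32_t server)
{
	return vp_base + (server % NR_VPS);
}

static bool vp_in_use(const uint32_t *connected, int n, uint32_t vp_id)
{
	for (int i = 0; i < n; i++)
		if (connected[i] == vp_id)
			return true;
	return false;
}

int main(void)
{
	uint32_t connected[1] = { pack_vp(0x100, 1) };	/* server 1 already connected */

	/* In this toy model server 9 packs to the same VP as server 1. */
	printf("%s\n", vp_in_use(connected, 1, pack_vp(0x100, 9)) ?
	       "duplicate VP, reject with -EEXIST" : "ok");
	return 0;
}
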
arch/powerpc/kvm/book3s_xive_native.c

@@ -106,6 +106,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
 	struct kvmppc_xive *xive = dev->private;
 	struct kvmppc_xive_vcpu *xc = NULL;
 	int rc;
+	u32 vp_id;
 
 	pr_devel("native_connect_vcpu(server=%d)\n", server_num);
 
@@ -124,7 +125,8 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
 
 	mutex_lock(&xive->lock);
 
-	if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
+	vp_id = kvmppc_xive_vp(xive, server_num);
+	if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
 		pr_devel("Duplicate !\n");
 		rc = -EEXIST;
 		goto bail;
@@ -141,7 +143,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
 	xc->vcpu = vcpu;
 	xc->server_num = server_num;
 
-	xc->vp_id = kvmppc_xive_vp(xive, server_num);
+	xc->vp_id = vp_id;
 	xc->valid = true;
 	vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
 

arch/x86/include/asm/kvm_host.h

@@ -1189,7 +1189,7 @@ struct kvm_x86_ops {
 	int (*set_nested_state)(struct kvm_vcpu *vcpu,
 				struct kvm_nested_state __user *user_kvm_nested_state,
 				struct kvm_nested_state *kvm_state);
-	void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
 
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);

arch/x86/kvm/cpuid.c

@@ -363,7 +363,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
 
 	/* cpuid 7.0.ecx*/
 	const u32 kvm_cpuid_7_0_ecx_x86_features =
-		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
+		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
 		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
 		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
 		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;

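Note (not part of the diff): RDPID is enumerated in CPUID.(EAX=7,ECX=0):ECX — bit 22, if memory of the SDM serves — so adding F(RDPID) above lets KVM_GET_SUPPORTED_CPUID advertise the feature to userspace when the host has it. A small userspace sketch using GCC/Clang's <cpuid.h> to look at the host bit:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf 7 not available */

	/* CPUID.(EAX=7,ECX=0):ECX bit 22 = RDPID (assumed bit position) */
	printf("RDPID %ssupported on this host\n",
	       (ecx & (1u << 22)) ? "" : "not ");
	return 0;
}
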
arch/x86/kvm/lapic.h

@@ -111,11 +111,6 @@ static inline int apic_enabled(struct kvm_lapic *apic)
 	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
 	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
-static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
-{
-	return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
-}
-
 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
 {
 	return apic->vcpu->vcpu_id;
@@ -242,4 +242,9 @@ static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
 	return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 }
 
+static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
+{
+	return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
+}
+
 #endif

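Note (not part of the diff): the helper that moved within this header simply pulls the 8-bit xAPIC ID out of bits 31:24 of the APIC_ID register, which is what the AVIC fix below uses instead of assuming the ID equals vcpu_id. A standalone illustration with a hard-coded register value:

#include <stdint.h>
#include <stdio.h>

static uint8_t xapic_id(uint32_t apic_id_reg)
{
	return apic_id_reg >> 24;	/* xAPIC ID lives in bits 31:24 */
}

int main(void)
{
	/* An APIC_ID register value of 0x05000000 yields xAPIC ID 5. */
	printf("%u\n", (unsigned int)xapic_id(0x05000000u));
	return 0;
}
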
arch/x86/kvm/svm.c

@@ -4591,6 +4591,7 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
 	int ret = 0;
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
+	u32 id = kvm_xapic_id(vcpu->arch.apic);
 
 	if (ldr == svm->ldr_reg)
 		return 0;
@@ -4598,7 +4599,7 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
 	avic_invalidate_logical_id_entry(vcpu);
 
 	if (ldr)
-		ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
+		ret = avic_ldr_write(vcpu, id, ldr);
 
 	if (!ret)
 		svm->ldr_reg = ldr;
@@ -4610,8 +4611,7 @@ static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
 {
 	u64 *old, *new;
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
-	u32 id = (apic_id_reg >> 24) & 0xff;
+	u32 id = kvm_xapic_id(vcpu->arch.apic);
 
 	if (vcpu->vcpu_id == id)
 		return 0;

arch/x86/kvm/vmx/nested.c

@@ -2917,7 +2917,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 						 struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2937,19 +2937,18 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 			vmx->nested.apic_access_page = NULL;
 		}
 		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
-		/*
-		 * If translation failed, no matter: This feature asks
-		 * to exit when accessing the given address, and if it
-		 * can never be accessed, this feature won't do
-		 * anything anyway.
-		 */
 		if (!is_error_page(page)) {
 			vmx->nested.apic_access_page = page;
 			hpa = page_to_phys(vmx->nested.apic_access_page);
 			vmcs_write64(APIC_ACCESS_ADDR, hpa);
 		} else {
-			secondary_exec_controls_clearbit(vmx,
-				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+			pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
+					     __func__);
+			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			vcpu->run->internal.suberror =
+				KVM_INTERNAL_ERROR_EMULATION;
+			vcpu->run->internal.ndata = 0;
+			return false;
 		}
 	}
 
@@ -2994,6 +2993,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
 	else
 		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+	return true;
 }
 
 /*
@@ -3032,13 +3032,15 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 /*
  * If from_vmentry is false, this is being called from state restore (either RSM
  * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
-+ *
-+ * Returns:
-+ *	0 - success, i.e. proceed with actual VMEnter
-+ *	1 - consistency check VMExit
-+ *	-1 - consistency check VMFail
+ *
+ * Returns:
+ *	NVMX_ENTRY_SUCCESS: Entered VMX non-root mode
+ *	NVMX_ENTRY_VMFAIL:  Consistency check VMFail
+ *	NVMX_ENTRY_VMEXIT:  Consistency check VMExit
+ *	NVMX_ENTRY_KVM_INTERNAL_ERROR: KVM internal error
  */
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+							bool from_vmentry)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -3081,11 +3083,12 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	prepare_vmcs02_early(vmx, vmcs12);
 
 	if (from_vmentry) {
-		nested_get_vmcs12_pages(vcpu);
+		if (unlikely(!nested_get_vmcs12_pages(vcpu)))
+			return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
 
 		if (nested_vmx_check_vmentry_hw(vcpu)) {
 			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-			return -1;
+			return NVMX_VMENTRY_VMFAIL;
 		}
 
 		if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
@@ -3149,7 +3152,7 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	 * returned as far as L1 is concerned. It will only return (and set
 	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
 	 */
-	return 0;
+	return NVMX_VMENTRY_SUCCESS;
 
 	/*
 	 * A failed consistency check that leads to a VMExit during L1's
@@ -3165,14 +3168,14 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 
 	if (!from_vmentry)
-		return 1;
+		return NVMX_VMENTRY_VMEXIT;
 
 	load_vmcs12_host_state(vcpu, vmcs12);
 	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
 	vmcs12->exit_qualification = exit_qual;
 	if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
 		vmx->nested.need_vmcs12_to_shadow_sync = true;
-	return 1;
+	return NVMX_VMENTRY_VMEXIT;
 }
 
 /*
@@ -3182,9 +3185,9 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 {
 	struct vmcs12 *vmcs12;
+	enum nvmx_vmentry_status status;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
-	int ret;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -3244,13 +3247,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * the nested entry.
 	 */
 	vmx->nested.nested_run_pending = 1;
-	ret = nested_vmx_enter_non_root_mode(vcpu, true);
-	vmx->nested.nested_run_pending = !ret;
-	if (ret > 0)
-		return 1;
-	else if (ret)
-		return nested_vmx_failValid(vcpu,
-			VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+	status = nested_vmx_enter_non_root_mode(vcpu, true);
+	if (unlikely(status != NVMX_VMENTRY_SUCCESS))
+		goto vmentry_failed;
 
 	/* Hide L1D cache contents from the nested guest. */
 	vmx->vcpu.arch.l1tf_flush_l1d = true;
@@ -3281,6 +3280,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 			return kvm_vcpu_halt(vcpu);
 	}
 	return 1;
+
+vmentry_failed:
+	vmx->nested.nested_run_pending = 0;
+	if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
+		return 0;
+	if (status == NVMX_VMENTRY_VMEXIT)
+		return 1;
+	WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
+	return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 }
 
 /*

arch/x86/kvm/vmx/nested.h

@@ -6,6 +6,16 @@
 #include "vmcs12.h"
 #include "vmx.h"
 
+/*
+ * Status returned by nested_vmx_enter_non_root_mode():
+ */
+enum nvmx_vmentry_status {
+	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
+	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
+	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
+	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
+};
+
 void vmx_leave_nested(struct kvm_vcpu *vcpu);
 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
 				bool apicv);
@@ -13,7 +23,8 @@ void nested_vmx_hardware_unsetup(void);
 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
 void nested_vmx_vcpu_setup(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry);
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+							bool from_vmentry);
 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		       u32 exit_intr_info, unsigned long exit_qualification);

arch/x86/kvm/vmx/vmx.c

@@ -5543,14 +5543,6 @@ static int handle_encls(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-static int handle_unexpected_vmexit(struct kvm_vcpu *vcpu)
-{
-	kvm_skip_emulated_instruction(vcpu);
-	WARN_ONCE(1, "Unexpected VM-Exit Reason = 0x%x",
-		vmcs_read32(VM_EXIT_REASON));
-	return 1;
-}
-
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -5602,15 +5594,11 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
 	[EXIT_REASON_RDRAND]                  = handle_invalid_op,
 	[EXIT_REASON_RDSEED]                  = handle_invalid_op,
-	[EXIT_REASON_XSAVES]                  = handle_unexpected_vmexit,
-	[EXIT_REASON_XRSTORS]                 = handle_unexpected_vmexit,
 	[EXIT_REASON_PML_FULL]                = handle_pml_full,
 	[EXIT_REASON_INVPCID]                 = handle_invpcid,
 	[EXIT_REASON_VMFUNC]                  = handle_vmx_instruction,
 	[EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
 	[EXIT_REASON_ENCLS]                   = handle_encls,
-	[EXIT_REASON_UMWAIT]                  = handle_unexpected_vmexit,
-	[EXIT_REASON_TPAUSE]                  = handle_unexpected_vmexit,
 };
 
 static const int kvm_vmx_max_exit_handlers =

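Note (illustration only): kvm_vmx_exit_handlers is a designated-initializer table indexed by exit reason, so dropping the handle_unexpected_vmexit entries leaves those slots unset and presumably routes such exits through the generic unset-handler path instead. A tiny standalone sketch of the same table-dispatch pattern, with invented names:

#include <stdio.h>

enum { EXIT_A, EXIT_B, EXIT_MAX };

typedef int (*handler_t)(void);

static int handle_a(void) { return 1; }

/* Designated initializers: slots that are not listed stay NULL. */
static handler_t handlers[EXIT_MAX] = {
	[EXIT_A] = handle_a,
};

static int dispatch(unsigned int reason)
{
	if (reason >= EXIT_MAX || !handlers[reason])
		return -1;	/* fallback for unexpected exit reasons */
	return handlers[reason]();
}

int main(void)
{
	printf("%d %d\n", dispatch(EXIT_A), dispatch(EXIT_B));	/* prints "1 -1" */
	return 0;
}
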
arch/x86/kvm/x86.c

@@ -360,8 +360,7 @@ EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 asmlinkage __visible void kvm_spurious_fault(void)
 {
 	/* Fault while not rebooting. We want the trace. */
-	if (!kvm_rebooting)
-		BUG();
+	BUG_ON(!kvm_rebooting);
 }
 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
@@ -2537,6 +2536,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pv_time_enabled = false;
+	vcpu->arch.time = 0;
 }
 
 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
@@ -2702,8 +2702,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_KVM_SYSTEM_TIME: {
 		struct kvm_arch *ka = &vcpu->kvm->arch;
 
-		kvmclock_reset(vcpu);
-
 		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
 			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
 
@@ -2717,14 +2715,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
 		/* we verify if the enable bit is set... */
+		vcpu->arch.pv_time_enabled = false;
 		if (!(data & 1))
 			break;
 
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
 		     &vcpu->arch.pv_time, data & ~1ULL,
 		     sizeof(struct pvclock_vcpu_time_info)))
-			vcpu->arch.pv_time_enabled = false;
-		else
 			vcpu->arch.pv_time_enabled = true;
 
 		break;
@@ -7941,8 +7938,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_immediate_exit = false;
 
 	if (kvm_request_pending(vcpu)) {
-		if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
-			kvm_x86_ops->get_vmcs12_pages(vcpu);
+		if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
+			if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
+				r = 0;
+				goto out;
+			}
+		}
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
 			kvm_mmu_unload(vcpu);
 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))

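Note (not part of the diff): in the MSR_KVM_SYSTEM_TIME handling above, bit 0 of the written value is the enable flag and the remaining bits form the guest-physical address of the pvclock area, which is why pv_time_enabled is now cleared up front before the early break. A standalone sketch of that decoding, with simplified types rather than the kernel's structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pv_time {
	bool     enabled;
	uint64_t gpa;	/* guest-physical address of the pvclock data */
};

static void write_system_time_msr(struct pv_time *pt, uint64_t data)
{
	pt->enabled = false;	/* disable first, as the fix above does */
	if (!(data & 1))
		return;		/* enable bit clear: stay disabled */
	pt->gpa = data & ~1ULL;
	pt->enabled = true;
}

int main(void)
{
	struct pv_time pt = { 0 };

	write_system_time_msr(&pt, 0x1000ULL | 1);
	printf("enabled=%d gpa=%#llx\n", pt.enabled, (unsigned long long)pt.gpa);

	write_system_time_msr(&pt, 0);	/* guest turns kvmclock off */
	printf("enabled=%d\n", pt.enabled);
	return 0;
}
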
tools/testing/selftests/kvm/.gitignore

@@ -1,4 +1,5 @@
 /s390x/sync_regs_test
+/s390x/memop
 /x86_64/cr4_cpuid_sync_test
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
@@ -9,6 +10,7 @@
 /x86_64/state_test
 /x86_64/sync_regs_test
 /x86_64/vmx_close_while_nested_test
+/x86_64/vmx_dirty_log_test
 /x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
 /clear_dirty_log_test

tools/testing/selftests/kvm/include/x86_64/vmx.h

@@ -580,6 +580,8 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
 
+void nested_vmx_check_supported(void);
+
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 		   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,

tools/testing/selftests/kvm/lib/x86_64/vmx.c

@@ -376,6 +376,16 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
 	init_vmcs_guest_state(guest_rip, guest_rsp);
 }
 
+void nested_vmx_check_supported(void)
+{
+	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+	if (!(entry->ecx & CPUID_VMX)) {
+		fprintf(stderr, "nested VMX not enabled, skipping test\n");
+		exit(KSFT_SKIP);
+	}
+}
+
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 		   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
 {

tools/testing/selftests/kvm/x86_64/sync_regs_test.c

@@ -22,18 +22,19 @@
 
 #define VCPU_ID 5
 
+#define UCALL_PIO_PORT ((uint16_t)0x1000)
+
+/*
+ * ucall is embedded here to protect against compiler reshuffling registers
+ * before calling a function. In this test we only need to get KVM_EXIT_IO
+ * vmexit and preserve RBX, no additional information is needed.
+ */
 void guest_code(void)
 {
-	/*
-	 * use a callee-save register, otherwise the compiler
-	 * saves it around the call to GUEST_SYNC.
-	 */
-	register u32 stage asm("rbx");
-	for (;;) {
-		GUEST_SYNC(0);
-		stage++;
-		asm volatile ("" : : "r" (stage));
-	}
+	asm volatile("1: in %[port], %%al\n"
+		     "add $0x1, %%rbx\n"
+		     "jmp 1b"
+		     : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
 }
 
 static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)

tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c

@@ -53,12 +53,8 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 int main(int argc, char *argv[])
 {
 	vm_vaddr_t vmx_pages_gva;
-	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-	if (!(entry->ecx & CPUID_VMX)) {
-		fprintf(stderr, "nested VMX not enabled, skipping test\n");
-		exit(KSFT_SKIP);
-	}
+	nested_vmx_check_supported();
 
 	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c

@@ -78,6 +78,8 @@ int main(int argc, char *argv[])
 	struct ucall uc;
 	bool done = false;
 
+	nested_vmx_check_supported();
+
 	/* Create VM */
 	vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c

@@ -224,7 +224,6 @@ int main(int argc, char *argv[])
 {
 	struct kvm_vm *vm;
 	struct kvm_nested_state state;
-	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
 	have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
 
@@ -237,10 +236,7 @@ int main(int argc, char *argv[])
 	 * AMD currently does not implement set_nested_state, so for now we
 	 * just early out.
 	 */
-	if (!(entry->ecx & CPUID_VMX)) {
-		fprintf(stderr, "nested VMX not enabled, skipping test\n");
-		exit(KSFT_SKIP);
-	}
+	nested_vmx_check_supported();
 
 	vm = vm_create_default(VCPU_ID, 0, 0);
 
@@ -271,12 +267,7 @@ int main(int argc, char *argv[])
 	state.flags = KVM_STATE_NESTED_RUN_PENDING;
 	test_nested_state_expect_einval(vm, &state);
 
-	/*
-	 * TODO: When SVM support is added for KVM_SET_NESTED_STATE
-	 * add tests here to support it like VMX.
-	 */
-	if (entry->ecx & CPUID_VMX)
-		test_vmx_nested_state(vm);
+	test_vmx_nested_state(vm);
 
 	kvm_vm_free(vm);
 	return 0;

tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c

@@ -128,12 +128,8 @@ static void report(int64_t val)
 int main(int argc, char *argv[])
 {
 	vm_vaddr_t vmx_pages_gva;
-	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-	if (!(entry->ecx & CPUID_VMX)) {
-		fprintf(stderr, "nested VMX not enabled, skipping test\n");
-		exit(KSFT_SKIP);
-	}
+	nested_vmx_check_supported();
 
 	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

virt/kvm/arm/pmu.c

@@ -8,6 +8,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -146,8 +147,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	if (kvm_pmu_pmc_is_chained(pmc) &&
 	    kvm_pmu_idx_is_high_counter(select_idx))
 		counter = upper_32_bits(counter);
-
-	else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
 		counter = lower_32_bits(counter);
 
 	return counter;
@@ -193,7 +193,7 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-	u64 counter, reg;
+	u64 counter, reg, val;
 
 	pmc = kvm_pmu_get_canonical_pmc(pmc);
 	if (!pmc->perf_event)
@@ -201,16 +201,19 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-	if (kvm_pmu_pmc_is_chained(pmc)) {
-		reg = PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
-		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+		reg = PMCCNTR_EL0;
+		val = counter;
 	} else {
-		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+		reg = PMEVCNTR0_EL0 + pmc->idx;
+		val = lower_32_bits(counter);
 	}
 
+	__vcpu_sys_reg(vcpu, reg) = val;
+
+	if (kvm_pmu_pmc_is_chained(pmc))
+		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
 	kvm_pmu_release_perf_event(pmc);
 }
 
@@ -440,8 +443,25 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 				  struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 	int idx = pmc->idx;
+	u64 period;
+
+	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+	/*
+	 * Reset the sample period to the architectural limit,
+	 * i.e. the point where the counter overflows.
+	 */
+	period = -(local64_read(&perf_event->count));
+
+	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+		period &= GENMASK(31, 0);
+
+	local64_set(&perf_event->hw.period_left, 0);
+	perf_event->attr.sample_period = period;
+	perf_event->hw.sample_period = period;
 
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 
@@ -449,6 +469,8 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
+
+	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 }
 
 /**
@@ -567,12 +589,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 		 * high counter.
 		 */
 		attr.sample_period = (-counter) & GENMASK(63, 0);
+		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
 		event = perf_event_create_kernel_counter(&attr, -1, current,
 							 kvm_pmu_perf_overflow,
 							 pmc + 1);
-
-		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
 	} else {
 		/* The initial sample period (overflow count) of an event. */
 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))

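Note (not part of the diff): the overflow handler above reprograms the perf event so that the next interrupt fires exactly where the architectural counter would wrap; the new sample period is the two's complement of the current count, truncated to 32 bits unless the counter is 64 bits wide. A standalone illustration of that arithmetic, with GENMASK(31, 0) expanded by hand:

#include <stdint.h>
#include <stdio.h>

static uint64_t next_sample_period(uint64_t count, int counter_is_64bit)
{
	uint64_t period = -count;		/* distance to the wrap point */

	if (!counter_is_64bit)
		period &= 0xffffffffULL;	/* GENMASK(31, 0) */
	return period;
}

int main(void)
{
	/* 32-bit counter at 0xfffffff0: overflow after 0x10 more events. */
	printf("%#llx\n", (unsigned long long)next_sample_period(0xfffffff0ULL, 0));
	/* A 64-bit cycle counter keeps the full distance instead. */
	printf("%#llx\n", (unsigned long long)next_sample_period(0xfffffff0ULL, 1));
	return 0;
}
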
virt/kvm/kvm_main.c

@@ -2360,20 +2360,23 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_unblocking(vcpu);
 	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
-	if (!vcpu_valid_wakeup(vcpu))
-		shrink_halt_poll_ns(vcpu);
-	else if (halt_poll_ns) {
-		if (block_ns <= vcpu->halt_poll_ns)
-			;
-		/* we had a long block, shrink polling */
-		else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
-			shrink_halt_poll_ns(vcpu);
-		/* we had a short halt and our poll time is too small */
-		else if (vcpu->halt_poll_ns < halt_poll_ns &&
-			block_ns < halt_poll_ns)
-			grow_halt_poll_ns(vcpu);
-	} else
-		vcpu->halt_poll_ns = 0;
+	if (!kvm_arch_no_poll(vcpu)) {
+		if (!vcpu_valid_wakeup(vcpu)) {
+			shrink_halt_poll_ns(vcpu);
+		} else if (halt_poll_ns) {
+			if (block_ns <= vcpu->halt_poll_ns)
+				;
+			/* we had a long block, shrink polling */
+			else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+				shrink_halt_poll_ns(vcpu);
+			/* we had a short halt and our poll time is too small */
+			else if (vcpu->halt_poll_ns < halt_poll_ns &&
+				block_ns < halt_poll_ns)
+				grow_halt_poll_ns(vcpu);
+		} else {
+			vcpu->halt_poll_ns = 0;
+		}
+	}
 
 	trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
 	kvm_arch_vcpu_block_finish(vcpu);

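Note (illustration only): the restructuring above wraps the existing grow/shrink decisions in a kvm_arch_no_poll() check, so an architecture that reports host-side polling as disabled no longer adjusts halt_poll_ns at all. A condensed standalone sketch of the resulting decision tree, with the thresholds passed in explicitly rather than read from module parameters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum poll_action { POLL_KEEP, POLL_SHRINK, POLL_GROW, POLL_RESET };

static enum poll_action halt_poll_action(bool arch_no_poll, bool valid_wakeup,
					 uint64_t block_ns, uint64_t vcpu_poll_ns,
					 uint64_t halt_poll_ns)
{
	if (arch_no_poll)		/* host-side polling disabled: leave it alone */
		return POLL_KEEP;
	if (!valid_wakeup)
		return POLL_SHRINK;
	if (!halt_poll_ns)		/* polling disabled globally */
		return POLL_RESET;
	if (block_ns <= vcpu_poll_ns)
		return POLL_KEEP;
	if (vcpu_poll_ns && block_ns > halt_poll_ns)
		return POLL_SHRINK;	/* long block: poll less next time */
	if (vcpu_poll_ns < halt_poll_ns && block_ns < halt_poll_ns)
		return POLL_GROW;	/* short halt, small window: poll more */
	return POLL_KEEP;
}

int main(void)
{
	/* Short halt with a small per-vCPU window grows the window (prints 2). */
	printf("%d\n", halt_poll_action(false, true, 50000, 10000, 200000));
	/* With kvm_arch_no_poll() == true nothing is adjusted (prints 0). */
	printf("%d\n", halt_poll_action(true, true, 50000, 10000, 200000));
	return 0;
}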