KVM: VMX: Extend VMX controls macro shenanigans

When VMX controls macros are used to set or clear a control bit, make
sure that this bit was checked in setup_vmcs_config() and thus is properly
reflected in vmcs_config.
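
As an illustration, the misuse the new BUILD_BUG_ON() catches looks roughly
like this (CPU_BASED_SOME_NEW_CONTROL is a made-up name standing in for any
bit that is in neither the KVM_REQUIRED_VMX_* nor the KVM_OPTIONAL_VMX_* set
for the CPU-based field):

	/* OK: MOV-DR exiting is a required CPU-based control. */
	exec_controls_setbit(vmx, CPU_BASED_MOV_DR_EXITING);

	/* Build error: this bit is never checked in setup_vmcs_config(). */
	exec_controls_setbit(vmx, CPU_BASED_SOME_NEW_CONTROL);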

Opportunistically drop pointless "< 0" check for adjust_vmx_controls()'s
return value.
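
For context, adjust_vmx_controls() only ever returns 0 or -EIO, so "if (err)"
is exactly as strict as "if (err < 0)". A sketch of the helper as it lives in
vmx.c (comments paraphrased):

	static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
					      u32 msr, u32 *result)
	{
		u32 vmx_msr_low, vmx_msr_high;
		u32 ctl = ctl_min | ctl_opt;

		rdmsr(msr, vmx_msr_low, vmx_msr_high);

		ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
		ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

		/* Ensure the minimum (required) controls are supported. */
		if (ctl_min & ~ctl)
			return -EIO;

		*result = ctl;
		return 0;
	}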

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20220830133737.1539624-24-vkuznets@redhat.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -864,7 +864,7 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
 	return flags;
 }
 
-static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 		unsigned long entry, unsigned long exit)
 {
 	vm_entry_controls_clearbit(vmx, entry);
@@ -922,7 +922,7 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
-static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 		unsigned long entry, unsigned long exit,
 		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
 		u64 guest_val, u64 host_val)
@@ -2525,7 +2525,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 					     struct vmx_capability *vmx_cap)
 {
 	u32 vmx_msr_low, vmx_msr_high;
-	u32 min, opt, min2, opt2;
 	u32 _pin_based_exec_control = 0;
 	u32 _cpu_based_exec_control = 0;
 	u32 _cpu_based_2nd_exec_control = 0;
@@ -2551,29 +2550,11 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 	};
 
 	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
-	min = CPU_BASED_HLT_EXITING |
-#ifdef CONFIG_X86_64
-	      CPU_BASED_CR8_LOAD_EXITING |
-	      CPU_BASED_CR8_STORE_EXITING |
-#endif
-	      CPU_BASED_CR3_LOAD_EXITING |
-	      CPU_BASED_CR3_STORE_EXITING |
-	      CPU_BASED_UNCOND_IO_EXITING |
-	      CPU_BASED_MOV_DR_EXITING |
-	      CPU_BASED_USE_TSC_OFFSETTING |
-	      CPU_BASED_MWAIT_EXITING |
-	      CPU_BASED_MONITOR_EXITING |
-	      CPU_BASED_INVLPG_EXITING |
-	      CPU_BASED_RDPMC_EXITING |
-	      CPU_BASED_INTR_WINDOW_EXITING;
-
-	opt = CPU_BASED_TPR_SHADOW |
-	      CPU_BASED_USE_MSR_BITMAPS |
-	      CPU_BASED_NMI_WINDOW_EXITING |
-	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |
-	      CPU_BASED_ACTIVATE_TERTIARY_CONTROLS;
-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
-				&_cpu_based_exec_control) < 0)
+
+	if (adjust_vmx_controls(KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL,
+				KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL,
+				MSR_IA32_VMX_PROCBASED_CTLS,
+				&_cpu_based_exec_control))
 		return -EIO;
 #ifdef CONFIG_X86_64
 	if (_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
@@ -2581,36 +2562,10 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 					     ~CPU_BASED_CR8_STORE_EXITING;
 #endif
 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
-		min2 = 0;
-		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
-			SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
-			SECONDARY_EXEC_WBINVD_EXITING |
-			SECONDARY_EXEC_ENABLE_VPID |
-			SECONDARY_EXEC_ENABLE_EPT |
-			SECONDARY_EXEC_UNRESTRICTED_GUEST |
-			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
-			SECONDARY_EXEC_DESC |
-			SECONDARY_EXEC_ENABLE_RDTSCP |
-			SECONDARY_EXEC_ENABLE_INVPCID |
-			SECONDARY_EXEC_APIC_REGISTER_VIRT |
-			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
-			SECONDARY_EXEC_SHADOW_VMCS |
-			SECONDARY_EXEC_XSAVES |
-			SECONDARY_EXEC_RDSEED_EXITING |
-			SECONDARY_EXEC_RDRAND_EXITING |
-			SECONDARY_EXEC_ENABLE_PML |
-			SECONDARY_EXEC_TSC_SCALING |
-			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
-			SECONDARY_EXEC_PT_USE_GPA |
-			SECONDARY_EXEC_PT_CONCEAL_VMX |
-			SECONDARY_EXEC_ENABLE_VMFUNC |
-			SECONDARY_EXEC_BUS_LOCK_DETECTION |
-			SECONDARY_EXEC_NOTIFY_VM_EXITING |
-			SECONDARY_EXEC_ENCLS_EXITING;
-		if (adjust_vmx_controls(min2, opt2,
+		if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL,
+					KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL,
 					MSR_IA32_VMX_PROCBASED_CTLS2,
-					&_cpu_based_2nd_exec_control) < 0)
+					&_cpu_based_2nd_exec_control))
 			return -EIO;
 	}
 #ifndef CONFIG_X86_64
@@ -2657,32 +2612,21 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 	if (!cpu_has_sgx())
 		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_ENCLS_EXITING;
 
-	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS) {
-		u64 opt3 = TERTIARY_EXEC_IPI_VIRT;
-
-		_cpu_based_3rd_exec_control = adjust_vmx_controls64(opt3,
+	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
+		_cpu_based_3rd_exec_control =
+			adjust_vmx_controls64(KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL,
 					      MSR_IA32_VMX_PROCBASED_CTLS3);
-	}
 
-	min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
-#ifdef CONFIG_X86_64
-	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
-#endif
-	opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
-	      VM_EXIT_LOAD_IA32_PAT |
-	      VM_EXIT_LOAD_IA32_EFER |
-	      VM_EXIT_CLEAR_BNDCFGS |
-	      VM_EXIT_PT_CONCEAL_PIP |
-	      VM_EXIT_CLEAR_IA32_RTIT_CTL;
-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
-				&_vmexit_control) < 0)
+	if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_EXIT_CONTROLS,
+				KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS,
+				MSR_IA32_VMX_EXIT_CTLS,
+				&_vmexit_control))
 		return -EIO;
 
-	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
-	opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
-	      PIN_BASED_VMX_PREEMPTION_TIMER;
-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
-				&_pin_based_exec_control) < 0)
+	if (adjust_vmx_controls(KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL,
+				KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL,
+				MSR_IA32_VMX_PINBASED_CTLS,
+				&_pin_based_exec_control))
 		return -EIO;
 
 	if (cpu_has_broken_vmx_preemption_timer())
@@ -2691,18 +2635,10 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
 		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
 
-	min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
-#ifdef CONFIG_X86_64
-	min |= VM_ENTRY_IA32E_MODE;
-#endif
-	opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
-	      VM_ENTRY_LOAD_IA32_PAT |
-	      VM_ENTRY_LOAD_IA32_EFER |
-	      VM_ENTRY_LOAD_BNDCFGS |
-	      VM_ENTRY_PT_CONCEAL_PIP |
-	      VM_ENTRY_LOAD_IA32_RTIT_CTL;
-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
-				&_vmentry_control) < 0)
+	if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS,
+				KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS,
+				MSR_IA32_VMX_ENTRY_CTLS,
+				&_vmentry_control))
 		return -EIO;
 
 	for (i = 0; i < ARRAY_SIZE(vmcs_entry_exit_pairs); i++) {

--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -477,29 +477,138 @@ static inline u8 vmx_get_rvi(void)
 	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
 }
 
+#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS				\
+	(VM_ENTRY_LOAD_DEBUG_CONTROLS)
+#ifdef CONFIG_X86_64
+	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
+		(__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS |			\
+		 VM_ENTRY_IA32E_MODE)
+#else
+	#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS			\
+		__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
+#endif
+#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS				\
+	(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |				\
+	 VM_ENTRY_LOAD_IA32_PAT |					\
+	 VM_ENTRY_LOAD_IA32_EFER |					\
+	 VM_ENTRY_LOAD_BNDCFGS |					\
+	 VM_ENTRY_PT_CONCEAL_PIP |					\
+	 VM_ENTRY_LOAD_IA32_RTIT_CTL)
+
+#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS				\
+	(VM_EXIT_SAVE_DEBUG_CONTROLS |					\
+	 VM_EXIT_ACK_INTR_ON_EXIT)
+#ifdef CONFIG_X86_64
+	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
+		(__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS |			\
+		 VM_EXIT_HOST_ADDR_SPACE_SIZE)
+#else
+	#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS			\
+		__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
+#endif
+#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS				\
+	(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |				\
+	 VM_EXIT_LOAD_IA32_PAT |					\
+	 VM_EXIT_LOAD_IA32_EFER |					\
+	 VM_EXIT_CLEAR_BNDCFGS |					\
+	 VM_EXIT_PT_CONCEAL_PIP |					\
+	 VM_EXIT_CLEAR_IA32_RTIT_CTL)
+
+#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL			\
+	(PIN_BASED_EXT_INTR_MASK |					\
+	 PIN_BASED_NMI_EXITING)
+#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL			\
+	(PIN_BASED_VIRTUAL_NMIS |					\
+	 PIN_BASED_POSTED_INTR |					\
+	 PIN_BASED_VMX_PREEMPTION_TIMER)
+
+#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL			\
+	(CPU_BASED_HLT_EXITING |					\
+	 CPU_BASED_CR3_LOAD_EXITING |					\
+	 CPU_BASED_CR3_STORE_EXITING |					\
+	 CPU_BASED_UNCOND_IO_EXITING |					\
+	 CPU_BASED_MOV_DR_EXITING |					\
+	 CPU_BASED_USE_TSC_OFFSETTING |					\
+	 CPU_BASED_MWAIT_EXITING |					\
+	 CPU_BASED_MONITOR_EXITING |					\
+	 CPU_BASED_INVLPG_EXITING |					\
+	 CPU_BASED_RDPMC_EXITING |					\
+	 CPU_BASED_INTR_WINDOW_EXITING)
+#ifdef CONFIG_X86_64
+	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
+		(__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL |		\
+		 CPU_BASED_CR8_LOAD_EXITING |				\
+		 CPU_BASED_CR8_STORE_EXITING)
+#else
+	#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL		\
+		__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
+#endif
+#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL			\
+	(CPU_BASED_TPR_SHADOW |						\
+	 CPU_BASED_USE_MSR_BITMAPS |					\
+	 CPU_BASED_NMI_WINDOW_EXITING |					\
+	 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |			\
+	 CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
+
+#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL 0
+#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL			\
+	(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
+	 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |			\
+	 SECONDARY_EXEC_WBINVD_EXITING |				\
+	 SECONDARY_EXEC_ENABLE_VPID |					\
+	 SECONDARY_EXEC_ENABLE_EPT |					\
+	 SECONDARY_EXEC_UNRESTRICTED_GUEST |				\
+	 SECONDARY_EXEC_PAUSE_LOOP_EXITING |				\
+	 SECONDARY_EXEC_DESC |						\
+	 SECONDARY_EXEC_ENABLE_RDTSCP |					\
+	 SECONDARY_EXEC_ENABLE_INVPCID |				\
+	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
+	 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
+	 SECONDARY_EXEC_SHADOW_VMCS |					\
+	 SECONDARY_EXEC_XSAVES |					\
+	 SECONDARY_EXEC_RDSEED_EXITING |				\
+	 SECONDARY_EXEC_RDRAND_EXITING |				\
+	 SECONDARY_EXEC_ENABLE_PML |					\
+	 SECONDARY_EXEC_TSC_SCALING |					\
+	 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |				\
+	 SECONDARY_EXEC_PT_USE_GPA |					\
+	 SECONDARY_EXEC_PT_CONCEAL_VMX |				\
+	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
+	 SECONDARY_EXEC_BUS_LOCK_DETECTION |				\
+	 SECONDARY_EXEC_NOTIFY_VM_EXITING |				\
+	 SECONDARY_EXEC_ENCLS_EXITING)
+
+#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
+#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL			\
+	(TERTIARY_EXEC_IPI_VIRT)
+
 #define BUILD_CONTROLS_SHADOW(lname, uname, bits)			\
 static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val)	\
 {									\
 	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		\
 		vmcs_write##bits(uname, val);				\
 		vmx->loaded_vmcs->controls_shadow.lname = val;		\
 	}								\
 }									\
 static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs)	\
 {									\
 	return vmcs->controls_shadow.lname;				\
 }									\
 static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx)	\
 {									\
 	return __##lname##_controls_get(vmx->loaded_vmcs);		\
 }									\
-static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val)	\
+static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val)	\
 {									\
+	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname)));	\
 	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	\
 }									\
-static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val)	\
+static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
 {									\
+	BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname)));	\
 	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	\
 }
 BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
 BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
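
Usage note: for bits that setup_vmcs_config() already checks, the generated
helpers behave exactly as before; only unchecked bits are rejected, and at
compile time rather than at runtime. A sketch (VM_ENTRY_SMM is just an example
of an entry control KVM never configures):

	vm_entry_controls_setbit(vmx, VM_ENTRY_LOAD_IA32_PAT); /* OK: optional bit */
	vm_entry_controls_setbit(vmx, VM_ENTRY_SMM);           /* BUILD_BUG_ON() fires */

The setbit/clearbit helpers are made __always_inline (as are the atomic-switch
helpers in vmx.c that forward their entry/exit arguments to them) so that "val"
is guaranteed to be a compile-time constant at the point where BUILD_BUG_ON()
evaluates it.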