KVM: VMX: Convert vcpu_vmx.exit_reason to a union

[ Upstream commit 8e53324021 ]

Convert vcpu_vmx.exit_reason from a u32 to a union (of size u32).  The
full VM_EXIT_REASON field consists of a 16-bit basic exit reason in
bits 15:0, and single-bit modifiers in bits 31:16.
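
For illustration, the 32-bit value splits as in the following minimal
standalone sketch (not part of the patch; it mirrors the union this patch
adds to vmx.h, with bitfield ordering that assumes a little-endian
GCC/Clang target, as the kernel itself does):

	#include <stdint.h>
	#include <stdio.h>

	union exit_reason_sketch {
		struct {
			uint32_t basic          : 16; /* bits 15:0, basic exit reason */
			uint32_t reserved       : 15; /* bits 30:16, modifier bits */
			uint32_t failed_vmentry : 1;  /* bit 31, failed VM-Entry */
		};
		uint32_t full;
	};

	int main(void)
	{
		/* 0x80000021: basic reason 0x21 (invalid guest state) plus bit 31. */
		union exit_reason_sketch r = { .full = 0x80000021 };

		printf("basic=0x%x failed_vmentry=%u\n", r.basic, r.failed_vmentry);
		return 0;
	}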

Historically, KVM has only had to worry about handling the "failed
VM-Entry" modifier, which could only be set in very specific flows and
required dedicated handling.  I.e. manually stripping the FAILED_VMENTRY
bit was a somewhat viable approach.  But even with only a single bit to
worry about, KVM has had several bugs related to comparing a basic exit
reason against the full exit reason stored in vcpu_vmx.

Upcoming Intel features, e.g. SGX, will add new modifier bits that can
be set on more or less any VM-Exit, as opposed to the significantly more
restricted FAILED_VMENTRY, i.e. correctly handling everything in one-off
flows isn't scalable.  Tracking exit reason in a union forces code to
explicitly choose between consuming the full exit reason and the basic
exit reason, and is a convenient way to document and access the modifiers.
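
The bug class this closes can be seen in a standalone sketch (illustrative
names and layout, not kernel code): once any modifier bit is set, comparing
the full 32-bit value against a basic exit reason quietly stops matching,
whereas the union forces the caller to name either .basic or .full:

	#include <stdint.h>
	#include <stdio.h>

	#define EXIT_REASON_MSR_WRITE 32 /* architectural basic exit reason */

	union exit_reason_sketch {
		struct {
			uint32_t basic        : 16;
			uint32_t reserved     : 11;
			uint32_t enclave_mode : 1; /* bit 27, exit from an SGX enclave */
			uint32_t reserved2    : 4;
		};
		uint32_t full;
	};

	int main(void)
	{
		union exit_reason_sketch r = {
			.basic = EXIT_REASON_MSR_WRITE,
			.enclave_mode = 1, /* the kind of modifier SGX introduces */
		};

		/* Old pattern: full-value compare misses once a modifier is set. */
		printf("full  == MSR_WRITE: %d (buggy)\n",
		       r.full == EXIT_REASON_MSR_WRITE);
		/* New pattern: explicit .basic compare keeps working. */
		printf("basic == MSR_WRITE: %d (ok)\n",
		       r.basic == EXIT_REASON_MSR_WRITE);
		return 0;
	}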

No functional change intended.

Cc: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Chenyi Qiang <chenyi.qiang@intel.com>
Message-Id: <20201106090315.18606-2-chenyi.qiang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>

3 changed files with 86 additions and 49 deletions

arch/x86/kvm/vmx/nested.c

@@ -3329,7 +3329,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	enum vm_entry_failure_code entry_failure_code;
 	bool evaluate_pending_interrupts;
-	u32 exit_reason, failed_index;
+	union vmx_exit_reason exit_reason = {
+		.basic = EXIT_REASON_INVALID_STATE,
+		.failed_vmentry = 1,
+	};
+	u32 failed_index;
 
 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
 		kvm_vcpu_flush_tlb_current(vcpu);
@@ -3381,7 +3385,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 
 	if (nested_vmx_check_guest_state(vcpu, vmcs12,
 					 &entry_failure_code)) {
-		exit_reason = EXIT_REASON_INVALID_STATE;
+		exit_reason.basic = EXIT_REASON_INVALID_STATE;
 		vmcs12->exit_qualification = entry_failure_code;
 		goto vmentry_fail_vmexit;
 	}
@@ -3392,7 +3396,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
 	if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
-		exit_reason = EXIT_REASON_INVALID_STATE;
+		exit_reason.basic = EXIT_REASON_INVALID_STATE;
 		vmcs12->exit_qualification = entry_failure_code;
 		goto vmentry_fail_vmexit_guest_mode;
 	}
@@ -3402,7 +3406,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 						  vmcs12->vm_entry_msr_load_addr,
 						  vmcs12->vm_entry_msr_load_count);
 		if (failed_index) {
-			exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+			exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
 			vmcs12->exit_qualification = failed_index;
 			goto vmentry_fail_vmexit_guest_mode;
 		}
@@ -3470,7 +3474,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 		return NVMX_VMENTRY_VMEXIT;
 
 	load_vmcs12_host_state(vcpu, vmcs12);
-	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+	vmcs12->vm_exit_reason = exit_reason.full;
 	if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
 		vmx->nested.need_vmcs12_to_shadow_sync = true;
 	return NVMX_VMENTRY_VMEXIT;
@@ -5533,7 +5537,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
 	return kvm_skip_emulated_instruction(vcpu);
 
 fail:
-	nested_vmx_vmexit(vcpu, vmx->exit_reason,
+	/*
+	 * This is effectively a reflected VM-Exit, as opposed to a synthesized
+	 * nested VM-Exit.  Pass the original exit reason, i.e. don't hardcode
+	 * EXIT_REASON_VMFUNC as the exit reason.
+	 */
+	nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
 			  vmx_get_intr_info(vcpu),
 			  vmx_get_exit_qual(vcpu));
 	return 1;
@@ -5601,7 +5610,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
  */
 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
-					struct vmcs12 *vmcs12, u32 exit_reason)
+					struct vmcs12 *vmcs12,
+					union vmx_exit_reason exit_reason)
 {
 	u32 msr_index = kvm_rcx_read(vcpu);
 	gpa_t bitmap;
@@ -5615,7 +5625,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
 	 * First we need to figure out which of the four to use:
 	 */
 	bitmap = vmcs12->msr_bitmap;
-	if (exit_reason == EXIT_REASON_MSR_WRITE)
+	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
 		bitmap += 2048;
 	if (msr_index >= 0xc0000000) {
 		msr_index -= 0xc0000000;
@@ -5752,11 +5762,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
 * L1 wants the exit.  Only call this when in is_guest_mode (L2).
 */
-static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
+				     union vmx_exit_reason exit_reason)
 {
 	u32 intr_info;
 
-	switch ((u16)exit_reason) {
+	switch ((u16)exit_reason.basic) {
 	case EXIT_REASON_EXCEPTION_NMI:
 		intr_info = vmx_get_intr_info(vcpu);
 		if (is_nmi(intr_info))
@@ -5812,12 +5823,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
 * Return 1 if L1 wants to intercept an exit from L2.  Only call this when in
 * is_guest_mode (L2).
 */
-static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
+				     union vmx_exit_reason exit_reason)
 {
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	u32 intr_info;
 
-	switch ((u16)exit_reason) {
+	switch ((u16)exit_reason.basic) {
 	case EXIT_REASON_EXCEPTION_NMI:
 		intr_info = vmx_get_intr_info(vcpu);
 		if (is_nmi(intr_info))
@@ -5936,7 +5948,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 exit_reason = vmx->exit_reason;
+	union vmx_exit_reason exit_reason = vmx->exit_reason;
 	unsigned long exit_qual;
 	u32 exit_intr_info;
 
@@ -5955,7 +5967,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
 		goto reflect_vmexit;
 	}
 
-	trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX);
+	trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);
 
 	/* If L0 (KVM) wants the exit, it trumps L1's desires. */
 	if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
@@ -5981,7 +5993,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
 	exit_qual = vmx_get_exit_qual(vcpu);
 
 reflect_vmexit:
-	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual);
+	nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
 	return true;
 }

arch/x86/kvm/vmx/vmx.c

@@ -1578,7 +1578,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	 * i.e. we end up advancing IP with some random value.
 	 */
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
-	    to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
+	    to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
 		orig_rip = kvm_rip_read(vcpu);
 		rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
 #ifdef CONFIG_X86_64
@@ -5687,7 +5687,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	*info1 = vmx_get_exit_qual(vcpu);
-	if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
+	if (!(vmx->exit_reason.failed_vmentry)) {
 		*info2 = vmx->idt_vectoring_info;
 		*intr_info = vmx_get_intr_info(vcpu);
 		if (is_exception_with_error_code(*intr_info))
@@ -5931,8 +5931,9 @@ void dump_vmcs(void)
 static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 exit_reason = vmx->exit_reason;
+	union vmx_exit_reason exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
+	u16 exit_handler_index;
 
 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
@@ -5974,11 +5975,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 		return 1;
 	}
 
-	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+	if (exit_reason.failed_vmentry) {
 		dump_vmcs();
 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		vcpu->run->fail_entry.hardware_entry_failure_reason
-			= exit_reason;
+			= exit_reason.full;
 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
 		return 0;
 	}
@@ -6000,18 +6001,18 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	 * will cause infinite loop.
 	 */
 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
-	    (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
-	     exit_reason != EXIT_REASON_EPT_VIOLATION &&
-	     exit_reason != EXIT_REASON_PML_FULL &&
-	     exit_reason != EXIT_REASON_APIC_ACCESS &&
-	     exit_reason != EXIT_REASON_TASK_SWITCH)) {
+	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
+	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
+	     exit_reason.basic != EXIT_REASON_PML_FULL &&
+	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
+	     exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
 		vcpu->run->internal.ndata = 3;
 		vcpu->run->internal.data[0] = vectoring_info;
-		vcpu->run->internal.data[1] = exit_reason;
+		vcpu->run->internal.data[1] = exit_reason.full;
 		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
-		if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
 			vcpu->run->internal.ndata++;
 			vcpu->run->internal.data[3] =
 				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -6043,38 +6044,39 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	if (exit_fastpath != EXIT_FASTPATH_NONE)
 		return 1;
 
-	if (exit_reason >= kvm_vmx_max_exit_handlers)
+	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
 		goto unexpected_vmexit;
 #ifdef CONFIG_RETPOLINE
-	if (exit_reason == EXIT_REASON_MSR_WRITE)
+	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
 		return kvm_emulate_wrmsr(vcpu);
-	else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
+	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
 		return handle_preemption_timer(vcpu);
-	else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
+	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
 		return handle_interrupt_window(vcpu);
-	else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
 		return handle_external_interrupt(vcpu);
-	else if (exit_reason == EXIT_REASON_HLT)
+	else if (exit_reason.basic == EXIT_REASON_HLT)
 		return kvm_emulate_halt(vcpu);
-	else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
+	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
 		return handle_ept_misconfig(vcpu);
 #endif
 
-	exit_reason = array_index_nospec(exit_reason,
-					 kvm_vmx_max_exit_handlers);
-	if (!kvm_vmx_exit_handlers[exit_reason])
+	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
+						kvm_vmx_max_exit_handlers);
+	if (!kvm_vmx_exit_handlers[exit_handler_index])
 		goto unexpected_vmexit;
-	return kvm_vmx_exit_handlers[exit_reason](vcpu);
+	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
 
 unexpected_vmexit:
-	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+		    exit_reason.full);
 	dump_vmcs();
 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	vcpu->run->internal.suberror =
 			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
 	vcpu->run->internal.ndata = 2;
-	vcpu->run->internal.data[0] = exit_reason;
+	vcpu->run->internal.data[0] = exit_reason.full;
 	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
 	return 0;
 }
@@ -6393,9 +6395,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
 		handle_external_interrupt_irqoff(vcpu);
-	else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
+	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
 		handle_exception_nmi_irqoff(vmx);
 }
@@ -6583,7 +6585,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 
 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
-	switch (to_vmx(vcpu)->exit_reason) {
+	switch (to_vmx(vcpu)->exit_reason.basic) {
 	case EXIT_REASON_MSR_WRITE:
 		return handle_fastpath_set_msr_irqoff(vcpu);
 	case EXIT_REASON_PREEMPTION_TIMER:
@@ -6782,17 +6784,17 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->idt_vectoring_info = 0;
 
 	if (unlikely(vmx->fail)) {
-		vmx->exit_reason = 0xdead;
+		vmx->exit_reason.full = 0xdead;
 		return EXIT_FASTPATH_NONE;
 	}
 
-	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-	if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
+	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
 		kvm_machine_check();
 
-	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
+	trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
 
-	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+	if (unlikely(vmx->exit_reason.failed_vmentry))
 		return EXIT_FASTPATH_NONE;
 
 	vmx->loaded_vmcs->launched = 1;

arch/x86/kvm/vmx/vmx.h

@@ -70,6 +70,29 @@ struct pt_desc {
 	struct pt_ctx guest;
 };
 
+union vmx_exit_reason {
+	struct {
+		u32	basic			: 16;
+		u32	reserved16		: 1;
+		u32	reserved17		: 1;
+		u32	reserved18		: 1;
+		u32	reserved19		: 1;
+		u32	reserved20		: 1;
+		u32	reserved21		: 1;
+		u32	reserved22		: 1;
+		u32	reserved23		: 1;
+		u32	reserved24		: 1;
+		u32	reserved25		: 1;
+		u32	reserved26		: 1;
+		u32	enclave_mode		: 1;
+		u32	smi_pending_mtf		: 1;
+		u32	smi_from_vmx_root	: 1;
+		u32	reserved30		: 1;
+		u32	failed_vmentry		: 1;
+	};
+	u32 full;
+};
+
 /*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -244,7 +267,7 @@ struct vcpu_vmx {
 	int vpid;
 	bool emulation_required;
 
-	u32 exit_reason;
+	union vmx_exit_reason exit_reason;
 
 	/* Posted interrupt descriptor */
 	struct pi_desc pi_desc;