KVM: x86: Split kvm_is_valid_cr4() and export only the non-vendor bits
commit c33f6f2228 upstream.
Split the common x86 parts of kvm_is_valid_cr4(), i.e. the reserved bits
checks, into a separate helper, __kvm_is_valid_cr4(), and export only the
inner helper to vendor code in order to prevent nested VMX from calling
back into vmx_is_valid_cr4() via kvm_is_valid_cr4().
On SVM, this is a nop as SVM doesn't place any additional restrictions on
CR4.
On VMX, this is also currently a nop, but only because nested VMX is
missing checks on reserved CR4 bits for nested VM-Enter. That bug will
be fixed in a future patch, and the fix could simply use kvm_is_valid_cr4() as-is,
but nVMX has _another_ bug where VMXON emulation doesn't enforce VMX's
restrictions on CR0/CR4. The cleanest and most intuitive way to fix the
VMXON bug is to use nested_host_cr{0,4}_valid(). If the CR4 variant
routes through kvm_is_valid_cr4(), using nested_host_cr4_valid() won't do
the right thing for the VMXON case as vmx_is_valid_cr4() enforces VMX's
restrictions if and only if the vCPU is post-VMXON.
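
For reference, the post-VMXON behavior called out above comes from vmx_is_valid_cr4(); the sketch below is abbreviated from the upstream function (same names, body trimmed to the two relevant checks) to show why the vendor hook cannot serve the VMXON-emulation path:

static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	/* VMX cannot be enabled under SMM (default SMM treatment). */
	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
		return false;

	/*
	 * VMX's CR4 fixed-bit restrictions are enforced only once the
	 * vCPU is post-VMXON, so routing VMXON emulation through
	 * kvm_is_valid_cr4() would skip them entirely.
	 */
	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
		return false;

	return true;
}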
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220607213604.3346000-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent aeb4c3e1c4
commit 9953f86a67
4 changed files with 14 additions and 7 deletions
arch/x86/kvm/svm/nested.c
@@ -275,7 +275,8 @@ static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
 			return false;
 	}
 
-	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
+	/* Note, SVM doesn't have any additional restrictions on CR4. */
+	if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
 		return false;
 
 	return true;
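For context, CC() is the nested consistency-check wrapper: it evaluates the check and, on failure, traces the stringified expression. A sketch of the nVMX definition follows; nSVM uses an equivalent macro, and the exact form varies by kernel version:

#define CC(consistency_check)						\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
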
arch/x86/kvm/vmx/vmx.c
@@ -3213,8 +3213,8 @@ static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	/*
 	 * We operate under the default treatment of SMM, so VMX cannot be
-	 * enabled under SMM. Note, whether or not VMXE is allowed at all is
-	 * handled by kvm_is_valid_cr4().
+	 * enabled under SMM. Note, whether or not VMXE is allowed at all,
+	 * i.e. is a reserved bit, is handled by common x86 code.
 	 */
 	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
 		return false;
arch/x86/kvm/x86.c
@@ -1031,7 +1031,7 @@ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
 
-bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & cr4_reserved_bits)
 		return false;
@@ -1039,9 +1039,15 @@ bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
 		return false;
 
-	return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
+	return true;
+}
+EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);
+
+static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	return __kvm_is_valid_cr4(vcpu, cr4) &&
+	       static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
 }
-EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);
 
 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
 {
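Common-code consumers are unaffected by kvm_is_valid_cr4() becoming static: guest CR4 writes still pass through both the reserved-bit checks and the vendor hook. An abbreviated sketch of the upstream caller, with the mode-transition checks between the two calls elided:

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);

	if (!kvm_is_valid_cr4(vcpu, cr4))
		return 1;

	/* ... long mode / PDPTR / PCID transition checks elided ... */

	kvm_post_set_cr4(vcpu, old_cr4, cr4);
	return 0;
}
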
arch/x86/kvm/x86.h
@@ -448,7 +448,7 @@ static inline void kvm_machine_check(void)
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
-bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 			      struct x86_exception *e);
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);