KVM: x86: Add WARNs to detect out-of-bounds register indices

Add WARN_ON_ONCE() checks in kvm_register_{read,write}() to detect reg
values that would cause KVM to overflow vcpu->arch.regs.  Change the reg
param to an 'int' to make it clear that the reg index is unverified.

Regarding the overhead of WARN_ON_ONCE(), now that all fixed GPR reads
and writes use dedicated accessors, e.g. kvm_rax_read(), the overhead
is limited to flows where the reg index is generated at runtime.  And
there is at least one historical bug where KVM has generated an out-of-
bounds access to arch.regs (see commit b68f3cc7d9, "KVM: x86: Always
use 32-bit SMRAM save state for 32-bit kernels").
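
For illustration only, a minimal user-space sketch of that failure mode follows;
it is loosely modeled on the bug above, but the struct layout, helper name, and
NR_VCPU_REGS value are invented for the example and are not KVM's code:

/* Hypothetical user-space mock, not KVM code: shows an unchecked accessor
 * being driven by a runtime-computed register index.
 */
#include <stdio.h>

#define NR_VCPU_REGS 17	/* illustrative: 16 GPRs + RIP on x86-64 */

struct mock_vcpu {
	unsigned long regs[NR_VCPU_REGS];
	unsigned long adjacent_state;	/* silently clobbered by an OOB write */
};

/* Pre-patch shape of kvm_register_write(): no bounds check at all. */
static void register_write_unchecked(struct mock_vcpu *vcpu, int reg,
				     unsigned long val)
{
	vcpu->regs[reg] = val;
}

int main(void)
{
	struct mock_vcpu vcpu = { .adjacent_state = 0xa5a5 };
	int reg;

	/* Off-by-one loop bound, i.e. a runtime-generated bad index. */
	for (reg = 0; reg <= NR_VCPU_REGS; reg++)
		register_write_unchecked(&vcpu, reg, reg);

	/* The final iteration wrote one slot past regs[]. */
	printf("adjacent_state = %#lx (was 0xa5a5)\n", vcpu.adjacent_state);
	return 0;
}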

Adding the WARN_ON_ONCE() protection paves the way for additional
cleanup related to kvm_reg and kvm_reg_ex.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson 2019-09-27 14:45:20 -07:00 committed by Paolo Bonzini
parent 491c1ad1ac
commit 489cbcf01d
2 changed files with 10 additions and 8 deletions

arch/x86/kvm/kvm_cache_regs.h

@@ -37,19 +37,23 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
 BUILD_KVM_GPR_ACCESSORS(r15, R15)
 #endif
 
-static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
-					      enum kvm_reg reg)
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 {
+	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
+		return 0;
+
 	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
 		kvm_x86_ops->cache_reg(vcpu, reg);
 
 	return vcpu->arch.regs[reg];
 }
 
-static inline void kvm_register_write(struct kvm_vcpu *vcpu,
-				      enum kvm_reg reg,
+static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
 				      unsigned long val)
 {
+	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
+		return;
+
 	vcpu->arch.regs[reg] = val;
 	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
 	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);

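Note on the check itself: casting the signed 'reg' to 'unsigned int' lets a
single comparison reject both negative and too-large indices, and the accessors
fail safe (out-of-bounds reads return 0, out-of-bounds writes are dropped).
Below is a minimal user-space sketch of that behavior, not KVM code; a plain
array and fprintf() stand in for struct kvm_vcpu and WARN_ON_ONCE(), and the
NR_VCPU_REGS value is illustrative.

/* User-space sketch of the bounds-checked accessors; a plain array and
 * fprintf() stand in for struct kvm_vcpu and WARN_ON_ONCE().
 */
#include <stdio.h>

#define NR_VCPU_REGS 17

static unsigned long regs[NR_VCPU_REGS];

static unsigned long register_read(int reg)
{
	/* The unsigned cast folds "reg < 0" and "reg >= NR_VCPU_REGS" into
	 * one compare: negative indices wrap to huge unsigned values.
	 */
	if ((unsigned int)reg >= NR_VCPU_REGS) {
		fprintf(stderr, "WARN: reg index %d out of bounds\n", reg);
		return 0;	/* fail safe: out-of-bounds reads yield 0 */
	}
	return regs[reg];
}

static void register_write(int reg, unsigned long val)
{
	if ((unsigned int)reg >= NR_VCPU_REGS) {
		fprintf(stderr, "WARN: reg index %d out of bounds\n", reg);
		return;		/* fail safe: out-of-bounds writes are dropped */
	}
	regs[reg] = val;
}

int main(void)
{
	register_write(0, 0x1234);		/* in bounds: stored */
	register_write(-1, 0x5678);		/* negative: warned, dropped */
	register_write(NR_VCPU_REGS, 0x9abc);	/* too large: warned, dropped */

	printf("regs[0] = %#lx, OOB read = %#lx\n",
	       register_read(0), register_read(100));
	return 0;
}
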
arch/x86/kvm/x86.h

@@ -238,8 +238,7 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 	return false;
 }
 
-static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
-					       enum kvm_reg reg)
+static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
 {
 	unsigned long val = kvm_register_read(vcpu, reg);
 
@@ -247,8 +246,7 @@ static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
 	return val;
 }
 
 static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
-				       enum kvm_reg reg,
-				       unsigned long val)
+				       int reg, unsigned long val)
 {
 	if (!is_64_bit_mode(vcpu))
 		val = (u32)val;
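
The *l wrappers keep their existing behavior of truncating values to 32 bits
when the vCPU is not in 64-bit mode; they simply pick up the new bounds check
from the underlying accessors. A hedged user-space sketch of the combined
semantics follows; is_64_bit_mode() is mocked as a plain flag and the types
are simplified stand-ins, not the real kvm_vcpu plumbing.

/* User-space sketch of kvm_register_readl()/writel() layered on the
 * bounds-checked accessors; is_64_bit_mode() is mocked as a flag.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_VCPU_REGS 17

static unsigned long regs[NR_VCPU_REGS];
static int vcpu_in_64_bit_mode;		/* 0: pretend 32-bit guest mode */

static unsigned long register_read(int reg)
{
	if ((unsigned int)reg >= NR_VCPU_REGS)
		return 0;		/* bounds check from the base accessor */
	return regs[reg];
}

static void register_write(int reg, unsigned long val)
{
	if ((unsigned int)reg >= NR_VCPU_REGS)
		return;
	regs[reg] = val;
}

/* Outside 64-bit mode, only the low 32 bits are visible. */
static unsigned long register_readl(int reg)
{
	unsigned long val = register_read(reg);

	return vcpu_in_64_bit_mode ? val : (uint32_t)val;
}

static void register_writel(int reg, unsigned long val)
{
	if (!vcpu_in_64_bit_mode)
		val = (uint32_t)val;
	register_write(reg, val);
}

int main(void)
{
	register_writel(0, 0xffffffff12345678UL);
	printf("regs[0] = %#lx\n", register_readl(0));	/* 0x12345678 */
	return 0;
}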