KVM: x86: define SVM/VMX specific kvm_arch_[alloc|free]_vm

Define kvm_arch_[alloc|free]_vm in x86 as pass through functions
to new kvm_x86_ops vm_alloc and vm_free, and move the current
allocation logic as-is to SVM and VMX.  Vendor specific alloc/free
functions set the stage for SVM/VMX wrappers of 'struct kvm',
which will allow us to move the growing number of SVM/VMX specific
member variables out of 'struct kvm_arch'.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Sean Christopherson, 2018-03-20 12:17:18 -07:00; committed by Paolo Bonzini.
parent 3184a995f7
commit 434a1e9446
3 changed files with 37 additions and 0 deletions

View File

@ -948,6 +948,8 @@ struct kvm_x86_ops {
bool (*cpu_has_high_real_mode_segbase)(void);
void (*cpuid_update)(struct kvm_vcpu *vcpu);
struct kvm *(*vm_alloc)(void);
void (*vm_free)(struct kvm *);
int (*vm_init)(struct kvm *kvm);
void (*vm_destroy)(struct kvm *kvm);
@ -1121,6 +1123,17 @@ struct kvm_arch_async_pf {
extern struct kvm_x86_ops *kvm_x86_ops;
#define __KVM_HAVE_ARCH_VM_ALLOC
/*
 * Allocate the 'struct kvm' for a new VM by dispatching to the
 * vendor-specific (SVM/VMX) vm_alloc hook.  The vendor module owns the
 * allocation so it can later embed 'struct kvm' in a larger wrapper.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	struct kvm *kvm;

	kvm = kvm_x86_ops->vm_alloc();
	return kvm;
}
/*
 * Free a VM previously allocated via kvm_arch_alloc_vm() by dispatching
 * to the vendor-specific (SVM/VMX) vm_free hook.
 *
 * Fix: the original used 'return kvm_x86_ops->vm_free(kvm);'.  Returning
 * an expression from a void function is an ISO C constraint violation
 * (C11 6.8.6.4) and only builds as a compiler extension; call the hook
 * as a plain statement instead.
 */
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kvm_x86_ops->vm_free(kvm);
}
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

View File

@ -1717,6 +1717,16 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
kfree(region);
}
/*
 * SVM implementation of kvm_x86_ops->vm_alloc.  Currently a plain
 * zeroed 'struct kvm'; per the commit intent this will grow into an
 * SVM-specific wrapper of 'struct kvm'.
 *
 * Idiom fix: use sizeof(*kvm) rather than sizeof(struct kvm) so the
 * size stays tied to the pointer's type (kernel coding-style
 * recommendation), same behavior.
 */
static struct kvm *svm_vm_alloc(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(*kvm), GFP_KERNEL);
	return kvm;
}
/*
 * SVM implementation of kvm_x86_ops->vm_free: release a 'struct kvm'
 * obtained from svm_vm_alloc().  kfree(NULL) is a no-op, so a NULL
 * check is unnecessary.
 */
static void svm_vm_free(struct kvm *kvm)
{
kfree(kvm);
}
static void sev_vm_destroy(struct kvm *kvm)
{
struct kvm_sev_info *sev = &kvm->arch.sev_info;
@ -6881,6 +6891,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.vcpu_free = svm_free_vcpu,
.vcpu_reset = svm_vcpu_reset,
.vm_alloc = svm_vm_alloc,
.vm_free = svm_vm_free,
.vm_init = avic_vm_init,
.vm_destroy = svm_vm_destroy,

View File

@ -9763,6 +9763,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
}
STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
/*
 * VMX implementation of kvm_x86_ops->vm_alloc.  Currently a plain
 * zeroed 'struct kvm'; per the commit intent this will grow into a
 * VMX-specific wrapper of 'struct kvm'.
 *
 * Idiom fix: use sizeof(*kvm) rather than sizeof(struct kvm) so the
 * size stays tied to the pointer's type (kernel coding-style
 * recommendation), same behavior.
 */
static struct kvm *vmx_vm_alloc(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(*kvm), GFP_KERNEL);
	return kvm;
}
/*
 * VMX implementation of kvm_x86_ops->vm_free: release a 'struct kvm'
 * obtained from vmx_vm_alloc().  kfree(NULL) is a no-op, so a NULL
 * check is unnecessary.
 */
static void vmx_vm_free(struct kvm *kvm)
{
kfree(kvm);
}
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@ -12430,6 +12440,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
.vm_init = vmx_vm_init,
.vm_alloc = vmx_vm_alloc,
.vm_free = vmx_vm_free,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,