mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-02 07:04:24 +00:00
KVM: x86/xen: Take srcu lock when accessing kvm_memslots()
kvm_memslots() will be called by kvm_write_guest_offset_cached() so we should
take the srcu lock. Let's pull the srcu lock operation from kvm_steal_time_set_preempted()
again to fix the Xen part.
Fixes: 30b5c851af
("KVM: x86/xen: Add support for vCPU runstate information")
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1619166200-9215-1-git-send-email-wanpengli@tencent.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
bf05bf16c7
commit
9c1a07442c
1 changed file with 9 additions and 11 deletions
|
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
@@ -4025,7 +4025,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
 	struct kvm_host_map map;
 	struct kvm_steal_time *st;
-	int idx;
 
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
@@ -4033,15 +4032,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.st.preempted)
 		return;
 
-	/*
-	 * Take the srcu lock as memslots will be accessed to check the gfn
-	 * cache generation against the memslots generation.
-	 */
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-
 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
 			&vcpu->arch.st.cache, true))
-		goto out;
+		return;
 
 	st = map.hva +
 	     offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4049,20 +4042,25 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
-
-out:
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	int idx;
+
 	if (vcpu->preempted && !vcpu->arch.guest_state_protected)
 		vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
+	/*
+	 * Take the srcu lock as memslots will be accessed to check the gfn
+	 * cache generation against the memslots generation.
+	 */
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (kvm_xen_msr_enabled(vcpu->kvm))
 		kvm_xen_runstate_set_preempted(vcpu);
 	else
 		kvm_steal_time_set_preempted(vcpu);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	static_call(kvm_x86_vcpu_put)(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
Loading…
Reference in a new issue