Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
KVM: arm64: Handle blocking WFIT instruction
When trapping a blocking WFIT instruction, take it into account when
computing the deadline of the background timer.

The state is tracked with a new vcpu flag, and is gated by a new CPU
capability, which isn't currently enabled.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220419182755.601427-6-maz@kernel.org
parent daf85a5f6b
commit 89f5074c50
4 changed files with 28 additions and 3 deletions
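The core of the change is the deadline computation: WFIT passes an absolute virtual-counter value in its Xt register, and the new wfit_delay_ns() helper (see the arch_timer.c hunks below) turns the remaining ticks into nanoseconds so the background timer can be programmed. The stand-alone sketch below shows only that tick-to-nanosecond arithmetic under assumed values; it is not the kernel helper, and the counter frequency, register values and the ticks_to_delay_ns name are invented for illustration.

/*
 * Illustrative sketch only: the arithmetic a wfit_delay_ns()-style helper
 * performs. The kernel reads the frequency and counter from CNTFRQ_EL0 /
 * CNTVCT_EL0 and the trapped Xt register; the values here are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Delay until the counter reaches 'deadline', in ns (0 if already past). */
static uint64_t ticks_to_delay_ns(uint64_t now, uint64_t deadline,
				  uint64_t cntfrq_hz)
{
	if (deadline <= now)
		return 0;	/* the timed wait has already expired */

	/*
	 * Scale remaining ticks to nanoseconds. This plain division can
	 * lose precision for huge deltas; the kernel uses a mult/shift
	 * pair derived from the counter frequency instead.
	 */
	return (deadline - now) * NSEC_PER_SEC / cntfrq_hz;
}

int main(void)
{
	uint64_t cntfrq_hz = 50000000ULL;	/* assumed 50 MHz counter */
	uint64_t now = 1000000ULL;		/* assumed current CNTVCT value */
	uint64_t deadline = now + 5000000ULL;	/* assumed WFIT Xt: 5M ticks ahead */

	printf("sleep for %llu ns\n",
	       (unsigned long long)ticks_to_delay_ns(now, deadline, cntfrq_hz));
	return 0;
}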
@@ -441,6 +441,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active  */
 #define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
 #define KVM_ARM64_ON_UNSUPPORTED_CPU	(1 << 15) /* Physical CPU not in supported_cpus */
+#define KVM_ARM64_WFIT			(1 << 16) /* WFIT instruction trapped */
 
 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
 				    KVM_GUESTDBG_USE_SW_BP | \
@@ -239,6 +239,20 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 	       (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
 }
 
+static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
+{
+	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
+		(vcpu->arch.flags & KVM_ARM64_WFIT));
+}
+
+static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
+	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
+
+	return kvm_counter_compute_delta(ctx, val);
+}
+
 /*
  * Returns the earliest expiration time in ns among guest timers.
  * Note that it will return 0 if none of timers can fire.
@@ -256,6 +270,9 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
 			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
 	}
 
+	if (vcpu_has_wfit_active(vcpu))
+		min_delta = min(min_delta, wfit_delay_ns(vcpu));
+
 	/* If none of timers can fire, then return 0 */
 	if (min_delta == ULLONG_MAX)
 		return 0;
@@ -355,7 +372,7 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
 }
 
 /*
@@ -481,7 +498,8 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
 	 */
 	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
 	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
-	    !kvm_timer_irq_can_fire(map.emul_ptimer))
+	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
+	    !vcpu_has_wfit_active(vcpu))
 		return;
 
 	/*
@@ -634,6 +634,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 	preempt_enable();
 
 	kvm_vcpu_halt(vcpu);
+	vcpu->arch.flags &= ~KVM_ARM64_WFIT;
 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 
 	preempt_disable();
@@ -85,16 +85,21 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
  * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
+ * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 {
-	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+
+	if (esr & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
 	} else {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;
+		if ((esr & (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT)) == (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT))
+			vcpu->arch.flags |= KVM_ARM64_WFIT;
 		kvm_vcpu_wfi(vcpu);
 	}
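In the last hunk, a trapped WFx is only treated as WFIT when the ESR's ISS reports both a valid register (RV) and the WFxT form of the instruction. A minimal stand-alone sketch of that classification follows; the bit positions and the ISS_* names are placeholders for illustration, not the real ESR_ELx_WFx_ISS_* encodings from asm/esr.h.

/*
 * Sketch of the WFE/WFI/WFIT classification done in kvm_handle_wfx().
 * Bit positions below are invented placeholders; the kernel's actual
 * definitions live in arch/arm64/include/asm/esr.h.
 */
#include <stdint.h>
#include <stdio.h>

#define ISS_WFE		(1ULL << 0)	/* placeholder: WFE (vs. WFI) */
#define ISS_WFxT	(1ULL << 1)	/* placeholder: timed variant */
#define ISS_RV		(1ULL << 2)	/* placeholder: Xt register valid */

static const char *classify_wfx(uint64_t iss)
{
	if (iss & ISS_WFE)
		return "WFE";
	/* Only a timed wait when both RV and WFxT are reported. */
	if ((iss & (ISS_RV | ISS_WFxT)) == (ISS_RV | ISS_WFxT))
		return "WFIT";
	return "WFI";
}

int main(void)
{
	printf("%s\n", classify_wfx(ISS_RV | ISS_WFxT));	/* prints WFIT */
	printf("%s\n", classify_wfx(0));			/* prints WFI  */
	return 0;
}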