Merge remote-tracking branch 'kvmarm/misc-5.5' into kvmarm/next

Marc Zyngier 2019-11-08 11:27:29 +00:00
commit cd7056ae34
16 changed files with 97 additions and 63 deletions


@@ -95,12 +95,12 @@ static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
 	return (unsigned long *)&vcpu->arch.hcr;
 }
 
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr &= ~HCR_TWE;
 }
 
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr |= HCR_TWE;
 }


@@ -21,6 +21,10 @@
 #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
+	VCPU_STAT(halt_successful_poll),
+	VCPU_STAT(halt_attempted_poll),
+	VCPU_STAT(halt_poll_invalid),
+	VCPU_STAT(halt_wakeup),
 	VCPU_STAT(hvc_exit_stat),
 	VCPU_STAT(wfe_exit_stat),
 	VCPU_STAT(wfi_exit_stat),


@@ -61,7 +61,6 @@
  * RW:		64bit by default, can be overridden for 32bit VMs
  * TAC:		Trap ACTLR
  * TSC:		Trap SMC
- * TVM:		Trap VM ops (until M+C set in SCTLR_EL1)
  * TSW:		Trap cache operations by set/way
  * TWE:		Trap WFE
  * TWI:		Trap WFI
@@ -74,7 +73,7 @@
  * SWIO:	Turn set/way invalidates into set/way clean+invalidate
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
-			 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
+			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)


@@ -53,8 +53,18 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 		/* trap error record accesses */
 		vcpu->arch.hcr_el2 |= HCR_TERR;
 	}
-	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+
+	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
 		vcpu->arch.hcr_el2 |= HCR_FWB;
+	} else {
+		/*
+		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
+		 * get set in SCTLR_EL1 such that we can detect when the guest
+		 * MMU gets turned on and do the necessary cache maintenance
+		 * then.
+		 */
+		vcpu->arch.hcr_el2 |= HCR_TVM;
+	}
 
 	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
@@ -77,14 +87,19 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
 	return (unsigned long *)&vcpu->arch.hcr_el2;
 }
 
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 &= ~HCR_TWE;
+	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
+		vcpu->arch.hcr_el2 &= ~HCR_TWI;
+	else
+		vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 |= HCR_TWE;
+	vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
 static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)


@@ -34,6 +34,10 @@
 #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
+	VCPU_STAT(halt_successful_poll),
+	VCPU_STAT(halt_attempted_poll),
+	VCPU_STAT(halt_poll_invalid),
+	VCPU_STAT(halt_wakeup),
 	VCPU_STAT(hvc_exit_stat),
 	VCPU_STAT(wfe_exit_stat),
 	VCPU_STAT(wfi_exit_stat),


@@ -141,12 +141,17 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
 int its_schedule_vpe(struct its_vpe *vpe, bool on)
 {
 	struct its_cmd_info info;
+	int ret;
 
 	WARN_ON(preemptible());
 
 	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
 
-	return its_send_vpe_cmd(vpe, &info);
+	ret = its_send_vpe_cmd(vpe, &info);
+	if (!ret)
+		vpe->resident = on;
+
+	return ret;
 }
 
 int its_invall_vpe(struct its_vpe *vpe)


@@ -240,7 +240,7 @@ struct vgic_dist {
 	 * Contains the attributes and gpa of the LPI configuration table.
 	 * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
 	 * one address across all redistributors.
-	 * GICv3 spec: 6.1.2 "LPI Configuration tables"
+	 * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables"
 	 */
 	u64			propbaser;
 
@@ -378,8 +378,6 @@ static inline int kvm_vgic_get_max_vcpus(void)
 	return kvm_vgic_global_state.max_gic_vcpus;
 }
 
-int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-
 /**
  * kvm_vgic_setup_default_irq_routing:
  * Setup a default flat gsi routing table mapping all SPIs
@@ -396,7 +394,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
 int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
 				 struct kvm_kernel_irq_routing_entry *irq_entry);
 
-void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
-void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+int vgic_v4_load(struct kvm_vcpu *vcpu);
+int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
 
 #endif /* __KVM_ARM_VGIC_H */


@@ -32,9 +32,13 @@ struct its_vm {
 struct its_vpe {
 	struct page		*vpt_page;
 	struct its_vm		*its_vm;
+	/* per-vPE VLPI tracking */
+	atomic_t		vlpi_count;
 	/* Doorbell interrupt */
 	int			irq;
 	irq_hw_number_t		vpe_db_lpi;
+	/* VPE resident */
+	bool			resident;
 	/* VPE proxy mapping */
 	int			vpe_proxy_event;
 	/*


@@ -80,7 +80,7 @@ static inline bool userspace_irqchip(struct kvm *kvm)
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
-		      HRTIMER_MODE_ABS);
+		      HRTIMER_MODE_ABS_HARD);
 }
 
 static void soft_timer_cancel(struct hrtimer *hrt)
@@ -697,11 +697,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
 	ptimer->cntvoff = 0;
 
-	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	timer->bg_timer.function = kvm_bg_timer_expire;
 
-	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
+	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	vtimer->hrtimer.function = kvm_hrtimer_expire;
 	ptimer->hrtimer.function = kvm_hrtimer_expire;
 


@@ -348,20 +348,24 @@ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 	/*
 	 * If we're about to block (most likely because we've just hit a
 	 * WFI), we need to sync back the state of the GIC CPU interface
-	 * so that we have the lastest PMR and group enables. This ensures
+	 * so that we have the latest PMR and group enables. This ensures
 	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
 	 * whether we have pending interrupts.
+	 *
+	 * For the same reason, we want to tell GICv4 that we need
+	 * doorbells to be signalled, should an interrupt become pending.
 	 */
 	preempt_disable();
 	kvm_vgic_vmcr_sync(vcpu);
+	vgic_v4_put(vcpu, true);
 	preempt_enable();
-
-	kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	kvm_vgic_v4_disable_doorbell(vcpu);
+	preempt_disable();
+	vgic_v4_load(vcpu);
+	preempt_enable();
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -412,9 +416,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 
 	if (single_task_running())
-		vcpu_clear_wfe_traps(vcpu);
+		vcpu_clear_wfx_traps(vcpu);
 	else
-		vcpu_set_wfe_traps(vcpu);
+		vcpu_set_wfx_traps(vcpu);
 
 	vcpu_ptrauth_setup_lazy(vcpu);
 }


@@ -203,6 +203,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
+	atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
 
 	/*
 	 * Enable and configure all SGIs to be edge-triggered and


@@ -360,7 +360,10 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 		if (ret)
 			return ret;
 
+		if (map.vpe)
+			atomic_dec(&map.vpe->vlpi_count);
 		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+		atomic_inc(&map.vpe->vlpi_count);
 		ret = its_map_vlpi(irq->host_irq, &map);
 	}
 


@@ -357,14 +357,14 @@ retry:
 }
 
 /**
- * vgic_its_save_pending_tables - Save the pending tables into guest RAM
+ * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
  * kvm lock and all vcpu lock must be held
  */
 int vgic_v3_save_pending_tables(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
-	int last_byte_offset = -1;
 	struct vgic_irq *irq;
+	gpa_t last_ptr = ~(gpa_t)0;
 	int ret;
 	u8 val;
 
@@ -384,11 +384,11 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 		bit_nr = irq->intid % BITS_PER_BYTE;
 		ptr = pendbase + byte_offset;
 
-		if (byte_offset != last_byte_offset) {
+		if (ptr != last_ptr) {
 			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
 			if (ret)
 				return ret;
-			last_byte_offset = byte_offset;
+			last_ptr = ptr;
 		}
 
 		stored = val & (1U << bit_nr);
@@ -664,6 +664,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 
 	if (has_vhe())
 		__vgic_v3_activate_traps(vcpu);
+
+	WARN_ON(vgic_v4_load(vcpu));
 }
 
 void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
@@ -676,6 +678,8 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 
 void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
+	WARN_ON(vgic_v4_put(vcpu, false));
+
 	vgic_v3_vmcr_sync(vcpu);
 
 	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);


@@ -85,6 +85,10 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 {
 	struct kvm_vcpu *vcpu = info;
 
+	/* We got the message, no need to fire again */
+	if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
+		disable_irq_nosync(irq);
+
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
@@ -192,20 +196,30 @@ void vgic_v4_teardown(struct kvm *kvm)
 	its_vm->vpes = NULL;
 }
 
-int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
+int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
 {
-	if (!vgic_supports_direct_msis(vcpu->kvm))
+	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+	struct irq_desc *desc = irq_to_desc(vpe->irq);
+
+	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
 		return 0;
 
-	return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
+	/*
+	 * If blocking, a doorbell is required. Undo the nested
+	 * disable_irq() calls...
+	 */
+	while (need_db && irqd_irq_disabled(&desc->irq_data))
+		enable_irq(vpe->irq);
+
+	return its_schedule_vpe(vpe, false);
 }
 
-int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
+int vgic_v4_load(struct kvm_vcpu *vcpu)
 {
-	int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
 	int err;
 
-	if (!vgic_supports_direct_msis(vcpu->kvm))
+	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
 		return 0;
 
 	/*
@@ -214,11 +228,14 @@ int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * doc in drivers/irqchip/irq-gic-v4.c to understand how this
 	 * turns into a VMOVP command at the ITS level.
 	 */
-	err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
+	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
 	if (err)
 		return err;
 
-	err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
+	/* Disabled the doorbell, as we're about to enter the guest */
+	disable_irq_nosync(vpe->irq);
+
+	err = its_schedule_vpe(vpe, true);
 	if (err)
 		return err;
 
@@ -226,9 +243,7 @@ int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * Now that the VPE is resident, let's get rid of a potential
 	 * doorbell interrupt that would still be pending.
 	 */
-	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
-
-	return err;
+	return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
 }
 
 static struct vgic_its *vgic_get_its(struct kvm *kvm,
@@ -266,7 +281,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
 	mutex_lock(&its->its_lock);
 
-	/* Perform then actual DevID/EventID -> LPI translation. */
+	/* Perform the actual DevID/EventID -> LPI translation. */
 	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
 				   irq_entry->msi.data, &irq);
 	if (ret)
@@ -294,6 +309,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
 	irq->hw		= true;
 	irq->host_irq	= virq;
+	atomic_inc(&map.vpe->vlpi_count);
 
 out:
 	mutex_unlock(&its->its_lock);
@@ -327,6 +343,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 
 	WARN_ON(!(irq->hw && irq->host_irq == virq));
 	if (irq->hw) {
+		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
 		irq->hw = false;
 		ret = its_unmap_vlpi(virq);
 	}
@@ -335,21 +352,3 @@ out:
 	mutex_unlock(&its->its_lock);
 	return ret;
 }
-
-void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
-{
-	if (vgic_supports_direct_msis(vcpu->kvm)) {
-		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
-		if (irq)
-			enable_irq(irq);
-	}
-}
-
-void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
-{
-	if (vgic_supports_direct_msis(vcpu->kvm)) {
-		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
-		if (irq)
-			disable_irq(irq);
-	}
-}


@@ -857,8 +857,6 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
-	WARN_ON(vgic_v4_sync_hwstate(vcpu));
-
 	/* An empty ap_list_head implies used_lrs == 0 */
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
 		return;
@@ -882,8 +880,6 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
 /* Flush our emulation state into the GIC hardware before entering the guest. */
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
-	WARN_ON(vgic_v4_flush_hwstate(vcpu));
-
 	/*
 	 * If there are no virtual interrupts active or pending for this
 	 * VCPU, then there is no work to do and we can bail out without


@@ -316,7 +316,5 @@ void vgic_its_invalidate_cache(struct kvm *kvm);
 bool vgic_supports_direct_msis(struct kvm *kvm);
 int vgic_v4_init(struct kvm *kvm);
 void vgic_v4_teardown(struct kvm *kvm);
-int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
-int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
 
 #endif