diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 27cfac561745..12739999f19c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1182,22 +1182,6 @@ config HARDEN_BRANCH_PREDICTOR
 	  If unsure, say Y.
 
-config HARDEN_EL2_VECTORS
-	bool "Harden EL2 vector mapping against system register leak" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors can
-	  be used to leak privileged information such as the vector base
-	  register, resulting in a potential defeat of the EL2 layout
-	  randomization.
-
-	  This config option will map the vectors to a fixed location,
-	  independent of the EL2 code mapping, so that revealing VBAR_EL2
-	  to an attacker does not give away any extra information. This
-	  only gets enabled on affected CPUs.
-
-	  If unsure, say Y.
-
 config ARM64_SSBD
 	bool "Speculative Store Bypass Disable" if EXPERT
 	default y
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index dc37a5055137..fb1a922b31ba 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -191,7 +191,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 .macro get_vcpu_ptr vcpu, ctxt
 	get_host_ctxt \ctxt, \vcpu
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
-	kern_hyp_va	\vcpu
 .endm
 
 #endif
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 57f9042e7270..5d96c3472390 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -238,14 +238,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -270,64 +270,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -337,12 +337,12 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -366,7 +366,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
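The kvm_vcpu_get_hsr() -> kvm_vcpu_get_esr() rename above is mechanical: the helper still returns the raw ESR_EL2 syndrome word, only the name now matches the arm64 register. As a rough standalone illustration of how such a syndrome is decoded (the DEMO_* masks are local stand-ins for the kernel's ESR_ELx_* definitions and the example value is made up):

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's ESR_ELx_* field definitions. */
#define DEMO_ESR_EC_SHIFT	26
#define DEMO_ESR_EC_MASK	(0x3fu << DEMO_ESR_EC_SHIFT)
#define DEMO_ESR_IL		(1u << 25)
#define DEMO_ESR_ISS_MASK	0x01ffffffu

int main(void)
{
	uint32_t esr = 0x93c08007;	/* made-up data-abort style syndrome */

	printf("EC=%#x IL=%u ISS=%#x\n",
	       (esr & DEMO_ESR_EC_MASK) >> DEMO_ESR_EC_SHIFT,
	       (esr & DEMO_ESR_IL) ? 1u : 0u,
	       esr & DEMO_ESR_ISS_MASK);
	return 0;
}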
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index cf50c53e9357..04729b71c851 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -637,7 +637,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 	return is_midr_in_range(midr, &range) && has_dic;
 }
 
-#if defined(CONFIG_HARDEN_EL2_VECTORS)
+#ifdef CONFIG_RANDOMIZE_BASE
 
 static const struct midr_range ca57_a72[] = {
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -882,7 +882,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = check_branch_predictor,
 	},
-#ifdef CONFIG_HARDEN_EL2_VECTORS
+#ifdef CONFIG_RANDOMIZE_BASE
 	{
 		.desc = "EL2 vector hardening",
 		.capability = ARM64_HARDEN_EL2_VECTORS,
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 13489aff4440..318c8f2df245 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
 	  virtual machines.
 
 config KVM_INDIRECT_VECTORS
-	def_bool HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS
+	def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
 
 endif # KVM
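With the HARDEN_EL2_VECTORS Kconfig symbol gone, the build-time switch becomes CONFIG_RANDOMIZE_BASE while the run-time switch remains the ARM64_HARDEN_EL2_VECTORS capability still registered by cpu_errata.c above. A hypothetical helper (illustration only, not part of this patch) shows the shape of the combined check:

/*
 * Illustration only: combine the remaining build-time and run-time
 * conditions; names as found in v5.8-era arm64 code.
 */
static inline bool el2_vectors_need_hardening(void)
{
	return IS_ENABLED(CONFIG_RANDOMIZE_BASE) &&
	       cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS);
}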
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e52f2b2305b5..98f05bdac3c1 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -456,7 +456,6 @@ static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 
 /**
  * update_vmid - Update the vmid with a valid VMID for the current generation
- * @kvm: The guest that struct vmid belongs to
  * @vmid: The stage-2 VMID information struct
  */
 static void update_vmid(struct kvm_vmid *vmid)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 5a02d4c90559..98ab33139982 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -89,7 +89,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -119,13 +119,13 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int ret = 0;
 
 	run->exit_reason = KVM_EXIT_DEBUG;
-	run->debug.arch.hsr = hsr;
+	run->debug.arch.hsr = esr;
 
-	switch (ESR_ELx_EC(hsr)) {
+	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
 		/* fall through */
@@ -135,8 +135,8 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	case ESR_ELx_EC_BRK64:
 		break;
 	default:
-		kvm_err("%s: un-handled case hsr: %#08x\n",
-			__func__, (unsigned int) hsr);
+		kvm_err("%s: un-handled case esr: %#08x\n",
+			__func__, (unsigned int) esr);
 		ret = -1;
 		break;
 	}
@@ -146,10 +146,10 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
-	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
-		      hsr, esr_get_class_string(hsr));
+	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+		      esr, esr_get_class_string(esr));
 
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -200,10 +200,10 @@ static exit_handle_fn arm_exit_handlers[] = {
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	u8 hsr_ec = ESR_ELx_EC(hsr);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u8 esr_ec = ESR_ELx_EC(esr);
 
-	return arm_exit_handlers[hsr_ec];
+	return arm_exit_handlers[esr_ec];
 }
 
 /*
@@ -241,15 +241,15 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		int exception_index)
 {
 	if (ARM_SERROR_PENDING(exception_index)) {
-		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
 		/*
 		 * HVC/SMC already have an adjusted PC, which we need
 		 * to correct in order to return to after having
 		 * injected the SError.
 		 */
-		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
-		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64 ||
+		    esr_ec == ESR_ELx_EC_SMC32 || esr_ec == ESR_ELx_EC_SMC64) {
 			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
 			*vcpu_pc(vcpu) -= adj;
 		}
@@ -307,5 +307,5 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
-		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
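handle_exit.c keeps its table-driven dispatch on the exception class; only the local hsr*/esr* spellings change. A minimal standalone sketch of that pattern (invented handler names; DEMO_EC_WFX mirrors the architected WFI/WFE class 0x01; nothing below is kernel code):

#include <stdint.h>
#include <stdio.h>

typedef int (*exit_handler_fn)(uint32_t esr);

static int handle_unknown(uint32_t esr) { printf("unknown trap, esr=%#x\n", esr); return 0; }
static int handle_wfx(uint32_t esr)     { printf("wfi/wfe trap, esr=%#x\n", esr); return 1; }

#define DEMO_EC(esr)	((esr) >> 26)	/* EC lives in bits [31:26] */
#define DEMO_EC_WFX	0x01		/* architected WFI/WFE class */
#define DEMO_NR_EC	0x40

static exit_handler_fn handlers[DEMO_NR_EC] = {
	[0 ... DEMO_NR_EC - 1]	= handle_unknown,	/* GNU range initializer */
	[DEMO_EC_WFX]		= handle_wfx,
};

int main(void)
{
	uint32_t esr = (uint32_t)DEMO_EC_WFX << 26 | 1;	/* pretend WFE trap */

	return handlers[DEMO_EC(esr)](esr) ? 0 : 1;
}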
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index f9ff67dfbf0b..ae56d8a4b382 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -51,7 +51,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
 	int cond;
 
 	/* Top two bits non-zero? Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+	if (kvm_vcpu_get_esr(vcpu) >> 30)
 		return true;
 
 	/* Is condition field valid? */
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 784a581071fc..0511af14dc81 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -199,7 +199,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
 	bool vhe, sve_guest, sve_host;
-	u8 hsr_ec;
+	u8 esr_ec;
 
 	if (!system_supports_fpsimd())
 		return false;
@@ -219,14 +219,14 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		vhe = has_vhe();
 	}
 
-	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
-	    hsr_ec != ESR_ELx_EC_SVE)
+	esr_ec = kvm_vcpu_trap_get_class(vcpu);
+	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
+	    esr_ec != ESR_ELx_EC_SVE)
 		return false;
 
 	/* Don't handle SVE traps for non-SVE vcpus here: */
 	if (!sve_guest)
-		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
 			return false;
 
 	/* Valid trap. Switch the context: */
@@ -284,7 +284,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
 {
-	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
 	int rt = kvm_vcpu_sys_get_rt(vcpu);
 	u64 val = vcpu_get_reg(vcpu, rt);
 
@@ -379,7 +379,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	u64 val;
 
 	if (!vcpu_has_ptrauth(vcpu) ||
-	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;
 
 	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index d31eb6266f2e..5a0073511efb 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -426,7 +426,7 @@ static int __vgic_v3_bpr_min(void)
 
 static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 
 	return crm != 8;
@@ -978,7 +978,7 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
 	bool is_read;
 	u32 sysreg;
 
-	esr = kvm_vcpu_get_hsr(vcpu);
+	esr = kvm_vcpu_get_esr(vcpu);
 	if (vcpu_mode_is_32bit(vcpu)) {
 		if (!kvm_condition_valid(vcpu)) {
 			__kvm_skip_instr(vcpu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 41eedbfd961e..cd14c831d56f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2116,7 +2116,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * For RAS the host kernel may handle this abort.
 		 * There is no need to pass the error into the guest.
 		 */
-		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
+		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
 			return 1;
 
 		if (unlikely(!is_iabt)) {
@@ -2125,7 +2125,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		}
 	}
 
-	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
@@ -2134,7 +2134,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
-			(unsigned long)kvm_vcpu_get_hsr(vcpu));
+			(unsigned long)kvm_vcpu_get_esr(vcpu));
 		return -EFAULT;
 	}
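Several of the hunks above (kvm_vcpu_get_condition() in kvm_emulate.h, kvm_condition_valid32() in aarch32.c) read the condition information that AArch32 traps encode in the syndrome. A standalone sketch of that decode, assuming the usual layout (CV in bit 24, COND in bits [23:20], EC in the top six bits); the DEMO_* names are local:

#include <stdint.h>
#include <stdio.h>

#define DEMO_ESR_CV		(1u << 24)
#define DEMO_ESR_COND_SHIFT	20
#define DEMO_ESR_COND_MASK	(0xfu << DEMO_ESR_COND_SHIFT)

/*
 * Return the ARM condition code carried in the syndrome, or -1 when none
 * is encoded (the kernel then falls back to the PSTATE/IT bits).
 */
static int esr_condition(uint32_t esr)
{
	if (esr >> 30)			/* top two EC bits set: unconditional */
		return -1;
	if (!(esr & DEMO_ESR_CV))	/* condition field not valid */
		return -1;
	return (int)((esr & DEMO_ESR_COND_MASK) >> DEMO_ESR_COND_SHIFT);
}

int main(void)
{
	uint32_t esr = DEMO_ESR_CV | (0xeu << DEMO_ESR_COND_SHIFT);	/* 0xe = AL */

	printf("cond=%d\n", esr_condition(esr));
	return 0;
}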
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 39da5d55763d..1fbab5f066bf 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2220,10 +2220,10 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 				struct sys_reg_params *params)
 {
-	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
 	int cp = -1;
 
-	switch(hsr_ec) {
+	switch (esr_ec) {
 	case ESR_ELx_EC_CP15_32:
 	case ESR_ELx_EC_CP15_64:
 		cp = 15;
@@ -2252,17 +2252,17 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 			    size_t nr_global)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
-	int Rt2 = (hsr >> 10) & 0x1f;
+	int Rt2 = (esr >> 10) & 0x1f;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
-	params.CRm = (hsr >> 1) & 0xf;
-	params.is_write = ((hsr & 1) == 0);
+	params.CRm = (esr >> 1) & 0xf;
+	params.is_write = ((esr & 1) == 0);
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 16) & 0xf;
+	params.Op1 = (esr >> 16) & 0xf;
 	params.Op2 = 0;
 	params.CRn = 0;
 
@@ -2304,18 +2304,18 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 			    size_t nr_global)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
-	params.CRm = (hsr >> 1) & 0xf;
+	params.CRm = (esr >> 1) & 0xf;
 	params.regval = vcpu_get_reg(vcpu, Rt);
-	params.is_write = ((hsr & 1) == 0);
-	params.CRn = (hsr >> 10) & 0xf;
+	params.is_write = ((esr & 1) == 0);
+	params.CRn = (esr >> 10) & 0xf;
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 14) & 0x7;
-	params.Op2 = (hsr >> 17) & 0x7;
+	params.Op1 = (esr >> 14) & 0x7;
+	params.Op2 = (esr >> 17) & 0x7;
 
 	if (!emulate_cp(vcpu, &params, global, nr_global)) {
 		if (!params.is_write)
@@ -2397,7 +2397,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
-	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	unsigned long esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 	int ret;
 
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index a4f48c1ac28c..e0404bcab019 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -48,7 +48,7 @@ __init void kvm_compute_layout(void)
 	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
 	tag_val = hyp_va_msb;
 
-	if (tag_lsb != (vabits_actual - 1)) {
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
 		/* We have some free bits to insert a random tag. */
 		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
 	}
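The va_layout.c change only adds the IS_ENABLED(CONFIG_RANDOMIZE_BASE) guard; the tag arithmetic is untouched: bits between tag_lsb and the top of the hypervisor VA range are free, so a random value masked to that window can be OR-ed into tag_val. A standalone sketch of the masking, with a local GENMASK-style macro and made-up sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-in for the kernel's GENMASK_ULL(h, l). */
#define DEMO_GENMASK_ULL(h, l)	((~0ULL >> (63 - (h))) & (~0ULL << (l)))

int main(void)
{
	unsigned int vabits_actual = 48;	/* made-up VA size */
	unsigned int tag_lsb = 40;		/* made-up: bits [47:40] are free */
	uint64_t hyp_va_msb = 1ULL << (vabits_actual - 1);
	uint64_t tag_val = hyp_va_msb;

	if (tag_lsb != vabits_actual - 1) {
		/* Sprinkle random bits into the free window [46:40]. */
		uint64_t rnd = (uint64_t)random() << 31 | (uint64_t)random();

		tag_val |= rnd & DEMO_GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}

	printf("tag_val=%#llx (tag occupies bits [%u:%u])\n",
	       (unsigned long long)tag_val, vabits_actual - 1, tag_lsb);
	return 0;
}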
diff --git a/arch/arm64/kvm/vgic/vgic-irqfd.c b/arch/arm64/kvm/vgic/vgic-irqfd.c
index d8cdfea5cc96..79f8899b234c 100644
--- a/arch/arm64/kvm/vgic/vgic-irqfd.c
+++ b/arch/arm64/kvm/vgic/vgic-irqfd.c
@@ -100,19 +100,33 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 
 /**
  * kvm_arch_set_irq_inatomic: fast-path for irqfd injection
- *
- * Currently only direct MSI injection is supported.
  */
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
 			      struct kvm *kvm, int irq_source_id, int level,
 			      bool line_status)
 {
-	if (e->type == KVM_IRQ_ROUTING_MSI && vgic_has_its(kvm) && level) {
+	if (!level)
+		return -EWOULDBLOCK;
+
+	switch (e->type) {
+	case KVM_IRQ_ROUTING_MSI: {
 		struct kvm_msi msi;
 
+		if (!vgic_has_its(kvm))
+			break;
+
 		kvm_populate_msi(e, &msi);
-		if (!vgic_its_inject_cached_translation(kvm, &msi))
-			return 0;
+		return vgic_its_inject_cached_translation(kvm, &msi);
+	}
+
+	case KVM_IRQ_ROUTING_IRQCHIP:
+		/*
+		 * Injecting SPIs is always possible in atomic context
+		 * as long as the damn vgic is initialized.
+		 */
+		if (unlikely(!vgic_initialized(kvm)))
+			break;
+		return vgic_irqfd_set_irq(e, kvm, irq_source_id, 1, line_status);
 	}
 
 	return -EWOULDBLOCK;
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index c012a52b19f5..40cbaca81333 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -757,9 +757,8 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
 
 	db = (u64)msi->address_hi << 32 | msi->address_lo;
 	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
-
 	if (!irq)
-		return -1;
+		return -EWOULDBLOCK;
 
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index d2339a2b9fb9..5c786b915cd3 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -389,7 +389,7 @@ u64 vgic_sanitise_outer_cacheability(u64 field)
 	case GIC_BASER_CACHE_nC:
 		return field;
 	default:
-		return GIC_BASER_CACHE_nC;
+		return GIC_BASER_CACHE_SameAsInner;
 	}
 }
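The vgic-irqfd.c and vgic-its.c hunks share one contract: the irqfd fast path now reports "cannot inject atomically" as -EWOULDBLOCK instead of an ad-hoc 0/-1 pair, which is the value the generic irqfd code uses to decide on the slow path. Roughly, the caller side looks like this (paraphrased from virt/kvm/eventfd.c; illustration only, field names may differ):

/*
 * Paraphrased caller pattern (see irqfd_wakeup() in virt/kvm/eventfd.c);
 * illustration only. If the atomic fast path cannot deliver the interrupt,
 * fall back to the workqueue-based injection, which is allowed to sleep.
 */
if (kvm_arch_set_irq_inatomic(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
			      1, false) == -EWOULDBLOCK)
	schedule_work(&irqfd->inject);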