This is the 6.6.26 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmYWo+QACgkQONu9yGCS
aT4EyA/9HiX9wHZIjgGLLDkePE5LzfveO5R+8kqqTGQUGsgm5uMA6AO0O0Jym1xk
zkdlsell4XqR/lw3JQEi/dDYskXN4z8qOb9yjZ8FAYa/hPaHdv8x0kUE6YG4fag8
qzCUwozdTsmYhKVUeNfD6PJsFnMBP4x/9vYwZXsO5Fy5yKbJYD6gl5LVmK5Y6gpI
kY9P/+39u6HGtd4zA5yHdo++CFmyLz0+QcMNm6aBkfJwCcbhzjS6MDkjilE7uumc
AtyHieDEwGvIjPTB+j4uPaOzGFMa93k8GiACniGaXOsBV7WmH1YUbB5VpK8i282f
peP+vnhxFhyGMwkRBXZ5KXikqG5JmpXptL4+URzGrWje7vv5Tk/nqJKP/ttgfs71
gfFkEla14aVsfD4iVuVtmV49iVgqKKjsLv3AqBHlV4e8YolypcfuPg88PrhnetNo
lTUrojsvMjXNQZVnzFp6z7vpdbBLjgExJ1nwsVP9S4aUDtWu6FKsDTKmyxzWielq
m3WxNEi10T3nYx2rzD2uNqWR7YxG9tiNDhc6U410wHpaUmpmu7/AZ+rcD9ufB+0n
XOhncKT+SxY/BXBTO3JZUq4E/lZZbnArLvV7TLUG0/w5D1XH2oGK7FWYMBF65Dtm
MAoVPTKaMLG2dIpETJxlqI9k8wWchBUoQKN1DUtIOS8xs4CnpCY=
=1XJZ
-----END PGP SIGNATURE-----

Merge v6.6.26

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit cd7e581d75
@@ -138,11 +138,10 @@ associated with the source address of the indirect branch. Specifically,
the BHB might be shared across privilege levels even in the presence of
Enhanced IBRS.

Currently the only known real-world BHB attack vector is via
unprivileged eBPF. Therefore, it's highly recommended to not enable
unprivileged eBPF, especially when eIBRS is used (without retpolines).
For a full mitigation against BHB attacks, it's recommended to use
retpolines (or eIBRS combined with retpolines).
Previously the only known real-world BHB attack vector was via unprivileged
eBPF. Further research has found attacks that don't require unprivileged eBPF.
For a full mitigation against BHB attacks it is recommended to set BHI_DIS_S or
use the BHB clearing sequence.

Attack scenarios
----------------

@@ -430,6 +429,23 @@ The possible values in this file are:
  'PBRSB-eIBRS: Not affected'  CPU is not affected by PBRSB
  ===========================  =======================================================

  - Branch History Injection (BHI) protection status:

  .. list-table::

     * - BHI: Not affected
       - System is not affected
     * - BHI: Retpoline
       - System is protected by retpoline
     * - BHI: BHI_DIS_S
       - System is protected by BHI_DIS_S
     * - BHI: SW loop; KVM SW loop
       - System is protected by software clearing sequence
     * - BHI: Syscall hardening
       - Syscalls are hardened against BHI
     * - BHI: Syscall hardening; KVM: SW loop
       - System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence

Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
report the vulnerability.

@@ -484,7 +500,11 @@ Spectre variant 2

Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
boot, by setting the IBRS bit, and they're automatically protected against
Spectre v2 variant attacks.
some Spectre v2 variant attacks. The BHB can still influence the choice of
indirect branch predictor entry, and although branch predictor entries are
isolated between modes when eIBRS is enabled, the BHB itself is not isolated
between modes. Systems which support BHI_DIS_S will set it to protect against
BHI attacks.

On Intel's enhanced IBRS systems, this includes cross-thread branch target
injections on SMT systems (STIBP). In other words, Intel eIBRS enables

@@ -638,6 +658,22 @@ kernel command line.
                spectre_v2=off. Spectre variant 1 mitigations
                cannot be disabled.

        spectre_bhi=

                [X86] Control mitigation of Branch History Injection
                (BHI) vulnerability. Syscalls are hardened against BHI
                regardless of this setting. This setting affects the deployment
                of the HW BHI control and the SW BHB clearing sequence.

                on
                        unconditionally enable.
                off
                        unconditionally disable.
                auto
                        enable if hardware mitigation
                        control (BHI_DIS_S) is available, otherwise
                        enable alternate mitigation in KVM.

For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt

Mitigation selection guide
@@ -5920,6 +5920,18 @@
        sonypi.*=       [HW] Sony Programmable I/O Control Device driver
                        See Documentation/admin-guide/laptops/sonypi.rst

        spectre_bhi=    [X86] Control mitigation of Branch History Injection
                        (BHI) vulnerability. Syscalls are hardened against BHI
                        regardless of this setting. This setting affects the
                        deployment of the HW BHI control and the SW BHB
                        clearing sequence.

                        on   - unconditionally enable.
                        off  - unconditionally disable.
                        auto - (default) enable hardware mitigation
                               (BHI_DIS_S) if available, otherwise enable
                               alternate mitigation in KVM.

        spectre_v2=     [X86] Control mitigation of Spectre variant 2
                        (indirect branch speculation) vulnerability.
                        The default operation protects the kernel from
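As an aside for readers following the new parameter: the on/off/auto policy it describes can be pictured with a small standalone sketch in C. This is an illustrative model only, not the kernel's actual mitigation-selection code; the names (bhi_cmd, bhi_select_mitigation, cpu_has_bhi_dis_s) are invented for the example.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical model of the spectre_bhi= policy described above. */
    enum bhi_cmd { BHI_CMD_ON, BHI_CMD_OFF, BHI_CMD_AUTO };

    /* Stand-in for a CPU feature probe; real code queries CPUID/MSRs. */
    static bool cpu_has_bhi_dis_s(void) { return true; }

    static const char *bhi_select_mitigation(enum bhi_cmd cmd)
    {
            if (cmd == BHI_CMD_OFF)
                    return "vulnerable (mitigation disabled)";
            /* Prefer the hardware control when the CPU offers it... */
            if (cpu_has_bhi_dis_s())
                    return "BHI_DIS_S";
            /* ...otherwise fall back to the SW clearing loop; under
             * "auto" the loop is only armed for the KVM exit path. */
            return cmd == BHI_CMD_AUTO ? "SW loop (KVM only)" : "SW loop";
    }

    int main(void)
    {
            printf("auto -> %s\n", bhi_select_mitigation(BHI_CMD_AUTO));
            return 0;
    }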
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 25
SUBLEVEL = 26
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
@@ -970,6 +970,8 @@ ap_spi_fp: &spi10 {
                vddrf-supply = <&pp1300_l2c>;
                vddch0-supply = <&pp3300_l10c>;
                max-speed = <3200000>;

                qcom,local-bd-address-broken;
        };
};
@@ -728,7 +728,6 @@ static void sve_init_header_from_task(struct user_sve_header *header,
{
        unsigned int vq;
        bool active;
        bool fpsimd_only;
        enum vec_type task_type;

        memset(header, 0, sizeof(*header));

@@ -744,12 +743,10 @@ static void sve_init_header_from_task(struct user_sve_header *header,
        case ARM64_VEC_SVE:
                if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
                        header->flags |= SVE_PT_VL_INHERIT;
                fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
                break;
        case ARM64_VEC_SME:
                if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
                        header->flags |= SVE_PT_VL_INHERIT;
                fpsimd_only = false;
                break;
        default:
                WARN_ON_ONCE(1);

@@ -757,7 +754,7 @@ static void sve_init_header_from_task(struct user_sve_header *header,
        }

        if (active) {
                if (fpsimd_only) {
                if (target->thread.fp_type == FP_STATE_FPSIMD) {
                        header->flags |= SVE_PT_REGS_FPSIMD;
                } else {
                        header->flags |= SVE_PT_REGS_SVE;
@@ -805,12 +805,15 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
         * Perform the appropriate TLB invalidation based on the
         * evicted pte value (if any).
         */
        if (kvm_pte_table(ctx->old, ctx->level))
                kvm_tlb_flush_vmid_range(mmu, ctx->addr,
                                        kvm_granule_size(ctx->level));
        else if (kvm_pte_valid(ctx->old))
        if (kvm_pte_table(ctx->old, ctx->level)) {
                u64 size = kvm_granule_size(ctx->level);
                u64 addr = ALIGN_DOWN(ctx->addr, size);

                kvm_tlb_flush_vmid_range(mmu, addr, size);
        } else if (kvm_pte_valid(ctx->old)) {
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
                             ctx->addr, ctx->level);
        }
}

if (stage2_pte_is_counted(ctx->old))
@@ -876,7 +876,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                emit(A64_UXTH(is64, dst, dst), ctx);
                break;
        case 32:
                emit(A64_REV32(is64, dst, dst), ctx);
                emit(A64_REV32(0, dst, dst), ctx);
                /* upper 32 bits already cleared */
                break;
        case 64:

@@ -1189,7 +1189,7 @@ emit_cond_jmp:
        } else {
                emit_a64_mov_i(1, tmp, off, ctx);
                if (sign_extend)
                        emit(A64_LDRSW(dst, src_adj, off_adj), ctx);
                        emit(A64_LDRSW(dst, src, tmp), ctx);
                else
                        emit(A64_LDR32(dst, src, tmp), ctx);
        }
@@ -130,7 +130,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,

        WARN_ON(pte_hw_valid(pud_pte(*pudp)));
        assert_spin_locked(pud_lockptr(mm, pudp));
        WARN_ON(!(pud_large(pud)));
        WARN_ON(!(pud_leaf(pud)));
#endif
        trace_hugepage_set_pud(addr, pud_val(pud));
        return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
@@ -319,7 +319,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)

#define __get_kernel_nofault(dst, src, type, err_label)                 \
do {                                                                    \
        long __kr_err;                                                  \
        long __kr_err = 0;                                              \
                                                                        \
        __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);  \
        if (unlikely(__kr_err))                                         \

@@ -328,7 +328,7 @@ do {                                                                    \

#define __put_kernel_nofault(dst, src, type, err_label)                 \
do {                                                                    \
        long __kr_err;                                                  \
        long __kr_err = 0;                                              \
                                                                        \
        __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);  \
        if (unlikely(__kr_err))                                         \
@@ -34,7 +34,7 @@
#define AT_L3_CACHEGEOMETRY     47

/* entries in ARCH_DLINFO */
#define AT_VECTOR_SIZE_ARCH     9
#define AT_VECTOR_SIZE_ARCH     10
#define AT_MINSIGSTKSZ          51

#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
@@ -80,6 +80,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
         */
        lockdep_assert_held(&text_mutex);

        preempt_disable();

        if (across_pages)
                patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

@@ -92,6 +94,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
        if (across_pages)
                patch_unmap(FIX_TEXT_POKE1);

        preempt_enable();

        return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

@@ -122,6 +126,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
        if (!riscv_patch_in_stop_machine)
                lockdep_assert_held(&text_mutex);

        preempt_disable();

        if (across_pages)
                patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

@@ -134,6 +140,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
        if (across_pages)
                patch_unmap(FIX_TEXT_POKE1);

        preempt_enable();

        return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
@@ -26,8 +26,6 @@
#include <asm/cpuidle.h>
#include <asm/vector.h>

register unsigned long gp_in_global __asm__("gp");

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;

@@ -186,7 +184,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
        if (unlikely(args->fn)) {
                /* Kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gp = gp_in_global;
                /* Supervisor/Machine, irqs on: */
                childregs->status = SR_PP | SR_PIE;
@@ -137,11 +137,21 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
        raw_spin_lock_irqsave(&irqd->lock, flags);

        sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
        if (!pending &&
            ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
             (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))
        if (sm == APLIC_SOURCECFG_SM_INACTIVE)
                goto skip_write_pending;

        if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
            sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
                if (!pending)
                        goto skip_write_pending;
                if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
                    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
                        goto skip_write_pending;
                if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
                    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
                        goto skip_write_pending;
        }

        if (pending)
                irqd->state |= APLIC_IRQ_STATE_PENDING;
        else

@@ -187,16 +197,31 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)

static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
        bool ret;
        unsigned long flags;
        u32 sourcecfg, sm, raw_input, irq_inverted;
        struct aplic_irq *irqd;
        unsigned long flags;
        bool ret = false;

        if (!irq || aplic->nr_irqs <= irq)
                return false;
        irqd = &aplic->irqs[irq];

        raw_spin_lock_irqsave(&irqd->lock, flags);
        ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;

        sourcecfg = irqd->sourcecfg;
        if (sourcecfg & APLIC_SOURCECFG_D)
                goto skip;

        sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
        if (sm == APLIC_SOURCECFG_SM_INACTIVE)
                goto skip;

        raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
        irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
                        sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
        ret = !!(raw_input ^ irq_inverted);

skip:
        raw_spin_unlock_irqrestore(&irqd->lock, flags);

        return ret;
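The rewritten aplic_read_input() derives the effective input from the raw wire state and the source mode's polarity. A minimal standalone sketch of that XOR, with hypothetical names standing in for the APLIC constants:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical source modes, mirroring the inverted/non-inverted split. */
    enum src_mode { SM_LEVEL_HIGH, SM_LEVEL_LOW, SM_EDGE_RISE, SM_EDGE_FALL };

    /* Effective input = raw wire state XOR "this mode is active-low". */
    static bool effective_input(bool raw_input, enum src_mode sm)
    {
            bool inverted = (sm == SM_LEVEL_LOW || sm == SM_EDGE_FALL);
            return raw_input ^ inverted;
    }

    int main(void)
    {
            /* A low wire on an active-low source reads as asserted. */
            printf("%d\n", effective_input(false, SM_LEVEL_LOW)); /* 1 */
            printf("%d\n", effective_input(true, SM_LEVEL_HIGH)); /* 1 */
            return 0;
    }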
@@ -360,7 +360,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
                }
                pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                pud_populate(&init_mm, pud, pmd);
        } else if (pud_large(*pud)) {
        } else if (pud_leaf(*pud)) {
                continue;
        }
        pgtable_pmd_populate(pud, addr, next, mode);
@@ -729,7 +729,7 @@ static inline int pud_bad(pud_t pud)
{
        unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

        if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
        if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
                return 1;
        if (type < _REGION_ENTRY_TYPE_R3)
                return 0;

@@ -1396,7 +1396,7 @@ static inline unsigned long pud_deref(pud_t pud)
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        if (pud_large(pud))
        if (pud_leaf(pud))
                origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
        return (unsigned long)__va(pud_val(pud) & origin_mask);
}
@@ -653,6 +653,7 @@ SYM_DATA_START_LOCAL(daton_psw)
SYM_DATA_END(daton_psw)

        .section .rodata, "a"
        .balign 8
#define SYSCALL(esame,emu)      .quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
@@ -596,7 +596,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
        pud = pud_offset(p4d, vmaddr);
        VM_BUG_ON(pud_none(*pud));
        /* large puds cannot yet be handled */
        if (pud_large(*pud))
        if (pud_leaf(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
@@ -224,7 +224,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
        if (p4d_present(*p4dp)) {
                pudp = pud_offset(p4dp, addr);
                if (pud_present(*pudp)) {
                        if (pud_large(*pudp))
                        if (pud_leaf(*pudp))
                                return (pte_t *) pudp;
                        pmdp = pmd_offset(pudp, addr);
                }

@@ -240,7 +240,7 @@ int pmd_huge(pmd_t pmd)

int pud_huge(pud_t pud)
{
        return pud_large(pud);
        return pud_leaf(pud);
}

bool __init arch_hugetlb_valid_size(unsigned long size)
@@ -274,7 +274,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
        if (pud_none(*pudp))
                return -EINVAL;
        next = pud_addr_end(addr, end);
        if (pud_large(*pudp)) {
        if (pud_leaf(*pudp)) {
                need_split  = !!(flags & SET_MEMORY_4K);
                need_split |= !!(addr & ~PUD_MASK);
                need_split |= !!(addr + PUD_SIZE > next);
@@ -479,7 +479,7 @@ static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
                return -ENOENT;

        /* Large PUDs are not supported yet. */
        if (pud_large(*pud))
        if (pud_leaf(*pud))
                return -EFAULT;

        *pmdp = pmd_offset(pud, addr);
@@ -322,7 +322,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
                if (!add) {
                        if (pud_none(*pud))
                                continue;
                        if (pud_large(*pud)) {
                        if (pud_leaf(*pud)) {
                                if (IS_ALIGNED(addr, PUD_SIZE) &&
                                    IS_ALIGNED(next, PUD_SIZE)) {
                                        pud_clear(pud);

@@ -343,7 +343,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
                        if (!pmd)
                                goto out;
                        pud_populate(&init_mm, pud, pmd);
                } else if (pud_large(*pud)) {
                } else if (pud_leaf(*pud)) {
                        continue;
                }
                ret = modify_pmd_table(pud, addr, next, add, direct);

@@ -586,7 +586,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
                if (!pmd)
                        goto out;
                pud_populate(&init_mm, pud, pmd);
        } else if (WARN_ON_ONCE(pud_large(*pud))) {
        } else if (WARN_ON_ONCE(pud_leaf(*pud))) {
                goto out;
        }
        pmd = pmd_offset(pud, addr);
@@ -516,11 +516,12 @@ static void bpf_skip(struct bpf_jit *jit, int size)
 * PLT for hotpatchable calls. The calling convention is the same as for the
 * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
 */
extern const char bpf_plt[];
extern const char bpf_plt_ret[];
extern const char bpf_plt_target[];
extern const char bpf_plt_end[];
#define BPF_PLT_SIZE 32
struct bpf_plt {
        char code[16];
        void *ret;
        void *target;
} __packed;
extern const struct bpf_plt bpf_plt;
asm(
        ".pushsection .rodata\n"
        "       .balign 8\n"

@@ -531,15 +532,14 @@ asm(
        "       .balign 8\n"
        "bpf_plt_ret: .quad 0\n"
        "bpf_plt_target: .quad 0\n"
        "bpf_plt_end:\n"
        "       .popsection\n"
);

static void bpf_jit_plt(void *plt, void *ret, void *target)
static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
{
        memcpy(plt, bpf_plt, BPF_PLT_SIZE);
        *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
        *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
        memcpy(plt, &bpf_plt, sizeof(*plt));
        plt->ret = ret;
        plt->target = target;
}

/*

@@ -662,9 +662,9 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
        jit->prg = ALIGN(jit->prg, 8);
        jit->prologue_plt = jit->prg;
        if (jit->prg_buf)
                bpf_jit_plt(jit->prg_buf + jit->prg,
                bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
                            jit->prg_buf + jit->prologue_plt_ret, NULL);
        jit->prg += BPF_PLT_SIZE;
        jit->prg += sizeof(struct bpf_plt);
}

static int get_probe_mem_regno(const u8 *insn)

@@ -1901,9 +1901,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
        struct bpf_jit jit;
        int pass;

        if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
                return orig_fp;

        if (!fp->jit_requested)
                return orig_fp;

@@ -2009,14 +2006,11 @@ bool bpf_jit_supports_far_kfunc_call(void)
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *old_addr, void *new_addr)
{
        struct bpf_plt expected_plt, current_plt, new_plt, *plt;
        struct {
                u16 opc;
                s32 disp;
        } __packed insn;
        char expected_plt[BPF_PLT_SIZE];
        char current_plt[BPF_PLT_SIZE];
        char new_plt[BPF_PLT_SIZE];
        char *plt;
        char *ret;
        int err;

@@ -2035,18 +2029,18 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
         */
        } else {
                /* Verify the PLT. */
                plt = (char *)ip + (insn.disp << 1);
                err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
                plt = ip + (insn.disp << 1);
                err = copy_from_kernel_nofault(&current_plt, plt,
                                               sizeof(current_plt));
                if (err < 0)
                        return err;
                ret = (char *)ip + 6;
                bpf_jit_plt(expected_plt, ret, old_addr);
                if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
                bpf_jit_plt(&expected_plt, ret, old_addr);
                if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
                        return -EINVAL;
                /* Adjust the call address. */
                bpf_jit_plt(new_plt, ret, new_addr);
                s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
                                  new_plt + (bpf_plt_target - bpf_plt),
                bpf_jit_plt(&new_plt, ret, new_addr);
                s390_kernel_write(&plt->target, &new_plt.target,
                                  sizeof(void *));
        }
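The s390 JIT change above replaces raw byte-offset arithmetic on a char buffer with a typed struct overlay, so the two patch sites become named fields. A minimal standalone sketch of that pattern (names invented for the example, not the kernel's code):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: a typed overlay makes the patch sites explicit
     * instead of computing byte offsets from label differences. */
    struct plt {
            char code[16];   /* trampoline instructions (opaque here) */
            void *ret;       /* patched: return address */
            void *target;    /* patched: call target */
    };

    static void plt_init(struct plt *p, const struct plt *tmpl,
                         void *ret, void *target)
    {
            memcpy(p, tmpl, sizeof(*p)); /* copy template, then fix fields */
            p->ret = ret;
            p->target = target;
    }

    int main(void)
    {
            static const struct plt template; /* zero-filled stand-in */
            struct plt p;

            plt_init(&p, &template, (void *)0x1000, (void *)0x2000);
            printf("target=%p\n", p.target);
            return 0;
    }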
@@ -1665,7 +1665,7 @@ bool kern_addr_valid(unsigned long addr)
        if (pud_none(*pud))
                return false;

        if (pud_large(*pud))
        if (pud_leaf(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
@@ -2566,6 +2566,31 @@ config MITIGATION_RFDS
          stored in floating point, vector and integer registers.
          See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>

choice
        prompt "Clear branch history"
        depends on CPU_SUP_INTEL
        default SPECTRE_BHI_ON
        help
          Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
          where the branch history buffer is poisoned to speculatively steer
          indirect branches.
          See <file:Documentation/admin-guide/hw-vuln/spectre.rst>

config SPECTRE_BHI_ON
        bool "on"
        help
          Equivalent to setting spectre_bhi=on command line parameter.
config SPECTRE_BHI_OFF
        bool "off"
        help
          Equivalent to setting spectre_bhi=off command line parameter.
config SPECTRE_BHI_AUTO
        bool "auto"
        help
          Equivalent to setting spectre_bhi=auto command line parameter.

endchoice

endif

config ARCH_HAS_ADD_PAGES
@@ -84,7 +84,7 @@ LDFLAGS_vmlinux += -T
hostprogs      := mkpiggy
HOST_EXTRACFLAGS += -I$(srctree)/tools/include

sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'

quiet_cmd_voffset = VOFFSET $@
      cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
@@ -330,6 +330,7 @@ static size_t parse_elf(void *output)
        return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
}

const unsigned long kernel_text_size = VO___start_rodata - VO__text;
const unsigned long kernel_total_size = VO__end - VO__text;

static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);

@@ -357,6 +358,19 @@ unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
        return entry;
}

/*
 * Set the memory encryption xloadflag based on the mem_encrypt= command line
 * parameter, if provided.
 */
static void parse_mem_encrypt(struct setup_header *hdr)
{
        int on = cmdline_find_option_bool("mem_encrypt=on");
        int off = cmdline_find_option_bool("mem_encrypt=off");

        if (on > off)
                hdr->xloadflags |= XLF_MEM_ENCRYPTION;
}

/*
 * The compressed kernel image (ZO), has been moved so that its position
 * is against the end of the buffer used to hold the uncompressed kernel

@@ -387,6 +401,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
        /* Clear flags intended for solely in-kernel use. */
        boot_params->hdr.loadflags &= ~KASLR_FLAG;

        parse_mem_encrypt(&boot_params->hdr);

        sanitize_boot_params(boot_params);

        if (boot_params->screen_info.orig_video_mode == 7) {
@@ -116,6 +116,9 @@ static bool fault_in_kernel_space(unsigned long address)
#undef __init
#define __init

#undef __head
#define __head

#define __BOOT_COMPRESSED

/* Basic instruction decoding support needed */
@@ -3,13 +3,17 @@
 * Confidential Computing Platform Capability checks
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc.
 * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/export.h>
#include <linux/cc_platform.h>
#include <linux/string.h>
#include <linux/random.h>

#include <asm/archrandom.h>
#include <asm/coco.h>
#include <asm/processor.h>

@@ -148,3 +152,40 @@ u64 cc_mkdec(u64 val)
        }
}
EXPORT_SYMBOL_GPL(cc_mkdec);

__init void cc_random_init(void)
{
        /*
         * The seed is 32 bytes (in units of longs), which is 256 bits, which
         * is the security level that the RNG is targeting.
         */
        unsigned long rng_seed[32 / sizeof(long)];
        size_t i, longs;

        if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;

        /*
         * Since the CoCo threat model includes the host, the only reliable
         * source of entropy that can be neither observed nor manipulated is
         * RDRAND. Usually, RDRAND failure is considered tolerable, but since
         * CoCo guests have no other unobservable source of entropy, it's
         * important to at least ensure the RNG gets some initial random seeds.
         */
        for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) {
                longs = arch_get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i);

                /*
                 * A zero return value means that the guest doesn't have RDRAND
                 * or the CPU is physically broken, and in both cases that
                 * means most crypto inside of the CoCo instance will be
                 * broken, defeating the purpose of CoCo in the first place. So
                 * just panic here because it's absolutely unsafe to continue
                 * executing.
                 */
                if (longs == 0)
                        panic("RDRAND is defective.");
        }
        add_device_randomness(rng_seed, sizeof(rng_seed));
        memzero_explicit(rng_seed, sizeof(rng_seed));
}
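The seeding loop in cc_random_init() uses a common pattern: an entropy source that may deliver fewer words than requested, so the caller advances by however many it got and treats zero as fatal. A self-contained userspace sketch of that pattern (the generator here is a stub, not real entropy):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SEED_LONGS (32 / sizeof(long))

    /* Stand-in for arch_get_random_longs(): returns how many longs it
     * actually produced (0..max), so callers must loop until full. */
    static size_t get_random_longs(unsigned long *buf, size_t max)
    {
            size_t n = max > 2 ? 2 : max; /* pretend HW yields <=2 per call */
            for (size_t i = 0; i < n; i++)
                    buf[i] = 0xdeadbeefUL + i; /* not real entropy! */
            return n;
    }

    int main(void)
    {
            unsigned long seed[SEED_LONGS];
            size_t i, longs;

            /* Same shape as the loop above: advance by however many longs
             * the source delivered, and treat a zero return as fatal. */
            for (i = 0; i < SEED_LONGS; i += longs) {
                    longs = get_random_longs(&seed[i], SEED_LONGS - i);
                    if (longs == 0) {
                            fprintf(stderr, "entropy source failed\n");
                            exit(1);
                    }
            }
            printf("collected %zu longs of seed\n", i);
            return 0;
    }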
@@ -48,7 +48,7 @@ static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)

        if (likely(unr < NR_syscalls)) {
                unr = array_index_nospec(unr, NR_syscalls);
                regs->ax = sys_call_table[unr](regs);
                regs->ax = x64_sys_call(regs, unr);
                return true;
        }
        return false;

@@ -65,7 +65,7 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)

        if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
                xnr = array_index_nospec(xnr, X32_NR_syscalls);
                regs->ax = x32_sys_call_table[xnr](regs);
                regs->ax = x32_sys_call(regs, xnr);
                return true;
        }
        return false;

@@ -114,7 +114,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)

        if (likely(unr < IA32_NR_syscalls)) {
                unr = array_index_nospec(unr, IA32_NR_syscalls);
                regs->ax = ia32_sys_call_table[unr](regs);
                regs->ax = ia32_sys_call(regs, unr);
        } else if (nr != -1) {
                regs->ax = __ia32_sys_ni_syscall(regs);
        }

@@ -141,7 +141,7 @@ static __always_inline bool int80_is_external(void)
}

/**
 * int80_emulation - 32-bit legacy syscall entry
 * do_int80_emulation - 32-bit legacy syscall C entry from asm
 *
 * This entry point can be used by 32-bit and 64-bit programs to perform
 * 32-bit system calls. Instances of INT $0x80 can be found inline in

@@ -159,7 +159,7 @@ static __always_inline bool int80_is_external(void)
 * eax: system call number
 * ebx, ecx, edx, esi, edi, ebp: arg1 - arg 6
 */
DEFINE_IDTENTRY_RAW(int80_emulation)
__visible noinstr void do_int80_emulation(struct pt_regs *regs)
{
        int nr;
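These hunks convert syscall dispatch from an indirect call through a function-pointer table to a direct call chosen by nr. A minimal standalone model of the two styles, with invented mini-syscalls (this is an illustration of the pattern, not the kernel's dispatch code):

    #include <stdio.h>

    /* Hypothetical mini syscall layer, two syscalls only. */
    static long sys_hello(long arg) { return arg + 1; }
    static long sys_bye(long arg)   { return arg - 1; }

    typedef long (*sys_call_ptr_t)(long);

    /* Old style: indirect call through a function-pointer table. The
     * indirect branch is what BHI-style attacks can try to steer. */
    static const sys_call_ptr_t table[] = { sys_hello, sys_bye };

    static long dispatch_table(unsigned int nr, long arg)
    {
            return nr < 2 ? table[nr](arg) : -1;
    }

    /* New style: a switch of direct calls; the compiler emits direct
     * branches (or a bounded jump table) instead of an attacker-
     * influenced indirect call through a data table. */
    static long dispatch_switch(unsigned int nr, long arg)
    {
            switch (nr) {
            case 0: return sys_hello(arg);
            case 1: return sys_bye(arg);
            default: return -1;
            }
    }

    int main(void)
    {
            printf("%ld %ld\n", dispatch_table(0, 41), dispatch_switch(1, 43));
            return 0;
    }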
@@ -116,6 +116,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
        /* clobbers %rax, make sure it is after saving the syscall nr */
        IBRS_ENTER
        UNTRAIN_RET
        CLEAR_BRANCH_HISTORY

        call    do_syscall_64           /* returns with IRQs disabled */

@@ -1549,3 +1550,63 @@ SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
        call    make_task_dead
SYM_CODE_END(rewind_stack_and_make_dead)
.popsection

/*
 * This sequence executes branches in order to remove user branch information
 * from the branch history tracker in the Branch Predictor, therefore removing
 * user influence on subsequent BTB lookups.
 *
 * It should be used on parts prior to Alder Lake. Newer parts should use the
 * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being
 * virtualized on newer hardware the VMM should protect against BHI attacks by
 * setting BHI_DIS_S for the guests.
 *
 * CALLs/RETs are necessary to prevent Loop Stream Detector (LSD) from engaging
 * and not clearing the branch history. The call tree looks like:
 *
 * call 1
 *   call 2
 *   call 2
 *   call 2
 *   call 2
 *   call 2
 *   ret
 *   ret
 *   ret
 *   ret
 *   ret
 * ret
 *
 * This means that the stack is non-constant and ORC can't unwind it with %rsp
 * alone. Therefore we unconditionally set up the frame pointer, which allows
 * ORC to unwind properly.
 *
 * The alignment is for performance and not for safety, and may be safely
 * refactored in the future if needed.
 */
SYM_FUNC_START(clear_bhb_loop)
        push    %rbp
        mov     %rsp, %rbp
        movl    $5, %ecx
        ANNOTATE_INTRA_FUNCTION_CALL
        call    1f
        jmp     5f
        .align 64, 0xcc
        ANNOTATE_INTRA_FUNCTION_CALL
1:      call    2f
        RET
        .align 64, 0xcc
2:      movl    $5, %eax
3:      jmp     4f
        nop
4:      sub     $1, %eax
        jnz     3b
        sub     $1, %ecx
        jnz     1b
        RET
5:      lfence
        pop     %rbp
        RET
SYM_FUNC_END(clear_bhb_loop)
EXPORT_SYMBOL_GPL(clear_bhb_loop)
STACK_FRAME_NON_STANDARD(clear_bhb_loop)
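For a sense of scale, the comment's 5-outer by 5-inner structure executes on the order of several dozen branch instructions. A rough back-of-the-envelope count in C (purely illustrative; the real sequence must stay in asm, since the whole point is which branch instructions retire, and this model does not reproduce the exact control flow):

    #include <stdio.h>

    /* Approximate the taken branches the asm sequence above executes:
     * 5 outer intra-function calls, each running a 5-iteration inner
     * loop of paired jmp/jnz branches, plus the matching rets. */
    int main(void)
    {
            int branches = 0;

            for (int outer = 0; outer < 5; outer++) {   /* "jnz 1b" loop   */
                    branches += 2;                      /* call + ret      */
                    for (int inner = 0; inner < 5; inner++)
                            branches += 2;              /* jmp 4f + jnz 3b */
                    branches++;                         /* jnz 1b          */
            }
            printf("~%d branches executed\n", branches); /* rough count */
            return 0;
    }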
@@ -92,6 +92,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)

        IBRS_ENTER
        UNTRAIN_RET
        CLEAR_BRANCH_HISTORY

        /*
         * SYSENTER doesn't filter flags, so we need to clear NT and AC

@@ -209,6 +210,7 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)

        IBRS_ENTER
        UNTRAIN_RET
        CLEAR_BRANCH_HISTORY

        movq    %rsp, %rdi
        call    do_fast_syscall_32

@@ -277,3 +279,17 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        int3
SYM_CODE_END(entry_SYSCALL_compat)

/*
 * int 0x80 is used by 32 bit mode as a system call entry. Normally idt entries
 * point to C routines, however since this is a system call interface the branch
 * history needs to be scrubbed to protect against BHI attacks, and that
 * scrubbing needs to take place in assembly code prior to entering any C
 * routines.
 */
SYM_CODE_START(int80_emulation)
        ANNOTATE_NOENDBR
        UNWIND_HINT_FUNC
        CLEAR_BRANCH_HISTORY
        jmp     do_int80_emulation
SYM_CODE_END(int80_emulation)
@@ -18,8 +18,25 @@
#include <asm/syscalls_32.h>
#undef __SYSCALL

/*
 * The sys_call_table[] is no longer used for system calls, but
 * kernel/trace/trace_syscalls.c still wants to know the system
 * call address.
 */
#ifdef CONFIG_X86_32
#define __SYSCALL(nr, sym) __ia32_##sym,

__visible const sys_call_ptr_t ia32_sys_call_table[] = {
const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_32.h>
};
#undef __SYSCALL
#endif

#define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);

long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
{
        switch (nr) {
        #include <asm/syscalls_32.h>
        default: return __ia32_sys_ni_syscall(regs);
        }
};
@@ -11,8 +11,23 @@
#include <asm/syscalls_64.h>
#undef __SYSCALL

/*
 * The sys_call_table[] is no longer used for system calls, but
 * kernel/trace/trace_syscalls.c still wants to know the system
 * call address.
 */
#define __SYSCALL(nr, sym) __x64_##sym,

asmlinkage const sys_call_ptr_t sys_call_table[] = {
const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_64.h>
};
#undef __SYSCALL

#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);

long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
{
        switch (nr) {
        #include <asm/syscalls_64.h>
        default: return __x64_sys_ni_syscall(regs);
        }
};
@@ -11,8 +11,12 @@
#include <asm/syscalls_x32.h>
#undef __SYSCALL

#define __SYSCALL(nr, sym) __x64_##sym,
#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);

asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
#include <asm/syscalls_x32.h>
long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
{
        switch (nr) {
        #include <asm/syscalls_x32.h>
        default: return __x64_sys_ni_syscall(regs);
        }
};
@@ -250,7 +250,7 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
static const u64 amd_zen1_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,

@@ -262,10 +262,24 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x0187,
};

static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x0964,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9,
};

static u64 amd_pmu_event_map(int hw_event)
{
        if (boot_cpu_data.x86 >= 0x17)
                return amd_f17h_perfmon_event_map[hw_event];
        if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19)
                return amd_zen2_perfmon_event_map[hw_event];

        if (cpu_feature_enabled(X86_FEATURE_ZEN1))
                return amd_zen1_perfmon_event_map[hw_event];

        return amd_perfmon_event_map[hw_event];
}

@@ -904,8 +918,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
        if (!status)
                goto done;

        /* Read branch records before unfreezing */
        if (status & GLOBAL_STATUS_LBRS_FROZEN) {
        /* Read branch records */
        if (x86_pmu.lbr_nr) {
                amd_pmu_lbr_read();
                status &= ~GLOBAL_STATUS_LBRS_FROZEN;
        }
@@ -400,10 +400,12 @@ void amd_pmu_lbr_enable_all(void)
                wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
        }

        rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
        rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
        if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
                rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
                wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        }

        wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
        wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
}

@@ -416,10 +418,12 @@ void amd_pmu_lbr_disable_all(void)
                return;

        rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
        rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);

        wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
        wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);

        if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
                rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
                wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        }
}

__init int amd_pmu_lbr_init(void)
@@ -1236,11 +1236,11 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
        struct pmu *pmu = event->pmu;

        /*
         * Make sure we get updated with the first PEBS
         * event. It will trigger also during removal, but
         * that does not hurt:
         * Make sure we get updated with the first PEBS event.
         * During removal, ->pebs_data_cfg is still valid for
         * the last PEBS event. Don't clear it.
         */
        if (cpuc->n_pebs == 1)
        if ((cpuc->n_pebs == 1) && add)
                cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;

        if (needed_cb != pebs_needs_sched_cb(cpuc)) {
@@ -13,6 +13,7 @@
#include <asm/preempt.h>
#include <asm/asm.h>
#include <asm/gsseg.h>
#include <asm/nospec-branch.h>

#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
@@ -81,6 +81,7 @@

#ifndef __ASSEMBLY__
extern unsigned int output_len;
extern const unsigned long kernel_text_size;
extern const unsigned long kernel_total_size;

unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
@@ -22,6 +22,7 @@ static inline void cc_set_mask(u64 mask)

u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
void cc_random_init(void);
#else
static inline u64 cc_mkenc(u64 val)
{

@@ -32,6 +33,7 @@ static inline u64 cc_mkdec(u64 val)
{
        return val;
}
static inline void cc_random_init(void) { }
#endif

#endif /* _ASM_X86_COCO_H */
@@ -33,6 +33,8 @@ enum cpuid_leafs
        CPUID_7_EDX,
        CPUID_8000_001F_EAX,
        CPUID_8000_0021_EAX,
        CPUID_LNX_5,
        NR_CPUID_WORDS,
};

#define X86_CAP_FMT_NUM "%d:%d"

@@ -91,8 +93,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
           CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) ||    \
           CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) ||    \
           CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) ||    \
           CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) ||    \
           REQUIRED_MASK_CHECK                                    ||    \
           BUILD_BUG_ON_ZERO(NCAPINTS != 21))
           BUILD_BUG_ON_ZERO(NCAPINTS != 22))

#define DISABLED_MASK_BIT_SET(feature_bit)                              \
        ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||     \

@@ -116,8 +119,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
           CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) ||    \
           CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) ||    \
           CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) ||    \
           CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) ||    \
           DISABLED_MASK_CHECK                                    ||    \
           BUILD_BUG_ON_ZERO(NCAPINTS != 21))
           BUILD_BUG_ON_ZERO(NCAPINTS != 22))

#define cpu_has(c, bit)                                                 \
        (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :  \
@@ -13,7 +13,7 @@
/*
 * Defines x86 CPU feature bits
 */
#define NCAPINTS                        21         /* N 32-bit words worth of info */
#define NCAPINTS                        22         /* N 32-bit words worth of info */
#define NBUGINTS                        2          /* N 32-bit bug flags */

/*

@@ -218,7 +218,7 @@
#define X86_FEATURE_IBRS                ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB                ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP               ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN                 (7*32+28)  /* "" CPU based on Zen microarchitecture */
#define X86_FEATURE_ZEN                 ( 7*32+28) /* "" Generic flag for all Zen and newer */
#define X86_FEATURE_L1TF_PTEINV         ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED       ( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL   ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */

@@ -312,6 +312,10 @@
#define X86_FEATURE_SRSO_ALIAS          (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
#define X86_FEATURE_IBPB_ON_VMEXIT      (11*32+26) /* "" Issue an IBPB only on VMEXIT */
#define X86_FEATURE_APIC_MSRS_FENCE     (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
#define X86_FEATURE_ZEN2                (11*32+28) /* "" CPU based on Zen2 microarchitecture */
#define X86_FEATURE_ZEN3                (11*32+29) /* "" CPU based on Zen3 microarchitecture */
#define X86_FEATURE_ZEN4                (11*32+30) /* "" CPU based on Zen4 microarchitecture */
#define X86_FEATURE_ZEN1                (11*32+31) /* "" CPU based on Zen1 microarchitecture */

/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI            (12*32+ 4) /* AVX VNNI instructions */

@@ -452,6 +456,18 @@
#define X86_FEATURE_IBPB_BRTYPE         (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO             (20*32+29) /* "" CPU is not affected by SRSO */

/*
 * Extended auxiliary flags: Linux defined - for features scattered in various
 * CPUID levels like 0x80000022, etc and Linux defined features.
 *
 * Reuse free bits when adding new feature flags!
 */
#define X86_FEATURE_AMD_LBR_PMC_FREEZE  (21*32+ 0) /* AMD LBR and PMC Freeze */
#define X86_FEATURE_CLEAR_BHB_LOOP      (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
#define X86_FEATURE_BHI_CTRL            (21*32+ 2) /* "" BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW        (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */

/*
 * BUG word(s)
 */

@@ -499,4 +515,5 @@
#define X86_BUG_SRSO                    X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0                    X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS                    X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI                     X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#endif /* _ASM_X86_CPUFEATURES_H */
@@ -143,6 +143,7 @@
#define DISABLED_MASK18 (DISABLE_IBT)
#define DISABLED_MASK19 0
#define DISABLED_MASK20 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#define DISABLED_MASK21 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)

#endif /* _ASM_X86_DISABLED_FEATURES_H */
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_INIT_H
#define _ASM_X86_INIT_H

#define __head  __section(".head.text")

struct x86_mapping_info {
        void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
        void *context;                   /* context for alloc_pgt_page */
@@ -46,8 +46,8 @@ void __init sme_unmap_bootdata(char *real_mode_data);
void __init sme_early_init(void);
void __init sev_setup_arch(void);

void __init sme_encrypt_kernel(struct boot_params *bp);
void __init sme_enable(struct boot_params *bp);
void sme_encrypt_kernel(struct boot_params *bp);
void sme_enable(struct boot_params *bp);

int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);

@@ -81,8 +81,8 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
static inline void __init sme_early_init(void) { }
static inline void __init sev_setup_arch(void) { }

static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }
static inline void sme_encrypt_kernel(struct boot_params *bp) { }
static inline void sme_enable(struct boot_params *bp) { }

static inline void sev_es_init_vc_handling(void) { }
@@ -50,10 +50,13 @@
#define SPEC_CTRL_SSBD                  BIT(SPEC_CTRL_SSBD_SHIFT)       /* Speculative Store Bypass Disable */
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT     6          /* Disable RRSBA behavior */
#define SPEC_CTRL_RRSBA_DIS_S           BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
#define SPEC_CTRL_BHI_DIS_S_SHIFT       10         /* Disable Branch History Injection behavior */
#define SPEC_CTRL_BHI_DIS_S             BIT(SPEC_CTRL_BHI_DIS_S_SHIFT)

/* A mask for bits which the kernel toggles when controlling mitigations */
#define SPEC_CTRL_MITIGATIONS_MASK      (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
                                                        | SPEC_CTRL_RRSBA_DIS_S)
                                                        | SPEC_CTRL_RRSBA_DIS_S \
                                                        | SPEC_CTRL_BHI_DIS_S)

#define MSR_IA32_PRED_CMD               0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB                   BIT(0)     /* Indirect Branch Prediction Barrier */

@@ -152,6 +155,10 @@
                                                 * are restricted to targets in
                                                 * kernel.
                                                 */
#define ARCH_CAP_BHI_NO                 BIT(20) /*
                                                 * CPU is not affected by Branch
                                                 * History Injection.
                                                 */
#define ARCH_CAP_PBRSB_NO               BIT(24) /*
                                                 * Not susceptible to Post-Barrier
                                                 * Return Stack Buffer Predictions.
@@ -271,11 +271,20 @@
.Lskip_rsb_\@:
.endm

/*
 * The CALL to srso_alias_untrain_ret() must be patched in directly at
 * the spot where untraining must be done, ie., srso_alias_untrain_ret()
 * must be the target of a CALL instruction instead of indirectly
 * jumping to a wrapper which then calls it. Therefore, this macro is
 * called outside of __UNTRAIN_RET below, for the time being, before the
 * kernel can support nested alternatives with arbitrary nesting.
 */
.macro CALL_UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
#define CALL_UNTRAIN_RET        "call entry_untrain_ret"
#else
#define CALL_UNTRAIN_RET        ""
        ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
                      "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
.endm

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the

@@ -288,38 +297,24 @@
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
        defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
        VALIDATE_UNRET_END
        ALTERNATIVE_3 "",                                               \
                      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
                      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,        \
                      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
        CALL_UNTRAIN_RET
        ALTERNATIVE_2 "",                                               \
                      "call entry_ibpb", \ibpb_feature,                 \
                     __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm

.macro UNTRAIN_RET_VM
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
        defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
        VALIDATE_UNRET_END
        ALTERNATIVE_3 "",                                               \
                      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
                      "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT,    \
                      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm
#define UNTRAIN_RET \
        __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)

.macro UNTRAIN_RET_FROM_CALL
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
        defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
        VALIDATE_UNRET_END
        ALTERNATIVE_3 "",                                               \
                      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
                      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,        \
                      __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
#endif
.endm
#define UNTRAIN_RET_VM \
        __UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)

#define UNTRAIN_RET_FROM_CALL \
        __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)


.macro CALL_DEPTH_ACCOUNT

@@ -340,6 +335,19 @@
        ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
.endm

#ifdef CONFIG_X86_64
.macro CLEAR_BRANCH_HISTORY
        ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
.endm

.macro CLEAR_BRANCH_HISTORY_VMEXIT
        ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
.endm
#else
#define CLEAR_BRANCH_HISTORY
#define CLEAR_BRANCH_HISTORY_VMEXIT
#endif

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE                                 \

@@ -359,6 +367,22 @@ extern void __x86_return_thunk(void);
static inline void __x86_return_thunk(void) {}
#endif

#ifdef CONFIG_CPU_UNRET_ENTRY
extern void retbleed_return_thunk(void);
#else
static inline void retbleed_return_thunk(void) {}
#endif

extern void srso_alias_untrain_ret(void);

#ifdef CONFIG_CPU_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
#else
static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif

extern void retbleed_return_thunk(void);
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);

@@ -370,6 +394,10 @@ extern void srso_alias_untrain_ret(void);
extern void entry_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
#endif

extern void (*x86_return_thunk)(void);

#ifdef CONFIG_CALL_DEPTH_TRACKING
@@ -99,6 +99,7 @@
#define REQUIRED_MASK18 0
#define REQUIRED_MASK19 0
#define REQUIRED_MASK20 0
#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#define REQUIRED_MASK21 0
#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)

#endif /* _ASM_X86_REQUIRED_FEATURES_H */
@@ -199,15 +199,15 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
struct snp_guest_request_ioctl;

void setup_ghcb(void);
void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
                                         unsigned long npages);
void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
                                        unsigned long npages);
void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
                                  unsigned long npages);
void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
                                 unsigned long npages);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __init __noreturn snp_abort(void);
void __noreturn snp_abort(void);
void snp_dmi_setup(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
@@ -16,19 +16,17 @@
#include <asm/thread_info.h>    /* for TS_COMPAT */
#include <asm/unistd.h>

/* This is used purely for kernel/trace/trace_syscalls.c */
typedef long (*sys_call_ptr_t)(const struct pt_regs *);
extern const sys_call_ptr_t sys_call_table[];

#if defined(CONFIG_X86_32)
#define ia32_sys_call_table sys_call_table
#else
/*
 * These may not exist, but still put the prototypes in so we
 * can use IS_ENABLED().
 */
extern const sys_call_ptr_t ia32_sys_call_table[];
extern const sys_call_ptr_t x32_sys_call_table[];
#endif
extern long ia32_sys_call(const struct pt_regs *, unsigned int nr);
extern long x32_sys_call(const struct pt_regs *, unsigned int nr);
extern long x64_sys_call(const struct pt_regs *, unsigned int nr);

/*
 * Only the low 32 bits of orig_ax are meaningful, so we return int.

@@ -127,6 +125,7 @@ static inline int syscall_get_arch(struct task_struct *task)
}

void do_syscall_64(struct pt_regs *regs, int nr);
void do_int80_emulation(struct pt_regs *regs);

#endif  /* CONFIG_X86_32 */
@@ -38,6 +38,7 @@
#define XLF_EFI_KEXEC                   (1<<4)
#define XLF_5LEVEL                      (1<<5)
#define XLF_5LEVEL_ENABLED              (1<<6)
#define XLF_MEM_ENCRYPTION              (1<<7)

#ifndef __ASSEMBLY__
@ -66,20 +66,6 @@ static const int amd_erratum_400[] =
|
|||
static const int amd_erratum_383[] =
|
||||
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
|
||||
|
||||
/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
|
||||
static const int amd_erratum_1054[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
|
||||
|
||||
static const int amd_zenbleed[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
|
||||
AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
|
||||
AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
|
||||
AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
|
||||
|
||||
static const int amd_div0[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
|
||||
AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
|
||||
|
||||
static const int amd_erratum_1485[] =
|
||||
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
|
||||
AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
|
||||
|
@ -620,6 +606,49 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
|
|||
}
|
||||
|
||||
resctrl_cpu_detect(c);
|
||||
|
||||
/* Figure out Zen generations: */
|
||||
switch (c->x86) {
|
||||
case 0x17: {
|
||||
switch (c->x86_model) {
|
||||
case 0x00 ... 0x2f:
|
||||
case 0x50 ... 0x5f:
|
||||
setup_force_cpu_cap(X86_FEATURE_ZEN1);
|
||||
break;
|
||||
case 0x30 ... 0x4f:
|
||||
case 0x60 ... 0x7f:
|
||||
case 0x90 ... 0x91:
|
||||
case 0xa0 ... 0xaf:
|
||||
setup_force_cpu_cap(X86_FEATURE_ZEN2);
|
||||
break;
|
||||
default:
|
||||
goto warn;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 0x19: {
|
||||
switch (c->x86_model) {
|
||||
case 0x00 ... 0x0f:
|
||||
case 0x20 ... 0x5f:
|
||||
setup_force_cpu_cap(X86_FEATURE_ZEN3);
|
||||
break;
|
||||
case 0x10 ... 0x1f:
|
||||
case 0x60 ... 0xaf:
|
||||
setup_force_cpu_cap(X86_FEATURE_ZEN4);
|
||||
break;
|
||||
default:
|
||||
goto warn;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
warn:
|
||||
WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
|
||||
}
|
||||
|
||||
static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
|
||||
|
@@ -945,6 +974,19 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
	clear_rdrand_cpuid_bit(c);
}

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY

@@ -965,24 +1007,19 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
	}
}
#endif
	/*
	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

@@ -999,6 +1036,9 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
		if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);
}

static bool cpu_has_zenbleed_microcode(void)

@@ -1023,11 +1063,8 @@ static bool cpu_has_zenbleed_microcode(void)
	return true;
}

static void zenbleed_check(struct cpuinfo_x86 *c)
static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (!cpu_has_amd_erratum(c, amd_zenbleed))
		return;

	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

@@ -1042,6 +1079,20 @@ static void zenbleed_check(struct cpuinfo_x86 *c)
	}
}

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

@@ -1080,6 +1131,15 @@ static void init_amd(struct cpuinfo_x86 *c)
	case 0x19: init_amd_zn(c); break;
	}

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without a XSaveErPtr feature

@@ -1131,7 +1191,7 @@ static void init_amd(struct cpuinfo_x86 *c)
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !cpu_has_amd_erratum(c, amd_erratum_1054))
	    (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

@@ -1147,13 +1207,6 @@ static void init_amd(struct cpuinfo_x86 *c)
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));

	zenbleed_check(c);

	if (cpu_has_amd_erratum(c, amd_div0)) {
		pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
		setup_force_cpu_bug(X86_BUG_DIV0);
	}

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
	    cpu_has_amd_erratum(c, amd_erratum_1485))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);

@@ -1313,7 +1366,7 @@ static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zenbleed_check(c);
	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)

@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)

@@ -1108,8 +1108,7 @@ do_cmd_auto:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		if (IS_ENABLED(CONFIG_RETHUNK))
			x86_return_thunk = retbleed_return_thunk;
		x86_return_thunk = retbleed_return_thunk;

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1607,6 +1606,79 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
	dump_stack();
}

/*
 * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by
 * branch history in userspace. Not needed if BHI_NO is set.
 */
static bool __init spec_ctrl_bhi_dis(void)
{
	if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
		return false;

	x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
	update_spec_ctrl(x86_spec_ctrl_base);
	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);

	return true;
}

enum bhi_mitigations {
	BHI_MITIGATION_OFF,
	BHI_MITIGATION_ON,
	BHI_MITIGATION_AUTO,
};

static enum bhi_mitigations bhi_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_SPECTRE_BHI_ON)  ? BHI_MITIGATION_ON  :
	IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
					     BHI_MITIGATION_AUTO;

static int __init spectre_bhi_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		bhi_mitigation = BHI_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		bhi_mitigation = BHI_MITIGATION_ON;
	else if (!strcmp(str, "auto"))
		bhi_mitigation = BHI_MITIGATION_AUTO;
	else
		pr_err("Ignoring unknown spectre_bhi option (%s)", str);

	return 0;
}
early_param("spectre_bhi", spectre_bhi_parse_cmdline);

static void __init bhi_select_mitigation(void)
{
	if (bhi_mitigation == BHI_MITIGATION_OFF)
		return;

	/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
	    !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
		return;

	if (spec_ctrl_bhi_dis())
		return;

	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	/* Mitigate KVM by default */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
	pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");

	if (bhi_mitigation == BHI_MITIGATION_AUTO)
		return;

	/* Mitigate syscalls when the mitigation is forced =on */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
	pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
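The ordering in bhi_select_mitigation() above is the whole policy: cheapest sufficient protection wins. A minimal sketch of that decision tree as a pure function, with hypothetical struct fields standing in for the CPU feature and MSR checks:

#include <stdbool.h>
#include <stdio.h>

enum bhi_mode { BHI_OFF, BHI_ON, BHI_AUTO };

struct cpu { bool retpoline, rrsba, bhi_ctrl, x86_64; };

/* Illustrative decision tree; returns a human-readable outcome. */
static const char *bhi_outcome(enum bhi_mode mode, const struct cpu *c)
{
	if (mode == BHI_OFF)
		return "off";
	/* Retpoline already covers BHI unless the CPU has RRSBA behavior. */
	if (c->retpoline && !c->rrsba)
		return "retpoline";
	/* Prefer the hardware control when CPUID enumerates BHI_CTRL. */
	if (c->bhi_ctrl)
		return "BHI_DIS_S";
	if (!c->x86_64)
		return "none (32-bit)";
	/* KVM is always covered by the SW loop; syscalls only when =on. */
	return mode == BHI_AUTO ? "SW loop on vmexit"
				: "SW loop on vmexit + syscall";
}

int main(void)
{
	struct cpu eibrs_cpu = { .retpoline = false, .rrsba = false,
				 .bhi_ctrl = false, .x86_64 = true };
	printf("auto: %s\n", bhi_outcome(BHI_AUTO, &eibrs_cpu));
	printf("on:   %s\n", bhi_outcome(BHI_ON, &eibrs_cpu));
	return 0;
}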
@@ -1718,6 +1790,9 @@ static void __init spectre_v2_select_mitigation(void)
	    mode == SPECTRE_V2_RETPOLINE)
		spec_ctrl_disable_kernel_rrsba();

	if (boot_cpu_has(X86_BUG_BHI))
		bhi_select_mitigation();

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

@@ -2695,15 +2770,15 @@ static char *stibp_state(void)

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
		return "; STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
		return "; STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
		return "; STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
			return "; STIBP: conditional";
	}
	return "";
}

@@ -2712,10 +2787,10 @@ static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
			return "; IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
			return "; IBPB: conditional";
		return "; IBPB: disabled";
	}
	return "";
}

@@ -2725,14 +2800,31 @@ static char *pbrsb_eibrs_state(void)
	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
			return ", PBRSB-eIBRS: SW sequence";
			return "; PBRSB-eIBRS: SW sequence";
		else
			return ", PBRSB-eIBRS: Vulnerable";
			return "; PBRSB-eIBRS: Vulnerable";
	} else {
		return ", PBRSB-eIBRS: Not affected";
		return "; PBRSB-eIBRS: Not affected";
	}
}

static const char * const spectre_bhi_state(void)
{
	if (!boot_cpu_has_bug(X86_BUG_BHI))
		return "; BHI: Not affected";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
		return "; BHI: BHI_DIS_S";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
		return "; BHI: SW loop, KVM: SW loop";
	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
		 !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
		return "; BHI: Retpoline";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
		return "; BHI: Syscall hardening, KVM: SW loop";

	return "; BHI: Vulnerable (Syscall hardening enabled)";
}

static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)

@@ -2745,13 +2837,15 @@ static ssize_t spectre_v2_show_state(char *buf)
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
			  spectre_v2_strings[spectre_v2_enabled],
			  ibpb_state(),
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
			  stibp_state(),
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
			  pbrsb_eibrs_state(),
			  spectre_bhi_state(),
			  /* this should always be at the end */
			  spectre_v2_module_string());
}
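The separator flip from ", " to "; " works because every helper returns either "" or a fragment that carries its own prefix, so the single format string needs no separator bookkeeping. A small sketch of that pattern (the helper names and the base mitigation string are illustrative, not the kernel's exact set):

#include <stdio.h>

/* Illustrative: each helper returns "" or a "; "-prefixed fragment. */
static const char *stibp(int on)  { return on ? "; STIBP: forced" : ""; }
static const char *ibpb(int on)   { return on ? "; IBPB: always-on" : ""; }
static const char *bhi(int hw)    { return hw ? "; BHI: BHI_DIS_S" : ""; }

int main(void)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "%s%s%s%s",
		 "Mitigation: Enhanced / Automatic IBRS",
		 ibpb(1), stibp(0), bhi(1));
	/* -> "Mitigation: Enhanced / Automatic IBRS; IBPB: always-on; BHI: BHI_DIS_S" */
	puts(buf);
	return 0;
}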
@@ -1165,6 +1165,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_SPECTRE_V2		BIT(8)
#define NO_MMIO			BIT(9)
#define NO_EIBRS_PBRSB		BIT(10)
#define NO_BHI			BIT(11)

#define VULNWL(vendor, family, model, whitelist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)

@@ -1227,18 +1228,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),

	/* Zhaoxin Family 7 */
	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
	{}
};

@@ -1475,6 +1476,13 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
	if (vulnerable_to_rfds(ia32_cap))
		setup_force_cpu_bug(X86_BUG_RFDS);

	/* When virtualized, eIBRS could be hidden, assume vulnerable */
	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
		setup_force_cpu_bug(X86_BUG_BHI);

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;
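The enumeration logic above reduces to a single predicate. A sketch of it, with booleans standing in for the MSR bit, whitelist match, and feature flags (illustrative names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative predicate for when X86_BUG_BHI gets set, per the hunk above:
 * no BHI_NO enumeration, not whitelisted, and either eIBRS is present or we
 * run virtualized (eIBRS may be hidden by the hypervisor). */
static bool is_bhi_affected(bool bhi_no, bool whitelisted,
			    bool eibrs, bool hypervisor)
{
	return !bhi_no && !whitelisted && (eibrs || hypervisor);
}

int main(void)
{
	printf("%d\n", is_bhi_affected(false, false, true, false));	/* 1 */
	printf("%d\n", is_bhi_affected(true, false, true, false));	/* 0 */
	return 0;
}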
@@ -2468,12 +2468,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
		return -EINVAL;

	b = &per_cpu(mce_banks_array, s->id)[bank];

	if (!b->init)
		return -ENODEV;

	b->ctl = new;

	mutex_lock(&mce_sysfs_mutex);
	mce_restart();
	mutex_unlock(&mce_sysfs_mutex);

	return size;
}

@@ -28,6 +28,7 @@ static const struct cpuid_bit cpuid_bits[] = {
	{ X86_FEATURE_EPB,		CPUID_ECX,  3, 0x00000006, 0 },
	{ X86_FEATURE_INTEL_PPIN,	CPUID_EBX,  0, 0x00000007, 1 },
	{ X86_FEATURE_RRSBA_CTRL,	CPUID_EDX,  2, 0x00000007, 2 },
	{ X86_FEATURE_BHI_CTRL,		CPUID_EDX,  4, 0x00000007, 2 },
	{ X86_FEATURE_CQM_LLC,		CPUID_EDX,  1, 0x0000000f, 0 },
	{ X86_FEATURE_CQM_OCCUP_LLC,	CPUID_EDX,  0, 0x0000000f, 1 },
	{ X86_FEATURE_CQM_MBM_TOTAL,	CPUID_EDX,  1, 0x0000000f, 1 },

@@ -49,6 +50,7 @@ static const struct cpuid_bit cpuid_bits[] = {
	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
	{ X86_FEATURE_PERFMON_V2,	CPUID_EAX,  0, 0x80000022, 0 },
	{ X86_FEATURE_AMD_LBR_V2,	CPUID_EAX,  1, 0x80000022, 0 },
	{ X86_FEATURE_AMD_LBR_PMC_FREEZE,	CPUID_EAX,  2, 0x80000022, 0 },
	{ 0, 0, 0, 0, 0 }
};

@@ -41,6 +41,7 @@
#include <asm/trapnr.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/init.h>

/*
 * Manage page tables very early on.

@@ -84,8 +85,6 @@ static struct desc_ptr startup_gdt_descr = {
	.address = 0,
};

#define __head	__section(".head.text")

static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;

@@ -196,12 +196,12 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	if (early) {
		/* Initialize the lapic mapping */
		if (!acpi_lapic)
			register_lapic_address(mpc->lapic);
	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;
	}

	/* Now process the configuration blocks. */
	while (count < mpc->length) {

@@ -35,6 +35,7 @@
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cacheinfo.h>
#include <asm/coco.h>
#include <asm/cpu.h>
#include <asm/efi.h>
#include <asm/gart.h>

@@ -1120,6 +1121,7 @@ void __init setup_arch(char **cmdline_p)
	 * memory size.
	 */
	sev_setup_arch();
	cc_random_init();

	efi_fake_memmap();
	efi_find_mirror();
@@ -89,7 +89,8 @@ static bool __init sev_es_check_cpu_features(void)
	return true;
}

static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
static void __head __noreturn
sev_es_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

@@ -326,13 +327,7 @@ static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid
 */
static const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
	void *ptr;

	asm ("lea cpuid_table_copy(%%rip), %0"
	     : "=r" (ptr)
	     : "p" (&cpuid_table_copy));

	return ptr;
	return &RIP_REL_REF(cpuid_table_copy);
}

/*

@@ -391,7 +386,7 @@ static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
	return xsave_size;
}

static bool
static bool __head
snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

@@ -528,7 +523,8 @@ static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
 * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
 * should be treated as fatal by caller.
 */
static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
static int __head
snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

@@ -570,7 +566,7 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
 * page yet, so it only supports the MSR based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
	unsigned int subfn = lower_bits(regs->cx, 32);
	unsigned int fn = lower_bits(regs->ax, 32);

@@ -1016,7 +1012,8 @@ struct cc_setup_data {
 * Search for a Confidential Computing blob passed in as a setup_data entry
 * via the Linux Boot Protocol.
 */
static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
static __head
struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
{
	struct cc_setup_data *sd = NULL;
	struct setup_data *hdr;

@@ -1043,7 +1040,7 @@ static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
 * mapping needs to be updated in sync with all the changes to virtual memory
 * layout and related mapping facilities throughout the boot process.
 */
static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
{
	const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
	int i;
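The RIP_REL_REF() conversion above replaces hand-written inline assembly with a macro, but the underlying idea is unchanged: reference the symbol RIP-relatively so the address is correct while running identity-mapped, before relocations are applied. A minimal userspace sketch of the removed asm pattern (the symbol name here is local to this sketch, not the kernel's):

#include <stdio.h>

long cpuid_table_copy_demo[4];

/* Take a symbol's address via RIP-relative LEA, as the removed asm did. */
static void *rip_rel_addr(void)
{
	void *p;

	asm ("lea cpuid_table_copy_demo(%%rip), %0"
	     : "=r" (p)
	     : "p" (&cpuid_table_copy_demo));
	return p;
}

int main(void)
{
	/* Both forms agree once the program is relocated normally. */
	printf("%p == %p\n", rip_rel_addr(), (void *)cpuid_table_copy_demo);
	return 0;
}

This compiles with gcc on x86-64 only; the point of the kernel macro is that the compiler cannot be trusted to pick RIP-relative addressing on its own in the early-boot code paths.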
@@ -26,6 +26,7 @@
#include <linux/dmi.h>
#include <uapi/linux/sev-guest.h>

#include <asm/init.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>

@@ -683,8 +684,9 @@ static u64 __init get_jump_table_addr(void)
	return ret;
}

static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
				  unsigned long npages, enum psc_op op)
static void __head
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
		      unsigned long npages, enum psc_op op)
{
	unsigned long paddr_end;
	u64 val;

@@ -740,7 +742,7 @@ e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned long npages)
{
	/*

@@ -2045,7 +2047,7 @@ fail:
 *
 * Scan for the blob in that order.
 */
static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

@@ -2071,7 +2073,7 @@ found_cc_info:
	return cc_info;
}

bool __init snp_init(struct boot_params *bp)
bool __head snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

@@ -2093,7 +2095,7 @@ bool __init snp_init(struct boot_params *bp)
	return true;
}

void __init __noreturn snp_abort(void)
void __head __noreturn snp_abort(void)
{
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
@@ -139,10 +139,7 @@ SECTIONS
		STATIC_CALL_TEXT

		ALIGN_ENTRY_TEXT_BEGIN
#ifdef CONFIG_CPU_SRSO
		*(.text..__x86.rethunk_untrain)
#endif

		ENTRY_TEXT

#ifdef CONFIG_CPU_SRSO

@@ -520,12 +517,12 @@ INIT_PER_CPU(irq_stack_backing_store);
	   "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_RETHUNK
#ifdef CONFIG_CPU_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
#endif

#ifdef CONFIG_CPU_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1

@@ -3120,7 +3120,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
	if (pud_none(pud) || !pud_present(pud))
		goto out;

	if (pud_large(pud)) {
	if (pud_leaf(pud)) {
		level = PG_LEVEL_1G;
		goto out;
	}

@@ -52,7 +52,7 @@ enum kvm_only_cpuid_leafs {
#define X86_FEATURE_IPRED_CTRL		KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
#define KVM_X86_FEATURE_RRSBA_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
#define X86_FEATURE_DDPD_U		KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
#define X86_FEATURE_BHI_CTRL		KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define KVM_X86_FEATURE_BHI_CTRL	KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
#define X86_FEATURE_MCDT_NO		KVM_X86_FEATURE(CPUID_7_2_EDX, 5)

/* CPUID level 0x80000007 (EDX). */

@@ -102,10 +102,12 @@ static const struct cpuid_reg reverse_cpuid[] = {
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

@@ -126,6 +128,7 @@ static __always_inline u32 __feature_translate(int x86_feature)
	KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
	KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
	KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
	KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
	default:
		return x86_feature;
	}
@@ -84,9 +84,10 @@ struct enc_region {
};

/* Called with the sev_bitmap_lock held, or on shutdown  */
static int sev_flush_asids(int min_asid, int max_asid)
static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
{
	int ret, asid, error = 0;
	int ret, error = 0;
	unsigned int asid;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);

@@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{
	if (sev_flush_asids(min_asid, max_asid))
		return false;

@@ -143,8 +144,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)

static int sev_asid_new(struct kvm_sev_info *sev)
{
	int asid, min_asid, max_asid, ret;
	/*
	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
	 * Note: min ASID can end up larger than the max if basic SEV support is
	 * effectively disabled by disallowing use of ASIDs for SEV guests.
	 */
	unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
	unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
	unsigned int asid;
	bool retry = true;
	int ret;

	if (min_asid > max_asid)
		return -ENOTTY;

	WARN_ON(sev->misc_cg);
	sev->misc_cg = get_current_misc_cg();

@@ -157,12 +170,6 @@ static int sev_asid_new(struct kvm_sev_info *sev)

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
	 */
	min_asid = sev->es_active ? 1 : min_sev_asid;
	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
	if (asid > max_asid) {

@@ -187,7 +194,7 @@ e_uncharge:
	return ret;
}

static int sev_get_asid(struct kvm *kvm)
static unsigned int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

@@ -284,8 +291,8 @@ e_no_asid:

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	unsigned int asid = sev_get_asid(kvm);
	struct sev_data_activate activate;
	int asid = sev_get_asid(kvm);
	int ret;

	/* activate ASID on the given handle */

@@ -2234,8 +2241,10 @@ void __init sev_hardware_setup(void)
		goto out;
	}

	sev_asid_count = max_sev_asid - min_sev_asid + 1;
	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
	if (min_sev_asid <= max_sev_asid) {
		sev_asid_count = max_sev_asid - min_sev_asid + 1;
		WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
	}
	sev_supported = true;

	/* SEV-ES support requested? */

@@ -2266,7 +2275,9 @@ void __init sev_hardware_setup(void)
out:
	if (boot_cpu_has(X86_FEATURE_SEV))
		pr_info("SEV %s (ASIDs %u - %u)\n",
			sev_supported ? "enabled" : "disabled",
			sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
								       "unusable" :
								       "disabled",
			min_sev_asid, max_sev_asid);
	if (boot_cpu_has(X86_FEATURE_SEV_ES))
		pr_info("SEV-ES %s (ASIDs %u - %u)\n",

@@ -2314,7 +2325,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
 */
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{
	int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
	unsigned int asid = sev_get_asid(vcpu->kvm);

	/*
	 * Note! The address must be a kernel address, as regular page walk

@@ -2632,7 +2643,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);
	unsigned int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->asid = asid;
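The sev_asid_new() rework above hinges on one arithmetic fact: SEV-ES guests draw ASIDs from [1, min_sev_asid - 1] and plain SEV guests from [min_sev_asid, max_sev_asid], so either range can be empty and must be rejected up front. A minimal sketch of that range selection, assuming nothing beyond the values visible in the hunk:

#include <stdio.h>

/* Illustrative: compute the usable ASID window; empty ranges are errors
 * (the kernel returns -ENOTTY in that case). */
static int asid_range(int es_active, unsigned int min_sev_asid,
		      unsigned int max_sev_asid,
		      unsigned int *min, unsigned int *max)
{
	*min = es_active ? 1 : min_sev_asid;
	*max = es_active ? min_sev_asid - 1 : max_sev_asid;
	return (*min > *max) ? -1 : 0;
}

int main(void)
{
	unsigned int lo, hi;

	/* min_sev_asid == 1 leaves no ASIDs at all for SEV-ES guests. */
	if (asid_range(1, 1, 509, &lo, &hi))
		puts("SEV-ES unusable: empty ASID range");
	if (!asid_range(0, 1, 509, &lo, &hi))
		printf("SEV ASIDs: %u-%u\n", lo, hi);
	return 0;
}

The same empty-range check explains the sev_hardware_setup() change: the "SEV enabled" banner now distinguishes an "unusable" configuration (min > max) from a disabled one.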
@@ -732,13 +732,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_invlpga,
	    TP_PROTO(__u64 rip, int asid, u64 address),
	    TP_PROTO(__u64 rip, unsigned int asid, u64 address),
	    TP_ARGS(rip, asid, address),

	    TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	int,	asid	)
		__field(	__u64,	address	)
		__field(	__u64,		rip	)
		__field(	unsigned int,	asid	)
		__field(	__u64,		address	)
	    ),

	    TP_fast_assign(

@@ -747,7 +747,7 @@ TRACE_EVENT(kvm_invlpga,
		__entry->address = address;
	    ),

	    TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
	    TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
		      __entry->rip, __entry->asid, __entry->address)
);

@@ -275,6 +275,8 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)

	call vmx_spec_ctrl_restore_host

	CLEAR_BRANCH_HISTORY_VMEXIT

	/* Put return value in AX */
	mov %_ASM_BX, %_ASM_AX

@@ -1621,7 +1621,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR)
	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)

static u64 kvm_get_arch_capabilities(void)
{

@@ -14,19 +14,6 @@ ifdef CONFIG_KCSAN
CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
endif

# Early boot use of cmdline; don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KCOV_INSTRUMENT_cmdline.o := n
KASAN_SANITIZE_cmdline.o  := n
KCSAN_SANITIZE_cmdline.o  := n

ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
endif

CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
endif

inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN     $@

@@ -126,12 +126,13 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
#include <asm/GEN-for-each-reg.h>
#undef GEN
#endif
/*
 * This function name is magical and is used by -mfunction-return=thunk-extern
 * for the compiler to generate JMPs to it.
 */

#ifdef CONFIG_RETHUNK

	.section .text..__x86.return_thunk

#ifdef CONFIG_CPU_SRSO

/*
 * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
 * special addresses:

@@ -147,9 +148,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 *
 * As a result, srso_alias_safe_ret() becomes a safe return.
 */
#ifdef CONFIG_CPU_SRSO
	.section .text..__x86.rethunk_untrain

	.pushsection .text..__x86.rethunk_untrain
SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR

@@ -158,17 +157,9 @@ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
	.popsection

	.section .text..__x86.rethunk_safe
#else
/* dummy definition for alternatives */
SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(srso_alias_untrain_ret)
#endif

	.pushsection .text..__x86.rethunk_safe
SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
	lea 8(%_ASM_SP), %_ASM_SP
	UNWIND_HINT_FUNC

@@ -177,14 +168,69 @@ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
	int3
SYM_FUNC_END(srso_alias_safe_ret)

	.section .text..__x86.return_thunk

SYM_CODE_START(srso_alias_return_thunk)
SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_alias_safe_ret
	ud2
SYM_CODE_END(srso_alias_return_thunk)
	.popsection

/*
 * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
 * above. On kernel entry, srso_untrain_ret() is executed which is a
 *
 *   movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
 * thus a "safe" one to use.
 */
	.align 64
	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	.byte 0x48, 0xb8

/*
 * This forces the function return instruction to speculate into a trap
 * (UD2 in srso_return_thunk() below). This RET will then mispredict
 * and execution will continue at the return site read from the top of
 * the stack.
 */
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
	int3
	/* end of movabs */
	lfence
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)

SYM_CODE_START(srso_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_return_thunk)

#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
#else /* !CONFIG_CPU_SRSO */
#define JMP_SRSO_UNTRAIN_RET "ud2"
/* Dummy for the alternative in CALL_UNTRAIN_RET. */
SYM_CODE_START(srso_alias_untrain_ret)
	ANNOTATE_UNRET_SAFE
	ANNOTATE_NOENDBR
	ret
	int3
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
#endif /* CONFIG_CPU_SRSO */

#ifdef CONFIG_CPU_UNRET_ENTRY

/*
 * Some generic notes on the untraining sequences:

@@ -266,65 +312,19 @@ SYM_CODE_END(retbleed_return_thunk)
SYM_FUNC_END(retbleed_untrain_ret)
__EXPORT_THUNK(retbleed_untrain_ret)

/*
 * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
 * above. On kernel entry, srso_untrain_ret() is executed which is a
 *
 *   movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
 * thus a "safe" one to use.
 */
	.align 64
	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
	ANNOTATE_NOENDBR
	.byte 0x48, 0xb8
#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
#else /* !CONFIG_CPU_UNRET_ENTRY */
#define JMP_RETBLEED_UNTRAIN_RET "ud2"
#endif /* CONFIG_CPU_UNRET_ENTRY */

/*
 * This forces the function return instruction to speculate into a trap
 * (UD2 in srso_return_thunk() below). This RET will then mispredict
 * and execution will continue at the return site read from the top of
 * the stack.
 */
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
	int3
	/* end of movabs */
	lfence
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)
__EXPORT_THUNK(srso_untrain_ret)

SYM_CODE_START(srso_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_return_thunk)
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)

SYM_FUNC_START(entry_untrain_ret)
	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
	ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)

SYM_CODE_START(__x86_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_RETHUNK */
#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */

#ifdef CONFIG_CALL_DEPTH_TRACKING

@@ -359,3 +359,22 @@ SYM_FUNC_START(__x86_return_skl)
SYM_FUNC_END(__x86_return_skl)

#endif /* CONFIG_CALL_DEPTH_TRACKING */

/*
 * This function name is magical and is used by -mfunction-return=thunk-extern
 * for the compiler to generate JMPs to it.
 *
 * This code is only used during kernel boot or module init. All
 * 'JMP __x86_return_thunk' sites are changed to something else by
 * apply_returns().
 */
SYM_CODE_START(__x86_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_RETHUNK */
@@ -376,7 +376,7 @@ static void dump_pagetable(unsigned long address)
		goto bad;

	pr_cont("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
	if (!pud_present(*pud) || pud_leaf(*pud))
		goto out;

	pmd = pmd_offset(pud, address);

@@ -1037,7 +1037,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
	if (pud_leaf(*pud))
		return spurious_kernel_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);

@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		bool use_gbpage;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		/* if this is already a gbpage, this portion is already mapped */
		if (pud_large(*pud))
			continue;

		/* Is using a gbpage allowed? */
		use_gbpage = info->direct_gbpages;

		/* Don't use gbpage if it maps more than the requested region. */
		/* at the begining: */
		use_gbpage &= ((addr & ~PUD_MASK) == 0);
		/* ... or at the end: */
		use_gbpage &= ((next & ~PUD_MASK) == 0);

		/* Never overwrite existing mappings */
		use_gbpage &= !pud_present(*pud);

		if (use_gbpage) {
		if (info->direct_gbpages) {
			pud_t pudval;

			if (pud_present(*pud))
				continue;

			addr &= PUD_MASK;
			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;

@@ -617,7 +617,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	}

	if (!pud_none(*pud)) {
		if (!pud_large(*pud)) {
		if (!pud_leaf(*pud)) {
			pmd = pmd_offset(pud, 0);
			paddr_last = phys_pmd_init(pmd, paddr,
						   paddr_end,

@@ -1163,7 +1163,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud) &&
		if (pud_leaf(*pud) &&
		    IS_ALIGNED(addr, PUD_SIZE) &&
		    IS_ALIGNED(next, PUD_SIZE)) {
			spin_lock(&init_mm.page_table_lock);

@@ -115,7 +115,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
		if (!pud_leaf(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

@@ -41,9 +41,9 @@
#include <linux/mem_encrypt.h>
#include <linux/cc_platform.h>

#include <asm/init.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cmdline.h>
#include <asm/coco.h>
#include <asm/sev.h>

@@ -95,10 +95,7 @@ struct sme_populate_pgd_data {
 */
static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");

static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[]  __initdata = "on";

static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

@@ -113,7 +110,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
	memset(pgd_p, 0, pgd_size);
}

static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;

@@ -144,13 +141,13 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	if (pud_large(*pud))
	if (pud_leaf(*pud))
		return NULL;

	return pud;
}

static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

@@ -166,7 +163,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}

static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

@@ -192,7 +189,7 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
	set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}

static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

@@ -202,7 +199,7 @@ static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
	}
}

static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd(ppd);

@@ -212,7 +209,7 @@ static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
	}
}

static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

@@ -236,22 +233,22 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
	__sme_map_range_pte(ppd);
}

static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}

static unsigned long __init sme_pgtable_calc(unsigned long len)
static unsigned long __head sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

@@ -288,7 +285,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
	return entries + tables;
}

void __init sme_encrypt_kernel(struct boot_params *bp)
void __head sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;

@@ -323,9 +320,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
	 * memory from being cached.
	 */

	/* Physical addresses gives us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE);
	kernel_start = (unsigned long)RIP_REL_REF(_text);
	kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;

@@ -342,14 +338,6 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
	}
#endif

	/*
	 * We're running identity mapped, so we must obtain the address to the
	 * SME encryption workarea using rip-relative addressing.
	 */
	asm ("lea sme_workarea(%%rip), %0"
	     : "=r" (workarea_start)
	     : "p" (sme_workarea));

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:

@@ -359,7 +347,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
	 *     pagetable structures for the encryption of the kernel
	 *     pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
	execute_len = execute_end - execute_start;

@@ -502,13 +490,11 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
	native_write_cr3(__native_read_cr3());
}

void __init sme_enable(struct boot_params *bp)
void __head sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	unsigned long me_mask;
	char buffer[16];
	bool snp;
	u64 msr;

@@ -551,6 +537,9 @@ void __init sme_enable(struct boot_params *bp)

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		if (!(bp->hdr.xloadflags & XLF_MEM_ENCRYPTION))
			return;

		/*
		 * No SME if Hypervisor bit is set. This check is here to
		 * prevent a guest from trying to enable SME. For running as a

@@ -570,31 +559,8 @@ void __init sme_enable(struct boot_params *bp)
		msr = __rdmsr(MSR_AMD64_SYSCFG);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* SEV state cannot be controlled by a command line option */
		goto out;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));

	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
	    strncmp(buffer, cmdline_on, sizeof(buffer)))
		return;

out:
	RIP_REL_REF(sme_me_mask) = me_mask;
	physical_mask &= ~me_mask;
	cc_vendor = CC_VENDOR_AMD;
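The next hunk introduces a get_pat_info() helper that centralizes PFN/cachemode recovery for VM_PAT areas. Its fallback order is the interesting part: consult the page tables first, and only for COW mappings fall back to vm_pgoff, where the cachemode cannot be recovered. A sketch under those assumptions, with a hypothetical struct standing in for the VMA and page-table state:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative fallback order from get_pat_info(): page tables first, then
 * vm_pgoff for COW mappings (where only the PFN is recoverable). */
struct vma { bool pte_walk_works; bool cow; unsigned long pgoff, pfn_from_pte; };

static int pat_pfn(const struct vma *v, unsigned long *pfn, bool need_prot)
{
	if (v->pte_walk_works) {	/* follow_phys() succeeded */
		*pfn = v->pfn_from_pte;
		return 0;
	}
	if (v->cow) {
		if (need_prot)
			return -1;	/* cachemode unrecoverable: fail fork() */
		*pfn = v->pgoff;	/* see remap_pfn_range_notrack() */
		return 0;
	}
	return -1;			/* unexpected: the kernel WARNs here */
}

int main(void)
{
	struct vma v = { .pte_walk_works = false, .cow = true, .pgoff = 0x1000 };
	unsigned long pfn;

	printf("untrack: %s\n", pat_pfn(&v, &pfn, false) ? "fail" : "ok");
	printf("fork:    %s\n", pat_pfn(&v, &pfn, true) ? "fail" : "ok");
	return 0;
}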
@ -950,6 +950,38 @@ static void free_pfn_range(u64 paddr, unsigned long size)
|
|||
memtype_free(paddr, paddr + size);
|
||||
}
|
||||
|
||||
static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
|
||||
pgprot_t *pgprot)
|
||||
{
|
||||
unsigned long prot;
|
||||
|
||||
VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));
|
||||
|
||||
/*
|
||||
* We need the starting PFN and cachemode used for track_pfn_remap()
|
||||
* that covered the whole VMA. For most mappings, we can obtain that
|
||||
* information from the page tables. For COW mappings, we might now
|
||||
* suddenly have anon folios mapped and follow_phys() will fail.
|
||||
*
|
||||
* Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
|
||||
* detect the PFN. If we need the cachemode as well, we're out of luck
|
||||
* for now and have to fail fork().
|
||||
*/
|
||||
if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
|
||||
if (pgprot)
|
||||
*pgprot = __pgprot(prot);
|
||||
return 0;
|
||||
}
|
||||
if (is_cow_mapping(vma->vm_flags)) {
|
||||
if (pgprot)
|
||||
return -EINVAL;
|
||||
*paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
|
||||
return 0;
|
||||
}
|
||||
WARN_ON_ONCE(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* track_pfn_copy is called when vma that is covering the pfnmap gets
|
||||
* copied through copy_page_range().
|
||||
|
@ -960,20 +992,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
|
|||
int track_pfn_copy(struct vm_area_struct *vma)
|
||||
{
|
||||
resource_size_t paddr;
|
||||
unsigned long prot;
|
||||
unsigned long vma_size = vma->vm_end - vma->vm_start;
|
||||
pgprot_t pgprot;
|
||||
|
||||
if (vma->vm_flags & VM_PAT) {
|
||||
/*
|
||||
* reserve the whole chunk covered by vma. We need the
|
||||
* starting address and protection from pte.
|
||||
*/
|
||||
if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
|
||||
WARN_ON_ONCE(1);
|
||||
if (get_pat_info(vma, &paddr, &pgprot))
|
||||
return -EINVAL;
|
||||
}
|
||||
pgprot = __pgprot(prot);
|
||||
/* reserve the whole chunk covered by vma. */
|
||||
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
|
||||
}
|
||||
|
||||
|
@ -1048,7 +1073,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
|
|||
unsigned long size, bool mm_wr_locked)
|
||||
{
|
||||
resource_size_t paddr;
|
||||
unsigned long prot;
|
||||
|
||||
if (vma && !(vma->vm_flags & VM_PAT))
|
||||
return;
|
||||
|
@ -1056,11 +1080,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
|
|||
/* free the chunk starting from pfn or the whole chunk */
|
||||
paddr = (resource_size_t)pfn << PAGE_SHIFT;
|
||||
if (!paddr && !size) {
|
||||
if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
|
||||
WARN_ON_ONCE(1);
|
||||
if (get_pat_info(vma, &paddr, NULL))
|
||||
return;
|
||||
}
|
||||
|
||||
size = vma->vm_end - vma->vm_start;
|
||||
}
|
||||
free_pfn_range(paddr, size);
|
||||
|
|
|
@ -684,7 +684,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
|
|||
return NULL;
|
||||
|
||||
*level = PG_LEVEL_1G;
|
||||
if (pud_large(*pud) || !pud_present(*pud))
|
||||
if (pud_leaf(*pud) || !pud_present(*pud))
|
||||
return (pte_t *)pud;
|
||||
|
||||
pmd = pmd_offset(pud, address);
|
||||
|
@ -743,7 +743,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
|
|||
return NULL;
|
||||
|
||||
pud = pud_offset(p4d, address);
|
||||
if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
|
||||
if (pud_none(*pud) || pud_leaf(*pud) || !pud_present(*pud))
|
||||
return NULL;
|
||||
|
||||
return pmd_offset(pud, address);
|
||||
|
@ -1274,7 +1274,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
|
|||
*/
|
||||
while (end - start >= PUD_SIZE) {
|
||||
|
||||
if (pud_large(*pud))
|
||||
if (pud_leaf(*pud))
|
||||
pud_clear(pud);
|
||||
else
|
||||
unmap_pmd_range(pud, start, start + PUD_SIZE);
|
||||
|
|
|
@ -774,7 +774,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
|
|||
*/
|
||||
int pud_clear_huge(pud_t *pud)
|
||||
{
|
||||
if (pud_large(*pud)) {
|
||||
if (pud_leaf(*pud)) {
|
||||
pud_clear(pud);
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -217,7 +217,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
|
|||
|
||||
pud = pud_offset(p4d, address);
|
||||
/* The user page tables do not use large mappings: */
|
||||
if (pud_large(*pud)) {
|
||||
if (pud_leaf(*pud)) {
|
||||
WARN_ON(1);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -344,7 +344,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
|
|||
static int emit_rsb_call(u8 **pprog, void *func, void *ip)
|
||||
{
|
||||
OPTIMIZER_HIDE_VAR(func);
|
||||
x86_call_depth_emit_accounting(pprog, func);
|
||||
ip += x86_call_depth_emit_accounting(pprog, func);
|
||||
return emit_patch(pprog, func, ip, 0xE8);
|
||||
}
|
||||
|
||||
|
|
|
@ -170,7 +170,7 @@ int relocate_restore_code(void)
|
|||
goto out;
|
||||
}
|
||||
pud = pud_offset(p4d, relocated_restore_code);
|
||||
if (pud_large(*pud)) {
|
||||
if (pud_leaf(*pud)) {
|
||||
set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -1082,7 +1082,7 @@ static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
|
|||
pmd_t *pmd_tbl;
|
||||
int i;
|
||||
|
||||
if (pud_large(*pud)) {
|
||||
if (pud_leaf(*pud)) {
|
||||
pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
|
||||
xen_free_ro_pages(pa, PUD_SIZE);
|
||||
return;
|
||||
|
@ -1863,7 +1863,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
|
|||
if (!pud_present(pud))
|
||||
return 0;
|
||||
pa = pud_val(pud) & PTE_PFN_MASK;
|
||||
if (pud_large(pud))
|
||||
if (pud_leaf(pud))
|
||||
return pa + (vaddr & ~PUD_MASK);
|
||||
|
||||
pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
|
||||
|
|
|
@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
 	ACPI_FREE(buffer.pointer);
 
 	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-	acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
-
+	status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+	if (ACPI_FAILURE(status)) {
+		acpi_os_printf("Could Not evaluate object %p\n",
+			       obj_handle);
+		return (AE_OK);
+	}
 	/*
 	 * Since this is a field unit, surround the output in braces
 	 */
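
The hunk above stops acpi_db_walk_for_fields() from touching the output buffer when evaluation failed; buffer.pointer is only valid after acpi_evaluate_object() succeeds. The pattern, reduced to a stand-alone sketch (obj_handle assumed to be a valid handle):

	struct acpi_buffer buffer = { ACPI_ALLOCATE_LOCAL_BUFFER, NULL };
	acpi_status status;

	status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
	if (ACPI_FAILURE(status))
		return (AE_OK);		/* skip this object, keep walking */

	/* only reached on success, so buffer.pointer is valid */
	ACPI_FREE(buffer.pointer);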
@@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = {
 	},
 };
 
-static const struct pci_device_id mv_pci_tbl[] = {
-	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
-	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
-	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
-	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
-	/* RocketRAID 1720/174x have different identifiers */
-	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
-	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
-	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
-
-	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
-	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
-	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
-	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
-	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
-
-	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
-
-	/* Adaptec 1430SA */
-	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
-
-	/* Marvell 7042 support */
-	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
-
-	/* Highpoint RocketRAID PCIe series */
-	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
-	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
-
-	{ }	/* terminate list */
-};
-
 static const struct mv_hw_ops mv5xxx_ops = {
 	.phy_errata		= mv5_phy_errata,
 	.enable_leds		= mv5_enable_leds,
@@ -4300,6 +4269,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
 static int mv_pci_device_resume(struct pci_dev *pdev);
 #endif
 
+static const struct pci_device_id mv_pci_tbl[] = {
+	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+	/* RocketRAID 1720/174x have different identifiers */
+	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+
+	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+
+	/* Adaptec 1430SA */
+	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+
+	/* Marvell 7042 support */
+	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+	/* Highpoint RocketRAID PCIe series */
+	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+	{ }	/* terminate list */
+};
+
 static struct pci_driver mv_pci_driver = {
 	.name			= DRV_NAME,
@@ -4312,6 +4311,7 @@ static struct pci_driver mv_pci_driver = {
 #endif
 
 };
+MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 
 /**
  * mv_print_info - Dump key info to kernel log for perusal.
@@ -4484,7 +4484,6 @@ static void __exit mv_exit(void)
 MODULE_AUTHOR("Brett Russ");
 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 MODULE_ALIAS("platform:" DRV_NAME);
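
Taken together, the three sata_mv hunks move mv_pci_tbl down into the PCI-specific part of the file, next to mv_pci_driver, and move its MODULE_DEVICE_TABLE() along with it; keeping a device ID table adjacent to its only users is the usual cure for a "defined but not used" warning in builds without PCI support (that motivation is inferred here, not stated in the diff). The resulting idiom, sketched with a hypothetical table name:

#ifdef CONFIG_PCI
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);	/* kept beside the table it names */
#endif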
@@ -957,8 +957,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
 
 	offset -= (idx * window_size);
 	idx++;
-	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
-		(long) (window_size - offset);
+	dist = min(size, window_size - offset);
 	memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
 
 	psource += dist;
@@ -1005,8 +1004,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
 	readl(mmio + PDC_DIMM_WINDOW_CTLR);
 	offset -= (idx * window_size);
 	idx++;
-	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
-		(long) (window_size - offset);
+	dist = min(size, window_size - offset);
 	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
 	writel(0x01, mmio + PDC_GENERAL_CTLR);
 	readl(mmio + PDC_GENERAL_CTLR);
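
Both pdc20621 hunks replace an open-coded, cast-heavy clamp with min(). Assuming offset < window_size (which the preceding windowing arithmetic arranges), the old ternary and the new expression pick the same chunk length; written out:

	/* copy at most "size" bytes, but never past the end of the window */
	if (offset + size <= window_size)
		dist = size;			/* whole request fits */
	else
		dist = window_size - offset;	/* truncate at window end */

The kernel's min() macro also type-checks both operands, which the old (long)/(s32) casts quietly bypassed.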
@@ -44,6 +44,7 @@ static bool fw_devlink_is_permissive(void);
 static void __fw_devlink_link_to_consumers(struct device *dev);
 static bool fw_devlink_drv_reg_done;
 static bool fw_devlink_best_effort;
+static struct workqueue_struct *device_link_wq;
 
 /**
  * __fwnode_link_add - Create a link between two fwnode_handles.
@@ -531,12 +532,26 @@ static void devlink_dev_release(struct device *dev)
 	/*
 	 * It may take a while to complete this work because of the SRCU
 	 * synchronization in device_link_release_fn() and if the consumer or
-	 * supplier devices get deleted when it runs, so put it into the "long"
-	 * workqueue.
+	 * supplier devices get deleted when it runs, so put it into the
+	 * dedicated workqueue.
 	 */
-	queue_work(system_long_wq, &link->rm_work);
+	queue_work(device_link_wq, &link->rm_work);
 }
 
+/**
+ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
+ */
+void device_link_wait_removal(void)
+{
+	/*
+	 * devlink removal jobs are queued in the dedicated work queue.
+	 * To be sure that all removal jobs are terminated, ensure that any
+	 * scheduled work has run to completion.
+	 */
+	flush_workqueue(device_link_wq);
+}
+EXPORT_SYMBOL_GPL(device_link_wait_removal);
+
 static struct class devlink_class = {
 	.name = "devlink",
 	.dev_groups = devlink_groups,
@@ -4090,9 +4105,14 @@ int __init devices_init(void)
 	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
 	if (!sysfs_dev_char_kobj)
 		goto char_kobj_err;
+	device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+	if (!device_link_wq)
+		goto wq_err;
 
 	return 0;
 
+wq_err:
+	kobject_put(sysfs_dev_char_kobj);
 char_kobj_err:
 	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
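
The three driver-core hunks above move devlink removal work off the shared system_long_wq onto a dedicated queue, so device_link_wait_removal() can flush exactly those jobs and nothing else. Reduced sketch of the pattern (names hypothetical):

static struct workqueue_struct *teardown_wq;	/* allocated at init */

static void queue_teardown(struct work_struct *work)
{
	queue_work(teardown_wq, work);		/* deferred release job */
}

static void wait_for_teardowns(void)
{
	flush_workqueue(teardown_wq);	/* returns after all queued jobs ran */
}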
@@ -112,7 +112,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
 	unsigned long *entry, *lower, *upper;
 	unsigned long lower_index, lower_last;
 	unsigned long upper_index, upper_last;
-	int ret;
+	int ret = 0;
 
 	lower = NULL;
 	upper = NULL;
@@ -145,7 +145,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
 			upper_index = max + 1;
 			upper_last = mas.last;
 
-			upper = kmemdup(&entry[max + 1],
+			upper = kmemdup(&entry[max - mas.index + 1],
 					((mas.last - max) *
 					 sizeof(unsigned long)),
 					map->alloc_flags);
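
The kmemdup() change corrects an indexing assumption: a maple-tree entry spanning registers [mas.index, mas.last] is backed by an array whose slot 0 holds register mas.index, so the first register to keep, max + 1, lives at slot max - mas.index + 1; the old &entry[max + 1] over-read whenever mas.index != 0. Illustrated with the hunk's own variables:

	/* keep registers (max, mas.last] from an entry starting at mas.index */
	unsigned long *first_kept = &entry[max - mas.index + 1];
	size_t n_kept = mas.last - max;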
@@ -244,7 +244,7 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
 	unsigned long lmin = min;
 	unsigned long lmax = max;
 	unsigned int r, v, sync_start;
-	int ret;
+	int ret = 0;
 	bool sync_needed = false;
 
 	map->cache_bypass = true;
@@ -758,11 +758,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
 
 int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 {
+	bdaddr_t bdaddr_swapped;
 	struct sk_buff *skb;
 	int err;
 
-	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
-				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+	baswap(&bdaddr_swapped, bdaddr);
+
+	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
+				&bdaddr_swapped, HCI_EV_VENDOR,
+				HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
 		err = PTR_ERR(skb);
 		bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
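
The fix above sends the device address byte-swapped: bdaddr_t holds addresses in little-endian order, while this vendor command evidently expects them in the printed, big-endian order. A stand-alone illustration of baswap():

	bdaddr_t in = {{ 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23 }};	/* 23:45:67:89:ab:cd */
	bdaddr_t out;

	baswap(&out, &in);	/* out.b[] = { 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd } */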
@@ -7,7 +7,6 @@
  *
  *  Copyright (C) 2007 Texas Instruments, Inc.
  *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
- *  Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  *  Acknowledgements:
  *  This file is based on hci_ll.c, which was...
@@ -226,6 +225,7 @@ struct qca_serdev {
 	struct qca_power *bt_power;
 	u32 init_speed;
 	u32 oper_speed;
+	bool bdaddr_property_broken;
 	const char *firmware_name;
 };
 
@@ -1825,6 +1825,7 @@ static int qca_setup(struct hci_uart *hu)
 	const char *firmware_name = qca_get_firmware_name(hu);
 	int ret;
 	struct qca_btsoc_version ver;
+	struct qca_serdev *qcadev;
 	const char *soc_name;
 
 	ret = qca_check_speeds(hu);
@@ -1882,16 +1883,11 @@ retry:
 	case QCA_WCN6750:
 	case QCA_WCN6855:
 	case QCA_WCN7850:
+		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
 
-		/* Set BDA quirk bit for reading BDA value from fwnode property
-		 * only if that property exist in DT.
-		 */
-		if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
-			set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-			bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
-		} else {
-			bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
-		}
+		qcadev = serdev_device_get_drvdata(hu->serdev);
+		if (qcadev->bdaddr_property_broken)
+			set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
 
 		hci_set_aosp_capable(hdev);
@@ -2264,6 +2260,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
 	if (!qcadev->oper_speed)
 		BT_DBG("UART will pick default operating speed");
 
+	qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
+			"qcom,local-bd-address-broken");
+
 	if (data)
 		qcadev->btsoc_type = data->soc_type;
 	else
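
The probe hunk reads an optional firmware property into the new bdaddr_property_broken flag, which the qca_setup() hunk above turns into HCI_QUIRK_BDADDR_PROPERTY_BROKEN. The optional-boolean idiom, sketched with a hypothetical driver struct:

struct my_priv {
	bool quirk_broken_addr;
};

static void read_quirks(struct device *dev, struct my_priv *priv)
{
	/* an absent property simply reads as false; no error path needed */
	priv->quirk_broken_addr =
		device_property_read_bool(dev, "qcom,local-bd-address-broken");
}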
@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
 		return -ENOMEM;
 
 	chain = mock_chain(NULL, f, 1);
-	if (!chain)
+	if (chain)
+		dma_fence_enable_sw_signaling(chain);
+	else
 		err = -ENOMEM;
 
-	dma_fence_enable_sw_signaling(chain);
-
 	dma_fence_signal(f);
 	dma_fence_put(f);
@@ -24,6 +24,8 @@ static bool efi_noinitrd;
 static bool efi_nosoftreserve;
 static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
 
+int efi_mem_encrypt;
+
 bool __pure __efi_soft_reserve_enabled(void)
 {
 	return !efi_nosoftreserve;
@@ -75,6 +77,12 @@ efi_status_t efi_parse_options(char const *cmdline)
 		efi_noinitrd = true;
 	} else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
 		efi_no5lvl = true;
+	} else if (IS_ENABLED(CONFIG_ARCH_HAS_MEM_ENCRYPT) &&
+		   !strcmp(param, "mem_encrypt") && val) {
+		if (parse_option_str(val, "on"))
+			efi_mem_encrypt = 1;
+		else if (parse_option_str(val, "off"))
+			efi_mem_encrypt = -1;
 	} else if (!strcmp(param, "efi") && val) {
 		efi_nochunk = parse_option_str(val, "nochunk");
 		efi_novamap |= parse_option_str(val, "novamap");
@@ -37,8 +37,8 @@ extern bool efi_no5lvl;
 extern bool efi_nochunk;
 extern bool efi_nokaslr;
 extern int efi_loglevel;
+extern int efi_mem_encrypt;
 extern bool efi_novamap;
 
 extern const efi_system_table_t *efi_system_table;
 
 typedef union efi_dxe_services_table efi_dxe_services_table_t;
@@ -238,6 +238,15 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
 	rounded_end = roundup(start + size, EFI_PAGE_SIZE);
 
 	if (memattr != NULL) {
+		status = efi_call_proto(memattr, set_memory_attributes,
+					rounded_start,
+					rounded_end - rounded_start,
+					EFI_MEMORY_RO);
+		if (status != EFI_SUCCESS) {
+			efi_warn("Failed to set EFI_MEMORY_RO attribute\n");
+			return status;
+		}
+
 		status = efi_call_proto(memattr, clear_memory_attributes,
 					rounded_start,
 					rounded_end - rounded_start,
@@ -816,7 +825,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
 
 	*kernel_entry = addr + entry;
 
-	return efi_adjust_memory_range_protection(addr, kernel_total_size);
+	return efi_adjust_memory_range_protection(addr, kernel_text_size);
 }
 
 static void __noreturn enter_kernel(unsigned long kernel_addr,
@@ -888,6 +897,9 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
 		}
 	}
 
+	if (efi_mem_encrypt > 0)
+		hdr->xloadflags |= XLF_MEM_ENCRYPTION;
+
 	status = efi_decompress_kernel(&kernel_entry);
 	if (status != EFI_SUCCESS) {
 		efi_err("Failed to decompress kernel\n");
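
The stub hunks record an explicit mem_encrypt=on/off from the command line as a tri-state, and only an explicit "on" later sets XLF_MEM_ENCRYPTION on the boot params. The convention, as a stand-alone sketch (function name hypothetical; parse_option_str() is the kernel's comma-separated option matcher):

static int parse_mem_encrypt(const char *val)
{
	if (parse_option_str(val, "on"))
		return 1;	/* explicitly enabled */
	if (parse_option_str(val, "off"))
		return -1;	/* explicitly disabled */
	return 0;		/* unspecified: leave the default */
}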
@@ -655,6 +655,25 @@ static u32 line_event_id(int level)
 		GPIO_V2_LINE_EVENT_FALLING_EDGE;
 }
 
+static inline char *make_irq_label(const char *orig)
+{
+	char *new;
+
+	if (!orig)
+		return NULL;
+
+	new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+
+	return new;
+}
+
+static inline void free_irq_label(const char *label)
+{
+	kfree(label);
+}
+
 #ifdef CONFIG_HTE
 
 static enum hte_return process_hw_ts_thread(void *p)
@@ -942,6 +961,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
 {
 	unsigned long irqflags;
 	int ret, level, irq;
+	char *label;
 
 	/* try hardware */
 	ret = gpiod_set_debounce(line->desc, debounce_period_us);
@@ -964,11 +984,17 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
 		if (irq < 0)
 			return -ENXIO;
 
+		label = make_irq_label(line->req->label);
+		if (IS_ERR(label))
+			return -ENOMEM;
+
 		irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
 		ret = request_irq(irq, debounce_irq_handler, irqflags,
-				  line->req->label, line);
-		if (ret)
+				  label, line);
+		if (ret) {
+			free_irq_label(label);
 			return ret;
+		}
 		line->irq = irq;
 	} else {
 		ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
@@ -1013,7 +1039,7 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
 static void edge_detector_stop(struct line *line)
 {
 	if (line->irq) {
-		free_irq(line->irq, line);
+		free_irq_label(free_irq(line->irq, line));
 		line->irq = 0;
 	}
@@ -1038,6 +1064,7 @@ static int edge_detector_setup(struct line *line,
 	unsigned long irqflags = 0;
 	u64 eflags;
 	int irq, ret;
+	char *label;
 
 	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
 	if (eflags && !kfifo_initialized(&line->req->events)) {
@@ -1074,11 +1101,17 @@ static int edge_detector_setup(struct line *line,
 			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
 	irqflags |= IRQF_ONESHOT;
 
+	label = make_irq_label(line->req->label);
+	if (IS_ERR(label))
+		return PTR_ERR(label);
+
 	/* Request a thread to read the events */
 	ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
-				   irqflags, line->req->label, line);
-	if (ret)
+				   irqflags, label, line);
+	if (ret) {
+		free_irq_label(label);
 		return ret;
+	}
 
 	line->irq = irq;
 	return 0;
@@ -1943,7 +1976,7 @@ static void lineevent_free(struct lineevent_state *le)
 		blocking_notifier_chain_unregister(&le->gdev->device_notifier,
 						   &le->device_unregistered_nb);
 	if (le->irq)
-		free_irq(le->irq, le);
+		free_irq_label(free_irq(le->irq, le));
 	if (le->desc)
 		gpiod_free(le->desc);
 	kfree(le->label);
@@ -2091,6 +2124,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	int fd;
 	int ret;
 	int irq, irqflags = 0;
+	char *label;
 
 	if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
 		return -EFAULT;
@@ -2175,15 +2209,23 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	if (ret)
 		goto out_free_le;
 
+	label = make_irq_label(le->label);
+	if (IS_ERR(label)) {
+		ret = PTR_ERR(label);
+		goto out_free_le;
+	}
+
 	/* Request a thread to read the events */
 	ret = request_threaded_irq(irq,
 				   lineevent_irq_handler,
 				   lineevent_irq_thread,
 				   irqflags,
-				   le->label,
+				   label,
 				   le);
-	if (ret)
+	if (ret) {
+		free_irq_label(label);
 		goto out_free_le;
+	}
 
 	le->irq = irq;
 
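
All the free_irq_label(free_irq(...)) call sites above lean on free_irq() returning the devname pointer originally passed to request_irq(), so the duplicated label needs no extra storage. The labels are duplicated with '/' rewritten to ':' apparently because the IRQ name ends up in proc/sysfs path components, where a slash is invalid (that rationale is inferred from the helpers, not stated in the diff). The ownership hand-off, sketched with placeholder irq/dev_id:

	const void *name;

	name = free_irq(irq, dev_id);	/* returns the devname given to request_irq() */
	kfree(name);			/* we allocated it with kstrdup_and_replace() */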
@@ -1369,6 +1369,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 void amdgpu_driver_release_kms(struct drm_device *dev);
 
 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
+int amdgpu_device_prepare(struct drm_device *dev);
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1549,6 +1549,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 	} else {
 		pr_info("switched off\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		amdgpu_device_prepare(dev);
 		amdgpu_device_suspend(dev, true);
 		amdgpu_device_cache_pci_state(pdev);
 		/* Shut down the device */
@@ -4094,6 +4095,43 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
 /*
  * Suspend & resume.
  */
+/**
+ * amdgpu_device_prepare - prepare for device suspend
+ *
+ * @dev: drm dev pointer
+ *
+ * Prepare to put the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int amdgpu_device_prepare(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	int i, r;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	/* Evict the majority of BOs before starting suspend sequence */
+	r = amdgpu_device_evict_resources(adev);
+	if (r)
+		return r;
+
+	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
+		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
+			continue;
+		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_device_suspend - initiate device suspend
  *
@@ -4114,11 +4152,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
 	adev->in_suspend = true;
 
-	/* Evict the majority of BOs before grabbing the full access */
-	r = amdgpu_device_evict_resources(adev);
-	if (r)
-		return r;
-
 	if (amdgpu_sriov_vf(adev)) {
 		amdgpu_virt_fini_data_exchange(adev);
 		r = amdgpu_virt_request_full_gpu(adev, false);
@@ -2386,8 +2386,9 @@ static int amdgpu_pmops_prepare(struct device *dev)
 	/* Return a positive number here so
 	 * DPM_FLAG_SMART_SUSPEND works properly
 	 */
-	if (amdgpu_device_supports_boco(drm_dev))
-		return pm_runtime_suspended(dev);
+	if (amdgpu_device_supports_boco(drm_dev) &&
+	    pm_runtime_suspended(dev))
+		return 1;
 
 	/* if we will not support s3 or s2i for the device
 	 *  then skip suspend
@@ -2396,7 +2397,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
 	    !amdgpu_acpi_is_s3_active(adev))
 		return 1;
 
-	return 0;
+	return amdgpu_device_prepare(drm_dev);
 }
 
 static void amdgpu_pmops_complete(struct device *dev)
@@ -2598,6 +2599,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	if (amdgpu_device_supports_boco(drm_dev))
 		adev->mp1_state = PP_MP1_STATE_UNLOAD;
 
+	ret = amdgpu_device_prepare(drm_dev);
+	if (ret)
+		return ret;
 	ret = amdgpu_device_suspend(drm_dev, false);
 	if (ret) {
 		adev->in_runpm = false;
@@ -1179,9 +1179,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
 		dto_params.timing = &pipe_ctx->stream->timing;
 		dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
 		if (dccg) {
-			dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
 			dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
 			dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+			if (dccg && dccg->funcs->set_dtbclk_dto)
+				dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
 		}
 	} else if (dccg && dccg->funcs->disable_symclk_se) {
 		dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
@@ -2728,18 +2728,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
 	}
 
 	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
-		dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
-		dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
-
-		phyd32clk = get_phyd32clk_src(link);
-		dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
-
 		dto_params.otg_inst = tg->inst;
 		dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
 		dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
 		dto_params.timing = &pipe_ctx->stream->timing;
 		dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
 		dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+		dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+		dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+
+		phyd32clk = get_phyd32clk_src(link);
+		dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
 	} else {
 	}
 	if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
@@ -295,6 +295,7 @@ struct amd_ip_funcs {
 	int (*hw_init)(void *handle);
 	int (*hw_fini)(void *handle);
 	void (*late_fini)(void *handle);
+	int (*prepare_suspend)(void *handle);
 	int (*suspend)(void *handle);
 	int (*resume)(void *handle);
 	bool (*is_idle)(void *handle);
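
The new prepare_suspend member is the optional per-IP hook that amdgpu_device_prepare(), added earlier in this diff, walks before suspend. The optional-callback iteration, reduced to a stand-alone sketch (struct and names simplified from struct amd_ip_funcs):

struct ip_funcs {
	int (*prepare_suspend)(void *handle);	/* optional, may be NULL */
};

static int prepare_all(struct ip_funcs **blocks, int n, void *handle)
{
	int i, r;

	for (i = 0; i < n; i++) {
		if (!blocks[i]->prepare_suspend)
			continue;	/* hook not implemented by this block */
		r = blocks[i]->prepare_suspend(handle);
		if (r)
			return r;	/* first failure aborts the prepare phase */
	}
	return 0;
}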