Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Commit b5f06f64e2
The goal of this is to allow tasks that want to protect sensitive information, against e.g. the recently found snoop-assisted data sampling vulnerabilities, to flush their L1D on being switched out. This protects their data from being snooped or leaked via side channels after the task has context switched out.

This could also be used to wipe L1D when an untrusted task is switched in, but that's not a really well defined scenario while the opt-in variant is clearly defined. The mechanism is disabled by default and can be enabled on the kernel command line.

Prepare for the actual prctl based opt-in:

 1) Provide the necessary setup functionality similar to the other mitigations and enable the static branch when the command line option is set and the CPU provides support for hardware assisted L1D flushing. Software based L1D flush is not supported because it's CPU model specific and not really well defined.

    This does not come with a sysfs file like the other mitigations because it is not bound to any specific vulnerability. Support has to be queried via the prctl(2) interface.

 2) Add TIF_SPEC_L1D_FLUSH next to TIF_SPEC_IB so the two bits can be mangled into the mm pointer in one go, which allows the existing mechanism in switch_mm() for the conditional IBPB speculation barrier to be reused efficiently.

 3) Add the L1D flush specific functionality which flushes L1D when the outgoing task opted in. Also check whether the incoming task has requested L1D flush and if so validate that it is not accidentally running on an SMT sibling, as this makes the whole exercise moot because SMT siblings share L1D, which opens tons of other attack vectors. If that happens, schedule task work which signals the incoming task on return to user/guest with SIGBUS, as this is part of the paranoid L1D flush contract.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Balbir Singh <sblbir@amazon.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20210108121056.21940-1-sblbir@amazon.com
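Editorial note: for context, a minimal userspace sketch of the opt-in this series builds towards. It assumes the prctl(2) interface added by the follow-up patches (PR_SPEC_L1D_FLUSH under PR_SET_SPECULATION_CTRL/PR_GET_SPECULATION_CTRL, which needs recent kernel headers) and a kernel booted with the l1d_flush=on command-line option; it is illustrative only and not part of this commit.

    /* Hedged sketch: opt this task in to L1D flush on context switch.
     * Assumes PR_SPEC_L1D_FLUSH from the later patches in this series and
     * a kernel booted with l1d_flush=on. */
    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>    /* PR_SPEC_L1D_FLUSH needs recent headers */

    int main(void)
    {
            /* Query support: on old kernels this fails; otherwise it returns
             * a PR_SPEC_* status word. PR_SPEC_PRCTL means the opt-in is
             * available (i.e. l1d_flush=on was set and the CPU supports it). */
            int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, 0, 0, 0);
            if (state < 0 || !(state & PR_SPEC_PRCTL)) {
                    fprintf(stderr, "L1D flush opt-in not available (state=%d)\n", state);
                    return 1;
            }

            /* Opt in: flush L1D whenever this task is scheduled out. Per the
             * commit message, the task gets SIGBUS if it ends up running on
             * an SMT sibling. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
                      PR_SPEC_ENABLE, 0, 0) < 0) {
                    perror("PR_SET_SPECULATION_CTRL");
                    return 1;
            }

            /* ... work on sensitive data here ... */
            return 0;
    }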
365 lines
9.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
        mov $(nr/2), reg; \
771: \
        ANNOTATE_INTRA_FUNCTION_CALL; \
        call 772f; \
773: /* speculation trap */ \
        UNWIND_HINT_EMPTY; \
        pause; \
        lfence; \
        jmp 773b; \
772: \
        ANNOTATE_INTRA_FUNCTION_CALL; \
        call 774f; \
775: /* speculation trap */ \
        UNWIND_HINT_EMPTY; \
        pause; \
        lfence; \
        jmp 775b; \
774: \
        add $(BITS_PER_LONG/8) * 2, sp; \
        dec reg; \
        jnz 771b;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
        .Lannotate_\@:
        .pushsection .discard.retpoline_safe
        _ASM_PTR .Lannotate_\@
        .popsection
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
                __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
                __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
#else
        jmp *%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
                __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
                __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
#else
        call *%\reg
#endif
.endm

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
        __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
#endif
.endm
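
/*
 * Editorial note, not part of the upstream header: a hedged usage sketch.
 * The context-switch assembly refills the RSB with this macro, gated on a
 * CPU feature flag so alternatives can patch it out, along the lines of:
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * The first argument is a scratch register, and RSB_CLEAR_LOOPS iterations
 * (two calls each) overwrite all 32 return stack buffer entries.
 */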

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE \
        "999:\n\t" \
        ".pushsection .discard.retpoline_safe\n\t" \
        _ASM_PTR " 999b\n\t" \
        ".popsection\n\t"

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC \
        ALTERNATIVE_2( \
        ANNOTATE_RETPOLINE_SAFE \
        "call *%[thunk_target]\n", \
        "call __x86_indirect_thunk_%V[thunk_target]\n", \
        X86_FEATURE_RETPOLINE, \
        "lfence;\n" \
        ANNOTATE_RETPOLINE_SAFE \
        "call *%[thunk_target]\n", \
        X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC \
        ALTERNATIVE_2( \
        ANNOTATE_RETPOLINE_SAFE \
        "call *%[thunk_target]\n", \
        " jmp 904f;\n" \
        " .align 16\n" \
        "901: call 903f;\n" \
        "902: pause;\n" \
        " lfence;\n" \
        " jmp 902b;\n" \
        " .align 16\n" \
        "903: lea 4(%%esp), %%esp;\n" \
        " pushl %[thunk_target];\n" \
        " ret;\n" \
        " .align 16\n" \
        "904: call 901b;\n", \
        X86_FEATURE_RETPOLINE, \
        "lfence;\n" \
        ANNOTATE_RETPOLINE_SAFE \
        "call *%[thunk_target]\n", \
        X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
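
/*
 * Editorial note, not part of the upstream header: a hedged sketch of how
 * C code uses CALL_NOSPEC/THUNK_TARGET. The call target is passed through
 * the [thunk_target] operand and the call site must list the registers the
 * callee may clobber, e.g. on x86_64 (fn stands for some function pointer):
 *
 *	unsigned long ret;
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn)
 *		     : "memory", "cc", "rcx", "rdx", "rsi", "rdi",
 *		       "r8", "r9", "r10", "r11");
 *
 * With CONFIG_RETPOLINE the alternative rewrites this into a call through
 * __x86_indirect_thunk_<reg> (or an lfence-guarded indirect call on AMD);
 * without it, it stays a plain "call *%[thunk_target]".
 */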

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
        SPECTRE_V2_NONE,
        SPECTRE_V2_RETPOLINE_GENERIC,
        SPECTRE_V2_RETPOLINE_AMD,
        SPECTRE_V2_IBRS_ENHANCED,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
        SPECTRE_V2_USER_NONE,
        SPECTRE_V2_USER_STRICT,
        SPECTRE_V2_USER_STRICT_PREFERRED,
        SPECTRE_V2_USER_PRCTL,
        SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
        SPEC_STORE_BYPASS_NONE,
        SPEC_STORE_BYPASS_DISABLE,
        SPEC_STORE_BYPASS_PRCTL,
        SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
        asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
                : : "c" (msr),
                    "a" ((u32)val),
                    "d" ((u32)(val >> 32)),
                    [feature] "i" (feature)
                : "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
        u64 val = PRED_CMD_IBPB;

        alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start() \
do { \
        u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
 \
        preempt_disable(); \
        alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
                              X86_FEATURE_USE_IBRS_FW); \
} while (0)

#define firmware_restrict_branch_speculation_end() \
do { \
        u64 val = x86_spec_ctrl_base; \
 \
        alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
                              X86_FEATURE_USE_IBRS_FW); \
        preempt_enable(); \
} while (0)
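
/*
 * Editorial note, not part of the upstream header: these two macros are
 * used as a bracketing pair around calls into firmware, roughly:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = some_firmware_service(...);	(placeholder for the real call)
 *	firmware_restrict_branch_speculation_end();
 *
 * At the time of this commit the x86 EFI code wires them up via
 * arch_efi_call_virt_setup()/arch_efi_call_virt_teardown(). start()
 * disables preemption before setting SPEC_CTRL_IBRS so the task cannot be
 * scheduled away with IBRS raised; end() restores x86_spec_ctrl_base and
 * re-enables preemption.
 */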

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
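
/*
 * Editorial note, not part of the upstream header: switch_mm_cond_l1d_flush
 * is the static key added by this commit. The mitigation setup code enables
 * it when the kernel is booted with "l1d_flush=on" and the CPU offers
 * hardware-assisted L1D flushing; the companion hunk in arch/x86/mm/tlb.c
 * then consults it on context switch, conceptually:
 *
 *	if (static_branch_unlikely(&switch_mm_cond_l1d_flush) &&
 *	    outgoing_task_opted_in)	(TIF_SPEC_L1D_FLUSH, carried in the
 *					 mm pointer bits)
 *		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
 *
 * The condition and helper names above are a sketch; see the tlb.c part of
 * this commit for the real logic, including the SIGBUS path when the
 * incoming task runs on an SMT sibling.
 */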

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
        static const u16 ds = __KERNEL_DS;

        /*
         * Has to be the memory-operand variant because only that
         * guarantees the CPU buffer flush functionality according to
         * documentation. The register-operand variant does not.
         * Works with any segment selector, but a valid writable
         * data segment is the fastest variant.
         *
         * "cc" clobber is required because VERW modifies ZF.
         */
        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
        if (static_branch_likely(&mds_user_clear))
                mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
        if (static_branch_likely(&mds_idle_clear))
                mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rcx,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rcx for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE 17
#  define RETPOLINE_RCX_BPF_JIT() \
do { \
        EMIT1_off32(0xE8, 7);          /* callq do_rop */ \
        /* spec_trap: */ \
        EMIT2(0xF3, 0x90);             /* pause */ \
        EMIT3(0x0F, 0xAE, 0xE8);       /* lfence */ \
        EMIT2(0xEB, 0xF9);             /* jmp spec_trap */ \
        /* do_rop: */ \
        EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */ \
        EMIT1(0xC3);                   /* retq */ \
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT() \
do { \
        EMIT1_off32(0xE8, 7);          /* call do_rop */ \
        /* spec_trap: */ \
        EMIT2(0xF3, 0x90);             /* pause */ \
        EMIT3(0x0F, 0xAE, 0xE8);       /* lfence */ \
        EMIT2(0xEB, 0xF9);             /* jmp spec_trap */ \
        /* do_rop: */ \
        EMIT3(0x89, 0x14, 0x24);       /* mov %edx,(%esp) */ \
        EMIT1(0xC3);                   /* ret */ \
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE 2
#  define RETPOLINE_RCX_BPF_JIT() \
        EMIT2(0xFF, 0xE1);             /* jmp *%rcx */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT() \
        EMIT2(0xFF, 0xE2)              /* jmp *%edx */
# endif
#endif
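
/*
 * Editorial note, not part of the upstream header: the x86 eBPF JIT
 * (arch/x86/net/bpf_jit_comp.c, and bpf_jit_comp32.c for the EDX variant)
 * uses these for the indirect jump of a BPF tail call. Once the JIT has
 * loaded the target program address into %rcx (%edx on 32-bit) it emits,
 * roughly:
 *
 *	RETPOLINE_RCX_BPF_JIT();
 *
 * RETPOLINE_RCX_BPF_JIT_SIZE tells the JIT how many bytes the sequence
 * occupies so jump offsets can be computed up front.
 */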

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */