arm64: use XPACLRI to strip PAC
Currently we strip the PAC from pointers using C code, which requires generating bitmasks, and conditionally clearing/setting bits depending on bit 55. We can do better by using XPACLRI directly.

When the logic was originally written to strip PACs from user pointers, contemporary toolchains used for the kernel had assemblers which were unaware of the PAC instructions. As stripping the PAC from userspace pointers required unconditional clearing of a fixed set of bits (which could be performed with a single instruction), it was simpler to implement the masking in C than it was to make use of XPACI or XPACLRI.

When support for in-kernel pointer authentication was added, the stripping logic was extended to cover TTBR1 pointers, requiring several instructions to handle whether to clear/set bits dependent on bit 55 of the pointer.

This patch simplifies the stripping of PACs by using XPACLRI directly, as contemporary toolchains do within __builtin_return_address(). This saves a number of instructions, especially where __builtin_return_address() does not implicitly strip the PAC but is heavily used (e.g. with tracepoints). As the kernel might be compiled with an assembler without knowledge of XPACLRI, it is assembled using the 'HINT #7' alias, which results in an identical opcode.

At the same time, I've split ptrauth_strip_insn_pac() into ptrauth_strip_user_insn_pac() and ptrauth_strip_kernel_insn_pac() helpers so that we can avoid unnecessary PAC stripping when pointer authentication is not in use in userspace or kernel respectively.

The underlying xpaclri() macro uses inline assembly which clobbers x30. The clobber causes the compiler to save/restore the original x30 value in a frame record (protected with PACIASP and AUTIASP when in-kernel authentication is enabled), so this does not provide a gadget to alter the return address. Similarly this does not adversely affect unwinding due to the presence of the frame record.

The ptrauth_user_pac_mask() and ptrauth_kernel_pac_mask() are exported from the kernel in ptrace and core dumps, so these are retained. A subsequent patch will move them out of <asm/compiler.h>.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230412160134.306148-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
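To make the saving concrete, the following is an illustrative user-space sketch (not kernel code, and not part of this commit) contrasting the two stripping strategies. The names VA_BITS, clear_pac_with_masks() and clear_pac_with_xpaclri() are invented for the example, and a fixed 48-bit VA size stands in for the kernel's vabits_actual. Since HINT #7 executes as a NOP on CPUs without pointer authentication, the program is safe to build and run on any AArch64 machine.

/*
 * Illustrative user-space sketch of the two PAC-stripping strategies
 * discussed above. Not kernel code: VA_BITS and the helper names are
 * invented, and 48 bits stands in for the kernel's vabits_actual.
 */
#include <stdio.h>
#include <stdint.h>

#define VA_BITS			48
#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define BIT_ULL(n)		(1ULL << (n))

#define user_pac_mask()		GENMASK_ULL(54, VA_BITS)
#define kernel_pac_mask()	GENMASK_ULL(63, VA_BITS)

/* Old approach: clear/set the PAC bits based on bit 55 (cf. ptrauth_clear_pac()). */
static uint64_t clear_pac_with_masks(uint64_t ptr)
{
	return (ptr & BIT_ULL(55)) ? (ptr | kernel_pac_mask())
				   : (ptr & ~user_pac_mask());
}

/* New approach: let the CPU strip the PAC from x30 via XPACLRI (HINT #7). */
static uint64_t clear_pac_with_xpaclri(uint64_t ptr)
{
	register uint64_t x30_val asm("x30") = ptr;

	asm("hint #7\n" : "+r" (x30_val));	/* XPACLRI alias */
	return x30_val;
}

int main(void)
{
	/* A fake TTBR1 pointer with garbage where a PAC would live. */
	uint64_t signed_ptr = 0xa5ff000012345678ULL;

	printf("masks  : %016llx\n",
	       (unsigned long long)clear_pac_with_masks(signed_ptr));
	printf("xpaclri: %016llx\n",
	       (unsigned long long)clear_pac_with_xpaclri(signed_ptr));
	return 0;
}

Compiled for AArch64, the mask-based helper needs a test on bit 55 plus mask/select arithmetic, whereas the XPACLRI-based helper is a single HINT operating on x30, which is the reduction in instruction count the commit message refers to.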
parent 9df3f5082f
commit ca708599ca

5 changed files with 28 additions and 16 deletions
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -15,15 +15,33 @@
 #define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
 #define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
 
-/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
-#define ptrauth_clear_pac(ptr)						\
-	((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) :	\
-	 (ptr & ~ptrauth_user_pac_mask()))
+#define xpaclri(ptr)							\
+({									\
+	register unsigned long __xpaclri_ptr asm("x30") = (ptr);	\
+									\
+	asm(								\
+	ARM64_ASM_PREAMBLE						\
+	"	hint	#7\n"						\
+	: "+r" (__xpaclri_ptr));					\
+									\
+	__xpaclri_ptr;							\
+})
+
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_strip_kernel_insn_pac(ptr)	xpaclri(ptr)
+#else
+#define ptrauth_strip_kernel_insn_pac(ptr)	(ptr)
+#endif
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define ptrauth_strip_user_insn_pac(ptr)	xpaclri(ptr)
+#else
+#define ptrauth_strip_user_insn_pac(ptr)	(ptr)
+#endif
 
-#if defined(CONFIG_ARM64_PTR_AUTH_KERNEL) && \
-    !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
+#if !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
 #define __builtin_return_address(val)					\
-	(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+	(void *)(ptrauth_strip_kernel_insn_pac((unsigned long)__builtin_return_address(val)))
 #endif
 
 #endif /* __ASM_COMPILER_H */
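For reference, a minimal hypothetical caller (invented for illustration, not part of this patch) showing how the two new helpers are meant to be chosen: the kernel variant for TTBR1 instruction pointers such as in-kernel return addresses, the user variant for EL0 pointers. example_print_lr() and its behaviour are assumptions, not existing kernel code.

/* Hypothetical usage sketch; assumes the usual arm64 kernel headers. */
#include <linux/printk.h>
#include <asm/ptrace.h>

static void example_print_lr(struct pt_regs *regs)
{
	unsigned long lr = regs->regs[30];

	/* Pick the helper that matches the translation regime of 'lr'. */
	if (user_mode(regs))
		lr = ptrauth_strip_user_insn_pac(lr);
	else
		lr = ptrauth_strip_kernel_insn_pac(lr);

	pr_info("lr : %016lx\n", lr);
}

Return addresses obtained via __builtin_return_address() do not need an explicit call, since the hunk above wraps the builtin to strip the PAC whenever the compiler does not already do so.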
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -97,11 +97,6 @@ extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
 				    unsigned long enabled);
 extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
 
-static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
-{
-	return ptrauth_clear_pac(ptr);
-}
-
 static __always_inline void ptrauth_enable(void)
 {
 	if (!system_supports_address_auth())
@@ -133,7 +128,6 @@ static __always_inline void ptrauth_enable(void)
 #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
 #define ptrauth_set_enabled_keys(tsk, keys, enabled)	(-EINVAL)
 #define ptrauth_get_enabled_keys(tsk)	(-EINVAL)
-#define ptrauth_strip_insn_pac(lr)	(lr)
 #define ptrauth_suspend_exit()
 #define ptrauth_thread_init_user()
 #define ptrauth_thread_switch_user(tsk)
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -38,7 +38,7 @@ user_backtrace(struct frame_tail __user *tail,
 	if (err)
 		return NULL;
 
-	lr = ptrauth_strip_insn_pac(buftail.lr);
+	lr = ptrauth_strip_user_insn_pac(buftail.lr);
 
 	perf_callchain_store(entry, lr);
 
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -217,7 +217,7 @@ void __show_regs(struct pt_regs *regs)
 
 	if (!user_mode(regs)) {
 		printk("pc : %pS\n", (void *)regs->pc);
-		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
+		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
 	} else {
 		printk("pc : %016llx\n", regs->pc);
 		printk("lr : %016llx\n", lr);
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -120,7 +120,7 @@ unwind_next(struct unwind_state *state)
 	if (err)
 		return err;
 
-	state->pc = ptrauth_strip_insn_pac(state->pc);
+	state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
 
 	return unwind_recover_return_address(state);
 }