linux-stable/arch/arm64/kernel/syscall.c
commit f130ac0ae4 ("arm64: syscall: unmask DAIF earlier for SVCs")
Author: Mark Rutland <mark.rutland@arm.com>
Date:   2023-08-11 12:23:48 +01:00
For a number of historical reasons, when handling SVCs we don't unmask
DAIF in el0_svc() or el0_svc_compat(), and instead do so later in
el0_svc_common(). This is unfortunate and makes it harder to make
changes to the DAIF management in entry-common.c as we'd like to do as
cleanup and preparation for FEAT_NMI support. We can move the DAIF
unmasking to entry-common.c as long as we also hoist the
fp_user_discard() logic, as reasoned below.
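
For context, the pre-patch shape: the unmask sat in the middle of
el0_svc_common(), after the thread flags and the pt_regs fields had
already been sampled (an abridged sketch, not the verbatim pre-patch
code):

  static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                             const syscall_fn_t syscall_table[])
  {
          unsigned long flags = read_thread_flags();

          regs->orig_x0 = regs->regs[0];
          regs->syscallno = scno;

          /* ... BTI note elided ... */
          local_daif_restore(DAIF_PROCCTX);     /* DAIF unmasked only here */

          /* ... MTE fault check, tracing, invoke_syscall(), etc. ... */
  }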

We converted the syscall trace logic from assembly to C in commit:

  f37099b699 ("arm64: convert syscall trace logic to C")

... which was intended to have no functional change, and mirrored the
existing assembly logic to avoid the risk of any functional regression.

With the logic in C, it's clear that there is currently no reason to
unmask DAIF so late within el0_svc_common():

* The thread flags are read prior to unmasking DAIF, but are not
  consumed until after DAIF is unmasked, and we don't perform a
  read-modify-write sequence of the thread flags for which we might need
  to serialize against an IPI modifying the flags. Similarly, for any
  thread flags set by other threads, whether DAIF is masked or not has
  no impact.

  The read_thread_flags() helper performs a single-copy-atomic read of
  the flags, and so this read can safely be moved after unmasking DAIF
  (see the sketch after this list).

* The pt_regs::orig_x0 and pt_regs::syscallno fields are neither
  consumed nor modified by the handler for any DAIF exception (e.g.
  these do not exist in the `perf_event_arm_regs` enum and are not
  sampled by perf in its IRQ handler).

  Thus, the manipulation of pt_regs::orig_x0 and pt_regs::syscallno can
  safely be moved after unmasking DAIF.
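
For reference, read_thread_flags() is a thin wrapper around a
READ_ONCE() of the thread_info flags word, which is what makes the
single flag read above safe to move relative to DAIF unmasking.
Roughly, per include/linux/thread_info.h:

  static __always_inline unsigned long read_thread_flags(void)
  {
          return READ_ONCE(current_thread_info()->flags);
  }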

Given the above, we can safely hoist unmasking of DAIF out of
el0_svc_common(), and into its immediate callers: do_el0_svc() and
do_el0_svc_compat(). Further:

* In do_el0_svc(), we sample the syscall number from
  pt_regs::regs[8]. This is not modified by the handler for any DAIF
  exception, and thus can safely be moved after unmasking DAIF.

  As fp_user_discard() operates on the live FP/SVE/SME register state,
  this needs to occur before we clear DAIF.IF, as interrupts could
  result in preemption which would cause this state to become foreign.
  As fp_user_discard() is the first function called within do_el0_svc(),
  it has no dependency on other parts of do_el0_svc() and can be moved
  earlier so long as it is called prior to unmasking DAIF.IF.

* In do_el0_svc_compat(), we sample the syscall number from
  pt_regs::regs[7]. This is not modified by the handler for any DAIF
  exception, and thus can safely be moved after unmasking DAIF.

  Compat threads cannot use SVE or SME, so there's no need for
  el0_svc_compat() to call fp_user_discard().

Given the above, we can safely hoist the unmasking of DAIF out of
do_el0_svc() and do_el0_svc_compat(), and into their immediate callers:
el0_svc() and el0_svc_compat(), so long as we also hoist
fp_user_discard() into el0_svc().
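
With that done, the EL0 SVC entry paths in entry-common.c take the
shape below (an abridged sketch of the post-patch code, not verbatim):

  static void noinstr el0_svc(struct pt_regs *regs)
  {
          enter_from_user_mode(regs);
          cortex_a76_erratum_1463225_svc_handler();
          fp_user_discard();                    /* before unmasking DAIF.IF */
          local_daif_restore(DAIF_PROCCTX);     /* unmask DAIF */
          do_el0_svc(regs);
          exit_to_user_mode(regs);
  }

  static void noinstr el0_svc_compat(struct pt_regs *regs)
  {
          enter_from_user_mode(regs);
          cortex_a76_erratum_1463225_svc_handler();
          local_daif_restore(DAIF_PROCCTX);     /* compat: no fp_user_discard() */
          do_el0_svc_compat(regs);
          exit_to_user_mode(regs);
  }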

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20230808101148.1064172-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>

// SPDX-License-Identifier: GPL-2.0

#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/errno.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/randomize_kstack.h>
#include <linux/syscalls.h>

#include <asm/debug-monitors.h>
#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

long compat_arm_syscall(struct pt_regs *regs, int scno);
long sys_ni_syscall(void);

static long do_ni_syscall(struct pt_regs *regs, int scno)
{
#ifdef CONFIG_COMPAT
	long ret;
	if (is_compat_task()) {
		ret = compat_arm_syscall(regs, scno);
		if (ret != -ENOSYS)
			return ret;
	}
#endif

	return sys_ni_syscall();
}

static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
{
	return syscall_fn(regs);
}

static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
			   unsigned int sc_nr,
			   const syscall_fn_t syscall_table[])
{
	long ret;

	add_random_kstack_offset();

	if (scno < sc_nr) {
		syscall_fn_t syscall_fn;
		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
		ret = __invoke_syscall(regs, syscall_fn);
	} else {
		ret = do_ni_syscall(regs, scno);
	}

	syscall_set_return_value(current, regs, 0, ret);

	/*
	 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
	 * but not enough for arm64 stack utilization comfort. To keep
	 * reasonable stack head room, reduce the maximum offset to 9 bits.
	 *
	 * The actual entropy will be further reduced by the compiler when
	 * applying stack alignment constraints: the AAPCS mandates a
	 * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
	 *
	 * The resulting 5 bits of entropy is seen in SP[8:4].
	 */
	choose_random_kstack_offset(get_random_u16() & 0x1FF);
}

static inline bool has_syscall_work(unsigned long flags)
{
	return unlikely(flags & _TIF_SYSCALL_WORK);
}

static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
			   const syscall_fn_t syscall_table[])
{
	unsigned long flags = read_thread_flags();

	regs->orig_x0 = regs->regs[0];
	regs->syscallno = scno;

	/*
	 * BTI note:
	 * The architecture does not guarantee that SPSR.BTYPE is zero
	 * on taking an SVC, so we could return to userspace with a
	 * non-zero BTYPE after the syscall.
	 *
	 * This shouldn't matter except when userspace is explicitly
	 * doing something stupid, such as setting PROT_BTI on a page
	 * that lacks conforming BTI/PACIxSP instructions, falling
	 * through from one executable page to another with differing
	 * PROT_BTI, or messing with BTYPE via ptrace: in such cases,
	 * userspace should not be surprised if a SIGILL occurs on
	 * syscall return.
	 *
	 * So, don't touch regs->pstate & PSR_BTYPE_MASK here.
	 * (Similarly for HVC and SMC elsewhere.)
	 */

	if (flags & _TIF_MTE_ASYNC_FAULT) {
		/*
		 * Process the asynchronous tag check fault before the actual
		 * syscall. do_notify_resume() will send a signal to userspace
		 * before the syscall is restarted.
		 */
		syscall_set_return_value(current, regs, -ERESTARTNOINTR, 0);
		return;
	}

	if (has_syscall_work(flags)) {
		/*
		 * The de-facto standard way to skip a system call using ptrace
		 * is to set the system call to -1 (NO_SYSCALL) and set x0 to a
		 * suitable error code for consumption by userspace. However,
		 * this cannot be distinguished from a user-issued syscall(-1)
		 * and so we must set x0 to -ENOSYS here in case the tracer doesn't
		 * issue the skip and we fall into trace_exit with x0 preserved.
		 *
		 * This is slightly odd because it also means that if a tracer
		 * sets the system call number to -1 but does not initialise x0,
		 * then x0 will be preserved for all system calls apart from a
		 * user-issued syscall(-1). However, requesting a skip and not
		 * setting the return value is unlikely to do anything sensible
		 * anyway.
		 */
		if (scno == NO_SYSCALL)
			syscall_set_return_value(current, regs, -ENOSYS, 0);
		scno = syscall_trace_enter(regs);
		if (scno == NO_SYSCALL)
			goto trace_exit;
	}

	invoke_syscall(regs, scno, sc_nr, syscall_table);

	/*
	 * The tracing status may have changed under our feet, so we have to
	 * check again. However, if we were tracing entry, then we always trace
	 * exit regardless, as the old entry assembly did.
	 */
	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
		flags = read_thread_flags();
		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
			return;
	}

trace_exit:
	syscall_trace_exit(regs);
}

void do_el0_svc(struct pt_regs *regs)
{
	el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}

#ifdef CONFIG_COMPAT
void do_el0_svc_compat(struct pt_regs *regs)
{
	el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
		       compat_sys_call_table);
}
#endif