arm64: entry: prepare ret_to_user for function call

In a subsequent patch ret_to_user will need to make a C function call
(in some configurations) which may clobber x0-x18 at the start of the
finish_ret_to_user block, before enable_step_tsk consumes the flags
loaded into x1.

In preparation for this, let's load the flags into x19, which is
preserved across C function calls. This avoids a redundant reload of the
flags and ensures we operate on a consistent snapshot regardless.

There should be no functional change as a result of this patch. At this
point of the entry/exit paths we only need to preserve x28 (tsk) and the
sp, and x19 is free for this use.
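
As an aside, a minimal sketch of the AAPCS64 calling-convention point at work
here (purely illustrative, not part of this patch; some_c_helper is a
hypothetical function):

	ldr	x19, [tsk, #TSK_TI_FLAGS]	// flags in a callee-saved register
	bl	some_c_helper			// hypothetical C call: may clobber x0-x18,
						// but must preserve x19-x28 per AAPCS64
	enable_step_tsk x19, x2			// x19 still holds the flags snapshot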

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201130115950.22492-6-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
@@ -774,13 +774,13 @@ SYM_CODE_END(el0_error)
 SYM_CODE_START_LOCAL(ret_to_user)
 	disable_daif
 	gic_prio_kentry_setup tmp=x3
-	ldr	x1, [tsk, #TSK_TI_FLAGS]
-	and	x2, x1, #_TIF_WORK_MASK
+	ldr	x19, [tsk, #TSK_TI_FLAGS]
+	and	x2, x19, #_TIF_WORK_MASK
 	cbnz	x2, work_pending
 finish_ret_to_user:
 	/* Ignore asynchronous tag check faults in the uaccess routines */
 	clear_mte_async_tcf
-	enable_step_tsk x1, x2
+	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
@@ -791,11 +791,12 @@ finish_ret_to_user:
  */
 work_pending:
 	mov	x0, sp				// 'regs'
+	mov	x1, x19
 	bl	do_notify_resume
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on		// enabled while in userspace
 #endif
-	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 	b	finish_ret_to_user
 SYM_CODE_END(ret_to_user)