linux-stable/arch/arm64/include/asm/ftrace.h
Torsten Duwe 3b23e4991f arm64: implement ftrace with regs
This patch implements FTRACE_WITH_REGS for arm64, which allows a traced
function's arguments (and some other registers) to be captured into a
struct pt_regs, allowing these to be inspected and/or modified. This is
a building block for live-patching, where a function's arguments may be
forwarded to another function. This is also necessary to enable ftrace
and in-kernel pointer authentication at the same time, as it allows the
LR value to be captured and adjusted prior to signing.
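
As an illustration of what FTRACE_WITH_REGS enables, here is a minimal,
hypothetical ftrace_ops consumer (not part of this patch) that requests
register capture via FTRACE_OPS_FL_SAVE_REGS and inspects the traced
function's first argument:

| #include <linux/ftrace.h>
|
| /* hypothetical example consumer, not part of this patch */
| static void notrace my_func(unsigned long ip, unsigned long parent_ip,
|                             struct ftrace_ops *op, struct pt_regs *regs)
| {
|         /* on arm64, the traced function's first argument lives in x0 */
|         pr_info("%pS called with arg0=0x%llx\n", (void *)ip, regs->regs[0]);
| }
|
| static struct ftrace_ops my_ops = {
|         .func  = my_func,
|         .flags = FTRACE_OPS_FL_SAVE_REGS,
| };

Once registered (e.g. via ftrace_set_filter() and
register_ftrace_function()), each hit on a filtered function sees the
captured pt_regs.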

Using GCC's -fpatchable-function-entry=N option, we can have the
compiler insert a configurable number of NOPs between the function entry
point and the usual prologue. This also ensures functions are AAPCS
compliant (e.g. by disabling inter-procedural register allocation).

For example, with -fpatchable-function-entry=2, GCC 8.1.0 compiles the
following:

| unsigned long bar(void);
|
| unsigned long foo(void)
| {
|         return bar() + 1;
| }

... to:

| <foo>:
|         nop
|         nop
|         stp     x29, x30, [sp, #-16]!
|         mov     x29, sp
|         bl      0 <bar>
|         add     x0, x0, #0x1
|         ldp     x29, x30, [sp], #16
|         ret

This patch builds the kernel with -fpatchable-function-entry=2,
prefixing each function with two NOPs. To trace a function, we replace
these NOPs with a sequence that saves the LR into a GPR, then calls an
ftrace entry assembly function which saves this and other relevant
registers:

| mov	x9, x30
| bl	<ftrace-entry>

Since patchable functions are AAPCS compliant (and the kernel does not
use x18 as a platform register), x9-x18 can be safely clobbered in the
patched sequence and the ftrace entry code.
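
For reference, redirecting the second NOP is an ordinary
single-instruction rewrite. A sketch in the style of ftrace_make_call()
in arch/arm64/kernel/ftrace.c (patch_callsite is a hypothetical name;
ftrace_modify_code is the existing validate-and-swap helper there):

| /* hypothetical helper mirroring ftrace_make_call() */
| static int patch_callsite(struct dyn_ftrace *rec, unsigned long entry)
| {
|         unsigned long pc = rec->ip;     /* adjusted to the BL slot */
|         u32 old = aarch64_insn_gen_nop();
|         u32 new = aarch64_insn_gen_branch_imm(pc, entry,
|                                               AARCH64_INSN_BRANCH_LINK);
|
|         /* refuse to patch unless the old instruction is still there */
|         return ftrace_modify_code(pc, old, new, true);
| }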

There are now two ftrace entry functions, ftrace_regs_entry (which saves
all GPRs), and ftrace_entry (which saves the bare minimum). A PLT is
allocated for each within modules.
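
Where a module callsite is out of direct-branch range of these entry
functions, the BL targets the corresponding PLT instead. Selection might
look like the following sketch (pick_ftrace_plt is a hypothetical name;
the real logic is get_ftrace_plt() in arch/arm64/kernel/ftrace.c):

| /* hypothetical sketch of per-module trampoline selection */
| static struct plt_entry *pick_ftrace_plt(struct module *mod,
|                                          unsigned long addr)
| {
|         struct plt_entry *plt = mod->arch.ftrace_trampolines;
|
|         if (addr == FTRACE_ADDR)
|                 return &plt[FTRACE_PLT_IDX];
|         /* FTRACE_REGS_ADDR only exists with DYNAMIC_FTRACE_WITH_REGS */
|         if (addr == FTRACE_REGS_ADDR)
|                 return &plt[FTRACE_REGS_PLT_IDX];
|         return NULL;
| }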

Signed-off-by: Torsten Duwe <duwe@suse.de>
[Mark: rework asm, comments, PLTs, initialization, commit message]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Torsten Duwe <duwe@suse.de>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Tested-by: Torsten Duwe <duwe@suse.de>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Julien Thierry <jthierry@redhat.com>
Cc: Will Deacon <will@kernel.org>
2019-11-06 14:17:35 +00:00

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/ftrace.h
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
#ifndef __ASM_FTRACE_H
#define __ASM_FTRACE_H

#include <asm/insn.h>

#define HAVE_FUNCTION_GRAPH_FP_TEST

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR		((unsigned long)_mcount)
#endif

/* The BL at the callsite's adjusted rec->ip */
#define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE

#define FTRACE_PLT_IDX		0
#define FTRACE_REGS_PLT_IDX	1
#define NR_FTRACE_PLTS		2

/*
 * Currently, gcc tends to save the link register after the local variables
 * on the stack. This causes the max stack tracer to report the function
 * frame sizes for the wrong functions. By defining
 * ARCH_FTRACE_SHIFT_STACK_TRACER, it will tell the stack tracer to expect
 * to find the return address on the stack after the local variables have
 * been set up.
 *
 * Note, this may change in the future, and we will need to deal with that
 * if it were to happen.
 */
#define ARCH_FTRACE_SHIFT_STACK_TRACER 1

#ifndef __ASSEMBLY__
#include <linux/compat.h>

extern void _mcount(unsigned long);
extern void *return_address(unsigned int);

struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};

extern unsigned long ftrace_graph_call;

extern void return_to_handler(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * Adjust addr to point at the BL in the callsite.
	 * See ftrace_init_nop() for the callsite sequence.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return addr + AARCH64_INSN_SIZE;

	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
struct dyn_ftrace;
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
#endif

#define ftrace_return_address(n) return_address(n)

/*
 * Because AArch32 mode does not share the same syscall table with AArch64,
 * tracing compat syscalls may result in reporting bogus syscalls or even
 * hang-up, so just do not trace them.
 * See kernel/trace/trace_syscalls.c
 *
 * x86 code says:
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have __arm64_ prefix, we must skip it.
	 * However, as we described above, we decided to ignore compat
	 * syscalls, so we don't care about __arm64_compat_ prefix here.
	 */
	return !strcmp(sym + 8, name);
}
#endif /* ifndef __ASSEMBLY__ */
#endif /* __ASM_FTRACE_H */