Merge tag 'core-static_call-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull static call support from Ingo Molnar:
 "This introduces static_call(), which is the idea of static_branch()
  applied to indirect function calls. Remove a data load (indirection)
  by modifying the text.

  They give the flexibility of function pointers, but with better
  performance. (This is especially important for cases where retpolines
  would otherwise be used, as retpolines can be pretty slow.)

  API overview:

      DECLARE_STATIC_CALL(name, func);
      DEFINE_STATIC_CALL(name, func);
      DEFINE_STATIC_CALL_NULL(name, typename);

      static_call(name)(args...);
      static_call_cond(name)(args...);
      static_call_update(name, func);

  x86 is supported via text patching, otherwise basic indirect calls are
  used, with function pointers.

  There's a second variant using inline code patching, inspired by
  jump-labels, implemented on x86 as well.

  The new APIs are utilized in the x86 perf code, a heavy user of
  function pointers, where static calls speed up the PMU handler by
  4.2% (!).

   The generic implementation is not really exercised on other
  architectures, outside of the trivial test_static_call_init()
  self-test"
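
A minimal usage sketch of the API quoted above, for readers skimming the log. The names (sched_hook, hook_a, hook_b, run_hook) are illustrative only and are not symbols from this tree; the documentation block added to include/linux/static_call.h further down is the authoritative reference.

    #include <linux/static_call.h>

    static int hook_a(int cpu) { return cpu; }
    static int hook_b(int cpu) { return 2 * cpu; }

    /* DECLARE_STATIC_CALL() would normally go in a header; DEFINE implies it here. */
    DEFINE_STATIC_CALL(sched_hook, hook_a);

    int run_hook(int cpu)
    {
            /* Direct (patched) call on x86; a plain indirect call otherwise. */
            return static_call(sched_hook)(cpu);
    }

    void switch_hook(void)
    {
            /* Re-points the trampoline (and any inline call sites) to hook_b(). */
            static_call_update(sched_hook, &hook_b);
    }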

* tag 'core-static_call-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  static_call: Fix return type of static_call_init
  tracepoint: Fix out of sync data passing by static caller
  tracepoint: Fix overly long tracepoint names
  x86/perf, static_call: Optimize x86_pmu methods
  tracepoint: Optimize using static_call()
  static_call: Allow early init
  static_call: Add some validation
  static_call: Handle tail-calls
  static_call: Add static_call_cond()
  x86/alternatives: Teach text_poke_bp() to emulate RET
  static_call: Add simple self-test for static calls
  x86/static_call: Add inline static call implementation for x86-64
  x86/static_call: Add out-of-line static call implementation
  static_call: Avoid kprobes on inline static_call()s
  static_call: Add inline static call infrastructure
  static_call: Add basic static call infrastructure
  compiler.h: Make __ADDRESSABLE() symbol truly unique
  jump_label,module: Fix module lifetime for __jump_label_mod_text_reserved()
  module: Properly propagate MODULE_STATE_COMING failure
  module: Fix up module_notifier return values
  ...
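
The x86/perf and tracepoint patches in this series follow a common pattern for optional methods: define a NULL static call from just the prototype, patch it once the backend is known, and use static_call_cond() at call sites that previously did "if (ops->fn) ops->fn(...)". A condensed sketch of that pattern follows; pmu_ops, setup_pmu and the other names are illustrative, not symbols from this tree.

    #include <linux/static_call.h>

    struct pmu_ops {
            void (*enable_all)(int added);            /* mandatory */
            void (*sched_task)(void *ctx, bool in);   /* optional  */
    };

    static struct pmu_ops pmu_ops;

    /* Defined from the member's type only; starts out pointing nowhere. */
    DEFINE_STATIC_CALL_NULL(pmu_enable_all, *pmu_ops.enable_all);
    DEFINE_STATIC_CALL_NULL(pmu_sched_task, *pmu_ops.sched_task);

    static void setup_pmu(void)
    {
            /* Patch the call targets once the backend has filled in pmu_ops. */
            static_call_update(pmu_enable_all, pmu_ops.enable_all);
            static_call_update(pmu_sched_task, pmu_ops.sched_task);
    }

    static void pmu_enable(int added)
    {
            static_call(pmu_enable_all)(added);       /* must not be NULL */
    }

    static void pmu_context_switch(void *ctx, bool in)
    {
            /* Degrades to a NOP/RET while the method is NULL. */
            static_call_cond(pmu_sched_task)(ctx, in);
    }
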
Linus Torvalds 2020-10-12 13:58:15 -07:00
commit dd502a8107
47 changed files with 1585 additions and 241 deletions


@ -106,6 +106,12 @@ config STATIC_KEYS_SELFTEST
help
Boot time self-test of the branch patching code.
config STATIC_CALL_SELFTEST
bool "Static call selftest"
depends on HAVE_STATIC_CALL
help
Boot time self-test of the call patching code.
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
@ -975,6 +981,13 @@ config HAVE_SPARSE_SYSCALL_NR
config ARCH_HAS_VDSO_DATA
bool
config HAVE_STATIC_CALL
bool
config HAVE_STATIC_CALL_INLINE
bool
depends on HAVE_STATIC_CALL
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"


@ -215,6 +215,8 @@ config X86
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
@ -230,6 +232,7 @@ config X86
select RTC_MC146818_LIB
select SPARSE_IRQ
select SRCU
select STACK_VALIDATION if HAVE_STACK_VALIDATION && (HAVE_STATIC_CALL_INLINE || RETPOLINE)
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
@ -451,7 +454,6 @@ config GOLDFISH
config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
default y
select STACK_VALIDATION if HAVE_STACK_VALIDATION
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect


@ -28,6 +28,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/static_call.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@ -52,6 +53,34 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
/*
* This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined
* from just a typename, as opposed to an actual function.
*/
DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq);
DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable);
DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable);
DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events);
DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);
DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling, *x86_pmu.start_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling, *x86_pmu.stop_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task, *x86_pmu.sched_task);
DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@ -660,7 +689,7 @@ static void x86_pmu_disable(struct pmu *pmu)
cpuc->enabled = 0;
barrier();
x86_pmu.disable_all();
static_call(x86_pmu_disable_all)();
}
void x86_pmu_enable_all(int added)
@ -907,8 +936,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
n0 -= cpuc->n_txn;
if (x86_pmu.start_scheduling)
x86_pmu.start_scheduling(cpuc);
static_call_cond(x86_pmu_start_scheduling)(cpuc);
for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
c = cpuc->event_constraint[i];
@ -925,7 +953,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
* change due to external factors (sibling state, allow_tfa).
*/
if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
cpuc->event_constraint[i] = c;
}
@ -1008,8 +1036,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (!unsched && assign) {
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
if (x86_pmu.commit_scheduling)
x86_pmu.commit_scheduling(cpuc, i, assign[i]);
static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
}
} else {
for (i = n0; i < n; i++) {
@ -1018,15 +1045,13 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
/*
* release events that failed scheduling
*/
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, e);
static_call_cond(x86_pmu_put_event_constraints)(cpuc, e);
cpuc->event_constraint[i] = NULL;
}
}
if (x86_pmu.stop_scheduling)
x86_pmu.stop_scheduling(cpuc);
static_call_cond(x86_pmu_stop_scheduling)(cpuc);
return unsched ? -EINVAL : 0;
}
@ -1226,7 +1251,7 @@ static void x86_pmu_enable(struct pmu *pmu)
cpuc->enabled = 1;
barrier();
x86_pmu.enable_all(added);
static_call(x86_pmu_enable_all)(added);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@ -1347,7 +1372,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
goto done_collect;
ret = x86_pmu.schedule_events(cpuc, n, assign);
ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
if (ret)
goto out;
/*
@ -1365,13 +1390,11 @@ done_collect:
cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;
if (x86_pmu.add) {
/*
* This is before x86_pmu_enable() will call x86_pmu_start(),
* so we enable LBRs before an event needs them etc..
*/
x86_pmu.add(event);
}
/*
* This is before x86_pmu_enable() will call x86_pmu_start(),
* so we enable LBRs before an event needs them etc..
*/
static_call_cond(x86_pmu_add)(event);
ret = 0;
out:
@ -1399,7 +1422,7 @@ static void x86_pmu_start(struct perf_event *event, int flags)
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
__set_bit(idx, cpuc->running);
x86_pmu.enable(event);
static_call(x86_pmu_enable)(event);
perf_event_update_userpage(event);
}
@ -1469,7 +1492,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
if (test_bit(hwc->idx, cpuc->active_mask)) {
x86_pmu.disable(event);
static_call(x86_pmu_disable)(event);
__clear_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
@ -1519,8 +1542,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
if (i >= cpuc->n_events - cpuc->n_added)
--cpuc->n_added;
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event);
static_call_cond(x86_pmu_put_event_constraints)(cpuc, event);
/* Delete the array entry. */
while (++i < cpuc->n_events) {
@ -1533,13 +1555,12 @@ static void x86_pmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
do_del:
if (x86_pmu.del) {
/*
* This is after x86_pmu_stop(); so we disable LBRs after any
* event can need them etc..
*/
x86_pmu.del(event);
}
/*
* This is after x86_pmu_stop(); so we disable LBRs after any
* event can need them etc..
*/
static_call_cond(x86_pmu_del)(event);
}
int x86_pmu_handle_irq(struct pt_regs *regs)
@ -1617,7 +1638,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
return NMI_DONE;
start_clock = sched_clock();
ret = x86_pmu.handle_irq(regs);
ret = static_call(x86_pmu_handle_irq)(regs);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
@ -1830,6 +1851,38 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
static struct attribute_group x86_pmu_attr_group;
static struct attribute_group x86_pmu_caps_group;
static void x86_pmu_static_call_update(void)
{
static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq);
static_call_update(x86_pmu_disable_all, x86_pmu.disable_all);
static_call_update(x86_pmu_enable_all, x86_pmu.enable_all);
static_call_update(x86_pmu_enable, x86_pmu.enable);
static_call_update(x86_pmu_disable, x86_pmu.disable);
static_call_update(x86_pmu_add, x86_pmu.add);
static_call_update(x86_pmu_del, x86_pmu.del);
static_call_update(x86_pmu_read, x86_pmu.read);
static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events);
static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints);
static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints);
static_call_update(x86_pmu_start_scheduling, x86_pmu.start_scheduling);
static_call_update(x86_pmu_commit_scheduling, x86_pmu.commit_scheduling);
static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx);
static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
}
static void _x86_pmu_read(struct perf_event *event)
{
x86_perf_event_update(event);
}
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
@ -1898,6 +1951,11 @@ static int __init init_hw_perf_events(void)
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
if (!x86_pmu.read)
x86_pmu.read = _x86_pmu_read;
x86_pmu_static_call_update();
/*
* Install callbacks. Core will call them for each online
* cpu.
@ -1934,11 +1992,9 @@ out:
}
early_initcall(init_hw_perf_events);
static inline void x86_pmu_read(struct perf_event *event)
static void x86_pmu_read(struct perf_event *event)
{
if (x86_pmu.read)
return x86_pmu.read(event);
x86_perf_event_update(event);
static_call(x86_pmu_read)(event);
}
/*
@ -2015,7 +2071,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
if (!x86_pmu_initialized())
return -EAGAIN;
ret = x86_pmu.schedule_events(cpuc, n, assign);
ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
if (ret)
return ret;
@ -2308,15 +2364,13 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
if (x86_pmu.sched_task)
x86_pmu.sched_task(ctx, sched_in);
static_call_cond(x86_pmu_sched_task)(ctx, sched_in);
}
static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
struct perf_event_context *next)
{
if (x86_pmu.swap_task_ctx)
x86_pmu.swap_task_ctx(prev, next);
static_call_cond(x86_pmu_swap_task_ctx)(prev, next);
}
void perf_check_microcode(void)


@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_STATIC_CALL_H
#define _ASM_STATIC_CALL_H
#include <asm/text-patching.h>
/*
* For CONFIG_HAVE_STATIC_CALL_INLINE, this is a temporary trampoline which
* uses the current value of the key->func pointer to do an indirect jump to
* the function. This trampoline is only used during boot, before the call
* sites get patched by static_call_update(). The name of this trampoline has
* a magical aspect: objtool uses it to find static call sites so it can create
* the .static_call_sites section.
*
* For CONFIG_HAVE_STATIC_CALL, this is a permanent trampoline which
* does a direct jump to the function. The direct jump gets patched by
* static_call_update().
*
* Having the trampoline in a special section forces GCC to emit a JMP.d32 when
* it does tail-call optimization on the call; since you cannot compute the
* relative displacement across sections.
*/
#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns) \
asm(".pushsection .static_call.text, \"ax\" \n" \
".align 4 \n" \
".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
STATIC_CALL_TRAMP_STR(name) ": \n" \
insns " \n" \
".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
".popsection \n")
#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
#endif /* _ASM_STATIC_CALL_H */


@ -53,6 +53,9 @@ extern void text_poke_finish(void);
#define INT3_INSN_SIZE 1
#define INT3_INSN_OPCODE 0xCC
#define RET_INSN_SIZE 1
#define RET_INSN_OPCODE 0xC3
#define CALL_INSN_SIZE 5
#define CALL_INSN_OPCODE 0xE8
@ -73,6 +76,7 @@ static __always_inline int text_opcode_size(u8 opcode)
switch(opcode) {
__CASE(INT3);
__CASE(RET);
__CASE(CALL);
__CASE(JMP32);
__CASE(JMP8);
@ -140,12 +144,27 @@ void int3_emulate_push(struct pt_regs *regs, unsigned long val)
*(unsigned long *)regs->sp = val;
}
static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
unsigned long val = *(unsigned long *)regs->sp;
regs->sp += sizeof(unsigned long);
return val;
}
static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
int3_emulate_jmp(regs, func);
}
static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
unsigned long ip = int3_emulate_pop(regs);
int3_emulate_jmp(regs, ip);
}
#endif /* !CONFIG_UML_X86 */
#endif /* _ASM_X86_TEXT_PATCHING_H */


@ -68,6 +68,7 @@ obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
obj-y += irqflags.o
obj-y += static_call.o
obj-y += process.o
obj-y += fpu/


@ -1103,6 +1103,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
*/
goto out_put;
case RET_INSN_OPCODE:
int3_emulate_ret(regs);
break;
case CALL_INSN_OPCODE:
int3_emulate_call(regs, (long)ip + tp->rel32);
break;
@ -1277,6 +1281,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
switch (tp->opcode) {
case INT3_INSN_OPCODE:
case RET_INSN_OPCODE:
break;
case CALL_INSN_OPCODE:


@ -18,6 +18,7 @@
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@ -210,7 +211,8 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
/* Check whether the address range is reserved */
if (ftrace_text_reserved(src, src + len - 1) ||
alternatives_text_reserved(src, src + len - 1) ||
jump_label_text_reserved(src, src + len - 1))
jump_label_text_reserved(src, src + len - 1) ||
static_call_text_reserved(src, src + len - 1))
return -EBUSY;
return len;


@ -19,6 +19,7 @@
#include <linux/hugetlb.h>
#include <linux/tboot.h>
#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <uapi/linux/mount.h>
@ -849,6 +850,7 @@ void __init setup_arch(char **cmdline_p)
early_cpu_init();
arch_init_ideal_nops();
jump_label_init();
static_call_init();
early_ioremap_init();
setup_olpc_ofw_pgd();


@ -0,0 +1,98 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/static_call.h>
#include <linux/memory.h>
#include <linux/bug.h>
#include <asm/text-patching.h>
enum insn_type {
CALL = 0, /* site call */
NOP = 1, /* site cond-call */
JMP = 2, /* tramp / site tail-call */
RET = 3, /* tramp / site cond-tail-call */
};
static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
{
int size = CALL_INSN_SIZE;
const void *code;
switch (type) {
case CALL:
code = text_gen_insn(CALL_INSN_OPCODE, insn, func);
break;
case NOP:
code = ideal_nops[NOP_ATOMIC5];
break;
case JMP:
code = text_gen_insn(JMP32_INSN_OPCODE, insn, func);
break;
case RET:
code = text_gen_insn(RET_INSN_OPCODE, insn, func);
size = RET_INSN_SIZE;
break;
}
if (memcmp(insn, code, size) == 0)
return;
if (unlikely(system_state == SYSTEM_BOOTING))
return text_poke_early(insn, code, size);
text_poke_bp(insn, code, size, NULL);
}
static void __static_call_validate(void *insn, bool tail)
{
u8 opcode = *(u8 *)insn;
if (tail) {
if (opcode == JMP32_INSN_OPCODE ||
opcode == RET_INSN_OPCODE)
return;
} else {
if (opcode == CALL_INSN_OPCODE ||
!memcmp(insn, ideal_nops[NOP_ATOMIC5], 5))
return;
}
/*
* If we ever trigger this, our text is corrupt, we'll probably not live long.
*/
WARN_ONCE(1, "unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
}
static inline enum insn_type __sc_insn(bool null, bool tail)
{
/*
* Encode the following table without branches:
*
* tail null insn
* -----+-------+------
* 0 | 0 | CALL
* 0 | 1 | NOP
* 1 | 0 | JMP
* 1 | 1 | RET
*/
return 2*tail + null;
}
void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
mutex_lock(&text_mutex);
if (tramp) {
__static_call_validate(tramp, true);
__static_call_transform(tramp, __sc_insn(!func, true), func);
}
if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
__static_call_validate(site, tail);
__static_call_transform(site, __sc_insn(!func, tail), func);
}
mutex_unlock(&text_mutex);
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);


@ -136,6 +136,7 @@ SECTIONS
ENTRY_TEXT
ALIGN_ENTRY_TEXT_END
SOFTIRQENTRY_TEXT
STATIC_CALL_TEXT
*(.fixup)
*(.gnu.warning)


@ -116,7 +116,7 @@ module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
if (val != MODULE_STATE_COMING)
return 0;
return NOTIFY_DONE;
/* FIXME: should we process all CPU buffers ? */
mutex_lock(&buffer_mutex);
@ -124,7 +124,7 @@ module_load_notify(struct notifier_block *self, unsigned long val, void *data)
add_event_entry(MODULE_LOADED_CODE);
mutex_unlock(&buffer_mutex);
#endif
return 0;
return NOTIFY_OK;
}


@ -389,6 +389,12 @@
KEEP(*(__jump_table)) \
__stop___jump_table = .;
#define STATIC_CALL_DATA \
. = ALIGN(8); \
__start_static_call_sites = .; \
KEEP(*(.static_call_sites)) \
__stop_static_call_sites = .;
/*
* Allow architectures to handle ro_after_init data on their
* own by defining an empty RO_AFTER_INIT_DATA.
@ -399,6 +405,7 @@
__start_ro_after_init = .; \
*(.data..ro_after_init) \
JUMP_TABLE_DATA \
STATIC_CALL_DATA \
__end_ro_after_init = .;
#endif
@ -639,6 +646,12 @@
*(.softirqentry.text) \
__softirqentry_text_end = .;
#define STATIC_CALL_TEXT \
ALIGN_FUNCTION(); \
__static_call_text_start = .; \
*(.static_call.text) \
__static_call_text_end = .;
/* Section used for early init (in .S files) */
#define HEAD_TEXT KEEP(*(.head.text))


@ -207,7 +207,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
*/
#define __ADDRESSABLE(sym) \
static void * __section(.discard.addressable) __used \
__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer


@ -25,6 +25,7 @@
#include <linux/error-injection.h>
#include <linux/tracepoint-defs.h>
#include <linux/srcu.h>
#include <linux/static_call_types.h>
#include <linux/percpu.h>
#include <asm/module.h>
@ -498,6 +499,10 @@ struct module {
unsigned long *kprobe_blacklist;
unsigned int num_kprobe_blacklist;
#endif
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
int num_static_call_sites;
struct static_call_site *static_call_sites;
#endif
#ifdef CONFIG_LIVEPATCH
bool klp; /* Is this a livepatch module? */


@ -161,20 +161,19 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */

include/linux/static_call.h (new file, 298 lines)

@ -0,0 +1,298 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STATIC_CALL_H
#define _LINUX_STATIC_CALL_H
/*
* Static call support
*
* Static calls use code patching to hard-code function pointers into direct
* branch instructions. They give the flexibility of function pointers, but
* with improved performance. This is especially important for cases where
* retpolines would otherwise be used, as retpolines can significantly impact
* performance.
*
*
* API overview:
*
* DECLARE_STATIC_CALL(name, func);
* DEFINE_STATIC_CALL(name, func);
* DEFINE_STATIC_CALL_NULL(name, typename);
* static_call(name)(args...);
* static_call_cond(name)(args...);
* static_call_update(name, func);
*
* Usage example:
*
* # Start with the following functions (with identical prototypes):
* int func_a(int arg1, int arg2);
* int func_b(int arg1, int arg2);
*
* # Define a 'my_name' reference, associated with func_a() by default
* DEFINE_STATIC_CALL(my_name, func_a);
*
* # Call func_a()
* static_call(my_name)(arg1, arg2);
*
* # Update 'my_name' to point to func_b()
* static_call_update(my_name, &func_b);
*
* # Call func_b()
* static_call(my_name)(arg1, arg2);
*
*
* Implementation details:
*
* This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
* Otherwise basic indirect calls are used (with function pointers).
*
* Each static_call() site calls into a trampoline associated with the name.
* The trampoline has a direct branch to the default function. Updates to a
* name will modify the trampoline's branch destination.
*
* If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
* themselves will be patched at runtime to call the functions directly,
* rather than calling through the trampoline. This requires objtool or a
* compiler plugin to detect all the static_call() sites and annotate them
* in the .static_call_sites section.
*
*
* Notes on NULL function pointers:
*
* Static_call()s support NULL functions, with many of the caveats that
* regular function pointers have.
*
* Clearly calling a NULL function pointer is 'BAD', so too for
* static_call()s (although when HAVE_STATIC_CALL it might not be immediately
* fatal). A NULL static_call can be the result of:
*
* DECLARE_STATIC_CALL_NULL(my_static_call, void (*)(int));
*
* which is equivalent to declaring a NULL function pointer with just a
* typename:
*
* void (*my_func_ptr)(int arg1) = NULL;
*
* or using static_call_update() with a NULL function. In both cases the
* HAVE_STATIC_CALL implementation will patch the trampoline with a RET
* instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
* architectures can patch the trampoline call to a NOP.
*
* In all cases, any argument evaluation is unconditional. Unlike a regular
* conditional function pointer call:
*
* if (my_func_ptr)
* my_func_ptr(arg1)
*
* where the argument evaluation also depends on the pointer value.
*
* When calling a static_call that can be NULL, use:
*
* static_call_cond(name)(arg1);
*
* which will include the required value tests to avoid NULL-pointer
* dereferences.
*/
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/static_call_types.h>
#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>
/*
* Either @site or @tramp can be NULL.
*/
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);
#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)
/*
* __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
* the symbol table so that objtool can reference it when it generates the
* .static_call_sites section.
*/
#define __static_call(name) \
({ \
__ADDRESSABLE(STATIC_CALL_KEY(name)); \
&STATIC_CALL_TRAMP(name); \
})
#else
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif
#define DECLARE_STATIC_CALL(name, func) \
extern struct static_call_key STATIC_CALL_KEY(name); \
extern typeof(func) STATIC_CALL_TRAMP(name);
#define static_call_update(name, func) \
({ \
BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name))); \
__static_call_update(&STATIC_CALL_KEY(name), \
STATIC_CALL_TRAMP_ADDR(name), func); \
})
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
extern int __init static_call_init(void);
struct static_call_mod {
struct static_call_mod *next;
struct module *mod; /* for vmlinux, mod == NULL */
struct static_call_site *sites;
};
struct static_call_key {
void *func;
union {
/* bit 0: 0 = mods, 1 = sites */
unsigned long type;
struct static_call_mod *mods;
struct static_call_site *sites;
};
};
extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);
#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = _func, \
.type = 1, \
}; \
ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = NULL, \
.type = 1, \
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
#define static_call(name) __static_call(name)
#define static_call_cond(name) (void)__static_call(name)
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name) \
EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
#elif defined(CONFIG_HAVE_STATIC_CALL)
static inline int static_call_init(void) { return 0; }
struct static_call_key {
void *func;
};
#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = _func, \
}; \
ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = NULL, \
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
#define static_call(name) __static_call(name)
#define static_call_cond(name) (void)__static_call(name)
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
cpus_read_lock();
WRITE_ONCE(key->func, func);
arch_static_call_transform(NULL, tramp, func, false);
cpus_read_unlock();
}
static inline int static_call_text_reserved(void *start, void *end)
{
return 0;
}
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name) \
EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
#else /* Generic implementation */
static inline int static_call_init(void) { return 0; }
struct static_call_key {
void *func;
};
#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = _func, \
}
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = NULL, \
}
#define static_call(name) \
((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
static inline void __static_call_nop(void) { }
/*
* This horrific hack takes care of two things:
*
* - it ensures the compiler will only load the function pointer ONCE,
* which avoids a reload race.
*
* - it ensures the argument evaluation is unconditional, similar
* to the HAVE_STATIC_CALL variant.
*
* Sadly current GCC/Clang (10 for both) do not optimize this properly
* and will emit an indirect call for the NULL case :-(
*/
#define __static_call_cond(name) \
({ \
void *func = READ_ONCE(STATIC_CALL_KEY(name).func); \
if (!func) \
func = &__static_call_nop; \
(typeof(STATIC_CALL_TRAMP(name))*)func; \
})
#define static_call_cond(name) (void)__static_call_cond(name)
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
WRITE_ONCE(key->func, func);
}
static inline int static_call_text_reserved(void *start, void *end)
{
return 0;
}
#define EXPORT_STATIC_CALL(name) EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name) EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
#endif /* CONFIG_HAVE_STATIC_CALL */
#endif /* _LINUX_STATIC_CALL_H */


@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _STATIC_CALL_TYPES_H
#define _STATIC_CALL_TYPES_H
#include <linux/types.h>
#include <linux/stringify.h>
#define STATIC_CALL_KEY_PREFIX __SCK__
#define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
#define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
#define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
#define STATIC_CALL_TRAMP_PREFIX __SCT__
#define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
#define STATIC_CALL_TRAMP_PREFIX_LEN (sizeof(STATIC_CALL_TRAMP_PREFIX_STR) - 1)
#define STATIC_CALL_TRAMP(name) __PASTE(STATIC_CALL_TRAMP_PREFIX, name)
#define STATIC_CALL_TRAMP_STR(name) __stringify(STATIC_CALL_TRAMP(name))
/*
* Flags in the low bits of static_call_site::key.
*/
#define STATIC_CALL_SITE_TAIL 1UL /* tail call */
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
*/
struct static_call_site {
s32 addr;
s32 key;
};
#endif /* _STATIC_CALL_TYPES_H */


@ -11,6 +11,8 @@
#include <linux/atomic.h>
#include <linux/static_key.h>
struct static_call_key;
struct trace_print_flags {
unsigned long mask;
const char *name;
@ -30,6 +32,9 @@ struct tracepoint_func {
struct tracepoint {
const char *name; /* Tracepoint name */
struct static_key key;
struct static_call_key *static_call_key;
void *static_call_tramp;
void *iterator;
int (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;


@ -19,6 +19,7 @@
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint-defs.h>
#include <linux/static_call.h>
struct module;
struct tracepoint;
@ -92,7 +93,9 @@ extern int syscall_regfunc(void);
extern void syscall_unregfunc(void);
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
#ifndef PARAMS
#define PARAMS(args...) args
#endif
#define TRACE_DEFINE_ENUM(x)
#define TRACE_DEFINE_SIZEOF(x)
@ -148,6 +151,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#ifdef TRACEPOINTS_ENABLED
#ifdef CONFIG_HAVE_STATIC_CALL
#define __DO_TRACE_CALL(name) static_call(tp_func_##name)
#else
#define __DO_TRACE_CALL(name) __traceiter_##name
#endif /* CONFIG_HAVE_STATIC_CALL */
/*
* it_func[0] is never NULL because there is at least one element in the array
* when the array itself is non NULL.
@ -157,12 +166,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* has a "void" prototype, then it is invalid to declare a function
* as "(void *, void)".
*/
#define __DO_TRACE(tp, proto, args, cond, rcuidle) \
#define __DO_TRACE(name, proto, args, cond, rcuidle) \
do { \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
int __maybe_unused __idx = 0; \
void *__data; \
\
if (!(cond)) \
return; \
@ -182,14 +190,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
rcu_irq_enter_irqson(); \
} \
\
it_func_ptr = rcu_dereference_raw((tp)->funcs); \
\
it_func_ptr = \
rcu_dereference_raw((&__tracepoint_##name)->funcs); \
if (it_func_ptr) { \
do { \
it_func = (it_func_ptr)->func; \
__data = (it_func_ptr)->data; \
((void(*)(proto))(it_func))(args); \
} while ((++it_func_ptr)->func); \
__data = (it_func_ptr)->data; \
__DO_TRACE_CALL(name)(args); \
} \
\
if (rcuidle) { \
@ -205,7 +210,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
static inline void trace_##name##_rcuidle(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
__DO_TRACE(name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond), 1); \
@ -227,11 +232,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* poking RCU a bit.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern int __traceiter_##name(data_proto); \
DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
__DO_TRACE(name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond), 0); \
@ -277,21 +284,50 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* structures, so we create an array of pointers that will be used for iteration
* on the tracepoints.
*/
#define DEFINE_TRACE_FN(name, reg, unreg) \
static const char __tpstrtab_##name[] \
__section(__tracepoints_strings) = #name; \
struct tracepoint __tracepoint_##name __used \
__section(__tracepoints) = \
{ __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
__TRACEPOINT_ENTRY(name);
#define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args) \
static const char __tpstrtab_##_name[] \
__section(__tracepoints_strings) = #_name; \
extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \
int __traceiter_##_name(void *__data, proto); \
struct tracepoint __tracepoint_##_name __used \
__section(__tracepoints) = { \
.name = __tpstrtab_##_name, \
.key = STATIC_KEY_INIT_FALSE, \
.static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \
.static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
.iterator = &__traceiter_##_name, \
.regfunc = _reg, \
.unregfunc = _unreg, \
.funcs = NULL }; \
__TRACEPOINT_ENTRY(_name); \
int __traceiter_##_name(void *__data, proto) \
{ \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
\
it_func_ptr = \
rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
do { \
it_func = (it_func_ptr)->func; \
__data = (it_func_ptr)->data; \
((void(*)(void *, proto))(it_func))(__data, args); \
} while ((++it_func_ptr)->func); \
return 0; \
} \
DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
#define DEFINE_TRACE(name) \
DEFINE_TRACE_FN(name, NULL, NULL);
#define DEFINE_TRACE(name, proto, args) \
DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args));
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
EXPORT_SYMBOL_GPL(__tracepoint_##name)
EXPORT_SYMBOL_GPL(__tracepoint_##name); \
EXPORT_SYMBOL_GPL(__traceiter_##name); \
EXPORT_STATIC_CALL_GPL(tp_func_##name)
#define EXPORT_TRACEPOINT_SYMBOL(name) \
EXPORT_SYMBOL(__tracepoint_##name)
EXPORT_SYMBOL(__tracepoint_##name); \
EXPORT_SYMBOL(__traceiter_##name); \
EXPORT_STATIC_CALL(tp_func_##name)
#else /* !TRACEPOINTS_ENABLED */
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
@ -320,8 +356,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
return false; \
}
#define DEFINE_TRACE_FN(name, reg, unreg)
#define DEFINE_TRACE(name)
#define DEFINE_TRACE_FN(name, reg, unreg, proto, args)
#define DEFINE_TRACE(name, proto, args)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)


@ -25,7 +25,7 @@
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
DEFINE_TRACE(name)
DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_CONDITION
#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
@ -39,12 +39,12 @@
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_NOP
#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
@ -54,15 +54,15 @@
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
DEFINE_TRACE(name)
DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_TRACE(name)
DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_CONDITION
#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
@ -70,7 +70,7 @@
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
DEFINE_TRACE(name)
DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
#undef TRACE_INCLUDE
#undef __TRACE_INCLUDE


@ -111,6 +111,7 @@ obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_BPF) += bpf/
obj-$(CONFIG_KCSAN) += kcsan/
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call.o
obj-$(CONFIG_PERF_EVENTS) += events/


@ -15,18 +15,28 @@
static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
static int cpu_pm_notify(enum cpu_pm_event event)
{
int ret;
/*
* __atomic_notifier_call_chain has a RCU read critical section, which
* atomic_notifier_call_chain has a RCU read critical section, which
* could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let
* RCU know this.
*/
rcu_irq_enter_irqson();
ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
nr_to_call, nr_calls);
ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
rcu_irq_exit_irqson();
return notifier_to_errno(ret);
}
static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
{
int ret;
rcu_irq_enter_irqson();
ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
rcu_irq_exit_irqson();
return notifier_to_errno(ret);
@ -80,18 +90,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
*/
int cpu_pm_enter(void)
{
int nr_calls = 0;
int ret = 0;
ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
if (ret)
/*
* Inform listeners (nr_calls - 1) about failure of CPU PM
* PM entry who are notified earlier to prepare for it.
*/
cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
return ret;
return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_pm_enter);
@ -109,7 +108,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
*/
int cpu_pm_exit(void)
{
return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
return cpu_pm_notify(CPU_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_pm_exit);
@ -131,18 +130,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
*/
int cpu_cluster_pm_enter(void)
{
int nr_calls = 0;
int ret = 0;
ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
if (ret)
/*
* Inform listeners (nr_calls - 1) about failure of CPU cluster
* PM entry who are notified earlier to prepare for it.
*/
cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
return ret;
return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
@ -163,7 +151,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
*/
int cpu_cluster_pm_exit(void)
{
return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);


@ -539,19 +539,25 @@ static void static_key_set_mod(struct static_key *key,
static int __jump_label_mod_text_reserved(void *start, void *end)
{
struct module *mod;
int ret;
preempt_disable();
mod = __module_text_address((unsigned long)start);
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
if (!try_module_get(mod))
mod = NULL;
preempt_enable();
if (!mod)
return 0;
return __jump_label_text_reserved(mod->jump_entries,
ret = __jump_label_text_reserved(mod->jump_entries,
mod->jump_entries + mod->num_jump_entries,
start, end);
module_put(mod);
return ret;
}
static void __jump_label_mod_update(struct static_key *key)


@ -36,6 +36,7 @@
#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/perf_event.h>
#include <linux/static_call.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@ -1634,6 +1635,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (!kernel_text_address((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr)) {
ret = -EINVAL;
goto out;


@ -3274,6 +3274,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
sizeof(unsigned long),
&mod->num_kprobe_blacklist);
#endif
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
mod->static_call_sites = section_objs(info, ".static_call_sites",
sizeof(*mod->static_call_sites),
&mod->num_static_call_sites);
#endif
mod->extable = section_objs(info, "__ex_table",
sizeof(*mod->extable), &mod->num_exentries);
@ -3792,9 +3797,13 @@ static int prepare_coming_module(struct module *mod)
if (err)
return err;
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_COMING, mod);
return 0;
err = blocking_notifier_call_chain_robust(&module_notify_list,
MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
err = notifier_to_errno(err);
if (err)
klp_module_going(mod);
return err;
}
static int unknown_module_param_cb(char *param, char *val, const char *modname,


@ -94,6 +94,34 @@ static int notifier_call_chain(struct notifier_block **nl,
}
NOKPROBE_SYMBOL(notifier_call_chain);
/**
* notifier_call_chain_robust - Inform the registered notifiers about an event
* and rollback on error.
* @nl: Pointer to head of the blocking notifier chain
* @val_up: Value passed unmodified to the notifier function
* @val_down: Value passed unmodified to the notifier function when recovering
* from an error on @val_up
* @v Pointer passed unmodified to the notifier function
*
* NOTE: It is important the @nl chain doesn't change between the two
* invocations of notifier_call_chain() such that we visit the
* exact same notifier callbacks; this rules out any RCU usage.
*
* Returns: the return value of the @val_up call.
*/
static int notifier_call_chain_robust(struct notifier_block **nl,
unsigned long val_up, unsigned long val_down,
void *v)
{
int ret, nr = 0;
ret = notifier_call_chain(nl, val_up, v, -1, &nr);
if (ret & NOTIFY_STOP_MASK)
notifier_call_chain(nl, val_down, v, nr-1, NULL);
return ret;
}
/*
* Atomic notifier chain routines. Registration and unregistration
* use a spinlock, and call_chain is synchronized by RCU (no locks).
@ -144,13 +172,30 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v)
{
unsigned long flags;
int ret;
/*
* Mustn't use RCU; because then the notifier list can
* change between the up and down traversal.
*/
spin_lock_irqsave(&nh->lock, flags);
ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
spin_unlock_irqrestore(&nh->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain_robust);
NOKPROBE_SYMBOL(atomic_notifier_call_chain_robust);
/**
* __atomic_notifier_call_chain - Call functions in an atomic notifier chain
* atomic_notifier_call_chain - Call functions in an atomic notifier chain
* @nh: Pointer to head of the atomic notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
* @nr_to_call: See the comment for notifier_call_chain.
* @nr_calls: See the comment for notifier_call_chain.
*
* Calls each function in a notifier chain in turn. The functions
* run in an atomic context, so they must not block.
@ -163,24 +208,16 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v)
{
int ret;
rcu_read_lock();
ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
NOKPROBE_SYMBOL(__atomic_notifier_call_chain);
int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v)
{
return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
NOKPROBE_SYMBOL(atomic_notifier_call_chain);
@ -250,13 +287,30 @@ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v)
{
int ret = NOTIFY_DONE;
/*
* We check the head outside the lock, but if this access is
* racy then it does not matter what the result of the test
* is, we re-check the list after having taken the lock anyway:
*/
if (rcu_access_pointer(nh->head)) {
down_read(&nh->rwsem);
ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
up_read(&nh->rwsem);
}
return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_robust);
/**
* __blocking_notifier_call_chain - Call functions in a blocking notifier chain
* blocking_notifier_call_chain - Call functions in a blocking notifier chain
* @nh: Pointer to head of the blocking notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
* @nr_to_call: See comment for notifier_call_chain.
* @nr_calls: See comment for notifier_call_chain.
*
* Calls each function in a notifier chain in turn. The functions
* run in a process context, so they are allowed to block.
@ -268,9 +322,8 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v)
{
int ret = NOTIFY_DONE;
@ -281,19 +334,11 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
*/
if (rcu_access_pointer(nh->head)) {
down_read(&nh->rwsem);
ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
nr_calls);
ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
up_read(&nh->rwsem);
}
return ret;
}
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v)
{
return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
/*
@ -335,13 +380,18 @@ int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v)
{
return notifier_call_chain_robust(&nh->head, val_up, val_down, v);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain_robust);
/**
* __raw_notifier_call_chain - Call functions in a raw notifier chain
* raw_notifier_call_chain - Call functions in a raw notifier chain
* @nh: Pointer to head of the raw notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
* @nr_to_call: See comment for notifier_call_chain.
* @nr_calls: See comment for notifier_call_chain
*
* Calls each function in a notifier chain in turn. The functions
* run in an undefined context.
@ -354,18 +404,10 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int __raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
{
return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}
EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v)
{
return __raw_notifier_call_chain(nh, val, v, -1, NULL);
return notifier_call_chain(&nh->head, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
@ -437,12 +479,10 @@ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
/**
* __srcu_notifier_call_chain - Call functions in an SRCU notifier chain
* srcu_notifier_call_chain - Call functions in an SRCU notifier chain
* @nh: Pointer to head of the SRCU notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
* @nr_to_call: See comment for notifier_call_chain.
* @nr_calls: See comment for notifier_call_chain
*
* Calls each function in a notifier chain in turn. The functions
* run in a process context, so they are allowed to block.
@ -454,25 +494,17 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v)
{
int ret;
int idx;
idx = srcu_read_lock(&nh->srcu);
ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
srcu_read_unlock(&nh->srcu, idx);
return ret;
}
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v)
{
return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
/**


@ -706,8 +706,8 @@ static int load_image_and_restore(void)
*/
int hibernate(void)
{
int error, nr_calls = 0;
bool snapshot_test = false;
int error;
if (!hibernation_available()) {
pm_pr_dbg("Hibernation not available.\n");
@ -723,11 +723,9 @@ int hibernate(void)
pr_info("hibernation entry\n");
pm_prepare_console();
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
if (error) {
nr_calls--;
goto Exit;
}
error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
if (error)
goto Restore;
ksys_sync_helper();
@ -785,7 +783,8 @@ int hibernate(void)
/* Don't bother checking whether freezer_test_done is true */
freezer_test_done = false;
Exit:
__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
pm_notifier_call_chain(PM_POST_HIBERNATION);
Restore:
pm_restore_console();
hibernate_release();
Unlock:
@ -804,7 +803,7 @@ int hibernate(void)
*/
int hibernate_quiet_exec(int (*func)(void *data), void *data)
{
int error, nr_calls = 0;
int error;
lock_system_sleep();
@ -815,11 +814,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data)
pm_prepare_console();
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
if (error) {
nr_calls--;
goto exit;
}
error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
if (error)
goto restore;
error = freeze_processes();
if (error)
@ -880,8 +877,9 @@ thaw:
thaw_processes();
exit:
__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
pm_notifier_call_chain(PM_POST_HIBERNATION);
restore:
pm_restore_console();
hibernate_release();
@@ -910,7 +908,7 @@ EXPORT_SYMBOL_GPL(hibernate_quiet_exec);
*/
static int software_resume(void)
{
int error, nr_calls = 0;
int error;
/*
* If the user said "noresume".. bail out early.
@@ -997,11 +995,9 @@ static int software_resume(void)
pr_info("resume from hibernation\n");
pm_prepare_console();
error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
if (error) {
nr_calls--;
goto Close_Finish;
}
error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
if (error)
goto Restore;
pm_pr_dbg("Preparing processes for hibernation restore.\n");
error = freeze_processes();
@@ -1017,7 +1013,8 @@ static int software_resume(void)
error = load_image_and_restore();
thaw_processes();
Finish:
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
pm_notifier_call_chain(PM_POST_RESTORE);
Restore:
pm_restore_console();
pr_info("resume failed (%d)\n", error);
hibernate_release();


@@ -80,18 +80,18 @@ int unregister_pm_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
{
int ret;
ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
nr_to_call, nr_calls);
ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);
return notifier_to_errno(ret);
}
int pm_notifier_call_chain(unsigned long val)
{
return __pm_notifier_call_chain(val, -1, NULL);
return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
}
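The _robust() variant is what lets callers such as hibernate() and suspend_prepare() drop their nr_calls bookkeeping: blocking_notifier_call_chain_robust() notifies the chain with @val_up and, if a callback fails, itself re-notifies the callbacks that had already run with @val_down. A sketch of the resulting caller pattern, with a hypothetical work function:

int example_pm_transition(void)
{
        int error;

        error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE,
                                              PM_POST_HIBERNATION);
        if (error)
                return error;   /* chain already unwound via PM_POST_HIBERNATION */

        error = example_do_transition();        /* hypothetical */

        /* Only a fully successful PREPARE needs an explicit POST on the way out. */
        pm_notifier_call_chain(PM_POST_HIBERNATION);
        return error;
}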
/* If set, devices may be suspended and resumed asynchronously. */


@@ -210,8 +210,7 @@ static inline void suspend_test_finish(const char *label) {}
#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
int *nr_calls);
extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
extern int pm_notifier_call_chain(unsigned long val);
#endif


@@ -342,18 +342,16 @@ static int suspend_test(int level)
*/
static int suspend_prepare(suspend_state_t state)
{
int error, nr_calls = 0;
int error;
if (!sleep_state_supported(state))
return -EPERM;
pm_prepare_console();
error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
if (error) {
nr_calls--;
goto Finish;
}
error = pm_notifier_call_chain_robust(PM_SUSPEND_PREPARE, PM_POST_SUSPEND);
if (error)
goto Restore;
trace_suspend_resume(TPS("freeze_processes"), 0, true);
error = suspend_freeze_processes();
@@ -363,8 +361,8 @@ static int suspend_prepare(suspend_state_t state)
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
pm_notifier_call_chain(PM_POST_SUSPEND);
Restore:
pm_restore_console();
return error;
}


@@ -46,7 +46,7 @@ int is_hibernate_resume_dev(const struct inode *bd_inode)
static int snapshot_open(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
int error, nr_calls = 0;
int error;
if (!hibernation_available())
return -EPERM;
@@ -73,9 +73,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
data->free_bitmaps = false;
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
if (error)
__pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL);
error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
} else {
/*
* Resuming. We may need to wait for the image device to
@@ -85,15 +83,11 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->swap = -1;
data->mode = O_WRONLY;
error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
if (!error) {
error = create_basic_memory_bitmaps();
data->free_bitmaps = !error;
} else
nr_calls--;
if (error)
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
}
}
if (error)
hibernate_release();

kernel/static_call.c (new file)

@@ -0,0 +1,482 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/static_call.h>
#include <linux/bug.h>
#include <linux/smp.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/processor.h>
#include <asm/sections.h>
extern struct static_call_site __start_static_call_sites[],
__stop_static_call_sites[];
static bool static_call_initialized;
/* mutex to protect key modules/sites */
static DEFINE_MUTEX(static_call_mutex);
static void static_call_lock(void)
{
mutex_lock(&static_call_mutex);
}
static void static_call_unlock(void)
{
mutex_unlock(&static_call_mutex);
}
static inline void *static_call_addr(struct static_call_site *site)
{
return (void *)((long)site->addr + (long)&site->addr);
}
static inline struct static_call_key *static_call_key(const struct static_call_site *site)
{
return (struct static_call_key *)
(((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);
}
/* These assume the key is word-aligned. */
static inline bool static_call_is_init(struct static_call_site *site)
{
return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;
}
static inline bool static_call_is_tail(struct static_call_site *site)
{
return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;
}
static inline void static_call_set_init(struct static_call_site *site)
{
site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -
(long)&site->key;
}
static int static_call_site_cmp(const void *_a, const void *_b)
{
const struct static_call_site *a = _a;
const struct static_call_site *b = _b;
const struct static_call_key *key_a = static_call_key(a);
const struct static_call_key *key_b = static_call_key(b);
if (key_a < key_b)
return -1;
if (key_a > key_b)
return 1;
return 0;
}
static void static_call_site_swap(void *_a, void *_b, int size)
{
long delta = (unsigned long)_a - (unsigned long)_b;
struct static_call_site *a = _a;
struct static_call_site *b = _b;
struct static_call_site tmp = *a;
a->addr = b->addr - delta;
a->key = b->key - delta;
b->addr = tmp.addr + delta;
b->key = tmp.key + delta;
}
static inline void static_call_sort_entries(struct static_call_site *start,
struct static_call_site *stop)
{
sort(start, stop - start, sizeof(struct static_call_site),
static_call_site_cmp, static_call_site_swap);
}
static inline bool static_call_key_has_mods(struct static_call_key *key)
{
return !(key->type & 1);
}
static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
{
if (!static_call_key_has_mods(key))
return NULL;
return key->mods;
}
static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
{
if (static_call_key_has_mods(key))
return NULL;
return (struct static_call_site *)(key->type & ~1);
}
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
struct static_call_site *site, *stop;
struct static_call_mod *site_mod, first;
cpus_read_lock();
static_call_lock();
if (key->func == func)
goto done;
key->func = func;
arch_static_call_transform(NULL, tramp, func, false);
/*
* If uninitialized, we'll not update the callsites, but they still
* point to the trampoline and we just patched that.
*/
if (WARN_ON_ONCE(!static_call_initialized))
goto done;
first = (struct static_call_mod){
.next = static_call_key_next(key),
.mod = NULL,
.sites = static_call_key_sites(key),
};
for (site_mod = &first; site_mod; site_mod = site_mod->next) {
struct module *mod = site_mod->mod;
if (!site_mod->sites) {
/*
* This can happen if the static call key is defined in
* a module which doesn't use it.
*
* It also happens in the has_mods case, where the
* 'first' entry has no sites associated with it.
*/
continue;
}
stop = __stop_static_call_sites;
#ifdef CONFIG_MODULES
if (mod) {
stop = mod->static_call_sites +
mod->num_static_call_sites;
}
#endif
for (site = site_mod->sites;
site < stop && static_call_key(site) == key; site++) {
void *site_addr = static_call_addr(site);
if (static_call_is_init(site)) {
/*
* Don't write to call sites which were in
* initmem and have since been freed.
*/
if (!mod && system_state >= SYSTEM_RUNNING)
continue;
if (mod && !within_module_init((unsigned long)site_addr, mod))
continue;
}
if (!kernel_text_address((unsigned long)site_addr)) {
WARN_ONCE(1, "can't patch static call site at %pS",
site_addr);
continue;
}
arch_static_call_transform(site_addr, NULL, func,
static_call_is_tail(site));
}
}
done:
static_call_unlock();
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(__static_call_update);
static int __static_call_init(struct module *mod,
struct static_call_site *start,
struct static_call_site *stop)
{
struct static_call_site *site;
struct static_call_key *key, *prev_key = NULL;
struct static_call_mod *site_mod;
if (start == stop)
return 0;
static_call_sort_entries(start, stop);
for (site = start; site < stop; site++) {
void *site_addr = static_call_addr(site);
if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
(!mod && init_section_contains(site_addr, 1)))
static_call_set_init(site);
key = static_call_key(site);
if (key != prev_key) {
prev_key = key;
/*
* For vmlinux (!mod) avoid the allocation by storing
* the sites pointer in the key itself. Also see
* __static_call_update()'s @first.
*
* This allows architectures (eg. x86) to call
* static_call_init() before memory allocation works.
*/
if (!mod) {
key->sites = site;
key->type |= 1;
goto do_transform;
}
site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
if (!site_mod)
return -ENOMEM;
/*
* When the key has a direct sites pointer, extract
* that into an explicit struct static_call_mod, so we
* can have a list of modules.
*/
if (static_call_key_sites(key)) {
site_mod->mod = NULL;
site_mod->next = NULL;
site_mod->sites = static_call_key_sites(key);
key->mods = site_mod;
site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
if (!site_mod)
return -ENOMEM;
}
site_mod->mod = mod;
site_mod->sites = site;
site_mod->next = static_call_key_next(key);
key->mods = site_mod;
}
do_transform:
arch_static_call_transform(site_addr, NULL, key->func,
static_call_is_tail(site));
}
return 0;
}
static int addr_conflict(struct static_call_site *site, void *start, void *end)
{
unsigned long addr = (unsigned long)static_call_addr(site);
if (addr <= (unsigned long)end &&
addr + CALL_INSN_SIZE > (unsigned long)start)
return 1;
return 0;
}
static int __static_call_text_reserved(struct static_call_site *iter_start,
struct static_call_site *iter_stop,
void *start, void *end)
{
struct static_call_site *iter = iter_start;
while (iter < iter_stop) {
if (addr_conflict(iter, start, end))
return 1;
iter++;
}
return 0;
}
#ifdef CONFIG_MODULES
static int __static_call_mod_text_reserved(void *start, void *end)
{
struct module *mod;
int ret;
preempt_disable();
mod = __module_text_address((unsigned long)start);
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
if (!try_module_get(mod))
mod = NULL;
preempt_enable();
if (!mod)
return 0;
ret = __static_call_text_reserved(mod->static_call_sites,
mod->static_call_sites + mod->num_static_call_sites,
start, end);
module_put(mod);
return ret;
}
static int static_call_add_module(struct module *mod)
{
return __static_call_init(mod, mod->static_call_sites,
mod->static_call_sites + mod->num_static_call_sites);
}
static void static_call_del_module(struct module *mod)
{
struct static_call_site *start = mod->static_call_sites;
struct static_call_site *stop = mod->static_call_sites +
mod->num_static_call_sites;
struct static_call_key *key, *prev_key = NULL;
struct static_call_mod *site_mod, **prev;
struct static_call_site *site;
for (site = start; site < stop; site++) {
key = static_call_key(site);
if (key == prev_key)
continue;
prev_key = key;
for (prev = &key->mods, site_mod = key->mods;
site_mod && site_mod->mod != mod;
prev = &site_mod->next, site_mod = site_mod->next)
;
if (!site_mod)
continue;
*prev = site_mod->next;
kfree(site_mod);
}
}
static int static_call_module_notify(struct notifier_block *nb,
unsigned long val, void *data)
{
struct module *mod = data;
int ret = 0;
cpus_read_lock();
static_call_lock();
switch (val) {
case MODULE_STATE_COMING:
ret = static_call_add_module(mod);
if (ret) {
WARN(1, "Failed to allocate memory for static calls");
static_call_del_module(mod);
}
break;
case MODULE_STATE_GOING:
static_call_del_module(mod);
break;
}
static_call_unlock();
cpus_read_unlock();
return notifier_from_errno(ret);
}
static struct notifier_block static_call_module_nb = {
.notifier_call = static_call_module_notify,
};
#else
static inline int __static_call_mod_text_reserved(void *start, void *end)
{
return 0;
}
#endif /* CONFIG_MODULES */
int static_call_text_reserved(void *start, void *end)
{
int ret = __static_call_text_reserved(__start_static_call_sites,
__stop_static_call_sites, start, end);
if (ret)
return ret;
return __static_call_mod_text_reserved(start, end);
}
int __init static_call_init(void)
{
int ret;
if (static_call_initialized)
return 0;
cpus_read_lock();
static_call_lock();
ret = __static_call_init(NULL, __start_static_call_sites,
__stop_static_call_sites);
static_call_unlock();
cpus_read_unlock();
if (ret) {
pr_err("Failed to allocate memory for static_call!\n");
BUG();
}
static_call_initialized = true;
#ifdef CONFIG_MODULES
register_module_notifier(&static_call_module_nb);
#endif
return 0;
}
early_initcall(static_call_init);
#ifdef CONFIG_STATIC_CALL_SELFTEST
static int func_a(int x)
{
return x+1;
}
static int func_b(int x)
{
return x+2;
}
DEFINE_STATIC_CALL(sc_selftest, func_a);
static struct static_call_data {
int (*func)(int);
int val;
int expect;
} static_call_data [] __initdata = {
{ NULL, 2, 3 },
{ func_b, 2, 4 },
{ func_a, 2, 3 }
};
static int __init test_static_call_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
struct static_call_data *scd = &static_call_data[i];
if (scd->func)
static_call_update(sc_selftest, scd->func);
WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
}
return 0;
}
early_initcall(test_static_call_init);
#endif /* CONFIG_STATIC_CALL_SELFTEST */
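One detail worth calling out is the tagged union that static_call_key_has_mods(), static_call_key_next() and static_call_key_sites() rely on: bit 0 of key->type distinguishes a direct sites pointer (vmlinux, set in __static_call_init()) from a mods list (modules). A standalone, userspace-style sketch of the same trick, using simplified stand-in types (the demo_* names are made up):

#include <stdio.h>
#include <stdint.h>

struct demo_key {
        void *func;
        union {
                uintptr_t type;         /* bit 0 set: 'sites'; clear: 'mods' */
                void *mods;             /* stand-in for struct static_call_mod * */
                void *sites;            /* stand-in for struct static_call_site * */
        };
};

static int demo_key_has_mods(struct demo_key *key)
{
        return !(key->type & 1);
}

static void *demo_key_sites(struct demo_key *key)
{
        return demo_key_has_mods(key) ? NULL : (void *)(key->type & ~1UL);
}

int main(void)
{
        static long fake_sites[4];      /* stand-in for the vmlinux site array */
        struct demo_key key = { 0 };

        key.sites = fake_sites;
        key.type |= 1;                  /* as __static_call_init() does for !mod */

        printf("has_mods=%d sites=%p\n", demo_key_has_mods(&key), demo_key_sites(&key));
        return 0;
}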


@@ -2027,10 +2027,11 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
{
struct bpf_trace_module *btm, *tmp;
struct module *mod = module;
int ret = 0;
if (mod->num_bpf_raw_events == 0 ||
(op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
return 0;
goto out;
mutex_lock(&bpf_module_mutex);
@@ -2040,6 +2041,8 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
if (btm) {
btm->module = module;
list_add(&btm->list, &bpf_trace_modules);
} else {
ret = -ENOMEM;
}
break;
case MODULE_STATE_GOING:
@@ -2055,7 +2058,8 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
mutex_unlock(&bpf_module_mutex);
return 0;
out:
return notifier_from_errno(ret);
}
static struct notifier_block bpf_module_nb = {
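The pattern this hunk (and the tracing hunks below) converge on: module notifier callbacks return NOTIFY_* codes, and an -errno from setup work is wrapped with notifier_from_errno() instead of being dropped. A minimal sketch, with a hypothetical setup helper:

static int example_module_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct module *mod = data;
        int ret = 0;

        if (action == MODULE_STATE_COMING)
                ret = example_setup_for(mod);   /* hypothetical; may return -ENOMEM */

        /* 0 maps to NOTIFY_OK; an -errno is encoded so the registering side
         * could recover it with notifier_to_errno(). */
        return notifier_from_errno(ret);
}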


@@ -9074,7 +9074,7 @@ static int trace_module_notify(struct notifier_block *self,
break;
}
return 0;
return NOTIFY_OK;
}
static struct notifier_block trace_module_nb = {


@@ -2646,7 +2646,7 @@ static int trace_module_notify(struct notifier_block *self,
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return 0;
return NOTIFY_OK;
}
static struct notifier_block trace_module_nb = {


@@ -96,7 +96,7 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self,
if (val == MODULE_STATE_COMING)
hold_module_trace_bprintk_format(start, end);
}
return 0;
return NOTIFY_OK;
}
/*
@@ -174,7 +174,7 @@ __init static int
module_trace_bprintk_format_notify(struct notifier_block *self,
unsigned long val, void *data)
{
return 0;
return NOTIFY_OK;
}
static inline const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)


@@ -221,6 +221,29 @@ static void *func_remove(struct tracepoint_func **funcs,
return old;
}
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
{
void *func = tp->iterator;
/* Synthetic events do not have static call sites */
if (!tp->static_call_key)
return;
if (!tp_funcs[1].func) {
func = tp_funcs[0].func;
/*
* If going from the iterator back to a single caller,
* we need to synchronize with __DO_TRACE to make sure
* that the data passed to the callback is the one that
* belongs to that callback.
*/
if (sync)
tracepoint_synchronize_unregister();
}
__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
/*
* Add the probe function to a tracepoint.
*/
@@ -251,8 +274,9 @@ static int tracepoint_add_func(struct tracepoint *tp,
* include/linux/tracepoint.h using rcu_dereference_sched().
*/
rcu_assign_pointer(tp->funcs, tp_funcs);
if (!static_key_enabled(&tp->key))
static_key_slow_inc(&tp->key);
tracepoint_update_call(tp, tp_funcs, false);
static_key_enable(&tp->key);
release_probes(old);
return 0;
}
@@ -281,10 +305,13 @@ static int tracepoint_remove_func(struct tracepoint *tp,
if (tp->unregfunc && static_key_enabled(&tp->key))
tp->unregfunc();
if (static_key_enabled(&tp->key))
static_key_slow_dec(&tp->key);
static_key_disable(&tp->key);
rcu_assign_pointer(tp->funcs, tp_funcs);
} else {
rcu_assign_pointer(tp->funcs, tp_funcs);
tracepoint_update_call(tp, tp_funcs,
tp_funcs[0].func != old[0].func);
}
rcu_assign_pointer(tp->funcs, tp_funcs);
release_probes(old);
return 0;
}
@@ -521,7 +548,7 @@ static int tracepoint_module_notify(struct notifier_block *self,
case MODULE_STATE_UNFORMED:
break;
}
return ret;
return notifier_from_errno(ret);
}
static struct notifier_block tracepoint_module_nb = {
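In rough terms, tracepoint_update_call() above chooses the static call target: with exactly one registered probe the tracepoint calls that probe directly, otherwise it calls the iterator, which walks tp->funcs. A deliberately simplified, userspace model of that dispatch choice, using a plain function pointer in place of a patched call site (all names made up):

#include <stdio.h>

typedef void (*probe_fn)(void *data, int arg);

struct probe { probe_fn func; void *data; };

static struct probe probes[3];                  /* NULL-func terminated, like tp->funcs */

/* With more than one probe, the "static call" target is this iterator. */
static void iterator(void *unused, int arg)
{
        for (struct probe *p = probes; p->func; p++)
                p->func(p->data, arg);
}

static probe_fn target = iterator;              /* models the patched call target */

static void update_call(void)
{
        if (probes[0].func && !probes[1].func)
                target = probes[0].func;        /* single probe: call it directly */
        else
                target = iterator;
}

static void probe_a(void *data, int arg) { printf("A(%d)\n", arg); }

int main(void)
{
        probes[0] = (struct probe){ probe_a, NULL };
        update_call();
        /* Both cases pass the first entry's data; the iterator ignores it. */
        target(probes[0].data, 42);
        return 0;
}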


@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _STATIC_CALL_TYPES_H
#define _STATIC_CALL_TYPES_H
#include <linux/types.h>
#include <linux/stringify.h>
#define STATIC_CALL_KEY_PREFIX __SCK__
#define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
#define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
#define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
#define STATIC_CALL_TRAMP_PREFIX __SCT__
#define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
#define STATIC_CALL_TRAMP_PREFIX_LEN (sizeof(STATIC_CALL_TRAMP_PREFIX_STR) - 1)
#define STATIC_CALL_TRAMP(name) __PASTE(STATIC_CALL_TRAMP_PREFIX, name)
#define STATIC_CALL_TRAMP_STR(name) __stringify(STATIC_CALL_TRAMP(name))
/*
* Flags in the low bits of static_call_site::key.
*/
#define STATIC_CALL_SITE_TAIL 1UL /* tail call */
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
*/
struct static_call_site {
s32 addr;
s32 key;
};
#endif /* _STATIC_CALL_TYPES_H */
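Both s32 fields are self-relative offsets (the stored value plus its own address yields the target), and the key offset additionally carries the TAIL/INIT flags in its low bits, which is why static_call_key() in kernel/static_call.c masks with ~STATIC_CALL_SITE_FLAGS. A standalone, userspace-style sketch of the decode, with a fabricated offset standing in for what the tooling's relocations would produce:

#include <stdio.h>
#include <stdint.h>

#define SITE_TAIL       1UL
#define SITE_FLAGS      3UL

struct demo_site { int32_t addr; int32_t key; };

static void *demo_site_key(struct demo_site *site)
{
        return (void *)(((intptr_t)site->key + (intptr_t)&site->key) & ~(intptr_t)SITE_FLAGS);
}

static int demo_site_is_tail(struct demo_site *site)
{
        return ((intptr_t)site->key + (intptr_t)&site->key) & SITE_TAIL;
}

int main(void)
{
        static long fake_key;           /* word-aligned, so the low bits are free */
        static struct demo_site site;   /* static: keeps the offset within 32 bits */

        /* Fake the self-relative offset a relocation would emit, plus the TAIL flag. */
        site.key = (int32_t)(((intptr_t)&fake_key - (intptr_t)&site.key) | SITE_TAIL);

        printf("key=%p (expect %p) tail=%d\n",
               demo_site_key(&site), (void *)&fake_key, demo_site_is_tail(&site));
        return 0;
}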


@@ -16,6 +16,7 @@
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
#define FAKE_JUMP_OFFSET -1
@@ -433,6 +434,103 @@ reachable:
return 0;
}
static int create_static_call_sections(struct objtool_file *file)
{
struct section *sec, *reloc_sec;
struct reloc *reloc;
struct static_call_site *site;
struct instruction *insn;
struct symbol *key_sym;
char *key_name, *tmp;
int idx;
sec = find_section_by_name(file->elf, ".static_call_sites");
if (sec) {
INIT_LIST_HEAD(&file->static_call_list);
WARN("file already has .static_call_sites section, skipping");
return 0;
}
if (list_empty(&file->static_call_list))
return 0;
idx = 0;
list_for_each_entry(insn, &file->static_call_list, static_call_node)
idx++;
sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
sizeof(struct static_call_site), idx);
if (!sec)
return -1;
reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
if (!reloc_sec)
return -1;
idx = 0;
list_for_each_entry(insn, &file->static_call_list, static_call_node) {
site = (struct static_call_site *)sec->data->d_buf + idx;
memset(site, 0, sizeof(struct static_call_site));
/* populate reloc for 'addr' */
reloc = malloc(sizeof(*reloc));
if (!reloc) {
perror("malloc");
return -1;
}
memset(reloc, 0, sizeof(*reloc));
reloc->sym = insn->sec->sym;
reloc->addend = insn->offset;
reloc->type = R_X86_64_PC32;
reloc->offset = idx * sizeof(struct static_call_site);
reloc->sec = reloc_sec;
elf_add_reloc(file->elf, reloc);
/* find key symbol */
key_name = strdup(insn->call_dest->name);
if (!key_name) {
perror("strdup");
return -1;
}
if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
STATIC_CALL_TRAMP_PREFIX_LEN)) {
WARN("static_call: trampoline name malformed: %s", key_name);
return -1;
}
tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
key_sym = find_symbol_by_name(file->elf, tmp);
if (!key_sym) {
WARN("static_call: can't find static_call_key symbol: %s", tmp);
return -1;
}
free(key_name);
/* populate reloc for 'key' */
reloc = malloc(sizeof(*reloc));
if (!reloc) {
perror("malloc");
return -1;
}
memset(reloc, 0, sizeof(*reloc));
reloc->sym = key_sym;
reloc->addend = is_sibling_call(insn) ? STATIC_CALL_SITE_TAIL : 0;
reloc->type = R_X86_64_PC32;
reloc->offset = idx * sizeof(struct static_call_site) + 4;
reloc->sec = reloc_sec;
elf_add_reloc(file->elf, reloc);
idx++;
}
if (elf_rebuild_reloc_section(file->elf, reloc_sec))
return -1;
return 0;
}
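A side note on the key lookup above: the key symbol name is derived from the trampoline name by rewriting the prefix in place, so "__SCT__foo" becomes "__SCK__foo"; offsetting into the strdup'd buffer keeps this working even if the two prefixes had different lengths. A standalone sketch of that string manipulation (the call name is made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KEY_PREFIX      "__SCK__"
#define TRAMP_PREFIX    "__SCT__"

int main(void)
{
        char *key_name = strdup(TRAMP_PREFIX "example_call");
        char *tmp;

        if (!key_name)
                return 1;

        /* Point just far enough into the buffer that writing the key prefix
         * there leaves "__SCK__example_call" as the resulting string. */
        tmp = key_name + strlen(TRAMP_PREFIX) - strlen(KEY_PREFIX);
        memcpy(tmp, KEY_PREFIX, strlen(KEY_PREFIX));

        printf("%s\n", tmp);
        free(key_name);
        return 0;
}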
/*
* Warnings shouldn't be reported for ignored functions.
*/
@@ -705,6 +803,10 @@ static int add_jump_destinations(struct objtool_file *file)
} else {
/* external sibling call */
insn->call_dest = reloc->sym;
if (insn->call_dest->static_call_tramp) {
list_add_tail(&insn->static_call_node,
&file->static_call_list);
}
continue;
}
@@ -756,6 +858,10 @@ static int add_jump_destinations(struct objtool_file *file)
/* internal sibling call */
insn->call_dest = insn->jump_dest->func;
if (insn->call_dest->static_call_tramp) {
list_add_tail(&insn->static_call_node,
&file->static_call_list);
}
}
}
}
@@ -1578,6 +1684,23 @@ static int read_intra_function_calls(struct objtool_file *file)
return 0;
}
static int read_static_call_tramps(struct objtool_file *file)
{
struct section *sec;
struct symbol *func;
for_each_sec(file, sec) {
list_for_each_entry(func, &sec->symbol_list, list) {
if (func->bind == STB_GLOBAL &&
!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
func->static_call_tramp = true;
}
}
return 0;
}
static void mark_rodata(struct objtool_file *file)
{
struct section *sec;
@@ -1625,6 +1748,10 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
ret = read_static_call_tramps(file);
if (ret)
return ret;
ret = add_jump_destinations(file);
if (ret)
return ret;
@@ -2488,6 +2615,11 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
if (dead_end_function(file, insn->call_dest))
return 0;
if (insn->type == INSN_CALL && insn->call_dest->static_call_tramp) {
list_add_tail(&insn->static_call_node,
&file->static_call_list);
}
break;
case INSN_JUMP_CONDITIONAL:
@@ -2847,6 +2979,7 @@ int check(const char *_objname, bool orc)
INIT_LIST_HEAD(&file.insn_list);
hash_init(file.insn_hash);
INIT_LIST_HEAD(&file.static_call_list);
file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
file.ignore_unreachables = no_unreachable;
file.hints = false;
@@ -2894,6 +3027,11 @@ int check(const char *_objname, bool orc)
warnings += ret;
}
ret = create_static_call_sections(&file);
if (ret < 0)
goto out;
warnings += ret;
if (orc) {
ret = create_orc(&file);
if (ret < 0)


@@ -22,6 +22,7 @@ struct insn_state {
struct instruction {
struct list_head list;
struct hlist_node hash;
struct list_head static_call_node;
struct section *sec;
unsigned long offset;
unsigned int len;


@@ -652,7 +652,7 @@ err:
}
struct section *elf_create_section(struct elf *elf, const char *name,
size_t entsize, int nr)
unsigned int sh_flags, size_t entsize, int nr)
{
struct section *sec, *shstrtab;
size_t size = entsize * nr;
@@ -712,7 +712,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->sh.sh_entsize = entsize;
sec->sh.sh_type = SHT_PROGBITS;
sec->sh.sh_addralign = 1;
sec->sh.sh_flags = SHF_ALLOC;
sec->sh.sh_flags = SHF_ALLOC | sh_flags;
/* Add section name to .shstrtab (or .strtab for Clang) */
@@ -767,7 +767,7 @@ static struct section *elf_create_rel_reloc_section(struct elf *elf, struct sect
strcpy(relocname, ".rel");
strcat(relocname, base->name);
sec = elf_create_section(elf, relocname, sizeof(GElf_Rel), 0);
sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rel), 0);
free(relocname);
if (!sec)
return NULL;
@@ -797,7 +797,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec
strcpy(relocname, ".rela");
strcat(relocname, base->name);
sec = elf_create_section(elf, relocname, sizeof(GElf_Rela), 0);
sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rela), 0);
free(relocname);
if (!sec)
return NULL;


@@ -56,6 +56,7 @@ struct symbol {
unsigned int len;
struct symbol *pfunc, *cfunc, *alias;
bool uaccess_safe;
bool static_call_tramp;
};
struct reloc {
@@ -120,7 +121,7 @@ static inline u32 reloc_hash(struct reloc *reloc)
}
struct elf *elf_open_read(const char *name, int flags);
struct section *elf_create_section(struct elf *elf, const char *name, size_t entsize, int nr);
struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
struct section *elf_create_reloc_section(struct elf *elf, struct section *base, int reltype);
void elf_add_reloc(struct elf *elf, struct reloc *reloc);
int elf_write_insn(struct elf *elf, struct section *sec,


@@ -16,6 +16,7 @@ struct objtool_file {
struct elf *elf;
struct list_head insn_list;
DECLARE_HASHTABLE(insn_hash, 20);
struct list_head static_call_list;
bool ignore_unreachables, c_file, hints, rodata;
};


@@ -177,7 +177,7 @@ int create_orc_sections(struct objtool_file *file)
/* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), idx);
if (!sec)
return -1;
@@ -186,7 +186,7 @@ int create_orc_sections(struct objtool_file *file)
return -1;
/* create .orc_unwind section */
u_sec = elf_create_section(file->elf, ".orc_unwind",
u_sec = elf_create_section(file->elf, ".orc_unwind", 0,
sizeof(struct orc_entry), idx);
/* populate sections */


@@ -7,6 +7,7 @@ arch/x86/include/asm/orc_types.h
arch/x86/include/asm/emulate_prefix.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
include/linux/static_call_types.h
'
check_2 () {


@@ -171,7 +171,7 @@ class SystemValues:
tracefuncs = {
'sys_sync': {},
'ksys_sync': {},
'__pm_notifier_call_chain': {},
'pm_notifier_call_chain_robust': {},
'pm_prepare_console': {},
'pm_notifier_call_chain': {},
'freeze_processes': {},