mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-30 08:02:30 +00:00
0ff0edb550
- rtmutex cleanup & spring cleaning pass that removes ~400 lines of code - Futex simplifications & cleanups - Add debugging to the CSD code, to help track down a tenacious race (or hw problem) - Add lockdep_assert_not_held(), to allow code to require a lock to not be held, and propagate this into the ath10k driver - Misc LKMM documentation updates - Misc KCSAN updates: cleanups & documentation updates - Misc fixes and cleanups - Fix locktorture bugs with ww_mutexes Signed-off-by: Ingo Molnar <mingo@kernel.org> -----BEGIN PGP SIGNATURE----- iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmCJDn0RHG1pbmdvQGtl cm5lbC5vcmcACgkQEnMQ0APhK1hPrRAAryS4zPnuDsfkVk0smxo7a0lK5ljbH2Xo 28QUZXOl6upnEV8dzbjwG7eAjt5ZJVI5tKIeG0PV0NUJH2nsyHwESdtULGGYuPf/ 4YUzNwZJa+nI/jeBnVsXCimLVxxnNCRdR7yOVOHm4ukEwa+YTNt1pvlYRmUd4YyH Q5cCrpb3THvLka3AAamEbqnHnAdGxHKuuHYVRkODpMQ+zrQvtN8antYsuk8kJsqM m+GZg/dVCuLEPah5k+lOACtcq/w7HCmTlxS8t4XLvD52jywFZLcCPvi1rk0+JR+k Vd9TngC09GJ4jXuDpr42YKkU9/X6qy2Es39iA/ozCvc1Alrhspx/59XmaVSuWQGo XYuEPx38Yuo/6w16haSgp0k4WSay15A4uhCTQ75VF4vli8Bqgg9PaxLyQH1uG8e2 xk8U90R7bDzLlhKYIx1Vu5Z0t7A1JtB5CJtgpcfg/zQLlzygo75fHzdAiU5fDBDm 3QQXSU2Oqzt7c5ZypioHWazARk7tL6th38KGN1gZDTm5zwifpaCtHi7sml6hhZ/4 ATH6zEPzIbXJL2UqumSli6H4ye5ORNjOu32r7YPqLI4IDbzpssfoSwfKYlQG4Tvn 4H1Ukirzni0gz5+wbleItzf2aeo1rocs4YQTnaT02j8NmUHUz4AzOHGOQFr5Tvh0 wk/P4MIoSb0= =cOOk -----END PGP SIGNATURE----- Merge tag 'locking-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull locking updates from Ingo Molnar: - rtmutex cleanup & spring cleaning pass that removes ~400 lines of code - Futex simplifications & cleanups - Add debugging to the CSD code, to help track down a tenacious race (or hw problem) - Add lockdep_assert_not_held(), to allow code to require a lock to not be held, and propagate this into the ath10k driver - Misc LKMM documentation updates - Misc KCSAN updates: cleanups & documentation updates - Misc fixes and cleanups - Fix locktorture bugs with ww_mutexes * tag 
'locking-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits) kcsan: Fix printk format string static_call: Relax static_call_update() function argument type static_call: Fix unused variable warn w/o MODULE locking/rtmutex: Clean up signal handling in __rt_mutex_slowlock() locking/rtmutex: Restrict the trylock WARN_ON() to debug locking/rtmutex: Fix misleading comment in rt_mutex_postunlock() locking/rtmutex: Consolidate the fast/slowpath invocation locking/rtmutex: Make text section and inlining consistent locking/rtmutex: Move debug functions as inlines into common header locking/rtmutex: Decrapify __rt_mutex_init() locking/rtmutex: Remove pointless CONFIG_RT_MUTEXES=n stubs locking/rtmutex: Inline chainwalk depth check locking/rtmutex: Move rt_mutex_debug_task_free() to rtmutex.c locking/rtmutex: Remove empty and unused debug stubs locking/rtmutex: Consolidate rt_mutex_init() locking/rtmutex: Remove output from deadlock detector locking/rtmutex: Remove rtmutex deadlock tester leftovers locking/rtmutex: Remove rt_mutex_timed_lock() MAINTAINERS: Add myself as futex reviewer locking/mutex: Remove repeated declaration ...
303 lines
9 KiB
C
303 lines
9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_STATIC_CALL_H
|
|
#define _LINUX_STATIC_CALL_H
|
|
|
|
/*
|
|
* Static call support
|
|
*
|
|
* Static calls use code patching to hard-code function pointers into direct
|
|
* branch instructions. They give the flexibility of function pointers, but
|
|
* with improved performance. This is especially important for cases where
|
|
* retpolines would otherwise be used, as retpolines can significantly impact
|
|
* performance.
|
|
*
|
|
*
|
|
* API overview:
|
|
*
|
|
* DECLARE_STATIC_CALL(name, func);
|
|
* DEFINE_STATIC_CALL(name, func);
|
|
* DEFINE_STATIC_CALL_NULL(name, typename);
|
|
* static_call(name)(args...);
|
|
* static_call_cond(name)(args...);
|
|
* static_call_update(name, func);
|
|
* static_call_query(name);
|
|
*
|
|
* Usage example:
|
|
*
|
|
* # Start with the following functions (with identical prototypes):
|
|
* int func_a(int arg1, int arg2);
|
|
* int func_b(int arg1, int arg2);
|
|
*
|
|
* # Define a 'my_name' reference, associated with func_a() by default
|
|
* DEFINE_STATIC_CALL(my_name, func_a);
|
|
*
|
|
* # Call func_a()
|
|
* static_call(my_name)(arg1, arg2);
|
|
*
|
|
* # Update 'my_name' to point to func_b()
|
|
* static_call_update(my_name, &func_b);
|
|
*
|
|
* # Call func_b()
|
|
* static_call(my_name)(arg1, arg2);
|
|
*
|
|
*
|
|
* Implementation details:
|
|
*
|
|
* This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
|
|
* Otherwise basic indirect calls are used (with function pointers).
|
|
*
|
|
* Each static_call() site calls into a trampoline associated with the name.
|
|
* The trampoline has a direct branch to the default function. Updates to a
|
|
* name will modify the trampoline's branch destination.
|
|
*
|
|
* If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
|
|
* themselves will be patched at runtime to call the functions directly,
|
|
* rather than calling through the trampoline. This requires objtool or a
|
|
* compiler plugin to detect all the static_call() sites and annotate them
|
|
* in the .static_call_sites section.
|
|
*
|
|
*
|
|
* Notes on NULL function pointers:
|
|
*
|
|
* Static_call()s support NULL functions, with many of the caveats that
|
|
* regular function pointers have.
|
|
*
|
|
* Clearly calling a NULL function pointer is 'BAD', so too for
|
|
* static_call()s (although when HAVE_STATIC_CALL it might not be immediately
|
|
* fatal). A NULL static_call can be the result of:
|
|
*
|
|
 *   DEFINE_STATIC_CALL_NULL(my_static_call, void (*)(int));
|
|
*
|
|
* which is equivalent to declaring a NULL function pointer with just a
|
|
* typename:
|
|
*
|
|
* void (*my_func_ptr)(int arg1) = NULL;
|
|
*
|
|
* or using static_call_update() with a NULL function. In both cases the
|
|
* HAVE_STATIC_CALL implementation will patch the trampoline with a RET
|
|
* instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
|
|
* architectures can patch the trampoline call to a NOP.
|
|
*
|
|
* In all cases, any argument evaluation is unconditional. Unlike a regular
|
|
* conditional function pointer call:
|
|
*
|
|
* if (my_func_ptr)
|
|
* my_func_ptr(arg1)
|
|
*
|
|
 *   where the argument evaluation also depends on the pointer value.
|
|
*
|
|
* When calling a static_call that can be NULL, use:
|
|
*
|
|
* static_call_cond(name)(arg1);
|
|
*
|
|
* which will include the required value tests to avoid NULL-pointer
|
|
* dereferences.
|
|
*
|
|
* To query which function is currently set to be called, use:
|
|
*
|
|
* func = static_call_query(name);
|
|
*/
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/static_call_types.h>
|
|
|
|
#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>

/*
 * Either @site or @tramp can be NULL.
 */
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);

/* Address of the arch-defined trampoline for @name. */
#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)

#else
/* No arch support: there is no trampoline to patch. */
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif
|
|
|
|
/*
 * static_call_update() - switch @name to call @func.
 *
 * The typeof() temporary enforces at compile time that @func matches the
 * prototype the call was declared with, before it is passed on as an
 * untyped pointer to __static_call_update().
 */
#define static_call_update(name, func)					\
({									\
	typeof(&STATIC_CALL_TRAMP(name)) __F = (func);			\
	__static_call_update(&STATIC_CALL_KEY(name),			\
			     STATIC_CALL_TRAMP_ADDR(name), __F);	\
})

/* Return the function pointer @name is currently set to call. */
#define static_call_query(name)	(READ_ONCE(STATIC_CALL_KEY(name).func))
|
|
|
|
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE

/* Boot-time initialization; presumably patches the recorded call sites
 * in vmlinux's .static_call_sites — see kernel/static_call.c. */
extern int __init static_call_init(void);

/*
 * Per-module list node holding the static_call sites that belong to one
 * module (or to vmlinux itself when @mod is NULL).
 */
struct static_call_mod {
	struct static_call_mod *next;
	struct module *mod; /* for vmlinux, mod == NULL */
	struct static_call_site *sites;
};
|
|
|
|
/* For finding the key associated with a trampoline */
struct static_call_tramp_key {
	/* NOTE(review): s32 suggests these are relative offsets emitted by
	 * ARCH_ADD_TRAMP_KEY() — confirm against the arch implementation. */
	s32 tramp;
	s32 key;
};
|
|
|
|
/* Retarget @key/@tramp to @func; with INLINE this also patches call sites. */
extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
/* Presumably registers a module's static_call sites on load — verify in
 * kernel/static_call.c. */
extern int static_call_mod_init(struct module *mod);
/* Non-zero when [start, end) overlaps text owned by static-call patching. */
extern int static_call_text_reserved(void *start, void *end);

/* Common target for static calls whose only job is to return 0. */
extern long __static_call_return0(void);
|
|
|
|
/*
 * Define the key (initial target @_func_init) and the arch trampoline for
 * @name.  NOTE(review): .type = 1 presumably tags the key's site/module
 * bookkeeping union — confirm in static_call_types.h.
 */
#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func_init,					\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)

/* Like __DEFINE_STATIC_CALL() but with no initial target (NULL). */
#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
		.type = 1,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
|
|
|
|
/*
 * With inline patching a NULL target becomes a NOP at the call site (see
 * the header comment above), so the call is issued unconditionally; the
 * cast to void discards the meaningless return value.
 */
#define static_call_cond(name)	(void)__static_call(name)

/* Export both key and trampoline: modules may call AND retarget @name. */
#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));				\
	ARCH_ADD_TRAMP_KEY(name)
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));			\
	ARCH_ADD_TRAMP_KEY(name)
|
|
|
|
#elif defined(CONFIG_HAVE_STATIC_CALL)

/* Out-of-line trampolines only: nothing to patch at boot. */
static inline int static_call_init(void) { return 0; }
|
|
|
|
/* Define the key (initial target @_func_init) plus the arch trampoline. */
#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func_init,					\
	};								\
	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)

/* Like __DEFINE_STATIC_CALL() but starting out NULL (trampoline RETs). */
#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
	};								\
	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)


/*
 * A NULL target patches the trampoline to a bare RET (see header comment),
 * so the call may be made unconditionally; cast to void drops the result.
 */
#define static_call_cond(name)	(void)__static_call(name)
|
|
|
|
/*
 * Record the new target in the key, then patch the trampoline's branch.
 * NOTE(review): cpus_read_lock() is held across the patch — presumably
 * arch text patching must be serialized against CPU hotplug; confirm in
 * arch_static_call_transform().
 */
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	cpus_read_lock();
	WRITE_ONCE(key->func, func);
	arch_static_call_transform(NULL, tramp, func, false);
	cpus_read_unlock();
}
|
|
|
|
/* Out-of-line static calls never patch call sites, so no text range is
 * ever reserved on their behalf. */
static inline int static_call_text_reserved(void *start, void *end) { return 0; }
|
|
|
|
/* Trivial target used by DEFINE_STATIC_CALL_RET0(): always yields 0. */
static inline long __static_call_return0(void) { return 0; }
|
|
|
|
/* Export both key and trampoline: modules may call AND retarget @name. */
#define EXPORT_STATIC_CALL(name)					\
	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)					\
	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)					\
	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
|
|
|
|
#else /* Generic implementation */

/* Plain function pointers: nothing to initialize or patch at boot. */
static inline int static_call_init(void) { return 0; }

/* Trivial target used by DEFINE_STATIC_CALL_RET0(): always yields 0. */
static inline long __static_call_return0(void)
{
	return 0;
}
|
|
|
|
/* Generic case: the "static" call is just a function pointer in the key;
 * no trampoline exists, so only the key is defined. */
#define __DEFINE_STATIC_CALL(name, _func, _func_init)			\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = _func_init,					\
	}

/* Like __DEFINE_STATIC_CALL() but with no initial target (NULL). */
#define DEFINE_STATIC_CALL_NULL(name, _func)				\
	DECLARE_STATIC_CALL(name, _func);				\
	struct static_call_key STATIC_CALL_KEY(name) = {		\
		.func = NULL,						\
	}

/* Do-nothing stand-in substituted for a NULL target by __static_call_cond(). */
static inline void __static_call_nop(void) { }
|
|
|
|
/*
 * This horrific hack takes care of two things:
 *
 *  - it ensures the compiler will only load the function pointer ONCE,
 *    which avoids a reload race.
 *
 *  - it ensures the argument evaluation is unconditional, similar
 *    to the HAVE_STATIC_CALL variant.
 *
 * Sadly current GCC/Clang (10 for both) do not optimize this properly
 * and will emit an indirect call for the NULL case :-(
 *
 * The ({ }) statement expression evaluates to a callable pointer of the
 * declared prototype; a NULL target is swapped for __static_call_nop so
 * the subsequent call is always safe.
 */
#define __static_call_cond(name)					\
({									\
	void *func = READ_ONCE(STATIC_CALL_KEY(name).func);		\
	if (!func)							\
		func = &__static_call_nop;				\
	(typeof(STATIC_CALL_TRAMP(name))*)func;				\
})

#define static_call_cond(name)	(void)__static_call_cond(name)
|
|
|
|
/*
 * Generic update: a single pointer store.  WRITE_ONCE() pairs with the
 * READ_ONCE() in __static_call_cond() and static_call_query().  @tramp is
 * unused — there is no trampoline in this configuration.
 */
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	WRITE_ONCE(key->func, func);
}
|
|
|
|
/* Generic static calls are ordinary data, never patched text, so no
 * range can overlap static-call-owned text. */
static inline int static_call_text_reserved(void *start, void *end) { return 0; }
|
|
|
|
/* No trampoline exists here; only the key (the function pointer) is exported. */
#define EXPORT_STATIC_CALL(name)	EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name)	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))

#endif /* CONFIG_HAVE_STATIC_CALL */
|
|
|
|
/* Define 'name' with initial target @_func. */
#define DEFINE_STATIC_CALL(name, _func)					\
	__DEFINE_STATIC_CALL(name, _func, _func)

/* Define 'name' whose initial target simply returns 0
 * (see __static_call_return0() above). */
#define DEFINE_STATIC_CALL_RET0(name, _func)				\
	__DEFINE_STATIC_CALL(name, _func, __static_call_return0)

#endif /* _LINUX_STATIC_CALL_H */
|