mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 17:08:10 +00:00
e3ff7c609f
There have been reports [1][2] of live patches failing to complete within a reasonable amount of time due to CPU-bound kthreads. Fix it by patching tasks in cond_resched(). There are four different flavors of cond_resched(), depending on the kernel configuration. Hook into all of them. A more elegant solution might be to use a preempt notifier. However, non-ORC unwinders can't unwind a preempted task reliably. [1] https://lore.kernel.org/lkml/20220507174628.2086373-1-song@kernel.org/ [2] https://lkml.kernel.org/lkml/20230120-vhost-klp-switching-v1-0-7c2b65519c43@kernel.org Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Petr Mladek <pmladek@suse.com> Tested-by: Seth Forshee (DigitalOcean) <sforshee@kernel.org> Link: https://lore.kernel.org/r/4ae981466b7814ec221014fc2554b2f86f3fb70b.1677257135.git.jpoimboe@kernel.org
29 lines
802 B
C
29 lines
802 B
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_LIVEPATCH_SCHED_H_
#define _LINUX_LIVEPATCH_SCHED_H_

#include <linux/jump_label.h>
#include <linux/static_call_types.h>

#ifdef CONFIG_LIVEPATCH

/* Attempt to switch the current task to the target livepatch state. */
void __klp_sched_try_switch(void);

/*
 * Without PREEMPT_DYNAMIC static calls, hook livepatch task switching into
 * cond_resched() via a static branch so CPU-bound kthreads can complete a
 * patch transition (per the commit: patches otherwise stall on such tasks).
 * With CONFIG_PREEMPT_DYNAMIC + CONFIG_HAVE_PREEMPT_DYNAMIC_CALL the hook is
 * presumably wired up through the cond_resched() static call instead — this
 * header only provides the static-key flavor.
 */
#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

/* False by default; enabled only while a livepatch transition is pending. */
DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);

/*
 * Fast path for cond_resched(): a no-op unless a livepatch transition is in
 * progress, in which case try to switch the current task.
 */
static __always_inline void klp_sched_try_switch(void)
{
	if (static_branch_unlikely(&klp_sched_try_switch_key))
		__klp_sched_try_switch();
}

#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */

#else /* !CONFIG_LIVEPATCH */

/* Stubs: with livepatching disabled there is nothing to switch. */
static inline void klp_sched_try_switch(void) {}
static inline void __klp_sched_try_switch(void) {}

#endif /* CONFIG_LIVEPATCH */

#endif /* _LINUX_LIVEPATCH_SCHED_H_ */