linux-stable/lib/smp_processor_id.c
Yejune Deng 570a752b7a lib/smp_processor_id: Use is_percpu_thread() instead of nr_cpus_allowed
is_percpu_thread() more elegantly handles SMP vs UP, and further checks the
presence of PF_NO_SETAFFINITY. This lets us catch cases where
check_preemption_disabled() can race with a concurrent sched_setaffinity().

Signed-off-by: Yejune Deng <yejune.deng@gmail.com>
[Amended changelog]
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210510151024.2448573-3-valentin.schneider@arm.com
2021-05-19 10:51:40 +02:00
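
For context on the change described above: is_percpu_thread() is a helper from include/linux/sched.h. The sketch below shows its rough shape (quoted from memory of that header; exact details may differ between kernel versions). On SMP it requires both PF_NO_SETAFFINITY and a single allowed CPU, which is what closes the race with a concurrent sched_setaffinity(); on UP it is trivially true.

/*
 * Sketch of is_percpu_thread() as defined in include/linux/sched.h
 * (approximate; may vary by kernel version).
 */
static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
        return (current->flags & PF_NO_SETAFFINITY) &&
               (current->nr_cpus_allowed == 1);
#else
        return true;
#endif
}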

// SPDX-License-Identifier: GPL-2.0
/*
 * lib/smp_processor_id.c
 *
 * DEBUG_PREEMPT variant of smp_processor_id().
 */
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/sched.h>

noinstr static
unsigned int check_preemption_disabled(const char *what1, const char *what2)
{
        int this_cpu = raw_smp_processor_id();

        if (likely(preempt_count()))
                goto out;

        if (irqs_disabled())
                goto out;

        if (is_percpu_thread())
                goto out;

#ifdef CONFIG_SMP
        if (current->migration_disabled)
                goto out;
#endif

        /*
         * It is valid to assume CPU-locality during early bootup:
         */
        if (system_state < SYSTEM_SCHEDULING)
                goto out;

        /*
         * Avoid recursion:
         */
        preempt_disable_notrace();

        instrumentation_begin();
        if (!printk_ratelimit())
                goto out_enable;

        printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
                what1, what2, preempt_count() - 1, current->comm, current->pid);

        printk("caller is %pS\n", __builtin_return_address(0));
        dump_stack();
        instrumentation_end();

out_enable:
        preempt_enable_no_resched_notrace();
out:
        return this_cpu;
}

noinstr unsigned int debug_smp_processor_id(void)
{
        return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);

noinstr void __this_cpu_preempt_check(const char *op)
{
        check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
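
As the file header comment notes, this is the DEBUG_PREEMPT variant of smp_processor_id(). A sketch of how the wrapper gets selected, based on the CONFIG_DEBUG_PREEMPT switch in include/linux/smp.h (assumed wiring; the exact lines vary by kernel version):

/*
 * Sketch of the smp_processor_id() selection in include/linux/smp.h
 * (approximate). With CONFIG_DEBUG_PREEMPT, every smp_processor_id()
 * call goes through the checking wrapper defined in this file.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif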