cpu/hotplug: Allow "parallel" bringup up to CPUHP_BP_KICK_AP_STATE

There is often significant latency in the early stages of CPU bringup, and
time is wasted by waking each CPU (e.g. with INIT/SIPI/SIPI on x86) and
then waiting for it to respond before moving on to the next.

Allow a platform to enable parallel setup, which brings all to-be-onlined
CPUs up to the CPUHP_BP_KICK_AP state. While this state advancement on the
control CPU (BP) is single-threaded, the important part is the last state,
CPUHP_BP_KICK_AP, which wakes the to-be-onlined CPUs up.

This allows the CPUs to run up to the first synchronization point,
cpuhp_ap_sync_alive(), where they wait for the control CPU to release them
one by one for the full onlining procedure.

This parallelism depends on the CPU hotplug core sync mechanism, which
ensures that the CPUs brought up in parallel wait for release before
touching any state which would make the CPU visible to anything outside
the hotplug control mechanism.
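
The shape of that handshake, reduced to its essence, looks roughly like
the sketch below. This is illustrative pseudo-kernel code assuming a
per-CPU state variable; the real mechanism lives in the hotplug core sync
code and is more careful (atomic state transitions, timeouts):

/* Illustrative sketch of the alive/release handshake, not kernel code. */
enum ap_sync { AP_DEAD, AP_KICKED, AP_ALIVE, AP_RELEASED };
static atomic_t ap_state[NR_CPUS];

/* AP side: early in the low level bringup path. */
static void ap_sync_alive(int cpu)
{
	atomic_set(&ap_state[cpu], AP_ALIVE);		/* "I am up" */
	while (atomic_read(&ap_state[cpu]) != AP_RELEASED)
		cpu_relax();			/* park, touch nothing global */
}

/* Control CPU side: release one AP for the full onlining procedure. */
static void bp_release_ap(int cpu)
{
	while (atomic_read(&ap_state[cpu]) != AP_ALIVE)
		cpu_relax();			/* wait for the AP to arrive */
	atomic_set(&ap_state[cpu], AP_RELEASED);
}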

To handle the SMT constraints of X86 correctly, the bringup happens in two
iterations when CONFIG_HOTPLUG_SMT is enabled. The control CPU brings up
the primary SMT threads of each core first, which can load the microcode
without the need to rendezvous with the thread siblings. Once that's
completed, it brings up the secondary SMT threads.
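
Schematically, the SMT-aware case condenses to two passes over the
present CPUs. This is just a condensed view of
cpuhp_bringup_cpus_parallel() in the kernel/cpu.c hunk further down, with
primary_threads/secondary_threads standing in for the cpumask
computations done there:

/* Pass 1: kick all primary threads in parallel, then online them */
cpuhp_bringup_mask(primary_threads, ncpus, CPUHP_BP_KICK_AP);
cpuhp_bringup_mask(primary_threads, ncpus, CPUHP_ONLINE);
/* Pass 2: same for the secondary SMT siblings */
cpuhp_bringup_mask(secondary_threads, ncpus, CPUHP_BP_KICK_AP);
cpuhp_bringup_mask(secondary_threads, ncpus, CPUHP_ONLINE);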

Co-developed-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205257.240231377@linutronix.de
commit 18415f33e2 (parent f54d4434c2)
 4 files changed, 109 insertions(+), 5 deletions(-)

--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -838,6 +838,12 @@
 			on every CPU online, such as boot, and resume from suspend.
 			Default: 10000
 
+	cpuhp.parallel=
+			[SMP] Enable/disable parallel bringup of secondary CPUs
+			Format: <bool>
+			Default is enabled if CONFIG_HOTPLUG_PARALLEL=y. Otherwise
+			the parameter has no effect.
+
 	crash_kexec_post_notifiers
 			Run kdump after running panic-notifiers and dumping
 			kmsg. This only for the users who doubt kdump always
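
As a usage illustration, parallel bringup can be disabled on a kernel
built with CONFIG_HOTPLUG_PARALLEL=y by booting with:

	cpuhp.parallel=0

Any value accepted by kstrtobool() (0/1, y/n, on/off) works, matching the
<bool> format documented above.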

--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -53,6 +53,10 @@ config HOTPLUG_SPLIT_STARTUP
 	bool
 	select HOTPLUG_CORE_SYNC_FULL
 
+config HOTPLUG_PARALLEL
+	bool
+	select HOTPLUG_SPLIT_STARTUP
+
 config GENERIC_ENTRY
 	bool
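
An architecture opts in by selecting the new symbol from its own Kconfig.
As a sketch (for x86 this happens in a later patch of this series; the
exact select condition is the architecture's choice):

config X86
	...
	select HOTPLUG_PARALLEL if SMP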

--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -524,6 +524,7 @@ void cpuhp_ap_sync_alive(void);
 void arch_cpuhp_sync_state_poll(void);
 void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu);
 int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle);
+bool arch_cpuhp_init_parallel_bringup(void);
 
 #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
 void cpuhp_ap_report_dead(void);
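
The arch hooks above define the contract for the parallel path. A
minimal, purely illustrative sketch of an architecture backend follows;
the function names come from the header above, but the bodies and the
my_arch_*() helpers are invented placeholders, not the real x86 code:

/* Illustrative arch backend for parallel bringup. */
bool __init arch_cpuhp_init_parallel_bringup(void)
{
	/*
	 * Returning false makes the core fall back to the fully
	 * serialized bringup, so an arch can veto at boot time.
	 */
	return my_arch_can_wake_aps_in_parallel();	/* hypothetical */
}

int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
{
	/*
	 * Send the wakeup (e.g. INIT/SIPI/SIPI on x86) and return without
	 * waiting; the AP parks in cpuhp_ap_sync_alive() until released.
	 */
	return my_arch_wake_ap(cpu, tidle);		/* hypothetical */
}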

--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -649,8 +649,23 @@ bool cpu_smt_possible(void)
 		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
 }
 EXPORT_SYMBOL_GPL(cpu_smt_possible);
+
+static inline bool cpuhp_smt_aware(void)
+{
+	return topology_smt_supported();
+}
+
+static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
+{
+	return cpu_primary_thread_mask;
+}
 #else
 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+static inline bool cpuhp_smt_aware(void) { return false; }
+static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
+{
+	return cpu_present_mask;
+}
 #endif
 
 static inline enum cpuhp_state
@@ -1747,18 +1762,96 @@ int bringup_hibernate_cpu(unsigned int sleep_cpu)
 	return 0;
 }
 
-void __init bringup_nonboot_cpus(unsigned int setup_max_cpus)
+static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
+				      enum cpuhp_state target)
 {
 	unsigned int cpu;
 
-	for_each_present_cpu(cpu) {
-		if (num_online_cpus() >= setup_max_cpus)
+	for_each_cpu(cpu, mask) {
+		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+
+		if (!--ncpus)
 			break;
-		if (!cpu_online(cpu))
-			cpu_up(cpu, CPUHP_ONLINE);
+
+		if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
+			/*
+			 * If this failed then cpu_up() might have only
+			 * rolled back to CPUHP_BP_KICK_AP for the final
+			 * online. Clean it up. NOOP if already rolled back.
+			 */
+			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
+		}
 	}
 }
+
+#ifdef CONFIG_HOTPLUG_PARALLEL
+static bool __cpuhp_parallel_bringup __ro_after_init = true;
+
+static int __init parallel_bringup_parse_param(char *arg)
+{
+	return kstrtobool(arg, &__cpuhp_parallel_bringup);
+}
+early_param("cpuhp.parallel", parallel_bringup_parse_param);
+
+/*
+ * On architectures which have enabled parallel bringup this invokes all BP
+ * prepare states for each of the to-be-onlined APs first. The last state
+ * sends the startup IPI to the APs. The APs proceed through the low level
+ * bringup code in parallel and then wait for the control CPU to release
+ * them one by one for the final onlining procedure.
+ *
+ * This avoids waiting for each AP to respond to the startup IPI in
+ * CPUHP_BRINGUP_CPU.
+ */
+static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
+{
+	const struct cpumask *mask = cpu_present_mask;
+
+	if (__cpuhp_parallel_bringup)
+		__cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
+	if (!__cpuhp_parallel_bringup)
+		return false;
+
+	if (cpuhp_smt_aware()) {
+		const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
+		static struct cpumask tmp_mask __initdata;
+
+		/*
+		 * X86 requires that SMT siblings are not started while
+		 * the primary thread does a microcode update, for various
+		 * reasons. Bring the primary threads up first.
+		 */
+		cpumask_and(&tmp_mask, mask, pmask);
+		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
+		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
+		/* Account for the online CPUs */
+		ncpus -= num_online_cpus();
+		if (!ncpus)
+			return true;
+		/* Create the mask for secondary CPUs */
+		cpumask_andnot(&tmp_mask, mask, pmask);
+		mask = &tmp_mask;
+	}
+
+	/* Bring the not-yet started CPUs up */
+	cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
+	cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
+	return true;
+}
+#else
+static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
+#endif /* CONFIG_HOTPLUG_PARALLEL */
+
+void __init bringup_nonboot_cpus(unsigned int setup_max_cpus)
+{
+	/* Try parallel bringup optimization if enabled */
+	if (cpuhp_bringup_cpus_parallel(setup_max_cpus))
+		return;
+
+	/* Full per CPU serialized bringup */
+	cpuhp_bringup_mask(cpu_present_mask, setup_max_cpus, CPUHP_ONLINE);
+}
+
 #ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_var_t frozen_cpus;
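
For context on the caller: bringup_nonboot_cpus() runs once during early
boot from smp_init(), so the parallel path, when available, replaces the
serialized loop wholesale. Abridged for illustration:

/* kernel/smp.c, abridged */
void __init smp_init(void)
{
	...
	bringup_nonboot_cpus(setup_max_cpus);
	...
}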