cpu/hotplug: Do not bail-out in DYING/STARTING sections

The DYING/STARTING callbacks are not expected to fail. However, as reported
by Derek, buggy drivers such as tboot are still free to return errors
within those sections, which halts the hot(un)plug and leaves the CPU in an
unrecoverable state.

As there is no rollback possible, only log the failures and proceed with
the following steps.

This restores the hotplug behaviour prior to commit 453e410851
("cpu/hotplug: Add cpuhp_invoke_callback_range()")

Fixes: 453e410851 ("cpu/hotplug: Add cpuhp_invoke_callback_range()")
Reported-by: Derek Dolney <z23@posteo.net>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Derek Dolney <z23@posteo.net>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=215867
Link: https://lore.kernel.org/r/20220927101259.1149636-1-vdonnefort@google.com
commit 6f855b39e4 (parent d385febc9a)
Vincent Donnefort, 2022-09-27 11:12:59 +01:00, committed by Thomas Gleixner
1 changed file with 40 additions and 16 deletions

--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -663,21 +663,51 @@ static bool cpuhp_next_state(bool bringup,
 	return true;
 }
 
-static int cpuhp_invoke_callback_range(bool bringup,
-				       unsigned int cpu,
-				       struct cpuhp_cpu_state *st,
-				       enum cpuhp_state target)
+static int __cpuhp_invoke_callback_range(bool bringup,
+					 unsigned int cpu,
+					 struct cpuhp_cpu_state *st,
+					 enum cpuhp_state target,
+					 bool nofail)
 {
 	enum cpuhp_state state;
-	int err = 0;
+	int ret = 0;
 
 	while (cpuhp_next_state(bringup, &state, st, target)) {
+		int err;
+
 		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
-		if (err)
+		if (!err)
+			continue;
+
+		if (nofail) {
+			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
+				cpu, bringup ? "UP" : "DOWN",
+				cpuhp_get_step(st->state)->name,
+				st->state, err);
+			ret = -1;
+		} else {
+			ret = err;
 			break;
+		}
 	}
 
-	return err;
+	return ret;
+}
+
+static inline int cpuhp_invoke_callback_range(bool bringup,
+					      unsigned int cpu,
+					      struct cpuhp_cpu_state *st,
+					      enum cpuhp_state target)
+{
+	return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
+}
+
+static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
+						      unsigned int cpu,
+						      struct cpuhp_cpu_state *st,
+						      enum cpuhp_state target)
+{
+	__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
 }
 
 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
@@ -999,7 +1029,6 @@ static int take_cpu_down(void *_param)
 	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
 	int err, cpu = smp_processor_id();
-	int ret;
 
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
@@ -1012,13 +1041,10 @@ static int take_cpu_down(void *_param)
 	 */
 	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
 
-	/* Invoke the former CPU_DYING callbacks */
-	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
-
 	/*
-	 * DYING must not fail!
+	 * Invoke the former CPU_DYING callbacks. DYING must not fail!
 	 */
-	WARN_ON_ONCE(ret);
+	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
 
 	/* Give up timekeeping duties */
 	tick_handover_do_timer();
@@ -1296,16 +1322,14 @@ void notify_cpu_starting(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
-	int ret;
 
 	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
 	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
-	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
 
 	/*
 	 * STARTING must not fail!
 	 */
-	WARN_ON_ONCE(ret);
+	cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
 }
 
 /*