ACPI/processor: Replace racy task affinity logic

acpi_processor_get_throttling() requires the getter function to be invoked on
the target CPU. This is currently achieved by temporarily setting the affinity
of the calling user space thread to the requested CPU and resetting it to the
original affinity afterwards.

That's racy vs. CPU hotplug and concurrent affinity settings for that
thread, resulting in code executing on the wrong CPU and in any newly set
affinity being overwritten.

acpi_processor_get_throttling() is invoked in two ways:

1) The CPU online callback, which already runs on the target CPU and is
   therefore protected against hotplug and unaffected by affinity
   settings.

2) The ACPI driver probe function, which is not protected against hotplug
   during modprobe.

Switch it over to work_on_cpu() and protect the probe function against CPU
hotplug.
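
For illustration only (not part of the patch; the example_* names are made
up), this is the general pattern being switched to: run the getter via
work_on_cpu() on the target CPU and hold the CPU hotplug lock around the
call, instead of bouncing the caller's affinity with set_cpus_allowed_ptr():

  /*
   * Illustrative sketch, not from this patch: example_getter() and
   * example_get_on_cpu() stand in for the ACPI throttling getter and
   * its caller.
   */
  #include <linux/cpu.h>
  #include <linux/errno.h>
  #include <linux/workqueue.h>

  /* Runs on the CPU handed to work_on_cpu(), in per-CPU workqueue context. */
  static long example_getter(void *data)
  {
  	int *result = data;

  	*result = 42;	/* stand-in for reading per-CPU hardware state */
  	return 0;
  }

  static int example_get_on_cpu(int cpu)
  {
  	int value = 0;
  	long ret;

  	/* Block CPU hotplug so 'cpu' cannot go away under us. */
  	get_online_cpus();
  	if (!cpu_online(cpu))
  		ret = -ENODEV;
  	else
  		ret = work_on_cpu(cpu, example_getter, &value);
  	put_online_cpus();

  	return ret ? (int)ret : value;
  }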

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: linux-acpi@vger.kernel.org
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Len Brown <lenb@kernel.org>
Link: http://lkml.kernel.org/r/20170412201042.785920903@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author: Thomas Gleixner <tglx@linutronix.de>
Date:   2017-04-12 22:07:34 +02:00
commit 8153f9ac43
parent a5cbdf693a
2 changed files with 42 additions and 27 deletions

diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c

@@ -262,11 +262,16 @@ err_power_exit:
 static int acpi_processor_start(struct device *dev)
 {
 	struct acpi_device *device = ACPI_COMPANION(dev);
+	int ret;
 
 	if (!device)
 		return -ENODEV;
 
-	return __acpi_processor_start(device);
+	/* Protect against concurrent CPU hotplug operations */
+	get_online_cpus();
+	ret = __acpi_processor_start(device);
+	put_online_cpus();
+	return ret;
 }
 
 static int acpi_processor_stop(struct device *dev)

diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c

@@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg {
 #define THROTTLING_POSTCHANGE      (2)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr,
-				  int state, bool force);
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+					   int state, bool force, bool direct);
 
 static int acpi_processor_update_tsd_coord(void)
 {
@@ -891,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Invalid throttling state, reset\n"));
 		state = 0;
-		ret = acpi_processor_set_throttling(pr, state, true);
+		ret = __acpi_processor_set_throttling(pr, state, true,
+						      true);
 		if (ret)
 			return ret;
 	}
@@ -901,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 	return 0;
 }
 
+static long __acpi_processor_get_throttling(void *data)
+{
+	struct acpi_processor *pr = data;
+
+	return pr->throttling.acpi_processor_get_throttling(pr);
+}
+
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
-	cpumask_var_t saved_mask;
-	int ret;
-
 	if (!pr)
 		return -EINVAL;
 
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
-	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
-		return -ENOMEM;
-
 	/*
-	 * Migrate task to the cpu pointed by pr.
+	 * This is either called from the CPU hotplug callback of
+	 * processor_driver or via the ACPI probe function. In the latter
+	 * case the CPU is not guaranteed to be online. Both call sites are
+	 * protected against CPU hotplug.
 	 */
-	cpumask_copy(saved_mask, &current->cpus_allowed);
-	/* FIXME: use work_on_cpu() */
-	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
-		/* Can't migrate to the target pr->id CPU. Exit */
-		free_cpumask_var(saved_mask);
+	if (!cpu_online(pr->id))
 		return -ENODEV;
-	}
-	ret = pr->throttling.acpi_processor_get_throttling(pr);
-	/* restore the previous state */
-	set_cpus_allowed_ptr(current, saved_mask);
-	free_cpumask_var(saved_mask);
 
-	return ret;
+	return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
 }
 
 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn(void *data)
 			arg->target_state, arg->force);
 }
 
-int acpi_processor_set_throttling(struct acpi_processor *pr,
-			int state, bool force)
+static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
+{
+	if (direct)
+		return fn(arg);
+	return work_on_cpu(cpu, fn, arg);
+}
+
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+					   int state, bool force, bool direct)
 {
 	int ret = 0;
 	unsigned int i;
@@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 		arg.pr = pr;
 		arg.target_state = state;
 		arg.force = force;
-		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
+		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
+				  direct);
 	} else {
 		/*
 		 * When the T-state coordination is SW_ALL or HW_ALL,
@@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 			arg.pr = match_pr;
 			arg.target_state = state;
 			arg.force = force;
-			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
-					  &arg);
+			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
+					  &arg, direct);
 		}
 	}
 	/*
@@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	return ret;
 }
 
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
+				  bool force)
+{
+	return __acpi_processor_set_throttling(pr, state, force, false);
+}
+
 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 {
 	int result = 0;