Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
powerpc: Replace deprecated CPU-hotplug functions.
The functions get_online_cpus() and put_online_cpus() have been deprecated during the CPU hotplug rework. They map directly to cpus_read_lock() and cpus_read_unlock(), so replace the deprecated CPU-hotplug functions with the official ones; the behaviour remains unchanged.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210803141621.780504-4-bigeasy@linutronix.de
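Because the mapping is one-to-one, every hunk in this commit is a mechanical rename and the critical sections themselves are untouched. A minimal sketch of the before/after pattern (the function here is invented for illustration, not taken from the patch):

#include <linux/cpu.h>

static void example_hotplug_critical_section(void)
{
	/*
	 * Before this series the same region read:
	 *	get_online_cpus();
	 *	... work that must not race with CPU hotplug ...
	 *	put_online_cpus();
	 */
	cpus_read_lock();	/* same semantics, official name */
	/* ... work that must not race with CPU hotplug ... */
	cpus_read_unlock();
}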
parent c00103abf7
commit 5ae36401ca

4 changed files with 13 additions and 13 deletions
arch/powerpc/kernel/rtasd.c

@@ -429,7 +429,7 @@ static void rtas_event_scan(struct work_struct *w)
 
 	do_event_scan();
 
-	get_online_cpus();
+	cpus_read_lock();
 
 	/* raw_ OK because just using CPU as starting point. */
 	cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
@@ -451,7 +451,7 @@ static void rtas_event_scan(struct work_struct *w)
 	schedule_delayed_work_on(cpu, &event_scan_work,
 		__round_jiffies_relative(event_scan_delay, cpu));
 
-	put_online_cpus();
+	cpus_read_unlock();
 }
 
 #ifdef CONFIG_PPC64
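rtas_event_scan() round-robins its work item across the online CPUs, and the read lock keeps cpu_online_mask stable while the next CPU is picked and the work re-queued. A self-contained sketch of that idiom (the example_* names are invented; the wrap-around check mirrors what the surrounding driver code does):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void example_scan(struct work_struct *w);
static DECLARE_DELAYED_WORK(example_work, example_scan);

/*
 * Re-queue the work on the next online CPU, wrapping around; the
 * hotplug read lock keeps cpu_online_mask stable across the pick.
 */
static void example_scan(struct work_struct *w)
{
	unsigned int cpu;

	cpus_read_lock();
	cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	schedule_delayed_work_on(cpu, &example_work,
				 __round_jiffies_relative(HZ, cpu));
	cpus_read_unlock();
}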
arch/powerpc/kvm/book3s_hv_builtin.c

@@ -137,23 +137,23 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
  * exist in the system. We use a counter of VMs to track this.
  *
  * One of the operations we need to block is onlining of secondaries, so we
- * protect hv_vm_count with get/put_online_cpus().
+ * protect hv_vm_count with cpus_read_lock/unlock().
  */
 static atomic_t hv_vm_count;
 
 void kvm_hv_vm_activated(void)
 {
-	get_online_cpus();
+	cpus_read_lock();
 	atomic_inc(&hv_vm_count);
-	put_online_cpus();
+	cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);
 
 void kvm_hv_vm_deactivated(void)
 {
-	get_online_cpus();
+	cpus_read_lock();
 	atomic_dec(&hv_vm_count);
-	put_online_cpus();
+	cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);
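The comment in this hunk carries the locking rationale: onlining of secondaries must be blocked while any HV guest exists, so the writers of hv_vm_count take the hotplug read lock. For context, the reader side of the counter is a plain atomic read; a sketch modelled on the in-tree kvm_hv_mode_active() (reproduced here from memory, for illustration only):

#include <linux/atomic.h>

static atomic_t hv_vm_count;

/*
 * True while at least one HV guest exists; callers on the hotplug
 * path use this to decide whether onlining a secondary is allowed.
 */
static int example_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}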
arch/powerpc/platforms/powernv/idle.c

@@ -199,12 +199,12 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
 	 */
 	power7_fastsleep_workaround_exit = false;
 
-	get_online_cpus();
+	cpus_read_lock();
 	primary_thread_mask = cpu_online_cores_map();
 	on_each_cpu_mask(&primary_thread_mask,
 			pnv_fastsleep_workaround_apply,
 			&err, 1);
-	put_online_cpus();
+	cpus_read_unlock();
 	if (err) {
 		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
 		goto fail;
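Here the lock protects an IPI broadcast: on_each_cpu_mask() runs the workaround function on every CPU in primary_thread_mask, and cpus_read_lock() guarantees no CPU in that mask goes offline between computing the mask and completing the IPIs. A minimal sketch of the pattern (the example_* names are invented):

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/smp.h>

static void example_apply(void *info)
{
	int *err = info;

	/* Per-CPU work; report failure back through *err. */
	if (0 /* some per-CPU operation failed */)
		*err = -EIO;
}

static int example_apply_on_all(void)
{
	int err = 0;

	cpus_read_lock();
	/* The mask is stable for the duration of the IPI broadcast. */
	on_each_cpu_mask(cpu_online_mask, example_apply, &err, 1);
	cpus_read_unlock();

	return err;
}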
arch/powerpc/platforms/powernv/opal-imc.c

@@ -186,7 +186,7 @@ static void disable_nest_pmu_counters(void)
 	int nid, cpu;
 	const struct cpumask *l_cpumask;
 
-	get_online_cpus();
+	cpus_read_lock();
 	for_each_node_with_cpus(nid) {
 		l_cpumask = cpumask_of_node(nid);
 		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
@@ -195,7 +195,7 @@ static void disable_nest_pmu_counters(void)
 		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
 				       get_hard_smp_processor_id(cpu));
 	}
-	put_online_cpus();
+	cpus_read_unlock();
 }
 
 static void disable_core_pmu_counters(void)
@@ -203,7 +203,7 @@ static void disable_core_pmu_counters(void)
 	cpumask_t cores_map;
 	int cpu, rc;
 
-	get_online_cpus();
+	cpus_read_lock();
 	/* Disable the IMC Core functions */
 	cores_map = cpu_online_cores_map();
 	for_each_cpu(cpu, &cores_map) {
@@ -213,7 +213,7 @@ static void disable_core_pmu_counters(void)
 			pr_err("%s: Failed to stop Core (cpu = %d)\n",
 				__FUNCTION__, cpu);
 	}
-	put_online_cpus();
+	cpus_read_unlock();
 }
 
 int get_max_nest_dev(void)
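All four IMC hunks share one shape: take the read lock, snapshot which cores are online, issue a firmware call per entry, drop the lock. A condensed sketch of that snapshot-then-act shape, with the OPAL call stubbed out (the example_* names are invented for illustration; cpu_online_cores_map() is the powerpc helper the real code uses):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <asm/cputhreads.h>	/* cpu_online_cores_map(), powerpc */

/* Stand-in for the per-core OPAL stop call (illustrative only). */
static int example_stop_core(unsigned int cpu)
{
	return 0;
}

static void example_disable_core_counters(void)
{
	cpumask_t cores_map;
	unsigned int cpu;

	cpus_read_lock();
	/*
	 * Snapshot one thread per online core; the read lock keeps
	 * the snapshot valid until every core has been handled.
	 */
	cores_map = cpu_online_cores_map();
	for_each_cpu(cpu, &cores_map) {
		if (example_stop_core(cpu))
			pr_err("failed to stop counters on CPU %u\n", cpu);
	}
	cpus_read_unlock();
}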