Power management updates for 5.18-rc5

 - Fix issues with the Qualcomm cpufreq driver (Dmitry Baryshkov,
   Vladimir Zapolskiy).

 - Fix a memory leak in the Sun50i cpufreq driver (Xiaobing Luo).

 - Make intel_idle enable C1E promotion on all CPUs when C1E is
   preferred to C1 (Artem Bityutskiy).

 - Make the recently added C6 optimization on Sapphire Rapids work as
   expected if both C1E and C1 are "preferred" (Artem Bityutskiy).
 -----BEGIN PGP SIGNATURE-----
 
 iQJGBAABCAAwFiEE4fcc61cGeeHD/fCwgsRv/nhiVHEFAmJqr7cSHHJqd0Byand5
 c29ja2kubmV0AAoJEILEb/54YlRxlv0P/Rf3L00cpl+HGoUQ2yNb/7FDPu2hXyt2
 V1b3XVHxKlDfN6azKChdnJlczYw7/C/uGc6azjblZ2nN362jkYiWGRn8D7tX+nvy
 7NyajdZLRSOx+AchWOSpDjrOkq6TVcLnZLGLMYEX02JREKZmOqimfCNEKjfhO7Ht
 FwapPSqTAMdg7YmEhpUCkigUJCo6d436ZxMf6T02LHXLUTTN6qqBcEqHh81AWgvd
 LZbz1WC1srj0YZePalmMl6ToyO2p8qQjMIQZizIthvFOkhj58af2H687oU2dCNtp
 YoLctD7+ge14nhssnE3VD4cmOY4rzlK15e0EqPIflbuQcux2n5qNN3rLyJ0bqHSM
 zQUHy7cbBgnYMbhM7/FKXza51nsxWh5cSV0CQisv0Pck+Mj9PqAJssM6HhhmnKgz
 tyQvT742p98XH3pEq2RCLoJ01qe2ptl6/+u2HN36KA9zfPPAOY7eIvrZvliQ2Qgf
 R7V29ZxrT2gDkKhF71pKH1qcSY8/41x2uesKFJzFunj0+IBVFhKNECvqEgJuUbCo
 nnr4woxYfSZuDQFsf3/NC7OoM5z+SdeaPu0Y8f594wORL6pMHaTuo/8UwJEx2aWQ
 OxlhYeDwk0Zd/A7pdFqbFomX+PHU0Ah2UWNX/aaDybiBFm8LcFuIVGwJwy9r1HX1
 VjAsHV8bVaPo
 =b1TP
 -----END PGP SIGNATURE-----

Merge tag 'pm-5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix up recent intel_idle driver changes and fix some ARM cpufreq
  driver issues.

  Specifics:

   - Fix issues with the Qualcomm cpufreq driver (Dmitry Baryshkov,
     Vladimir Zapolskiy).

   - Fix a memory leak in the Sun50i cpufreq driver (Xiaobing Luo).

   - Make intel_idle enable C1E promotion on all CPUs when C1E is
     preferred to C1 (Artem Bityutskiy).

   - Make the recently added C6 optimization on Sapphire Rapids work as
     expected if both C1E and C1 are "preferred" (Artem Bityutskiy)"

* tag 'pm-5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  intel_idle: Fix SPR C6 optimization
  intel_idle: Fix the 'preferred_cstates' module parameter
  cpufreq: qcom-cpufreq-hw: Clear dcvs interrupts
  cpufreq: fix memory leak in sun50i_cpufreq_nvmem_probe
  cpufreq: qcom-cpufreq-hw: Fix throttle frequency value on EPSS platforms
  cpufreq: qcom-hw: provide online/offline operations
  cpufreq: qcom-hw: fix the opp entries refcounting
  cpufreq: qcom-hw: fix the race between LMH worker and cpuhp
  cpufreq: qcom-hw: drop affinity hint before freeing the IRQ
commit 659ed6e285 (Linus Torvalds, 2022-04-28 11:50:21 -07:00)
3 changed files with 75 additions and 26 deletions
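
For context on the intel_idle items above: "C1E promotion" is controlled per CPU
by bit 1 of MSR_IA32_POWER_CTL, and the fix applies it from intel_idle_cpu_init()
so that every CPU ends up with the same setting (see the c1e_promotion enum and
the per-CPU calls in the intel_idle.c hunks below). A minimal sketch of helpers
along the lines of the c1e_promotion_enable()/c1e_promotion_disable() calls
referenced in the diff (illustrative only; the upstream helpers may differ in
detail):

    /* Assumed include: <asm/msr.h> for rdmsrl()/wrmsrl() and
     * MSR_IA32_POWER_CTL. These act on the CPU they run on, which is
     * why the driver invokes them from each CPU's init path.
     */
    static void c1e_promotion_enable(void)
    {
            unsigned long long msr_bits;

            rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
            msr_bits |= 0x2;        /* set bit 1: C1 requests are promoted to C1E */
            wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
    }

    static void c1e_promotion_disable(void)
    {
            unsigned long long msr_bits;

            rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
            msr_bits &= ~0x2;       /* clear bit 1: C1 requests stay in C1 */
            wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
    }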

drivers/cpufreq/qcom-cpufreq-hw.c

@@ -24,13 +24,17 @@
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
#define GT_IRQ_STATUS BIT(2)
#define HZ_PER_KHZ 1000
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_domain_state;
u32 reg_dcvs_ctrl;
u32 reg_freq_lut;
u32 reg_volt_lut;
u32 reg_intr_clr;
u32 reg_current_vote;
u32 reg_perf_state;
u8 lut_row_size;
@@ -280,37 +284,46 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
unsigned int lval;
return (val & 0x3FF) * 19200;
if (data->soc_data->reg_current_vote)
lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
else
lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
return lval * xo_rate;
}
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
struct cpufreq_policy *policy = data->policy;
int cpu = cpumask_first(policy->cpus);
int cpu = cpumask_first(policy->related_cpus);
struct device *dev = get_cpu_device(cpu);
unsigned long freq_hz, throttled_freq;
struct dev_pm_opp *opp;
unsigned int freq;
/*
* Get the h/w throttled frequency, normalize it using the
* registered opp table and use it to calculate thermal pressure.
*/
freq = qcom_lmh_get_throttle_freq(data);
freq_hz = freq * HZ_PER_KHZ;
freq_hz = qcom_lmh_get_throttle_freq(data);
opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
dev_pm_opp_find_freq_ceil(dev, &freq_hz);
opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
throttled_freq = freq_hz / HZ_PER_KHZ;
if (IS_ERR(opp)) {
dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
} else {
throttled_freq = freq_hz / HZ_PER_KHZ;
/* Update thermal pressure (the boost frequencies are accepted) */
arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
/* Update thermal pressure (the boost frequencies are accepted) */
arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
dev_pm_opp_put(opp);
}
/*
* In the unlikely case policy is unregistered do not enable
@@ -350,6 +363,10 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
disable_irq_nosync(c_data->throttle_irq);
schedule_delayed_work(&c_data->throttle_work, 0);
if (c_data->soc_data->reg_intr_clr)
writel_relaxed(GT_IRQ_STATUS,
c_data->base + c_data->soc_data->reg_intr_clr);
return IRQ_HANDLED;
}
@@ -365,9 +382,11 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = {
static const struct qcom_cpufreq_soc_data epss_soc_data = {
.reg_enable = 0x0,
.reg_domain_state = 0x20,
.reg_dcvs_ctrl = 0xb0,
.reg_freq_lut = 0x100,
.reg_volt_lut = 0x200,
.reg_intr_clr = 0x308,
.reg_perf_state = 0x320,
.lut_row_size = 4,
};
@@ -417,16 +436,39 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0;
}
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data = policy->driver_data;
struct platform_device *pdev = cpufreq_get_driver_data();
int ret;
ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
data->irq_name, data->throttle_irq);
return ret;
}
static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data = policy->driver_data;
if (data->throttle_irq <= 0)
return;
return 0;
mutex_lock(&data->throttle_lock);
data->cancel_throttle = true;
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
irq_set_affinity_hint(data->throttle_irq, NULL);
return 0;
}
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
free_irq(data->throttle_irq, data);
}
@@ -583,6 +625,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit,
.online = qcom_cpufreq_hw_cpu_online,
.offline = qcom_cpufreq_hw_cpu_offline,
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
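
The reworked qcom_lmh_get_throttle_freq() above returns the hardware-reported
throttle frequency in Hz, and qcom_lmh_dcvs_notify() normalizes it against the
registered OPP table before updating thermal pressure; as the hunk shows, the
old code discarded the return value of the ceil lookup and never dropped the
OPP reference. A minimal sketch of that lookup pattern, pulled out into a
hypothetical helper (normalize_throttle_freq() is an illustrative name, not a
function in the driver):

    /* Assumed includes: <linux/pm_opp.h> and <linux/err.h>.
     * "dev" is the CPU's struct device; "freq_hz" is in Hz.
     */
    static unsigned long normalize_throttle_freq(struct device *dev,
                                                 unsigned long freq_hz)
    {
            struct dev_pm_opp *opp;

            /* Prefer the highest OPP at or below the throttle frequency... */
            opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
            if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
                    /* ...falling back to the lowest OPP above it. */
                    opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

            if (IS_ERR(opp))
                    return 0;               /* no matching OPP registered */

            dev_pm_opp_put(opp);            /* the find_*() helpers take a reference */
            return freq_hz;                 /* updated in place to the matched OPP rate */
    }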

drivers/cpufreq/sun50i-cpufreq-nvmem.c

@@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
if (ret)
if (ret) {
kfree(opp_tables);
return ret;
}
snprintf(name, MAX_NAME_LEN, "speed%d", speed);

drivers/idle/intel_idle.c

@@ -69,7 +69,12 @@ static unsigned int preferred_states_mask;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static unsigned long auto_demotion_disable_flags;
static bool disable_promotion_to_c1e;
static enum {
C1E_PROMOTION_PRESERVE,
C1E_PROMOTION_ENABLE,
C1E_PROMOTION_DISABLE
} c1e_promotion = C1E_PROMOTION_PRESERVE;
struct idle_cpu {
struct cpuidle_state *state_table;
@@ -1398,8 +1403,6 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
static void c1e_promotion_enable(void);
/**
* ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
*
@@ -1578,17 +1581,14 @@ static void __init spr_idle_state_table_update(void)
unsigned long long msr;
/* Check if user prefers C1E over C1. */
if (preferred_states_mask & BIT(2)) {
if (preferred_states_mask & BIT(1))
/* Both can't be enabled, stick to the defaults. */
return;
if ((preferred_states_mask & BIT(2)) &&
!(preferred_states_mask & BIT(1))) {
/* Disable C1 and enable C1E. */
spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
/* Enable C1E using the "C1E promotion" bit. */
c1e_promotion_enable();
disable_promotion_to_c1e = false;
c1e_promotion = C1E_PROMOTION_ENABLE;
}
/*
@@ -1754,7 +1754,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
if (auto_demotion_disable_flags)
auto_demotion_disable();
if (disable_promotion_to_c1e)
if (c1e_promotion == C1E_PROMOTION_ENABLE)
c1e_promotion_enable();
else if (c1e_promotion == C1E_PROMOTION_DISABLE)
c1e_promotion_disable();
return 0;
@@ -1833,7 +1835,8 @@ static int __init intel_idle_init(void)
if (icpu) {
cpuidle_state_table = icpu->state_table;
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
disable_promotion_to_c1e = icpu->disable_promotion_to_c1e;
if (icpu->disable_promotion_to_c1e)
c1e_promotion = C1E_PROMOTION_DISABLE;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {
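
For reference on the BIT(1)/BIT(2) checks in spr_idle_state_table_update()
above: they test intel_idle's 'preferred_cstates' bitmask module parameter,
in which bit 1 selects C1 and bit 2 selects C1E on Sapphire Rapids. A hedged
usage example, assuming intel_idle is built into the kernel (illustrative;
see the intel_idle documentation for the authoritative syntax):

    # kernel command line: prefer C1E over C1 (bit 2 set, bit 1 clear)
    intel_idle.preferred_cstates=4

With both bits set, the old early "return" in spr_idle_state_table_update()
also skipped the C6 optimization code further down in that function, which is
what the "intel_idle: Fix SPR C6 optimization" commit addresses by replacing
the early return with a plain conditional.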