From 08e23d05fa6dc4fc13da0ccf09defdd4bbc92ff4 Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Tue, 24 Oct 2023 20:30:15 +0200 Subject: [PATCH 01/35] PM / devfreq: Fix buffer overflow in trans_stat_show Fix buffer overflow in trans_stat_show(). Convert the simple sprintf calls to the more secure scnprintf, bounded by PAGE_SIZE. Add a check for exceeding PAGE_SIZE and exit early from the loop in that case. Also add, at the end, a warning that we exceeded PAGE_SIZE and that stats are disabled. Return -EFBIG in the case where we don't have enough space to write the full transition table. Also document in the ABI that this function can return an -EFBIG error.
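The semantic difference that makes this safe, as a minimal userspace model (the kernel's scnprintf() is reimplemented here purely for illustration): sprintf() writes unbounded; snprintf() truncates but returns the length that *would* have been written, so summing its return values can still push `len` past the buffer; scnprintf() returns the number of characters actually written, so `len` stays a valid offset:

	#include <stdarg.h>
	#include <stdio.h>

	/* Userspace model of the kernel's scnprintf(): like snprintf(),
	 * but returns the characters actually copied (excluding the NUL),
	 * never the larger "would have been written" count. */
	static int scnprintf(char *buf, size_t size, const char *fmt, ...)
	{
		va_list args;
		int i;

		va_start(args, fmt);
		i = vsnprintf(buf, size, fmt, args);
		va_end(args);

		if (i >= (int)size)
			i = size ? (int)size - 1 : 0;
		return i;
	}

	int main(void)
	{
		char buf[16];	/* stand-in for the PAGE_SIZE buffer */
		int len = 0;

		/* With sprintf()/snprintf() return values, len would run
		 * past the buffer and "buf + len" would point out of
		 * bounds; here it saturates at sizeof(buf) - 1. */
		for (int i = 0; i < 8; i++)
			len += scnprintf(buf + len, sizeof(buf) - len, "%10lu", 42ul);
		printf("len = %d\n", len);	/* prints 15 */
		return 0;
	}

This is why the patch can keep using `len` as a write offset and simply compare it against PAGE_SIZE - 1 to detect truncation.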
Link: https://lore.kernel.org/all/20231024183016.14648-2-ansuelsmth@gmail.com/ Cc: stable@vger.kernel.org Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218041 Fixes: e552bbaf5b98 ("PM / devfreq: Add sysfs node for representing frequency transition information.") Signed-off-by: Christian Marangi Signed-off-by: Chanwoo Choi --- Documentation/ABI/testing/sysfs-class-devfreq | 3 + drivers/devfreq/devfreq.c | 59 +++++++++++++------ 2 files changed, 43 insertions(+), 19 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq index 5e6b74f30406..1e7e0bb4c14e 100644 --- a/Documentation/ABI/testing/sysfs-class-devfreq +++ b/Documentation/ABI/testing/sysfs-class-devfreq @@ -52,6 +52,9 @@ Description: echo 0 > /sys/class/devfreq/.../trans_stat + If the transition table is bigger than PAGE_SIZE, reading + this will return an -EFBIG error. + What: /sys/class/devfreq/.../available_frequencies Date: October 2012 Contact: Nishanth Menon diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index b3a68d5833bd..907f50ab70ed 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1688,7 +1688,7 @@ static ssize_t trans_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(dev); - ssize_t len; + ssize_t len = 0; int i, j; unsigned int max_state; @@ -1697,7 +1697,7 @@ static ssize_t trans_stat_show(struct device *dev, max_state = df->max_state; if (max_state == 0) - return sprintf(buf, "Not Supported.\n"); + return scnprintf(buf, PAGE_SIZE, "Not Supported.\n"); mutex_lock(&df->lock); if (!df->stop_polling && @@ -1707,31 +1707,52 @@ static ssize_t trans_stat_show(struct device *dev, } mutex_unlock(&df->lock); - len = sprintf(buf, " From : To\n"); - len += sprintf(buf + len, " :"); - for (i = 0; i < max_state; i++) - len += sprintf(buf + len, "%10lu", - df->freq_table[i]); + len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, " :"); + for (i = 0; i < max_state; i++) { + if (len >= PAGE_SIZE - 1) + break; + len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu", + df->freq_table[i]); + } + if (len >= PAGE_SIZE - 1) + return PAGE_SIZE - 1; - len += sprintf(buf + len, " time(ms)\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, " time(ms)\n"); for (i = 0; i < max_state; i++) { + if (len >= PAGE_SIZE - 1) + break; if (df->freq_table[i] == df->previous_freq) - len += sprintf(buf + len, "*"); + len += scnprintf(buf + len, PAGE_SIZE - len, "*"); else - len += sprintf(buf + len, " "); + len += scnprintf(buf + len, PAGE_SIZE - len, " "); + if (len >= PAGE_SIZE - 1) + break; - len += sprintf(buf + len, "%10lu:", df->freq_table[i]); - for (j = 0; j < max_state; j++) - len += sprintf(buf + len, "%10u", - df->stats.trans_table[(i * max_state) + j]); - - len += sprintf(buf + len, "%10llu\n", (u64) - jiffies64_to_msecs(df->stats.time_in_state[i])); + len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:", + df->freq_table[i]); + for (j = 0; j < max_state; j++) { + if (len >= PAGE_SIZE - 1) + break; + len += scnprintf(buf + len, PAGE_SIZE - len, "%10u", + df->stats.trans_table[(i * max_state) + j]); + } + if (len >= PAGE_SIZE - 1) + break; + len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64) + jiffies64_to_msecs(df->stats.time_in_state[i])); + } + + if (len < PAGE_SIZE - 1) + len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n", + df->stats.total_trans); + + if (len >= PAGE_SIZE - 1) { + pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n"); + return -EFBIG; } - len += sprintf(buf + len, "Total transition : %u\n", - df->stats.total_trans); return len; } From 4920ee6dcfaf9aec9f4bd14ce6c15a6a758a92ae Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Tue, 24 Oct 2023 20:30:16 +0200 Subject: [PATCH 02/35] PM / devfreq: Convert to use sysfs_emit_at() API Follow the advice of Documentation/filesystems/sysfs.rst: show() should only use sysfs_emit() or sysfs_emit_at() when formatting the value to be returned to user space. Link: https://lore.kernel.org/all/20231024183016.14648-3-ansuelsmth@gmail.com/ Signed-off-by: Christian Marangi Signed-off-by: Chanwoo Choi --- drivers/devfreq/devfreq.c | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 907f50ab70ed..017a87465776 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1697,7 +1697,7 @@ static ssize_t trans_stat_show(struct device *dev, max_state = df->max_state; if (max_state == 0) - return scnprintf(buf, PAGE_SIZE, "Not Supported.\n"); + return sysfs_emit(buf, "Not Supported.\n"); mutex_lock(&df->lock); if (!df->stop_polling && @@ -1707,47 +1707,44 @@ static ssize_t trans_stat_show(struct device *dev, } mutex_unlock(&df->lock); - len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n"); - len += scnprintf(buf + len, PAGE_SIZE - len, " :"); + len += sysfs_emit_at(buf, len, " From : To\n"); + len += sysfs_emit_at(buf, len, " :"); for (i = 0; i < max_state; i++) { if (len >= PAGE_SIZE - 1) break; - len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu", - df->freq_table[i]); + len += sysfs_emit_at(buf, len, "%10lu", + df->freq_table[i]); } + if (len >= PAGE_SIZE - 1) return PAGE_SIZE - 1; - - len += scnprintf(buf + len, PAGE_SIZE - len, " time(ms)\n"); + len += sysfs_emit_at(buf, len, " time(ms)\n"); for (i = 0; i < max_state; i++) { if (len >= PAGE_SIZE - 1) break; - if (df->freq_table[i] == df->previous_freq) - len += scnprintf(buf + len, PAGE_SIZE - len, "*"); + if (df->freq_table[i] == df->previous_freq) + len += sysfs_emit_at(buf, len, "*"); else - len += scnprintf(buf + len, PAGE_SIZE - len, " "); + len += sysfs_emit_at(buf, len, " "); if (len >= PAGE_SIZE - 1) break; - - len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:", - df->freq_table[i]); + len += sysfs_emit_at(buf, len, "%10lu:", df->freq_table[i]); for (j = 0; j < max_state; j++) { if (len >= PAGE_SIZE - 1) break; - len += scnprintf(buf + len, PAGE_SIZE - len, "%10u", - df->stats.trans_table[(i * max_state) + j]); + len += sysfs_emit_at(buf, len, "%10u", + df->stats.trans_table[(i * max_state) + j]); } if (len >= PAGE_SIZE - 1) break; - len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64) - jiffies64_to_msecs(df->stats.time_in_state[i])); + len += 
sysfs_emit_at(buf, len, "%10llu\n", (u64) + jiffies64_to_msecs(df->stats.time_in_state[i])); } if (len < PAGE_SIZE - 1) - len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n", - df->stats.total_trans); - + len += sysfs_emit_at(buf, len, "Total transition : %u\n", + df->stats.total_trans); if (len >= PAGE_SIZE - 1) { pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n"); return -EFBIG; From 4c58e9d85c24b5281a2d39a3e6510b5f3b7fc687 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 1 Nov 2023 09:45:00 -0500 Subject: [PATCH 03/35] opp: ti: Use device_get_match_data() Use preferred device_get_match_data() instead of of_match_device() to get the driver match data. With this, adjust the includes to explicitly include the correct headers. As this driver only does DT based matching, of_match_device() will never return NULL if we've gotten to probe(). Therefore, the NULL check and error return for it can be dropped. Signed-off-by: Rob Herring Signed-off-by: Viresh Kumar --- drivers/opp/ti-opp-supply.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 8f3f13fbbb25..e3b97cd1fbbf 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -373,23 +374,15 @@ static int ti_opp_supply_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device *cpu_dev = get_cpu_device(0); - const struct of_device_id *match; const struct ti_opp_supply_of_data *of_data; int ret = 0; - match = of_match_device(ti_opp_supply_of_match, dev); - if (!match) { - /* We do not expect this to happen */ - dev_err(dev, "%s: Unable to match device\n", __func__); - return -ENODEV; - } - if (!match->data) { + of_data = device_get_match_data(dev); + if (!of_data) { /* Again, unlikely.. but mistakes do happen */ dev_err(dev, "%s: Bad data in match\n", __func__); return -EINVAL; } - of_data = match->data; - dev_set_drvdata(dev, (void *)of_data); /* If we need optimized voltage */ From 073d3d2ca7d462afc8159ca0175675b9b7b4f162 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 27 Oct 2023 12:40:04 +0530 Subject: [PATCH 04/35] OPP: Level zero is valid The level zero can be used by some OPPs to drop performance state vote for the device. It is perfectly fine to allow the same. _set_opp_level() considers it as an invalid value currently and returns early. In order to support this properly, initialize the level field with U32_MAX, which denotes unused level field. Reported-by: Stephan Gerhold Reviewed-by: Ulf Hansson Tested-by: Stephan Gerhold Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 24 ++++++++++++++++++++---- drivers/opp/of.c | 8 +++++++- include/linux/pm_opp.h | 5 ++++- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 84f345c69ea5..f2e2aa07b431 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -201,7 +201,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed); * @opp: opp for which level value has to be returned for * * Return: level read from device tree corresponding to the opp, else - * return 0. + * return U32_MAX. */ unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) { @@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); * @index: index of the required opp * * Return: performance state read from device tree corresponding to the - * required opp, else return 0. 
+ * required opp, else return U32_MAX. */ unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, unsigned int index) @@ -808,6 +808,14 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, struct dev_pm_opp *opp; opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL); + + /* False match */ + if (temp == OPP_LEVEL_UNSET) { + dev_err(dev, "%s: OPP levels aren't available\n", __func__); + dev_pm_opp_put(opp); + return ERR_PTR(-ENODEV); + } + *level = temp; return opp; } @@ -1049,12 +1057,18 @@ static int _set_opp_bw(const struct opp_table *opp_table, static int _set_performance_state(struct device *dev, struct device *pd_dev, struct dev_pm_opp *opp, int i) { - unsigned int pstate = likely(opp) ? opp->required_opps[i]->level: 0; + unsigned int pstate = 0; int ret; if (!pd_dev) return 0; + if (likely(opp)) { + pstate = opp->required_opps[i]->level; + if (pstate == OPP_LEVEL_UNSET) + return 0; + } + ret = dev_pm_domain_set_performance_state(pd_dev, pstate); if (ret) { dev_err(dev, "Failed to set performance state of %s: %d (%d)\n", @@ -1135,7 +1149,7 @@ static int _set_opp_level(struct device *dev, struct opp_table *opp_table, int ret = 0; if (opp) { - if (!opp->level) + if (opp->level == OPP_LEVEL_UNSET) return 0; level = opp->level; @@ -1867,6 +1881,8 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table) INIT_LIST_HEAD(&opp->node); + opp->level = OPP_LEVEL_UNSET; + return opp; } diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 81fa27599d58..85fad7ca0007 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1393,8 +1393,14 @@ int of_get_required_opp_performance_state(struct device_node *np, int index) opp = _find_opp_of_np(opp_table, required_np); if (opp) { - pstate = opp->level; + if (opp->level == OPP_LEVEL_UNSET) { + pr_err("%s: OPP levels aren't available for %pOF\n", + __func__, np); + } else { + pstate = opp->level; + } dev_pm_opp_put(opp); + } dev_pm_opp_put_opp_table(opp_table); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index ccd97bcef269..af53101a1383 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -92,9 +92,12 @@ struct dev_pm_opp_config { struct device ***virt_devs; }; +#define OPP_LEVEL_UNSET U32_MAX + /** * struct dev_pm_opp_data - The data to use to initialize an OPP. - * @level: The performance level for the OPP. + * @level: The performance level for the OPP. Set level to OPP_LEVEL_UNSET if + * level field isn't used. * @freq: The clock rate in Hz for the OPP. * @u_volt: The voltage in uV for the OPP. */ From 6d366d0e544676bf608769b9520644e3f654ff99 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 12 Oct 2023 15:45:21 +0530 Subject: [PATCH 05/35] OPP: Use _set_opp_level() for single genpd case There are two genpd (as required-opp) cases that we need to handle, devices with a single genpd and ones with multiple genpds. The multiple genpds case is clear, where the OPP core calls dev_pm_domain_attach_by_name() for them and uses the virtual devices returned by this helper to call dev_pm_domain_set_performance_state() later to change the performance state. The single genpd case however requires special handling as we need to use the same `dev` structure (instead of a virtual one provided by genpd core) for setting the performance state via dev_pm_domain_set_performance_state(). As we move towards more generic code to take care of the required OPPs, where we will recursively call dev_pm_opp_set_opp() for all the required OPPs, the above special case becomes a problem. 
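Concretely, the special case is this fallback in _opp_set_required_opps_genpd(), quoted from the diff below with an added comment:

	/* Single genpd: no virtual genpd devices were created, so the
	 * consumer device itself stands in for the genpd device. */
	struct device **genpd_virt_devs =
		opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev;

A generic recursion over required OPPs has no natural place for such a per-case fallback, which is what the rest of this message addresses.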
It doesn't make sense for a device's DT entry to have both "opp-level" and single "required-opps" entry pointing to a genpd's OPP, as that would make the OPP core call dev_pm_domain_set_performance_state() for two different values for the same device structure. And so we can reuse the 'opp->level' field in such a case and call _set_opp_level() for the device. Reviewed-by: Ulf Hansson Tested-by: Stephan Gerhold Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 6 ++++-- drivers/opp/of.c | 31 ++++++++++++++++++++++++++++--- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index f2e2aa07b431..aeb216f7e978 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1088,10 +1088,12 @@ static int _opp_set_required_opps_genpd(struct device *dev, struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down) { - struct device **genpd_virt_devs = - opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev; + struct device **genpd_virt_devs = opp_table->genpd_virt_devs; int index, target, delta, ret; + if (!genpd_virt_devs) + return 0; + /* Scaling up? Set required OPPs in normal order, else reverse */ if (!scaling_down) { index = 0; diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 85fad7ca0007..4cdeeab5ceee 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -296,7 +296,7 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) of_node_put(opp->np); } -static int _link_required_opps(struct dev_pm_opp *opp, +static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *opp_table, struct opp_table *required_table, int index) { struct device_node *np; @@ -314,6 +314,31 @@ static int _link_required_opps(struct dev_pm_opp *opp, return -ENODEV; } + /* + * There are two genpd (as required-opp) cases that we need to handle, + * devices with a single genpd and ones with multiple genpds. + * + * The single genpd case requires special handling as we need to use the + * same `dev` structure (instead of a virtual one provided by genpd + * core) for setting the performance state. + * + * It doesn't make sense for a device's DT entry to have both + * "opp-level" and single "required-opps" entry pointing to a genpd's + * OPP, as that would make the OPP core call + * dev_pm_domain_set_performance_state() for two different values for + * the same device structure. Let's treat single genpd configuration as a + * case where the OPP's level is directly available without required-opp + * link in the DT. + * + * Just update the `level` with the right value, which + * dev_pm_opp_set_opp() will take care of in the normal path itself. 
+ */ + if (required_table->is_genpd && opp_table->required_opp_count == 1 && + !opp_table->genpd_virt_devs) { + if (!WARN_ON(opp->level != OPP_LEVEL_UNSET)) + opp->level = opp->required_opps[0]->level; + } + return 0; } @@ -338,7 +363,7 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table, if (IS_ERR_OR_NULL(required_table)) continue; - ret = _link_required_opps(opp, required_table, i); + ret = _link_required_opps(opp, opp_table, required_table, i); if (ret) goto free_required_opps; } @@ -359,7 +384,7 @@ static int lazy_link_required_opps(struct opp_table *opp_table, int ret; list_for_each_entry(opp, &opp_table->opp_list, node) { - ret = _link_required_opps(opp, new_table, index); + ret = _link_required_opps(opp, opp_table, new_table, index); if (ret) return ret; } From e37440e7e2c2760475d60c5556b59c8880a7fd63 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 27 Oct 2023 14:17:48 +0530 Subject: [PATCH 06/35] OPP: Call dev_pm_opp_set_opp() for required OPPs Configuring the required OPP was never properly implemented; we just took an exception for genpds and configured them directly, while leaving out all other required OPP types. Now that a standard call to dev_pm_opp_set_opp() takes care of configuring the opp->level too, the special handling for genpds can be avoided by simply calling dev_pm_opp_set_opp() for the required OPPs, which shall eventually configure the corresponding level for genpds. This also makes it possible for us to configure other types of required OPPs (no concrete users yet though) via the same path. This is how other frameworks take care of parent nodes, like clocks, regulators, etc., where we recursively call the same helper. In order to call dev_pm_opp_set_opp() for the virtual genpd devices, they must share the OPP table of the genpd. Call _add_opp_dev() for them to get that done. This commit also extends the struct dev_pm_opp_config to pass required devices, for non-genpd cases, which can be used to call dev_pm_opp_set_opp() for the non-genpd required devices, as the usage sketch below shows.
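A hedged usage sketch of the new required_devs field (the consumer driver, the parent device pointer, and the assumption of a single required OPP table are all hypothetical; the array length must match the OPP table's required_opp_count):

	/* Hypothetical consumer: tell the OPP core which struct device
	 * backs this device's single required-opps entry. */
	static int example_init_opp(struct device *dev, struct device *parent_dev)
	{
		struct device *required_devs[1] = { parent_dev };	/* assumed */
		struct dev_pm_opp_config config = {
			.required_devs = required_devs,
		};
		int token = dev_pm_opp_set_config(dev, &config);

		return token < 0 ? token : 0;
	}

dev_pm_opp_set_config() returns a positive token on success, which would later be passed to dev_pm_opp_clear_config() on teardown.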
Reviewed-by: Ulf Hansson Tested-by: Stephan Gerhold Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 168 ++++++++++++++++++++--------------------- drivers/opp/of.c | 17 +++-- drivers/opp/opp.h | 8 +- include/linux/pm_opp.h | 7 +- 4 files changed, 100 insertions(+), 100 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index aeb216f7e978..e08375ed50aa 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1054,48 +1054,22 @@ static int _set_opp_bw(const struct opp_table *opp_table, return 0; } -static int _set_performance_state(struct device *dev, struct device *pd_dev, - struct dev_pm_opp *opp, int i) +/* This is only called for PM domain for now */ +static int _set_required_opps(struct device *dev, struct opp_table *opp_table, + struct dev_pm_opp *opp, bool up) { - unsigned int pstate = 0; - int ret; - - if (!pd_dev) - return 0; - - if (likely(opp)) { - pstate = opp->required_opps[i]->level; - if (pstate == OPP_LEVEL_UNSET) - return 0; - } - - ret = dev_pm_domain_set_performance_state(pd_dev, pstate); - if (ret) { - dev_err(dev, "Failed to set performance state of %s: %d (%d)\n", - dev_name(pd_dev), pstate, ret); - } - - return ret; -} - -static int _opp_set_required_opps_generic(struct device *dev, - struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down) -{ - dev_err(dev, "setting required-opps isn't supported for non-genpd devices\n"); - return -ENOENT; -} - -static int _opp_set_required_opps_genpd(struct device *dev, - struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down) -{ - struct device **genpd_virt_devs = opp_table->genpd_virt_devs; + struct device **devs = opp_table->required_devs; int index, target, delta, ret; - if (!genpd_virt_devs) + if (!devs) return 0; + /* required-opps not fully initialized yet */ + if (lazy_linking_pending(opp_table)) + return -EBUSY; + /* Scaling up? 
Set required OPPs in normal order, else reverse */ - if (!scaling_down) { + if (up) { index = 0; target = opp_table->required_opp_count; delta = 1; @@ -1106,9 +1080,11 @@ static int _opp_set_required_opps_genpd(struct device *dev, } while (index != target) { - ret = _set_performance_state(dev, genpd_virt_devs[index], opp, index); - if (ret) - return ret; + if (devs[index]) { + ret = dev_pm_opp_set_opp(devs[index], opp->required_opps[index]); + if (ret) + return ret; + } index += delta; } @@ -1116,34 +1092,6 @@ static int _opp_set_required_opps_genpd(struct device *dev, return 0; } -/* This is only called for PM domain for now */ -static int _set_required_opps(struct device *dev, struct opp_table *opp_table, - struct dev_pm_opp *opp, bool up) -{ - /* required-opps not fully initialized yet */ - if (lazy_linking_pending(opp_table)) - return -EBUSY; - - if (opp_table->set_required_opps) - return opp_table->set_required_opps(dev, opp_table, opp, up); - - return 0; -} - -/* Update set_required_opps handler */ -void _update_set_required_opps(struct opp_table *opp_table) -{ - /* Already set */ - if (opp_table->set_required_opps) - return; - - /* All required OPPs will belong to genpd or none */ - if (opp_table->required_opp_tables[0]->is_genpd) - opp_table->set_required_opps = _opp_set_required_opps_genpd; - else - opp_table->set_required_opps = _opp_set_required_opps_generic; -} - static int _set_opp_level(struct device *dev, struct opp_table *opp_table, struct dev_pm_opp *opp) { @@ -2406,19 +2354,13 @@ static void _opp_detach_genpd(struct opp_table *opp_table) { int index; - if (!opp_table->genpd_virt_devs) - return; - for (index = 0; index < opp_table->required_opp_count; index++) { - if (!opp_table->genpd_virt_devs[index]) + if (!opp_table->required_devs[index]) continue; - dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false); - opp_table->genpd_virt_devs[index] = NULL; + dev_pm_domain_detach(opp_table->required_devs[index], false); + opp_table->required_devs[index] = NULL; } - - kfree(opp_table->genpd_virt_devs); - opp_table->genpd_virt_devs = NULL; } /* @@ -2445,14 +2387,14 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, int index = 0, ret = -EINVAL; const char * const *name = names; - if (opp_table->genpd_virt_devs) - return 0; + if (!opp_table->required_devs) { + dev_err(dev, "Required OPPs not available, can't attach genpd\n"); + return -EINVAL; + } - opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count, - sizeof(*opp_table->genpd_virt_devs), - GFP_KERNEL); - if (!opp_table->genpd_virt_devs) - return -ENOMEM; + /* Checking only the first one is enough ? */ + if (opp_table->required_devs[0]) + return 0; while (*name) { if (index >= opp_table->required_opp_count) { @@ -2468,13 +2410,25 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, goto err; } - opp_table->genpd_virt_devs[index] = virt_dev; + /* + * Add the virtual genpd device as a user of the OPP table, so + * we can call dev_pm_opp_set_opp() on it directly. + * + * This will be automatically removed when the OPP table is + * removed, don't need to handle that here. 
+ */ + if (!_add_opp_dev(virt_dev, opp_table->required_opp_tables[index])) { + ret = -ENOMEM; + goto err; + } + + opp_table->required_devs[index] = virt_dev; index++; name++; } if (virt_devs) - *virt_devs = opp_table->genpd_virt_devs; + *virt_devs = opp_table->required_devs; return 0; @@ -2484,10 +2438,42 @@ err: } +static int _opp_set_required_devs(struct opp_table *opp_table, + struct device *dev, + struct device **required_devs) +{ + int i; + + if (!opp_table->required_devs) { + dev_err(dev, "Required OPPs not available, can't set required devs\n"); + return -EINVAL; + } + + /* Another device that shares the OPP table has set the required devs ? */ + if (opp_table->required_devs[0]) + return 0; + + for (i = 0; i < opp_table->required_opp_count; i++) + opp_table->required_devs[i] = required_devs[i]; + + return 0; +} + +static void _opp_put_required_devs(struct opp_table *opp_table) +{ + int i; + + for (i = 0; i < opp_table->required_opp_count; i++) + opp_table->required_devs[i] = NULL; +} + static void _opp_clear_config(struct opp_config_data *data) { - if (data->flags & OPP_CONFIG_GENPD) + if (data->flags & OPP_CONFIG_REQUIRED_DEVS) + _opp_put_required_devs(data->opp_table); + else if (data->flags & OPP_CONFIG_GENPD) _opp_detach_genpd(data->opp_table); + if (data->flags & OPP_CONFIG_REGULATOR) _opp_put_regulators(data->opp_table); if (data->flags & OPP_CONFIG_SUPPORTED_HW) @@ -2601,12 +2587,22 @@ int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) /* Attach genpds */ if (config->genpd_names) { + if (config->required_devs) + goto err; + ret = _opp_attach_genpd(opp_table, dev, config->genpd_names, config->virt_devs); if (ret) goto err; data->flags |= OPP_CONFIG_GENPD; + } else if (config->required_devs) { + ret = _opp_set_required_devs(opp_table, dev, + config->required_devs); + if (ret) + goto err; + + data->flags |= OPP_CONFIG_REQUIRED_DEVS; } ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX), diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 4cdeeab5ceee..5a7e294e56b7 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -165,7 +165,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, struct opp_table **required_opp_tables; struct device_node *required_np, *np; bool lazy = false; - int count, i; + int count, i, size; /* Traversing the first OPP node is all we need */ np = of_get_next_available_child(opp_np, NULL); @@ -179,12 +179,13 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, if (count <= 0) goto put_np; - required_opp_tables = kcalloc(count, sizeof(*required_opp_tables), - GFP_KERNEL); + size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs); + required_opp_tables = kcalloc(count, size, GFP_KERNEL); if (!required_opp_tables) goto put_np; opp_table->required_opp_tables = required_opp_tables; + opp_table->required_devs = (void *)(required_opp_tables + count); opp_table->required_opp_count = count; for (i = 0; i < count; i++) { @@ -208,8 +209,6 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, mutex_lock(&opp_table_lock); list_add(&opp_table->lazy, &lazy_opp_tables); mutex_unlock(&opp_table_lock); - } else { - _update_set_required_opps(opp_table); } goto put_np; @@ -332,9 +331,14 @@ static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *opp_tab * * Just update the `level` with the right value, which * dev_pm_opp_set_opp() will take care of in the normal path itself. 
+ * + * There is another case though, where a genpd's OPP table has + * required-opps set to a parent genpd. The OPP core expects the user to + * set the respective required `struct device` pointer via + * dev_pm_opp_set_config(). */ if (required_table->is_genpd && opp_table->required_opp_count == 1 && - !opp_table->genpd_virt_devs) { + !opp_table->required_devs[0]) { if (!WARN_ON(opp->level != OPP_LEVEL_UNSET)) opp->level = opp->required_opps[0]->level; } @@ -447,7 +451,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) /* All required opp-tables found, remove from lazy list */ if (!lazy) { - _update_set_required_opps(opp_table); list_del_init(&opp_table->lazy); list_for_each_entry(opp, &opp_table->opp_list, node) diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 08366f90f16b..23dcb2fbf8c3 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -35,6 +35,7 @@ extern struct list_head opp_tables; #define OPP_CONFIG_PROP_NAME BIT(3) #define OPP_CONFIG_SUPPORTED_HW BIT(4) #define OPP_CONFIG_GENPD BIT(5) +#define OPP_CONFIG_REQUIRED_DEVS BIT(6) /** * struct opp_config_data - data for set config operations @@ -160,9 +161,9 @@ enum opp_table_access { * @rate_clk_single: Currently configured frequency for single clk. * @current_opp: Currently configured OPP for the table. * @suspend_opp: Pointer to OPP to be used during device suspend. - * @genpd_virt_devs: List of virtual devices for multiple genpd support. * @required_opp_tables: List of device OPP tables that are required by OPPs in * this table. + * @required_devs: List of devices for required OPP tables. * @required_opp_count: Number of required devices. * @supported_hw: Array of version number to support. * @supported_hw_count: Number of elements in supported_hw array. @@ -180,7 +181,6 @@ enum opp_table_access { * @path_count: Number of interconnect paths * @enabled: Set to true if the device's resources are enabled/configured. * @is_genpd: Marks if the OPP table belongs to a genpd. - * @set_required_opps: Helper responsible to set required OPPs. * @dentry: debugfs dentry pointer of the real device directory (not links). * @dentry_name: Name of the real dentry. * @@ -211,8 +211,8 @@ struct opp_table { struct dev_pm_opp *current_opp; struct dev_pm_opp *suspend_opp; - struct device **genpd_virt_devs; struct opp_table **required_opp_tables; + struct device **required_devs; unsigned int required_opp_count; unsigned int *supported_hw; @@ -229,8 +229,6 @@ struct opp_table { unsigned int path_count; bool enabled; bool is_genpd; - int (*set_required_opps)(struct device *dev, - struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down); #ifdef CONFIG_DEBUG_FS struct dentry *dentry; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index af53101a1383..81dff7facdc9 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -74,8 +74,10 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table, * @supported_hw_count: Number of elements in the array. * @regulator_names: Array of pointers to the names of the regulator, NULL terminated. * @genpd_names: Null terminated array of pointers containing names of genpd to - * attach. - * @virt_devs: Pointer to return the array of virtual devices. + * attach. Mutually exclusive with required_devs. + * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually + * exclusive with required_devs. + * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs. 
* * This structure contains platform specific OPP configurations for the device. */ @@ -90,6 +92,7 @@ struct dev_pm_opp_config { const char * const *regulator_names; const char * const *genpd_names; struct device ***virt_devs; + struct device **required_devs; }; #define OPP_LEVEL_UNSET U32_MAX From 925141432fa4d8325b7156e88e53d740b12d0b0e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 16 Nov 2023 15:59:35 +0530 Subject: [PATCH 07/35] OPP: Don't set OPP recursively for a parent genpd Like other frameworks (clk, regulator, etc.), the genpd core too takes care of propagating performance states to parent genpds. The OPP core shouldn't attempt the same, or it may result in undefined behavior. Add checks at various places to take care of the same. Reviewed-by: Ulf Hansson Tested-by: Stephan Gerhold Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 16 +++++++++++++++- drivers/opp/of.c | 7 +++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index e08375ed50aa..4f1ca84d9ed0 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2392,6 +2392,12 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, return -EINVAL; } + /* Genpd core takes care of propagation to parent genpd */ + if (opp_table->is_genpd) { + dev_err(dev, "%s: Operation not supported for genpds\n", __func__); + return -EOPNOTSUPP; + } + /* Checking only the first one is enough ? */ if (opp_table->required_devs[0]) return 0; @@ -2453,8 +2459,16 @@ static int _opp_set_required_devs(struct opp_table *opp_table, if (opp_table->required_devs[0]) return 0; - for (i = 0; i < opp_table->required_opp_count; i++) + for (i = 0; i < opp_table->required_opp_count; i++) { + /* Genpd core takes care of propagation to parent genpd */ + if (required_devs[i] && opp_table->is_genpd && + opp_table->required_opp_tables[i]->is_genpd) { + dev_err(dev, "%s: Operation not supported for genpds\n", __func__); + return -EOPNOTSUPP; + } + opp_table->required_devs[i] = required_devs[i]; + } return 0; } diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 5a7e294e56b7..f9f0b22bccbb 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -339,8 +339,11 @@ static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *opp_tab */ if (required_table->is_genpd && opp_table->required_opp_count == 1 && !opp_table->required_devs[0]) { - if (!WARN_ON(opp->level != OPP_LEVEL_UNSET)) - opp->level = opp->required_opps[0]->level; + /* Genpd core takes care of propagation to parent genpd */ + if (!opp_table->is_genpd) { + if (!WARN_ON(opp->level != OPP_LEVEL_UNSET)) + opp->level = opp->required_opps[0]->level; + } } return 0; From 19cc8b1819a40410c50a3efab6cf27b73298deb5 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 28 Nov 2023 12:31:38 +0530 Subject: [PATCH 08/35] OPP: Check for invalid OPP in dev_pm_opp_find_level_ceil() _find_key_ceil() may return an error and that must be checked before passing the same to dev_pm_opp_put().
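Without such a check, a failed lookup hands an ERR_PTR-encoded value (neither NULL nor a valid pointer) to dev_pm_opp_put(), which dereferences it. The shape of the fix, sketched with an added comment:

	opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
	if (IS_ERR(opp))	/* e.g. ERR_PTR(-ENODEV), not a real OPP */
		return opp;	/* never pass an ERR_PTR to dev_pm_opp_put() */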
Fixes: 41907aa4ae37 ("OPP: Level zero is valid") Reported-by: Dan Carpenter Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 4f1ca84d9ed0..c022d548067d 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -808,6 +808,8 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, struct dev_pm_opp *opp; opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL); + if (IS_ERR(opp)) + return opp; /* False match */ if (temp == OPP_LEVEL_UNSET) { From 2719675fa8111a8d7a060133e1dd4797d20c9754 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 20 Nov 2023 10:59:42 -0800 Subject: [PATCH 09/35] cpufreq: intel_pstate: Prioritize firmware-provided balance performance EPP The platform firmware can provide a balance performance EPP value by enabling HWP and programming the EPP to the desired value. However, currently this only takes effect for processors listed in intel_epp_balance_perf[], so in order to enable a new processor model to utilize this mechanism, that table needs to be updated. It arguably should not be necessary to modify the kernel to work properly with every new generation of processors, though, and distributions that don't always ship the most recent kernels should be able to run reasonably well on new hardware without code changes. For this reason, move the check to avoid updating the EPP when the balance performance EPP is unmodified from the power-up default of 0x80 after the check that allows the firmware-provided balance performance EPP value to be retrieved. This will cause the code to always look for the firmware-provided value before consulting intel_epp_balance_perf[] and the handling of new hardware will not depend on whether or not that table has been updated yet. Signed-off-by: Srinivas Pandruvada [ rjw: Subject and changelog edits ] Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index a534a1f7f1ee..dd6d23e389f1 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1691,13 +1691,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) { cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); - /* - * If this CPU gen doesn't call for change in balance_perf - * EPP return. - */ - if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) - return; - /* * If the EPP is set by firmware, which means that firmware enabled HWP * - Is equal or less than 0x80 (default balance_perf EPP) @@ -1710,6 +1703,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) return; } + /* + * If this CPU gen doesn't call for change in balance_perf + * EPP return. + */ + if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) + return; + /* * Use hard coded value per gen to update the balance_perf * and default EPP. From c4a5118a3ae1eadc687d84eef9431f9e13eb015c Mon Sep 17 00:00:00 2001 From: Alexandra Diupina Date: Tue, 5 Dec 2023 18:12:20 +0300 Subject: [PATCH 10/35] cpufreq: scmi: process the result of devm_of_clk_add_hw_provider() devm_of_clk_add_hw_provider() may return an errno, so add a return value check. Found by Linux Verification Center (linuxtesting.org) with SVACE. 
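dev_err_probe() fits this kind of probe-path check well: it logs the failure (staying quiet for -EPROBE_DEFER) and returns the error passed to it, keeping the whole check compact. The pattern, as used in the diff below:

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
	if (ret)
		return dev_err_probe(dev, ret, "registering clock provider failed\n");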
Fixes: 8410e7f3b31e ("cpufreq: scmi: Fix OPP addition failure with a dummy clock provider") Signed-off-by: Alexandra Diupina Signed-off-by: Viresh Kumar --- drivers/cpufreq/scmi-cpufreq.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index c8a7ccc42c16..4ee23f4ebf4a 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -334,8 +334,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev) #ifdef CONFIG_COMMON_CLK /* dummy clock provider as needed by OPP if clocks property is used */ - if (of_property_present(dev->of_node, "#clock-cells")) - devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL); + if (of_property_present(dev->of_node, "#clock-cells")) { + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL); + if (ret) + return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__); + } #endif ret = cpufreq_register_driver(&scmi_cpufreq_driver); From eeae55ed9c0a74604a49789e36b7cdf0ee8bd69c Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sat, 2 Dec 2023 02:09:28 +0800 Subject: [PATCH 11/35] intel_idle: Add Meteorlake support Add intel_idle support for MeteorLake. C1 and C1E states on Meteorlake are mutually exclusive, like Alderlake and Raptorlake, but they have little latency difference and a measurable power difference, so always enable the "C1E promotion" bit and expose C1E only. Expose C6 because it uses less power than C1E and has smaller latency than C8/C10. Ignore C8 and expose C10, because C8 does not show a latency advantage over C10. Signed-off-by: Zhang Rui Signed-off-by: Rafael J. Wysocki --- drivers/idle/intel_idle.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index dcda0afecfc5..cfd0b24fd7f1 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -923,6 +923,35 @@ static struct cpuidle_state adl_l_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state mtl_l_cstates[] __initdata = { + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 140, + .target_residency = 420, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C10", + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 310, + .target_residency = 930, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state gmt_cstates[] __initdata = { { .name = "C1", @@ -1349,6 +1378,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = { .state_table = adl_l_cstates, }; +static const struct idle_cpu idle_cpu_mtl_l __initconst = { + .state_table = mtl_l_cstates, +}; + static const struct idle_cpu idle_cpu_gmt __initconst = { .state_table = gmt_cstates, }; @@ -1423,6 +1456,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l), + X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &idle_cpu_mtl_l), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, 
&idle_cpu_gmt), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr), From a1ca8295ee53a2fc57085fae26df37228c655791 Mon Sep 17 00:00:00 2001 From: Wang chaodong Date: Fri, 20 Oct 2023 16:51:06 +0800 Subject: [PATCH 12/35] PM: hibernate: Drop unnecessary local variable initialization It is not necessary to initialize the error variable in create_basic_memory_bitmaps(), because it is only read after being assigned a value. Signed-off-by: Wang chaodong [ rjw: Subject and changelog rewrite ] Signed-off-by: Rafael J. Wysocki --- kernel/power/snapshot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 50a15408c3fc..71b2f12ed3b5 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1119,7 +1119,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm) int create_basic_memory_bitmaps(void) { struct memory_bitmap *bm1, *bm2; - int error = 0; + int error; if (forbidden_pages_map && free_pages_map) return 0; From bbeaa4691fa8682e2fe2e87f28d5fce39805fa68 Mon Sep 17 00:00:00 2001 From: Li zeming Date: Fri, 27 Oct 2023 09:55:33 +0800 Subject: [PATCH 13/35] PM: hibernate: Do not initialize error in swap_write_page() 'error' first receives the function result before it is used, and it does not need to be assigned a value during definition. Signed-off-by: Li zeming [ rjw: Subject rewrite ] Signed-off-by: Rafael J. Wysocki --- kernel/power/swap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index a2cb0babb5ec..68973ca2cf07 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -451,7 +451,7 @@ err_close: static int swap_write_page(struct swap_map_handle *handle, void *buf, struct hib_bio_batch *hb) { - int error = 0; + int error; sector_t offset; if (!handle->cur) From 4ac934b1aaa99e00ca25875d55094a4fe34e212d Mon Sep 17 00:00:00 2001 From: Li zeming Date: Tue, 24 Oct 2023 10:04:34 +0800 Subject: [PATCH 14/35] PM: hibernate: Do not initialize error in snapshot_write_next() The error variable in snapshot_write_next() gets a value before it is used, so don't initialize it to 0 upfront. Signed-off-by: Li zeming [ rjw: Subject and changelog rewrite ] Signed-off-by: Rafael J. Wysocki --- kernel/power/snapshot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 71b2f12ed3b5..e3e8f1c6e75f 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -2778,7 +2778,7 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) int snapshot_write_next(struct snapshot_handle *handle) { static struct chain_allocator ca; - int error = 0; + int error; next: /* Check if we have already loaded the entire image */ From 0c4cae1bc00d31c78858c184ede351baea232bdb Mon Sep 17 00:00:00 2001 From: Chris Feng Date: Wed, 13 Dec 2023 16:32:51 +0800 Subject: [PATCH 15/35] PM: hibernate: Avoid missing wakeup events during hibernation Wakeup events that occur during the hibernation process's hibernation_platform_enter() stage cannot wake up the system. Although the current hibernation framework will execute part of the recovery process after a wakeup event occurs, it ultimately performs a shutdown operation because the system does not check the return value of hibernation_platform_enter(). In short, if a wakeup event occurs before putting the system into the final low-power state, it will be missed. 
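For reference, the pre-patch power_down() dropped the return value on the floor, so the platform case always fell through to shutdown (reconstructed from the diff below, with the surrounding cases elided):

	case HIBERNATION_PLATFORM:
		hibernation_platform_enter();	/* wakeup-event errors lost */
		fallthrough;
	case HIBERNATION_SHUTDOWN:
		...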
To solve this problem, check the return value of hibernation_platform_enter(). When it returns -EAGAIN or -EBUSY (indicating the occurrence of a wakeup event), execute the hibernation recovery process, discard the previously saved image, and ultimately return to the working state. Signed-off-by: Chris Feng [ rjw: Rephrase the message printed when going back to the working state ] Signed-off-by: Rafael J. Wysocki --- kernel/power/hibernate.c | 10 ++++++++-- kernel/power/power.h | 2 ++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index dee341ae4ace..4b0b7cf2e019 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -642,9 +642,9 @@ int hibernation_platform_enter(void) */ static void power_down(void) { -#ifdef CONFIG_SUSPEND int error; +#ifdef CONFIG_SUSPEND if (hibernation_mode == HIBERNATION_SUSPEND) { error = suspend_devices_and_enter(mem_sleep_current); if (error) { @@ -667,7 +667,13 @@ static void power_down(void) kernel_restart(NULL); break; case HIBERNATION_PLATFORM: - hibernation_platform_enter(); + error = hibernation_platform_enter(); + if (error == -EAGAIN || error == -EBUSY) { + swsusp_unmark(); + events_check_enabled = false; + pr_info("Wakeup event detected during hibernation, rolling back.\n"); + return; + } fallthrough; case HIBERNATION_SHUTDOWN: if (kernel_can_power_off()) diff --git a/kernel/power/power.h b/kernel/power/power.h index 17fd9aaaf084..8499a39c62f4 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -175,6 +175,8 @@ extern int swsusp_write(unsigned int flags); void swsusp_close(void); #ifdef CONFIG_SUSPEND extern int swsusp_unmark(void); +#else +static inline int swsusp_unmark(void) { return 0; } #endif struct __kernel_old_timeval; From 71cd7e80cfde548959952eac7063aeaea1f2e1c6 Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Thu, 16 Nov 2023 08:56:09 +0800 Subject: [PATCH 16/35] PM: hibernate: Enforce ordering during image compression/decompression An S4 (suspend to disk) test on the LoongArch 3A6000 platform sometimes fails with the following error message in the dmesg log: Invalid LZO compressed length That happens because when compressing/decompressing the image, the synchronization between the control thread and the compress/decompress/crc thread is based on a relaxed ordering interface, which is unreliable, and the following interleaving may occur between CPU 0 (in save_image_lzo()) and CPU 1 (in lzo_compress_threadfn()): CPU 1 executes atomic_set(&d->stop, 1); CPU 0 observes it via atomic_read(&data[thr].stop) and copies the length with data[thr].cmp = data[thr].cmp_len; only afterwards does CPU 1's WRITE of data[thr].cmp_len become visible. Then CPU 0 gets a stale cmp_len and writes it to disk. During resume from S4, a wrong cmp_len is loaded. To maintain data consistency between the two threads, use the acquire/release variants of atomic set and read operations.
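The guarantee being relied on, as a standalone C11 sketch (a userspace model, not the kernel code; the kernel's atomic_set_release()/atomic_read_acquire() provide the same one-way ordering):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int stop;
	static size_t cmp_len;	/* plain (non-atomic) payload */

	static void *worker(void *arg)
	{
		(void)arg;
		cmp_len = 1234;		/* plain store of the payload */
		/* release: orders the payload store before the flag store */
		atomic_store_explicit(&stop, 1, memory_order_release);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		/* acquire: pairs with the release above; once stop == 1 is
		 * observed, the cmp_len store is guaranteed to be visible */
		while (!atomic_load_explicit(&stop, memory_order_acquire))
			;
		printf("cmp_len = %zu\n", cmp_len);	/* always 1234 */
		pthread_join(t, NULL);
		return 0;
	}

With relaxed accesses in place of the release/acquire pair, the reader could legally observe stop == 1 while still seeing a stale cmp_len, which is exactly the LZO failure described above.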
Fixes: 081a9d043c98 ("PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image") Cc: All applicable Signed-off-by: Hongchen Zhang Co-developed-by: Weihao Li Signed-off-by: Weihao Li [ rjw: Subject rewrite and changelog edits ] Signed-off-by: Rafael J. Wysocki --- kernel/power/swap.c | 38 +++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 68973ca2cf07..975e7195573b 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -606,11 +606,11 @@ static int crc32_threadfn(void *data) unsigned i; while (1) { - wait_event(d->go, atomic_read(&d->ready) || + wait_event(d->go, atomic_read_acquire(&d->ready) || kthread_should_stop()); if (kthread_should_stop()) { d->thr = NULL; - atomic_set(&d->stop, 1); + atomic_set_release(&d->stop, 1); wake_up(&d->done); break; } @@ -619,7 +619,7 @@ static int crc32_threadfn(void *data) for (i = 0; i < d->run_threads; i++) *d->crc32 = crc32_le(*d->crc32, d->unc[i], *d->unc_len[i]); - atomic_set(&d->stop, 1); + atomic_set_release(&d->stop, 1); wake_up(&d->done); } return 0; @@ -649,12 +649,12 @@ static int lzo_compress_threadfn(void *data) struct cmp_data *d = data; while (1) { - wait_event(d->go, atomic_read(&d->ready) || + wait_event(d->go, atomic_read_acquire(&d->ready) || kthread_should_stop()); if (kthread_should_stop()) { d->thr = NULL; d->ret = -1; - atomic_set(&d->stop, 1); + atomic_set_release(&d->stop, 1); wake_up(&d->done); break; } @@ -663,7 +663,7 @@ static int lzo_compress_threadfn(void *data) d->ret = lzo1x_1_compress(d->unc, d->unc_len, d->cmp + LZO_HEADER, &d->cmp_len, d->wrk); - atomic_set(&d->stop, 1); + atomic_set_release(&d->stop, 1); wake_up(&d->done); } return 0; @@ -798,7 +798,7 @@ static int save_image_lzo(struct swap_map_handle *handle, data[thr].unc_len = off; - atomic_set(&data[thr].ready, 1); + atomic_set_release(&data[thr].ready, 1); wake_up(&data[thr].go); } @@ -806,12 +806,12 @@ static int save_image_lzo(struct swap_map_handle *handle, break; crc->run_threads = thr; - atomic_set(&crc->ready, 1); + atomic_set_release(&crc->ready, 1); wake_up(&crc->go); for (run_threads = thr, thr = 0; thr < run_threads; thr++) { wait_event(data[thr].done, - atomic_read(&data[thr].stop)); + atomic_read_acquire(&data[thr].stop)); atomic_set(&data[thr].stop, 0); ret = data[thr].ret; @@ -850,7 +850,7 @@ static int save_image_lzo(struct swap_map_handle *handle, } } - wait_event(crc->done, atomic_read(&crc->stop)); + wait_event(crc->done, atomic_read_acquire(&crc->stop)); atomic_set(&crc->stop, 0); } @@ -1132,12 +1132,12 @@ static int lzo_decompress_threadfn(void *data) struct dec_data *d = data; while (1) { - wait_event(d->go, atomic_read(&d->ready) || + wait_event(d->go, atomic_read_acquire(&d->ready) || kthread_should_stop()); if (kthread_should_stop()) { d->thr = NULL; d->ret = -1; - atomic_set(&d->stop, 1); + atomic_set_release(&d->stop, 1); wake_up(&d->done); break; } @@ -1150,7 +1150,7 @@ static int lzo_decompress_threadfn(void *data) flush_icache_range((unsigned long)d->unc, (unsigned long)d->unc + d->unc_len); - atomic_set(&d->stop, 1); + atomic_set_release(&d->stop, 1); wake_up(&d->done); } return 0; @@ -1335,7 +1335,7 @@ static int load_image_lzo(struct swap_map_handle *handle, } if (crc->run_threads) { - wait_event(crc->done, atomic_read(&crc->stop)); + wait_event(crc->done, atomic_read_acquire(&crc->stop)); atomic_set(&crc->stop, 0); crc->run_threads = 0; } @@ -1371,7 +1371,7 @@ static int load_image_lzo(struct swap_map_handle *handle, pg = 0; } - atomic_set(&data[thr].ready, 1); + atomic_set_release(&data[thr].ready, 1); wake_up(&data[thr].go); } @@ -1390,7 +1390,7 @@ static int load_image_lzo(struct swap_map_handle *handle, for (run_threads = thr, thr = 0; thr < run_threads; thr++) { 
wait_event(data[thr].done, - atomic_read(&data[thr].stop)); + atomic_read_acquire(&data[thr].stop)); atomic_set(&data[thr].stop, 0); ret = data[thr].ret; @@ -1421,7 +1421,7 @@ static int load_image_lzo(struct swap_map_handle *handle, ret = snapshot_write_next(snapshot); if (ret <= 0) { crc->run_threads = thr + 1; - atomic_set(&crc->ready, 1); + atomic_set_release(&crc->ready, 1); wake_up(&crc->go); goto out_finish; } @@ -1429,13 +1429,13 @@ static int load_image_lzo(struct swap_map_handle *handle, } crc->run_threads = thr; - atomic_set(&crc->ready, 1); + atomic_set_release(&crc->ready, 1); wake_up(&crc->go); } out_finish: if (crc->run_threads) { - wait_event(crc->done, atomic_read(&crc->stop)); + wait_event(crc->done, atomic_read_acquire(&crc->stop)); atomic_set(&crc->stop, 0); } stop = ktime_get(); From 0990319a0400db1d6069b5549327cd9105a266d5 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Fri, 15 Dec 2023 16:37:06 +0100 Subject: [PATCH 17/35] cpufreq: armada-8k: Fix parameter type warning The second parameter of clk_get() is of type 'const char *', so use NULL instead of the integer 0 to resolve a sparse warning: drivers/cpufreq/armada-8k-cpufreq.c:60:40: warning: Using plain integer as NULL pointer drivers/cpufreq/armada-8k-cpufreq.c:168:40: warning: Using plain integer as NULL pointer Reported-by: kernel test robot Signed-off-by: Gregory CLEMENT Reviewed-by: Andrew Lunn Signed-off-by: Viresh Kumar --- drivers/cpufreq/armada-8k-cpufreq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index 8afefdea4d80..ce5a5641b6dd 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -57,7 +57,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk, continue; } - clk = clk_get(cpu_dev, 0); + clk = clk_get(cpu_dev, NULL); if (IS_ERR(clk)) { pr_warn("Cannot get clock for CPU %d\n", cpu); } else { @@ -165,7 +165,7 @@ static int __init armada_8k_cpufreq_init(void) continue; } - clk = clk_get(cpu_dev, 0); + clk = clk_get(cpu_dev, NULL); if (IS_ERR(clk)) { pr_err("Cannot get clock for CPU %d\n", cpu); From aed5ed595960c6d301dcd4ed31aeaa7a8054c0c6 Mon Sep 17 00:00:00 2001 From: Mukesh Ojha Date: Sat, 25 Nov 2023 02:41:58 +0530 Subject: [PATCH 18/35] PM / devfreq: Synchronize devfreq_monitor_[start/stop] There is a chance that a frequent switch of the governor done in a loop results in timer list corruption, where the timer is cancelled from two places: once by cancel_delayed_work_sync() and then again by expire_timers(), as can be seen from the traces[1]. while true do echo "simple_ondemand" > /sys/class/devfreq/1d84000.ufshc/governor echo "performance" > /sys/class/devfreq/1d84000.ufshc/governor done It looks to be an issue in the devfreq driver: devfreq_monitor_[start/stop] need to be synchronized so that the delayed work does not get corrupted while it is being queued, running, or cancelled. Let's use the polling flag and the devfreq lock to synchronize, so that the timer instance is not queued twice and the work data is not corrupted. [1] ... .. 
-0 [003] 9436.209662: timer_cancel timer=0xffffff80444f0428 -0 [003] 9436.209664: timer_expire_entry timer=0xffffff80444f0428 now=0x10022da1c function=__typeid__ZTSFvP10timer_listE_global_addr baseclk=0x10022da1c -0 [003] 9436.209718: timer_expire_exit timer=0xffffff80444f0428 kworker/u16:6-14217 [003] 9436.209863: timer_start timer=0xffffff80444f0428 function=__typeid__ZTSFvP10timer_listE_global_addr expires=0x10022da2b now=0x10022da1c flags=182452227 vendor.xxxyyy.ha-1593 [004] 9436.209888: timer_cancel timer=0xffffff80444f0428 vendor.xxxyyy.ha-1593 [004] 9436.216390: timer_init timer=0xffffff80444f0428 vendor.xxxyyy.ha-1593 [004] 9436.216392: timer_start timer=0xffffff80444f0428 function=__typeid__ZTSFvP10timer_listE_global_addr expires=0x10022da2c now=0x10022da1d flags=186646532 vendor.xxxyyy.ha-1593 [005] 9436.220992: timer_cancel timer=0xffffff80444f0428 xxxyyyTraceManag-7795 [004] 9436.261641: timer_cancel timer=0xffffff80444f0428 [2] 9436.261653][ C4] Unable to handle kernel paging request at virtual address dead00000000012a [ 9436.261664][ C4] Mem abort info: [ 9436.261666][ C4] ESR = 0x96000044 [ 9436.261669][ C4] EC = 0x25: DABT (current EL), IL = 32 bits [ 9436.261671][ C4] SET = 0, FnV = 0 [ 9436.261673][ C4] EA = 0, S1PTW = 0 [ 9436.261675][ C4] Data abort info: [ 9436.261677][ C4] ISV = 0, ISS = 0x00000044 [ 9436.261680][ C4] CM = 0, WnR = 1 [ 9436.261682][ C4] [dead00000000012a] address between user and kernel address ranges [ 9436.261685][ C4] Internal error: Oops: 96000044 [#1] PREEMPT SMP [ 9436.261701][ C4] Skip md ftrace buffer dump for: 0x3a982d0 ... [ 9436.262138][ C4] CPU: 4 PID: 7795 Comm: TraceManag Tainted: G S W O 5.10.149-android12-9-o-g17f915d29d0c #1 [ 9436.262141][ C4] Hardware name: Qualcomm Technologies, Inc. (DT) [ 9436.262144][ C4] pstate: 22400085 (nzCv daIf +PAN -UAO +TCO BTYPE=--) [ 9436.262161][ C4] pc : expire_timers+0x9c/0x438 [ 9436.262164][ C4] lr : expire_timers+0x2a4/0x438 [ 9436.262168][ C4] sp : ffffffc010023dd0 [ 9436.262171][ C4] x29: ffffffc010023df0 x28: ffffffd0636fdc18 [ 9436.262178][ C4] x27: ffffffd063569dd0 x26: ffffffd063536008 [ 9436.262182][ C4] x25: 0000000000000001 x24: ffffff88f7c69280 [ 9436.262185][ C4] x23: 00000000000000e0 x22: dead000000000122 [ 9436.262188][ C4] x21: 000000010022da29 x20: ffffff8af72b4e80 [ 9436.262191][ C4] x19: ffffffc010023e50 x18: ffffffc010025038 [ 9436.262195][ C4] x17: 0000000000000240 x16: 0000000000000201 [ 9436.262199][ C4] x15: ffffffffffffffff x14: ffffff889f3c3100 [ 9436.262203][ C4] x13: ffffff889f3c3100 x12: 00000000049f56b8 [ 9436.262207][ C4] x11: 00000000049f56b8 x10: 00000000ffffffff [ 9436.262212][ C4] x9 : ffffffc010023e50 x8 : dead000000000122 [ 9436.262216][ C4] x7 : ffffffffffffffff x6 : ffffffc0100239d8 [ 9436.262220][ C4] x5 : 0000000000000000 x4 : 0000000000000101 [ 9436.262223][ C4] x3 : 0000000000000080 x2 : ffffff889edc155c [ 9436.262227][ C4] x1 : ffffff8001005200 x0 : ffffff80444f0428 [ 9436.262232][ C4] Call trace: [ 9436.262236][ C4] expire_timers+0x9c/0x438 [ 9436.262240][ C4] __run_timers+0x1f0/0x330 [ 9436.262245][ C4] run_timer_softirq+0x28/0x58 [ 9436.262255][ C4] efi_header_end+0x168/0x5ec [ 9436.262265][ C4] __irq_exit_rcu+0x108/0x124 [ 9436.262274][ C4] __handle_domain_irq+0x118/0x1e4 [ 9436.262282][ C4] gic_handle_irq.30369+0x6c/0x2bc [ 9436.262286][ C4] el0_irq_naked+0x60/0x6c Link: https://lore.kernel.org/all/1700860318-4025-1-git-send-email-quic_mojha@quicinc.com/ Reported-by: Joyyoung Huang Acked-by: MyungJoo Ham Signed-off-by: Mukesh Ojha Signed-off-by: 
Chanwoo Choi --- drivers/devfreq/devfreq.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 017a87465776..98657d3b9435 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -461,10 +461,14 @@ static void devfreq_monitor(struct work_struct *work) if (err) dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); + if (devfreq->stop_polling) + goto out; + queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); - mutex_unlock(&devfreq->lock); +out: + mutex_unlock(&devfreq->lock); trace_devfreq_monitor(devfreq); } @@ -483,6 +487,10 @@ void devfreq_monitor_start(struct devfreq *devfreq) if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) return; + mutex_lock(&devfreq->lock); + if (delayed_work_pending(&devfreq->work)) + goto out; + switch (devfreq->profile->timer) { case DEVFREQ_TIMER_DEFERRABLE: INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); @@ -491,12 +499,16 @@ void devfreq_monitor_start(struct devfreq *devfreq) INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor); break; default: - return; + goto out; } if (devfreq->profile->polling_ms) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); + +out: + devfreq->stop_polling = false; + mutex_unlock(&devfreq->lock); } EXPORT_SYMBOL(devfreq_monitor_start); @@ -513,6 +525,14 @@ void devfreq_monitor_stop(struct devfreq *devfreq) if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN)) return; + mutex_lock(&devfreq->lock); + if (devfreq->stop_polling) { + mutex_unlock(&devfreq->lock); + return; + } + + devfreq->stop_polling = true; + mutex_unlock(&devfreq->lock); cancel_delayed_work_sync(&devfreq->work); } EXPORT_SYMBOL(devfreq_monitor_stop); From ac89d11b93cc37c52dc38206c3eaffd4fa603f91 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 14 Dec 2023 18:56:21 +0200 Subject: [PATCH 19/35] intel_idle: add Grand Ridge SoC support Add Intel Grand Ridge SoC C-states, which are C1, C1E, and C6S. The Grand Ridge SoC is built with modules, each module includes 4 cores (Crestmont microarchitecture). There is one L2 cache per module, shared between the 4 cores. There is no core C6 state, but there is C6S state, which has module scope: when all 4 cores request C6S, the entire module (4 cores + L2 cache) enters the low power state. Package C6 is not supported by Grand Ridge SoC. Signed-off-by: Artem Bityutskiy Signed-off-by: Rafael J. 
Wysocki --- drivers/idle/intel_idle.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index cfd0b24fd7f1..3b846d4f8707 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1271,6 +1271,35 @@ static struct cpuidle_state snr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state grr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 10, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6S", + .desc = "MWAIT 0x22", + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 140, + .target_residency = 500, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1420,6 +1449,12 @@ static const struct idle_cpu idle_cpu_snr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_grr __initconst = { + .state_table = grr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), @@ -1466,6 +1501,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr), {} }; From 92813fd5b1562e547120c8489137b040892fe1bc Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 14 Dec 2023 18:56:22 +0200 Subject: [PATCH 20/35] intel_idle: add Sierra Forest SoC support Add Sierra Forest SoC C-states, which are C1, C1E, C6S, and C6SP. Sierra Forest SoC is built with modules, each module includes 4 cores (Crestmont microarchitecture). There is one L2 cache per module, shared between the 4 cores. There is no core C6 state, but there is C6S state, which has module scope: when all 4 cores request C6S, the entire module (4 cores + L2 cache) enters the low power state. C6SP state has package scope - when all modules in the package enter C6S, the package enters the low power state. Signed-off-by: Artem Bityutskiy Signed-off-by: Rafael J.
Wysocki --- drivers/idle/intel_idle.c | 44 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3b846d4f8707..b4390822edad 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1300,6 +1300,43 @@ static struct cpuidle_state grr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state srf_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 10, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6S", + .desc = "MWAIT 0x22", + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 270, + .target_residency = 700, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6SP", + .desc = "MWAIT 0x23", + .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 310, + .target_residency = 900, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1455,6 +1492,12 @@ static const struct idle_cpu idle_cpu_grr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_srf __initconst = { + .state_table = srf_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), @@ -1502,6 +1545,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr), X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf), {} }; From 489c693bd04a2308865dc50f37bd0b5f6ad52deb Mon Sep 17 00:00:00 2001 From: Chen Haonan Date: Tue, 19 Dec 2023 21:06:25 +0800 Subject: [PATCH 21/35] PM: hibernate: Use kmap_local_page() in copy_data_page() kmap_atomic() has been deprecated in favor of kmap_local_page(). kmap_atomic() disables page-faults and preemption (the latter only for !PREEMPT_RT kernels). The code between the mapping and un-mapping in this patch does not depend on the above-mentioned side effects. So simply replace kmap_atomic() with kmap_local_page(). Signed-off-by: Chen Haonan [ rjw: Subject edits ] Signed-off-by: Rafael J. Wysocki
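A standalone illustration of the replacement pattern (a hypothetical helper, not the snapshot code): kmap_local_page() creates a short-lived, CPU-local mapping without disabling page faults or preemption, so it is a drop-in for kmap_atomic() wherever those side effects are not relied upon:

/* Sketch: copy one highmem page to another via local mappings. */
static void copy_one_page(struct page *dst_page, struct page *src_page)
{
	void *src = kmap_local_page(src_page);
	void *dst = kmap_local_page(dst_page);

	copy_page(dst, src);

	/* Local mappings nest like a stack: unmap in reverse order. */
	kunmap_local(dst);
	kunmap_local(src);
}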
--- kernel/power/snapshot.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index e3e8f1c6e75f..5c96ff067c64 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1487,11 +1487,11 @@ static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) s_page = pfn_to_page(src_pfn); d_page = pfn_to_page(dst_pfn); if (PageHighMem(s_page)) { - src = kmap_atomic(s_page); - dst = kmap_atomic(d_page); + src = kmap_local_page(s_page); + dst = kmap_local_page(d_page); zeros_only = do_copy_page(dst, src); - kunmap_atomic(dst); - kunmap_atomic(src); + kunmap_local(dst); + kunmap_local(src); } else { if (PageHighMem(d_page)) { /* @@ -1499,9 +1499,9 @@ static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) * data modified by kmap_atomic() */ zeros_only = safe_copy_page(buffer, s_page); - dst = kmap_atomic(d_page); + dst = kmap_local_page(d_page); copy_page(dst, buffer); - kunmap_atomic(dst); + kunmap_local(dst); } else { zeros_only = safe_copy_page(page_address(d_page), s_page); } From 4bbf0b6a64455c95586caf130e374586caef9986 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 12 Dec 2023 22:00:43 +0800 Subject: [PATCH 22/35] Documentation: PM: Adjust freezing-of-tasks.rst to the freezer changes The core freezer logic has been modified by commit f5d39b020809 ("freezer,sched: Rewrite core freezer logic"), so adjust the documentation to reflect the new code. The main changes include: - Drop references to PF_FROZEN and PF_FREEZER_SKIP - Describe TASK_FROZEN, TASK_FREEZABLE and __TASK_FREEZABLE_UNSAFE - Replace system_freezing_cnt with freezer_active - Use a different example for the loop of a freezable kernel thread, since the old code is gone Signed-off-by: Kevin Hao [ rjw: Subject and changelog edits ] Signed-off-by: Rafael J. Wysocki --- Documentation/power/freezing-of-tasks.rst | 81 +++++++++++++---------- 1 file changed, 46 insertions(+), 35 deletions(-) diff --git a/Documentation/power/freezing-of-tasks.rst b/Documentation/power/freezing-of-tasks.rst index 53b6a56c4635..df9755bfbd94 100644 --- a/Documentation/power/freezing-of-tasks.rst +++ b/Documentation/power/freezing-of-tasks.rst @@ -14,27 +14,28 @@ architectures). II. How does it work? ===================== -There are three per-task flags used for that, PF_NOFREEZE, PF_FROZEN -and PF_FREEZER_SKIP (the last one is auxiliary). The tasks that have -PF_NOFREEZE unset (all user space processes and some kernel threads) are -regarded as 'freezable' and treated in a special way before the system enters a -suspend state as well as before a hibernation image is created (in what follows -we only consider hibernation, but the description also applies to suspend). +There is one per-task flag (PF_NOFREEZE) and three per-task states +(TASK_FROZEN, TASK_FREEZABLE and __TASK_FREEZABLE_UNSAFE) used for that. +The tasks that have PF_NOFREEZE unset (all user space tasks and some kernel +threads) are regarded as 'freezable' and treated in a special way before the +system enters a sleep state as well as before a hibernation image is created +(hibernation is directly covered by what follows, but the description applies +to system-wide suspend too). Namely, as the first step of the hibernation procedure the function freeze_processes() (defined in kernel/power/process.c) is called.
A system-wide -variable system_freezing_cnt (as opposed to a per-task flag) is used to indicate -whether the system is to undergo a freezing operation. And freeze_processes() -sets this variable. After this, it executes try_to_freeze_tasks() that sends a -fake signal to all user space processes, and wakes up all the kernel threads. -All freezable tasks must react to that by calling try_to_freeze(), which -results in a call to __refrigerator() (defined in kernel/freezer.c), which sets -the task's PF_FROZEN flag, changes its state to TASK_UNINTERRUPTIBLE and makes -it loop until PF_FROZEN is cleared for it. Then, we say that the task is -'frozen' and therefore the set of functions handling this mechanism is referred -to as 'the freezer' (these functions are defined in kernel/power/process.c, -kernel/freezer.c & include/linux/freezer.h). User space processes are generally -frozen before kernel threads. +static key freezer_active (as opposed to a per-task flag or state) is used to +indicate whether the system is to undergo a freezing operation. And +freeze_processes() sets this static key. After this, it executes +try_to_freeze_tasks() that sends a fake signal to all user space processes, and +wakes up all the kernel threads. All freezable tasks must react to that by +calling try_to_freeze(), which results in a call to __refrigerator() (defined +in kernel/freezer.c), which changes the task's state to TASK_FROZEN, and makes +it loop until it is woken by an explicit TASK_FROZEN wakeup. Then, that task +is regarded as 'frozen' and so the set of functions handling this mechanism is +referred to as 'the freezer' (these functions are defined in +kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h). User space +tasks are generally frozen before kernel threads. __refrigerator() must not be called directly. Instead, use the try_to_freeze() function (defined in include/linux/freezer.h), that checks @@ -43,31 +44,40 @@ if the task is to be frozen and makes the task enter __refrigerator(). For user space processes try_to_freeze() is called automatically from the signal-handling code, but the freezable kernel threads need to call it explicitly in suitable places or use the wait_event_freezable() or -wait_event_freezable_timeout() macros (defined in include/linux/freezer.h) -that combine interruptible sleep with checking if the task is to be frozen and -calling try_to_freeze(). The main loop of a freezable kernel thread may look +wait_event_freezable_timeout() macros (defined in include/linux/wait.h) +that put the task to sleep (TASK_INTERRUPTIBLE) or freeze it (TASK_FROZEN) if +freezer_active is set. The main loop of a freezable kernel thread may look like the following one:: set_freezable(); - do { - hub_events(); - wait_event_freezable(khubd_wait, - !list_empty(&hub_event_list) || - kthread_should_stop()); - } while (!kthread_should_stop() || !list_empty(&hub_event_list)); -(from drivers/usb/core/hub.c::hub_thread()). + while (true) { + struct task_struct *tsk = NULL; -If a freezable kernel thread fails to call try_to_freeze() after the freezer has -initiated a freezing operation, the freezing of tasks will fail and the entire -hibernation operation will be cancelled. 
For this reason, freezable kernel -threads must call try_to_freeze() somewhere or use one of the + wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL); + spin_lock_irq(&oom_reaper_lock); + if (oom_reaper_list != NULL) { + tsk = oom_reaper_list; + oom_reaper_list = tsk->oom_reaper_list; + } + spin_unlock_irq(&oom_reaper_lock); + + if (tsk) + oom_reap_task(tsk); + } + +(from mm/oom_kill.c::oom_reaper()). + +If a freezable kernel thread is not put to the frozen state after the freezer +has initiated a freezing operation, the freezing of tasks will fail and the +entire system-wide transition will be cancelled. For this reason, freezable +kernel threads must call try_to_freeze() somewhere or use one of the wait_event_freezable() and wait_event_freezable_timeout() macros. After the system memory state has been restored from a hibernation image and devices have been reinitialized, the function thaw_processes() is called in -order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that -have been frozen leave __refrigerator() and continue running. +order to wake up each frozen task. Then, the tasks that have been frozen leave +__refrigerator() and continue running. Rationale behind the functions dealing with freezing and thawing of tasks @@ -96,7 +106,8 @@ III. Which kernel threads are freezable? Kernel threads are not freezable by default. However, a kernel thread may clear PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE directly is not allowed). From this point it is regarded as freezable -and must call try_to_freeze() in a suitable place. +and must call try_to_freeze() or variants of wait_event_freezable() in a +suitable place. IV. Why do we do that? ====================== From e95013156ad88e6a1e1db6545881f49183e2ee0a Mon Sep 17 00:00:00 2001 From: Zhenguo Yao Date: Wed, 13 Dec 2023 18:28:08 +0800 Subject: [PATCH 23/35] cpufreq: intel_pstate: Add Emerald Rapids support in no-HWP mode Users may disable HWP in firmware, in which case intel_pstate will give up unless the CPU model is explicitly supported. See also the following past commits: - commit df51f287b5de ("cpufreq: intel_pstate: Add Sapphire Rapids support in no-HWP mode") - commit d8de7a44e11f ("cpufreq: intel_pstate: Add Skylake servers support") - commit 706c5328851d ("cpufreq: intel_pstate: Add Cometlake support in no-HWP mode") - commit fbdc21e9b038 ("cpufreq: intel_pstate: Add Icelake servers support in no-HWP mode") - commit 71bb5c82aaae ("cpufreq: intel_pstate: Add Tigerlake support in no-HWP mode") Signed-off-by: Zhenguo Yao Acked-by: Srinivas Pandruvada [ rjw: Changelog edits ] Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/intel_pstate.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index dd6d23e389f1..3c69040920b8 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2406,6 +2406,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { X86_MATCH(ICELAKE_X, core_funcs), X86_MATCH(TIGERLAKE, core_funcs), X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(EMERALDRAPIDS_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); From e0f4bd26e29bf6162cdc9dc6fb7522bde7b74d07 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Wed, 20 Dec 2023 08:35:35 +0800 Subject: [PATCH 24/35] PM: sleep: Remove obsolete comment from unlock_system_sleep() With the freezer changes introduced by commit f5d39b020809 ("freezer,sched: Rewrite core freezer logic"), the comment in unlock_system_sleep() has become obsolete, there is no need to retain it. Signed-off-by: Kevin Hao Signed-off-by: Rafael J. Wysocki --- kernel/power/main.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/kernel/power/main.c b/kernel/power/main.c index f6425ae3e8b0..b1ae9b677d03 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -60,22 +60,6 @@ EXPORT_SYMBOL_GPL(lock_system_sleep); void unlock_system_sleep(unsigned int flags) { - /* - * Don't use freezer_count() because we don't want the call to - * try_to_freeze() here. - * - * Reason: - * Fundamentally, we just don't need it, because freezing condition - * doesn't come into effect until we release the - * system_transition_mutex lock, since the freezer always works with - * system_transition_mutex held. - * - * More importantly, in the case of hibernation, - * unlock_system_sleep() gets called in snapshot_read() and - * snapshot_write() when the freezing condition is still in effect. - * Which means, if we use try_to_freeze() here, it would make them - * enter the refrigerator, thus causing hibernation to lockup. - */ if (!(flags & PF_NOFREEZE)) current->flags &= ~PF_NOFREEZE; mutex_unlock(&system_transition_mutex); From dadce3fbaf10250b35d540caff475ff93b259de0 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 19 Dec 2023 22:02:46 -0800 Subject: [PATCH 25/35] PM: hibernate: Repair excess function parameter description warning Function swsusp_close() does not have any parameters, so remove the description of parameter @exclusive to prevent this warning. swap.c:1573: warning: Excess function parameter 'exclusive' description in 'swsusp_close' Signed-off-by: Randy Dunlap [ rjw: Subject edits ] Signed-off-by: Rafael J. Wysocki --- kernel/power/swap.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 975e7195573b..6053ddddaf65 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -1566,7 +1566,6 @@ put: /** * swsusp_close - close resume device. - * @exclusive: Close the resume device which is exclusively opened. */ void swsusp_close(void) From ba367479c7ad0b870461024cd5ae7a1ea6e1e3db Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 19 Dec 2023 11:32:39 +0530 Subject: [PATCH 26/35] OPP: The level field is always of unsigned int type By mistake, dev_pm_opp_find_level_floor() used the level parameter as unsigned long instead of unsigned int. Fix it. 
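The fix below bridges the public unsigned int parameter to the internal helper, which takes an unsigned long key, through a widening temporary (pointers to the two types cannot simply be aliased on 64-bit builds). A standalone sketch of the pattern, with a stand-in lookup in place of _find_key_floor():

#include <stdio.h>

/* Stand-in for the internal helper: operates on an unsigned long key,
 * here simply rounding it down to a multiple of 100. */
static int find_floor(unsigned long *key)
{
	*key -= *key % 100;
	return 0;
}

/* Public-facing wrapper taking unsigned int: widen into a temporary,
 * call the helper, then narrow the result back. */
static int find_floor_uint(unsigned int *level)
{
	unsigned long temp = *level;
	int ret = find_floor(&temp);

	*level = temp;
	return ret;
}

int main(void)
{
	unsigned int level = 250;

	find_floor_uint(&level);
	printf("%u\n", level);	/* prints 200 */
	return 0;
}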
Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 9 +++++++-- include/linux/pm_opp.h | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index c022d548067d..49b429984bdb 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -842,9 +842,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); * use. */ struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, - unsigned long *level) + unsigned int *level) { - return _find_key_floor(dev, level, 0, true, _read_level, NULL); + unsigned long temp = *level; + struct dev_pm_opp *opp; + + opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL); + *level = temp; + return opp; } EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 81dff7facdc9..74768c47d790 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -163,7 +163,7 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, unsigned int *level); struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, - unsigned long *level); + unsigned int *level); struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index); @@ -330,7 +330,7 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, } static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, - unsigned long *level) + unsigned int *level) { return ERR_PTR(-EOPNOTSUPP); } From ab7a781fd6f889d8514817622afc3ae514c3caf1 Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Sat, 23 Dec 2023 02:34:21 +0000 Subject: [PATCH 27/35] OPP: Fix _set_required_opps when opp is NULL _set_required_opps can be called with opp NULL in _disable_opp_table(). commit e37440e7e2c2 ("OPP: Call dev_pm_opp_set_opp() for required OPPs") requires the opp pointer to be non-NULL to function. [ 81.253439] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000048 [ 81.438407] Hardware name: Qualcomm Technologies, Inc. 
Robotics RB5 (DT) [ 81.445296] Workqueue: pm pm_runtime_work [ 81.449446] pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 81.456609] pc : _set_required_opps+0x178/0x28c [ 81.461288] lr : _set_required_opps+0x178/0x28c [ 81.465962] sp : ffff80008078bb00 [ 81.469375] x29: ffff80008078bb00 x28: ffffd1cd71bfe308 x27: 0000000000000000 [ 81.476730] x26: ffffd1cd70ebc578 x25: ffffd1cd70a08710 x24: 00000000ffffffff [ 81.484083] x23: 00000000ffffffff x22: 0000000000000000 x21: ffff56ff892b3c48 [ 81.491435] x20: ffff56f1071c10 x19: 0000000000000000 x18: ffffffffffffffff [ 81.498788] x17: 2030207865646e69 x16: 2030303131207370 x15: 706f5f6465726975 [ 81.506141] x14: 7165725f7465735f x13: ffff5700f5c00000 x12: 00000000000008ac [ 81.513495] x11: 00000000000002e4 x10: ffff5700f6700000 x9 : ffff5700f5c00000 [ 81.520848] x8 : 00000000fffdffff x7 : ffff5700f6700000 x6 : 80000000fffe0000 [ 81.528200] x5 : ffff5700fef40d08 x4 : 0000000000000000 x3 : 0000000000000000 [ 81.535551] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff56ff81298f80 [ 81.542904] Call trace: [ 81.545437] _set_required_opps+0x178/0x28c [ 81.549754] _set_opp+0x3fc/0x5c0 [ 81.553181] dev_pm_opp_set_rate+0x90/0x26c [ 81.557498] core_power_v4+0x44/0x15c [venus_core] [ 81.562509] venus_runtime_suspend+0x40/0xd0 [venus_core] [ 81.568135] pm_generic_runtime_suspend+0x2c/0x44 [ 81.572983] __rpm_callback+0x48/0x1d8 [ 81.576852] rpm_callback+0x6c/0x78 [ 81.580453] rpm_suspend+0x10c/0x570 [ 81.584143] pm_runtime_work+0xc4/0xc8 [ 81.588011] process_one_work+0x138/0x244 [ 81.592153] worker_thread+0x320/0x438 [ 81.596021] kthread+0x110/0x114 [ 81.599355] ret_from_fork+0x10/0x20 [ 81.603052] Code: f10000ff fa5410e0 54fffbe1 97f05ae8 (f94026c5) [ 81.609317] ---[ end trace 0000000000000000 ]--- Fix it. Fixes: e37440e7e2c2 ("OPP: Call dev_pm_opp_set_opp() for required OPPs") Signed-off-by: Bryan O'Donoghue [ Viresh: Implemented the fix differently ] Signed-off-by: Viresh Kumar Reviewed-by: Bryan O'Donoghue Tested-by: Bryan O'Donoghue --- drivers/opp/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 49b429984bdb..a6e80f566e9b 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1066,6 +1066,7 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table, struct dev_pm_opp *opp, bool up) { struct device **devs = opp_table->required_devs; + struct dev_pm_opp *required_opp; int index, target, delta, ret; if (!devs) @@ -1088,7 +1089,9 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table, while (index != target) { if (devs[index]) { - ret = dev_pm_opp_set_opp(devs[index], opp->required_opps[index]); + required_opp = opp ? opp->required_opps[index] : NULL; + + ret = dev_pm_opp_set_opp(devs[index], required_opp); if (ret) return ret; } From c8f5caec3df84a02b937d6d9cda1f7ffa8dc443f Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 29 Dec 2023 18:08:18 +0100 Subject: [PATCH 28/35] cpuidle: haltpoll: Do not enable interrupts when entering idle The cpuidle drivers' ->enter() methods are supposed to be IRQ invariant: 5e26aa933911 ("cpuidle/poll: Ensure IRQs stay disabled after cpuidle_state::enter() calls") bb7b11258561 ("cpuidle: Move IRQ state validation") Do that in the haltpoll driver too. 
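Roughly, the contract those commits established, shown here as a simplified sketch of the caller side rather than the actual cpuidle core code: ->enter() is invoked with interrupts disabled and must return with them still disabled, and the core warns about and repairs any violation:

/* Sketch of the caller-side contract around a cpuidle ->enter() method. */
static void call_enter_sketch(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	WARN_ON_ONCE(!irqs_disabled());		/* entered with IRQs off */

	drv->states[index].enter(dev, drv, index);

	/* ->enter() must not have enabled interrupts behind our back. */
	if (WARN_ON_ONCE(!irqs_disabled()))
		local_irq_disable();
}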
Fixes: 5e26aa933911 ("cpuidle/poll: Ensure IRQs stay disabled after cpuidle_state::enter() calls") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218245 Reported-by: Tested-by: Signed-off-by: Borislav Petkov (AMD) [ rjw: Changelog edits ] Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/cpuidle-haltpoll.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c index e66df22f9695..d8515d5c0853 100644 --- a/drivers/cpuidle/cpuidle-haltpoll.c +++ b/drivers/cpuidle/cpuidle-haltpoll.c @@ -25,13 +25,12 @@ MODULE_PARM_DESC(force, "Load unconditionally"); static struct cpuidle_device __percpu *haltpoll_cpuidle_devices; static enum cpuhp_state haltpoll_hp_state; -static int default_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) +static __cpuidle int default_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { - if (current_clr_polling_and_test()) { - local_irq_enable(); + if (current_clr_polling_and_test()) return index; - } + arch_cpu_idle(); return index; } From 6aa09a5bccd8e224d917afdb4c278fc66aacde4d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 27 Dec 2023 21:37:02 +0100 Subject: [PATCH 29/35] async: Split async_schedule_node_domain() In preparation for subsequent changes, split async_schedule_node_domain() in two pieces so as to allow the bottom part of it to be called from a somewhat different code path. No functional impact. Signed-off-by: Rafael J. Wysocki Reviewed-by: Stanislaw Gruszka Tested-by: Youngmin Nam Reviewed-by: Ulf Hansson --- kernel/async.c | 56 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/kernel/async.c b/kernel/async.c index b2c4ba5686ee..cffe6b4cff9f 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work) wake_up(&async_done); } +static async_cookie_t __async_schedule_node_domain(async_func_t func, + void *data, int node, + struct async_domain *domain, + struct async_entry *entry) +{ + async_cookie_t newcookie; + unsigned long flags; + + INIT_LIST_HEAD(&entry->domain_list); + INIT_LIST_HEAD(&entry->global_list); + INIT_WORK(&entry->work, async_run_entry_fn); + entry->func = func; + entry->data = data; + entry->domain = domain; + + spin_lock_irqsave(&async_lock, flags); + + /* allocate cookie and queue */ + newcookie = entry->cookie = next_cookie++; + + list_add_tail(&entry->domain_list, &domain->pending); + if (domain->registered) + list_add_tail(&entry->global_list, &async_global_pending); + + atomic_inc(&entry_count); + spin_unlock_irqrestore(&async_lock, flags); + + /* schedule for execution */ + queue_work_node(node, system_unbound_wq, &entry->work); + + return newcookie; +} + /** * async_schedule_node_domain - NUMA specific version of async_schedule_domain * @func: function to execute asynchronously @@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data, func(data, newcookie); return newcookie; } - INIT_LIST_HEAD(&entry->domain_list); - INIT_LIST_HEAD(&entry->global_list); - INIT_WORK(&entry->work, async_run_entry_fn); - entry->func = func; - entry->data = data; - entry->domain = domain; - spin_lock_irqsave(&async_lock, flags); - - /* allocate cookie and queue */ - newcookie = entry->cookie = next_cookie++; - - list_add_tail(&entry->domain_list, &domain->pending); - if (domain->registered) - 
list_add_tail(&entry->global_list, &async_global_pending); - - atomic_inc(&entry_count); - spin_unlock_irqrestore(&async_lock, flags); - - /* schedule for execution */ - queue_work_node(node, system_unbound_wq, &entry->work); - - return newcookie; + return __async_schedule_node_domain(func, data, node, domain, entry); } EXPORT_SYMBOL_GPL(async_schedule_node_domain); From 7d4b5d7a37bdd63a5a3371b988744b060d5bb86f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 27 Dec 2023 21:38:23 +0100 Subject: [PATCH 30/35] async: Introduce async_schedule_dev_nocall() In preparation for subsequent changes, introduce a specialized variant of async_schedule_dev() that will not invoke the argument function synchronously when it cannot be scheduled for asynchronous execution. The new function, async_schedule_dev_nocall(), will be used for fixing possible deadlocks in the system-wide power management core code. Signed-off-by: Rafael J. Wysocki Reviewed-by: Stanislaw Gruszka for the series. Tested-by: Youngmin Nam Reviewed-by: Ulf Hansson --- include/linux/async.h | 2 ++ kernel/async.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/include/linux/async.h b/include/linux/async.h index cce4ad31e8fc..33c9ff4afb49 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev) return async_schedule_node(func, dev, dev_to_node(dev)); } +bool async_schedule_dev_nocall(async_func_t func, struct device *dev); + /** * async_schedule_dev_domain - A device specific version of async_schedule_domain * @func: function to execute asynchronously diff --git a/kernel/async.c b/kernel/async.c index cffe6b4cff9f..673bba6bdf3a 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -243,6 +243,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node) } EXPORT_SYMBOL_GPL(async_schedule_node); +/** + * async_schedule_dev_nocall - A simplified variant of async_schedule_dev() + * @func: function to execute asynchronously + * @dev: device argument to be passed to function + * + * @dev is used as both the argument for the function and to provide NUMA + * context for where to run the function. + * + * If the asynchronous execution of @func is scheduled successfully, return + * true. Otherwise, do nothing and return false, unlike async_schedule_dev() + * that will run the function synchronously then. + */ +bool async_schedule_dev_nocall(async_func_t func, struct device *dev) +{ + struct async_entry *entry; + + entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL); + + /* Give up if there is no memory or too much work. */ + if (!entry || atomic_read(&entry_count) > MAX_WORK) { + kfree(entry); + return false; + } + + __async_schedule_node_domain(func, dev, dev_to_node(dev), + &async_dfl_domain, entry); + return true; +} + /** * async_synchronize_full - synchronize all asynchronous function calls * From 3b82024c5ba93e7a0db2d0b9635ca6b28338efd7 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 28 Dec 2023 13:04:41 +0530 Subject: [PATCH 31/35] OPP: Move dev_pm_opp_icc_bw to internal opp.h It isn't used by any driver or API, privatize it. 
Signed-off-by: Viresh Kumar --- drivers/opp/opp.h | 12 ++++++++++++ include/linux/pm_opp.h | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 23dcb2fbf8c3..558c9ac6a6fa 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -50,6 +50,18 @@ struct opp_config_data { unsigned int flags; }; +/** + * struct dev_pm_opp_icc_bw - Interconnect bandwidth values + * @avg: Average bandwidth corresponding to this OPP (in icc units) + * @peak: Peak bandwidth corresponding to this OPP (in icc units) + * + * This structure stores the bandwidth values for a single interconnect path. + */ +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; +}; + /* * Internal data structure organization with the OPP layer library is as * follows: diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 74768c47d790..76dcb7f37bcd 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -45,18 +45,6 @@ struct dev_pm_opp_supply { unsigned long u_watt; }; -/** - * struct dev_pm_opp_icc_bw - Interconnect bandwidth values - * @avg: Average bandwidth corresponding to this OPP (in icc units) - * @peak: Peak bandwidth corresponding to this OPP (in icc units) - * - * This structure stores the bandwidth values for a single interconnect path. - */ -struct dev_pm_opp_icc_bw { - u32 avg; - u32 peak; -}; - typedef int (*config_regulators_t)(struct device *dev, struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp, struct regulator **regulators, unsigned int count); From 7839d0078e0d5e6cc2fa0b0dfbee71de74f1e557 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 27 Dec 2023 21:41:06 +0100 Subject: [PATCH 32/35] PM: sleep: Fix possible deadlocks in core system-wide PM code It is reported that in low-memory situations the system-wide resume core code deadlocks, because async_schedule_dev() executes its argument function synchronously if it cannot allocate memory (and not only in that case) and that function attempts to acquire a mutex that is already held. Executing the argument function synchronously from within dpm_async_fn() may also be problematic for ordering reasons (it may cause a consumer device's resume callback to be invoked before a requisite supplier device's one, for example). Address this by changing the code in question to use async_schedule_dev_nocall() for scheduling the asynchronous execution of device suspend and resume functions and to directly run them synchronously if async_schedule_dev_nocall() returns false. Link: https://lore.kernel.org/linux-pm/ZYvjiqX6EsL15moe@perf/ Reported-by: Youngmin Nam Signed-off-by: Rafael J. Wysocki Reviewed-by: Stanislaw Gruszka Tested-by: Youngmin Nam Reviewed-by: Ulf Hansson Cc: 5.7+ # 5.7+: 6aa09a5bccd8 async: Split async_schedule_node_domain() Cc: 5.7+ # 5.7+: 7d4b5d7a37bd async: Introduce async_schedule_dev_nocall() Cc: 5.7+ # 5.7+ --- drivers/base/power/main.c | 148 ++++++++++++++++++-------------------- 1 file changed, 68 insertions(+), 80 deletions(-) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index f85f3515c258..9c5a5f4dba5a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev) } /** - * device_resume_noirq - Execute a "noirq resume" callback for given device. + * __device_resume_noirq - Execute a "noirq resume" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. 
* @async: If true, the device is being resumed asynchronously. @@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) +static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -655,7 +655,13 @@ Skip: Out: complete_all(&dev->power.completion); TRACE_RESUME(error); - return error; + + if (error) { + suspend_stats.failed_resume_noirq++; + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? " async noirq" : " noirq", error); + } } static bool is_async(struct device *dev) @@ -668,11 +674,15 @@ static bool dpm_async_fn(struct device *dev, async_func_t func) { reinit_completion(&dev->power.completion); - if (is_async(dev)) { - get_device(dev); - async_schedule_dev(func, dev); + if (!is_async(dev)) + return false; + + get_device(dev); + + if (async_schedule_dev_nocall(func, dev)) return true; - } + + put_device(dev); return false; } @@ -680,15 +690,19 @@ static bool dpm_async_fn(struct device *dev, async_func_t func) static void async_resume_noirq(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - - error = device_resume_noirq(dev, pm_transition, true); - if (error) - pm_dev_err(dev, pm_transition, " async", error); + __device_resume_noirq(dev, pm_transition, true); put_device(dev); } +static void device_resume_noirq(struct device *dev) +{ + if (dpm_async_fn(dev, async_resume_noirq)) + return; + + __device_resume_noirq(dev, pm_transition, false); +} + static void dpm_noirq_resume_devices(pm_message_t state) { struct device *dev; @@ -698,14 +712,6 @@ static void dpm_noirq_resume_devices(pm_message_t state) mutex_lock(&dpm_list_mtx); pm_transition = state; - /* - * Advanced the async threads upfront, - * in case the starting of async threads is - * delayed by non-async resuming devices. - */ - list_for_each_entry(dev, &dpm_noirq_list, power.entry) - dpm_async_fn(dev, async_resume_noirq); - while (!list_empty(&dpm_noirq_list)) { dev = to_device(dpm_noirq_list.next); get_device(dev); @@ -713,17 +719,7 @@ static void dpm_noirq_resume_devices(pm_message_t state) mutex_unlock(&dpm_list_mtx); - if (!is_async(dev)) { - int error; - - error = device_resume_noirq(dev, state, false); - if (error) { - suspend_stats.failed_resume_noirq++; - dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " noirq", error); - } - } + device_resume_noirq(dev); put_device(dev); @@ -751,14 +747,14 @@ void dpm_resume_noirq(pm_message_t state) } /** - * device_resume_early - Execute an "early resume" callback for given device. + * __device_resume_early - Execute an "early resume" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being resumed asynchronously. * * Runtime PM is disabled for @dev while this function is being executed. 
*/ -static int device_resume_early(struct device *dev, pm_message_t state, bool async) +static void __device_resume_early(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -811,21 +807,31 @@ Out: pm_runtime_enable(dev); complete_all(&dev->power.completion); - return error; + + if (error) { + suspend_stats.failed_resume_early++; + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? " async early" : " early", error); + } } static void async_resume_early(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - - error = device_resume_early(dev, pm_transition, true); - if (error) - pm_dev_err(dev, pm_transition, " async", error); + __device_resume_early(dev, pm_transition, true); put_device(dev); } +static void device_resume_early(struct device *dev) +{ + if (dpm_async_fn(dev, async_resume_early)) + return; + + __device_resume_early(dev, pm_transition, false); +} + /** * dpm_resume_early - Execute "early resume" callbacks for all devices. * @state: PM transition of the system being carried out. @@ -839,14 +845,6 @@ void dpm_resume_early(pm_message_t state) mutex_lock(&dpm_list_mtx); pm_transition = state; - /* - * Advanced the async threads upfront, - * in case the starting of async threads is - * delayed by non-async resuming devices. - */ - list_for_each_entry(dev, &dpm_late_early_list, power.entry) - dpm_async_fn(dev, async_resume_early); - while (!list_empty(&dpm_late_early_list)) { dev = to_device(dpm_late_early_list.next); get_device(dev); @@ -854,17 +852,7 @@ void dpm_resume_early(pm_message_t state) mutex_unlock(&dpm_list_mtx); - if (!is_async(dev)) { - int error; - - error = device_resume_early(dev, state, false); - if (error) { - suspend_stats.failed_resume_early++; - dpm_save_failed_step(SUSPEND_RESUME_EARLY); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " early", error); - } - } + device_resume_early(dev); put_device(dev); @@ -888,12 +876,12 @@ void dpm_resume_start(pm_message_t state) EXPORT_SYMBOL_GPL(dpm_resume_start); /** - * device_resume - Execute "resume" callbacks for given device. + * __device_resume - Execute "resume" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. * @async: If true, the device is being resumed asynchronously. */ -static int device_resume(struct device *dev, pm_message_t state, bool async) +static void __device_resume(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; const char *info = NULL; @@ -975,20 +963,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) TRACE_RESUME(error); - return error; + if (error) { + suspend_stats.failed_resume++; + dpm_save_failed_step(SUSPEND_RESUME); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, async ? " async" : "", error); + } } static void async_resume(void *data, async_cookie_t cookie) { struct device *dev = data; - int error; - error = device_resume(dev, pm_transition, true); - if (error) - pm_dev_err(dev, pm_transition, " async", error); + __device_resume(dev, pm_transition, true); put_device(dev); } +static void device_resume(struct device *dev) +{ + if (dpm_async_fn(dev, async_resume)) + return; + + __device_resume(dev, pm_transition, false); +} + /** * dpm_resume - Execute "resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. 
@@ -1008,27 +1006,17 @@ void dpm_resume(pm_message_t state) pm_transition = state; async_error = 0; - list_for_each_entry(dev, &dpm_suspended_list, power.entry) - dpm_async_fn(dev, async_resume); - while (!list_empty(&dpm_suspended_list)) { dev = to_device(dpm_suspended_list.next); + get_device(dev); - if (!is_async(dev)) { - int error; - mutex_unlock(&dpm_list_mtx); + mutex_unlock(&dpm_list_mtx); - error = device_resume(dev, state, false); - if (error) { - suspend_stats.failed_resume++; - dpm_save_failed_step(SUSPEND_RESUME); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, "", error); - } + device_resume(dev); + + mutex_lock(&dpm_list_mtx); - mutex_lock(&dpm_list_mtx); - } if (!list_empty(&dev->power.entry)) list_move_tail(&dev->power.entry, &dpm_prepared_list); From 0b40dd3bcfc6f521e6ac0e297ecdcc391d5cc4bb Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 3 Jan 2024 14:26:18 +0530 Subject: [PATCH 33/35] OPP: Relocate dev_pm_opp_sync_regulators() Move this to a more relevant place in the file. No functional changes. Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 82 +++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index a6e80f566e9b..29f8160c3e38 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -3012,6 +3012,47 @@ put_table: } EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); +/** + * dev_pm_opp_sync_regulators() - Sync state of voltage regulators + * @dev: device for which we do this operation + * + * Sync voltage state of the OPP table regulators. + * + * Return: 0 on success or a negative error value. + */ +int dev_pm_opp_sync_regulators(struct device *dev) +{ + struct opp_table *opp_table; + struct regulator *reg; + int i, ret = 0; + + /* Device may not have OPP table */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return 0; + + /* Regulator may not be required for the device */ + if (unlikely(!opp_table->regulators)) + goto put_table; + + /* Nothing to sync if voltage wasn't changed */ + if (!opp_table->enabled) + goto put_table; + + for (i = 0; i < opp_table->regulator_count; i++) { + reg = opp_table->regulators[i]; + ret = regulator_sync_voltage(reg); + if (ret) + break; + } +put_table: + /* Drop reference taken by _find_opp_table() */ + dev_pm_opp_put_opp_table(opp_table); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); + /** * dev_pm_opp_enable() - Enable a specific OPP * @dev: device for which we do this operation @@ -3135,44 +3176,3 @@ void dev_pm_opp_remove_table(struct device *dev) dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); - -/** - * dev_pm_opp_sync_regulators() - Sync state of voltage regulators - * @dev: device for which we do this operation - * - * Sync voltage state of the OPP table regulators. - * - * Return: 0 on success or a negative error value. 
- */ -int dev_pm_opp_sync_regulators(struct device *dev) -{ - struct opp_table *opp_table; - struct regulator *reg; - int i, ret = 0; - - /* Device may not have OPP table */ - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) - return 0; - - /* Regulator may not be required for the device */ - if (unlikely(!opp_table->regulators)) - goto put_table; - - /* Nothing to sync if voltage wasn't changed */ - if (!opp_table->enabled) - goto put_table; - - for (i = 0; i < opp_table->regulator_count; i++) { - reg = opp_table->regulators[i]; - ret = regulator_sync_voltage(reg); - if (ret) - break; - } -put_table: - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); - - return ret; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); From 7269c250db1b89cda72ca419b7bd5e37997309d6 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Jan 2024 13:55:37 +0530 Subject: [PATCH 34/35] OPP: Pass rounded rate to _set_opp() The OPP core finds the eventual frequency to set with the help of clk_round_rate() and the same was earlier getting passed to _set_opp() and that's what would get configured. The commit 1efae8d2e777 ("OPP: Make dev_pm_opp_set_opp() independent of frequency") mistakenly changed that. Fix it. Fixes: 1efae8d2e777 ("OPP: Make dev_pm_opp_set_opp() independent of frequency") Cc: v5.18+ # v6.0+ Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 29f8160c3e38..5e6cfcbd2e87 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1352,12 +1352,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) * value of the frequency. In such a case, do not abort but * configure the hardware to the desired frequency forcefully. */ - forced = opp_table->rate_clk_single != target_freq; + forced = opp_table->rate_clk_single != freq; } - ret = _set_opp(dev, opp_table, opp, &target_freq, forced); + ret = _set_opp(dev, opp_table, opp, &freq, forced); - if (target_freq) + if (freq) dev_pm_opp_put(opp); put_opp_table: From dcfec12b67980cba139a6c3afba57ebd4936ebe8 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Jan 2024 15:39:52 +0530 Subject: [PATCH 35/35] OPP: Rename 'rate_clk_single' The field's name isn't clear enough. Rename it. Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 4 ++-- drivers/opp/opp.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 5e6cfcbd2e87..c4e0432ae42a 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -963,7 +963,7 @@ _opp_config_clk_single(struct device *dev, struct opp_table *opp_table, dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, ret); } else { - opp_table->rate_clk_single = freq; + opp_table->current_rate_single_clk = freq; } return ret; @@ -1352,7 +1352,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) * value of the frequency. In such a case, do not abort but * configure the hardware to the desired frequency forcefully. */ - forced = opp_table->rate_clk_single != freq; + forced = opp_table->current_rate_single_clk != freq; } ret = _set_opp(dev, opp_table, opp, &freq, forced); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 558c9ac6a6fa..cff1fabd1ae3 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -170,7 +170,7 @@ enum opp_table_access { * @clock_latency_ns_max: Max clock latency in nanoseconds. 
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. - * @rate_clk_single: Currently configured frequency for single clk. + * @current_rate_single_clk: Currently configured frequency for single clk. * @current_opp: Currently configured OPP for the table. * @suspend_opp: Pointer to OPP to be used during device suspend. * @required_opp_tables: List of device OPP tables that are required by OPPs in @@ -219,7 +219,7 @@ struct opp_table { unsigned int parsed_static_opps; enum opp_table_access shared_opp; - unsigned long rate_clk_single; + unsigned long current_rate_single_clk; struct dev_pm_opp *current_opp; struct dev_pm_opp *suspend_opp;