Merge back earlier cpufreq material for v5.1.

Rafael J. Wysocki 2019-02-24 21:18:05 +01:00
commit 17162a117c
36 changed files with 778 additions and 301 deletions

View File

@@ -1737,6 +1737,7 @@ F: arch/arm/configs/mvebu_*_defconfig
 F:	arch/arm/mach-mvebu/
 F:	arch/arm64/boot/dts/marvell/armada*
 F:	drivers/cpufreq/armada-37xx-cpufreq.c
+F:	drivers/cpufreq/armada-8k-cpufreq.c
 F:	drivers/cpufreq/mvebu-cpufreq.c
 F:	drivers/irqchip/irq-armada-370-xp.c
 F:	drivers/irqchip/irq-mvebu-*
@@ -3961,7 +3962,7 @@ M: Viresh Kumar <viresh.kumar@linaro.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
-T:	git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git (For ARM Updates)
 B:	https://bugzilla.kernel.org
 F:	Documentation/admin-guide/pm/cpufreq.rst
 F:	Documentation/admin-guide/pm/intel_pstate.rst
@@ -12604,11 +12605,11 @@ F: Documentation/media/v4l-drivers/qcom_camss.rst
 F:	drivers/media/platform/qcom/camss/
 
 QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
-M:	Ilia Lin <ilia.lin@gmail.com>
+M:	Ilia Lin <ilia.lin@kernel.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
 F:	drivers/cpufreq/qcom-cpufreq-kryo.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
 M:	Timur Tabi <timur@kernel.org>

View File

@@ -21,6 +21,7 @@
 #include <linux/mfd/da8xx-cfgchip.h>
 #include <linux/platform_data/clk-da8xx-cfgchip.h>
 #include <linux/platform_data/clk-davinci-pll.h>
+#include <linux/platform_data/davinci-cpufreq.h>
 #include <linux/platform_data/gpio-davinci.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
@@ -29,7 +30,6 @@
 #include <asm/mach/map.h>
 
 #include <mach/common.h>
-#include <mach/cpufreq.h>
 #include <mach/cputype.h>
 #include <mach/da8xx.h>
 #include <mach/irqs.h>

View File

@@ -1,26 +0,0 @@
-/*
- * TI DaVinci CPUFreq platform support.
- *
- * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef _MACH_DAVINCI_CPUFREQ_H
-#define _MACH_DAVINCI_CPUFREQ_H
-
-#include <linux/cpufreq.h>
-
-struct davinci_cpufreq_config {
-	struct cpufreq_frequency_table *freq_table;
-	int (*set_voltage) (unsigned int index);
-	int (*init) (void);
-};
-
-#endif

View File

@@ -1050,6 +1050,48 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 	return ret_val;
 }
 
+/**
+ * cppc_get_desired_perf - Get the value of desired performance register.
+ * @cpunum: CPU from which to get desired performance.
+ * @desired_perf: address of a variable to store the returned desired performance
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+	struct cpc_register_resource *desired_reg;
+	struct cppc_pcc_data *pcc_ss_data = NULL;
+
+	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+
+	if (CPC_IN_PCC(desired_reg)) {
+		int ret = 0;
+
+		if (pcc_ss_id < 0)
+			return -EIO;
+
+		pcc_ss_data = pcc_data[pcc_ss_id];
+
+		down_write(&pcc_ss_data->pcc_lock);
+
+		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+			cpc_read(cpunum, desired_reg, desired_perf);
+		else
+			ret = -EIO;
+
+		up_write(&pcc_ss_data->pcc_lock);
+
+		return ret;
+	}
+
+	cpc_read(cpunum, desired_reg, desired_perf);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
+
 /**
  * cppc_get_perf_caps - Get a CPUs performance capabilities.
  * @cpunum: CPU from which to get capabilities info.
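Note: cppc_get_desired_perf() gives callers a cheap way to read back the last performance level requested through CPPC without touching the feedback counters. A minimal sketch of a caller, assuming a hypothetical helper in CPPC-aware code (names are illustrative, not from this commit):

	#include <acpi/cppc_acpi.h>	/* cppc_get_desired_perf() */

	/* Hypothetical: log the firmware-visible desired performance of a CPU. */
	static int example_report_desired_perf(int cpu)
	{
		u64 desired_perf;
		int ret;

		ret = cppc_get_desired_perf(cpu, &desired_perf);
		if (ret)
			return ret;	/* -EIO if the PCC channel is unusable */

		pr_info("cpu%d: desired perf = %llu\n", cpu, desired_perf);
		return 0;
	}

The cppc_cpufreq change later in this commit uses the same call to implement the HiSilicon get_rate() workaround.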

View File

@@ -207,8 +207,6 @@ comment "CPU frequency scaling drivers"
 config CPUFREQ_DT
 	tristate "Generic DT based cpufreq driver"
 	depends on HAVE_CLK && OF
-	# if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
-	depends on !CPU_THERMAL || THERMAL
 	select CPUFREQ_DT_PLATDEV
 	select PM_OPP
 	help
@@ -327,7 +325,6 @@ endif
 config QORIQ_CPUFREQ
 	tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
 	depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
-	depends on !CPU_THERMAL || THERMAL
 	select CLK_QORIQ
 	help
 	  This adds the CPUFreq driver support for Freescale QorIQ SoCs

View File

@@ -25,12 +25,21 @@ config ARM_ARMADA_37XX_CPUFREQ
 	  This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
 	  The Armada 37xx PMU supports 4 frequency and VDD levels.
 
+config ARM_ARMADA_8K_CPUFREQ
+	tristate "Armada 8K CPUFreq driver"
+	depends on ARCH_MVEBU && CPUFREQ_DT
+	help
+	  This enables the CPUFreq driver support for Marvell
+	  Armada8k SOCs.
+	  Armada8K device has the AP806 which supports scaling
+	  to any full integer divider.
+
+	  If in doubt, say N.
+
 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
 	depends on ARM_CPU_TOPOLOGY && HAVE_CLK
-	# if CPU_THERMAL is on and THERMAL=m, ARM_BIT_LITTLE_CPUFREQ cannot be =y
-	depends on !CPU_THERMAL || THERMAL
 	select PM_OPP
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -38,7 +47,6 @@ config ARM_BIG_LITTLE_CPUFREQ
 config ARM_SCPI_CPUFREQ
 	tristate "SCPI based CPUfreq driver"
 	depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
-	depends on !CPU_THERMAL || THERMAL
 	help
 	  This adds the CPUfreq driver support for ARM platforms using SCPI
 	  protocol for CPU power management.
@@ -93,7 +101,6 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MEDIATEK_CPUFREQ
 	tristate "CPU Frequency scaling support for MediaTek SoCs"
 	depends on ARCH_MEDIATEK && REGULATOR
-	depends on !CPU_THERMAL || THERMAL
 	select PM_OPP
 	help
 	  This adds the CPUFreq driver support for MediaTek SoCs.
@@ -233,7 +240,6 @@ config ARM_SA1110_CPUFREQ
 config ARM_SCMI_CPUFREQ
 	tristate "SCMI based CPUfreq driver"
 	depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
-	depends on !CPU_THERMAL || THERMAL
 	select PM_OPP
 	help
 	  This adds the CPUfreq driver support for ARM platforms using SCMI

View File

@@ -50,6 +50,7 @@ obj-$(CONFIG_X86_SFI_CPUFREQ)	+= sfi-cpufreq.o
 obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)	+= arm_big_little.o
 obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ)	+= armada-37xx-cpufreq.o
+obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ)	+= armada-8k-cpufreq.o
 obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ)	+= brcmstb-avs-cpufreq.o
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ)		+= cppc_cpufreq.o
 obj-$(CONFIG_ARCH_DAVINCI)		+= davinci-cpufreq.o

View File

@@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void)
 {
 	int ret;
 
-	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
+	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
+		pr_debug("Boost capabilities not present in the processor\n");
 		return;
+	}
 
 	acpi_cpufreq_driver.set_boost = set_boost;
 	acpi_cpufreq_driver.boost_enabled = boost_state(0);

View File

@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPUFreq support for Armada 8K
+ *
+ * Copyright (C) 2018 Marvell
+ *
+ * Omri Itach <omrii@marvell.com>
+ * Gregory Clement <gregory.clement@bootlin.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+/*
+ * Setup the opps list with the divider for the max frequency, that
+ * will be filled at runtime.
+ */
+static const int opps_div[] __initconst = {1, 2, 3, 4};
+
+static struct platform_device *armada_8k_pdev;
+
+struct freq_table {
+	struct device *cpu_dev;
+	unsigned int freq[ARRAY_SIZE(opps_div)];
+};
+
+/* If the CPUs share the same clock, then they are in the same cluster. */
+static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
+					      struct cpumask *cpumask)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct device *cpu_dev;
+		struct clk *clk;
+
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_warn("Failed to get cpu%d device\n", cpu);
+			continue;
+		}
+
+		clk = clk_get(cpu_dev, 0);
+		if (IS_ERR(clk)) {
+			pr_warn("Cannot get clock for CPU %d\n", cpu);
+		} else {
+			if (clk_is_match(clk, cur_clk))
+				cpumask_set_cpu(cpu, cpumask);
+
+			clk_put(clk);
+		}
+	}
+}
+
+static int __init armada_8k_add_opp(struct clk *clk, struct device *cpu_dev,
+				    struct freq_table *freq_tables,
+				    int opps_index)
+{
+	unsigned int cur_frequency;
+	unsigned int freq;
+	int i, ret;
+
+	/* Get nominal (current) CPU frequency. */
+	cur_frequency = clk_get_rate(clk);
+	if (!cur_frequency) {
+		dev_err(cpu_dev, "Failed to get clock rate for this CPU\n");
+		return -EINVAL;
+	}
+
+	freq_tables[opps_index].cpu_dev = cpu_dev;
+
+	for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+		freq = cur_frequency / opps_div[i];
+
+		ret = dev_pm_opp_add(cpu_dev, freq, 0);
+		if (ret)
+			return ret;
+
+		freq_tables[opps_index].freq[i] = freq;
+	}
+
+	return 0;
+}
+
+static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
+{
+	int opps_index, nb_cpus = num_possible_cpus();
+
+	for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) {
+		int i;
+
+		/* If cpu_dev is NULL then we reached the end of the array */
+		if (!freq_tables[opps_index].cpu_dev)
+			break;
+
+		for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+			/*
+			 * A 0Hz frequency is not valid, this meant
+			 * that it was not yet initialized so there is
+			 * no more opp to free
+			 */
+			if (freq_tables[opps_index].freq[i] == 0)
+				break;
+
+			dev_pm_opp_remove(freq_tables[opps_index].cpu_dev,
+					  freq_tables[opps_index].freq[i]);
+		}
+	}
+
+	kfree(freq_tables);
+}
+
+static int __init armada_8k_cpufreq_init(void)
+{
+	int ret = 0, opps_index = 0, cpu, nb_cpus;
+	struct freq_table *freq_tables;
+	struct device_node *node;
+	struct cpumask cpus;
+
+	node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock");
+	if (!node || !of_device_is_available(node))
+		return -ENODEV;
+
+	nb_cpus = num_possible_cpus();
+	freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+	cpumask_copy(&cpus, cpu_possible_mask);
+
+	/*
+	 * For each CPU, this loop registers the operating points
+	 * supported (which are the nominal CPU frequency and full integer
+	 * divisions of it).
+	 */
+	for_each_cpu(cpu, &cpus) {
+		struct cpumask shared_cpus;
+		struct device *cpu_dev;
+		struct clk *clk;
+
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("Cannot get CPU %d\n", cpu);
+			continue;
+		}
+
+		clk = clk_get(cpu_dev, 0);
+		if (IS_ERR(clk)) {
+			pr_err("Cannot get clock for CPU %d\n", cpu);
+			ret = PTR_ERR(clk);
+			goto remove_opp;
+		}
+
+		ret = armada_8k_add_opp(clk, cpu_dev, freq_tables, opps_index);
+		if (ret) {
+			clk_put(clk);
+			goto remove_opp;
+		}
+
+		opps_index++;
+		cpumask_clear(&shared_cpus);
+		armada_8k_get_sharing_cpus(clk, &shared_cpus);
+		dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus);
+		cpumask_andnot(&cpus, &cpus, &shared_cpus);
+		clk_put(clk);
+	}
+
+	armada_8k_pdev = platform_device_register_simple("cpufreq-dt", -1,
+							 NULL, 0);
+	ret = PTR_ERR_OR_ZERO(armada_8k_pdev);
+	if (ret)
+		goto remove_opp;
+
+	platform_set_drvdata(armada_8k_pdev, freq_tables);
+
+	return 0;
+
+remove_opp:
+	armada_8k_cpufreq_free_table(freq_tables);
+	return ret;
+}
+module_init(armada_8k_cpufreq_init);
+
+static void __exit armada_8k_cpufreq_exit(void)
+{
+	struct freq_table *freq_tables = platform_get_drvdata(armada_8k_pdev);
+
+	platform_device_unregister(armada_8k_pdev);
+	armada_8k_cpufreq_free_table(freq_tables);
+}
+module_exit(armada_8k_cpufreq_exit);
+
+MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>");
+MODULE_DESCRIPTION("Armada 8K cpufreq driver");
+MODULE_LICENSE("GPL");

View File

@@ -42,6 +42,66 @@
  */
 static struct cppc_cpudata **all_cpu_data;
 
+struct cppc_workaround_oem_info {
+	char oem_id[ACPI_OEM_ID_SIZE +1];
+	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+	u32 oem_revision;
+};
+
+static bool apply_hisi_workaround;
+
+static struct cppc_workaround_oem_info wa_info[] = {
+	{
+		.oem_id = "HISI ",
+		.oem_table_id = "HIP07 ",
+		.oem_revision = 0,
+	}, {
+		.oem_id = "HISI ",
+		.oem_table_id = "HIP08 ",
+		.oem_revision = 0,
+	}
+};
+
+static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
+					     unsigned int perf);
+
+/*
+ * HISI platform does not support delivered performance counter and
+ * reference performance counter. It can calculate the performance using the
+ * platform specific mechanism. We reuse the desired performance register to
+ * store the real performance calculated by the platform.
+ */
+static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
+{
+	struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
+	u64 desired_perf;
+	int ret;
+
+	ret = cppc_get_desired_perf(cpunum, &desired_perf);
+	if (ret < 0)
+		return -EIO;
+
+	return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
+}
+
+static void cppc_check_hisi_workaround(void)
+{
+	struct acpi_table_header *tbl;
+	acpi_status status = AE_OK;
+	int i;
+
+	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
+	if (ACPI_FAILURE(status) || !tbl)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+		    wa_info[i].oem_revision == tbl->oem_revision)
+			apply_hisi_workaround = true;
+	}
+}
+
 /* Callback function used to retrieve the max frequency from DMI */
 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
 {
@@ -334,6 +394,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
 	struct cppc_cpudata *cpu = all_cpu_data[cpunum];
 	int ret;
 
+	if (apply_hisi_workaround)
+		return hisi_cppc_cpufreq_get_rate(cpunum);
+
 	ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
 	if (ret)
 		return ret;
@@ -386,6 +449,8 @@ static int __init cppc_cpufreq_init(void)
 		goto out;
 	}
 
+	cppc_check_hisi_workaround();
+
 	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
 	if (ret)
 		goto out;

View File

@@ -13,7 +13,6 @@
 #include <linux/clk.h>
 #include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/err.h>
@@ -30,7 +29,6 @@
 struct private_data {
 	struct opp_table *opp_table;
 	struct device *cpu_dev;
-	struct thermal_cooling_device *cdev;
 	const char *reg_name;
 	bool have_static_opps;
 };
@@ -297,11 +295,25 @@ out_put_clk:
 	return ret;
 }
 
+static int cpufreq_online(struct cpufreq_policy *policy)
+{
+	/* We did light-weight tear down earlier, nothing to do here */
+	return 0;
+}
+
+static int cpufreq_offline(struct cpufreq_policy *policy)
+{
+	/*
+	 * Preserve policy->driver_data and don't free resources on light-weight
+	 * tear down.
+	 */
+	return 0;
+}
+
 static int cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct private_data *priv = policy->driver_data;
 
-	cpufreq_cooling_unregister(priv->cdev);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	if (priv->have_static_opps)
 		dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
@@ -314,21 +326,16 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct private_data *priv = policy->driver_data;
-
-	priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static struct cpufreq_driver dt_cpufreq_driver = {
-	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = set_target,
 	.get = cpufreq_generic_get,
 	.init = cpufreq_init,
 	.exit = cpufreq_exit,
-	.ready = cpufreq_ready,
+	.online = cpufreq_online,
+	.offline = cpufreq_offline,
 	.name = "cpufreq-dt",
 	.attr = cpufreq_dt_attr,
 	.suspend = cpufreq_generic_suspend,
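Note: the conversion above is the driver-side pattern the rest of this series follows as well. A minimal, illustrative sketch (the "foo" names are made up, not from the patch): setting CPUFREQ_IS_COOLING_DEV makes the core register and unregister the cooling device, and a trivial ->online()/->offline() pair turns CPU hot-unplug into a light-weight teardown that preserves policy->driver_data:

	static int foo_cpufreq_online(struct cpufreq_policy *policy)
	{
		/* Resources were preserved by ->offline(), nothing to redo. */
		return 0;
	}

	static int foo_cpufreq_offline(struct cpufreq_policy *policy)
	{
		/* Keep driver data; full cleanup still happens in ->exit(). */
		return 0;
	}

	static struct cpufreq_driver foo_cpufreq_driver = {
		.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
				  CPUFREQ_IS_COOLING_DEV,
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= foo_set_target,	/* hypothetical */
		.get		= cpufreq_generic_get,
		.init		= foo_cpufreq_init,	/* hypothetical */
		.exit		= foo_cpufreq_exit,	/* hypothetical */
		.online		= foo_cpufreq_online,
		.offline	= foo_cpufreq_offline,
		.name		= "foo-cpufreq",
	};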

View File

@@ -19,6 +19,7 @@
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/init.h>
@@ -545,13 +546,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
 static ssize_t show_boost(struct kobject *kobj,
-			  struct attribute *attr, char *buf)
+			  struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 }
 
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
 			   const char *buf, size_t count)
 {
 	int ret, enable;
@@ -1200,28 +1201,39 @@ static int cpufreq_online(unsigned int cpu)
 		return -ENOMEM;
 	}
 
-	cpumask_copy(policy->cpus, cpumask_of(cpu));
+	if (!new_policy && cpufreq_driver->online) {
+		ret = cpufreq_driver->online(policy);
+		if (ret) {
+			pr_debug("%s: %d: initialization failed\n", __func__,
+				 __LINE__);
+			goto out_exit_policy;
+		}
 
-	/* call driver. From then on the cpufreq must be able
-	 * to accept all calls to ->verify and ->setpolicy for this CPU
-	 */
-	ret = cpufreq_driver->init(policy);
-	if (ret) {
-		pr_debug("initialization failed\n");
-		goto out_free_policy;
-	}
+		/* Recover policy->cpus using related_cpus */
+		cpumask_copy(policy->cpus, policy->related_cpus);
+	} else {
+		cpumask_copy(policy->cpus, cpumask_of(cpu));
 
-	ret = cpufreq_table_validate_and_sort(policy);
-	if (ret)
-		goto out_exit_policy;
+		/*
+		 * Call driver. From then on the cpufreq must be able
+		 * to accept all calls to ->verify and ->setpolicy for this CPU.
+		 */
+		ret = cpufreq_driver->init(policy);
+		if (ret) {
+			pr_debug("%s: %d: initialization failed\n", __func__,
+				 __LINE__);
+			goto out_free_policy;
+		}
 
-	down_write(&policy->rwsem);
+		ret = cpufreq_table_validate_and_sort(policy);
+		if (ret)
+			goto out_exit_policy;
 
-	if (new_policy) {
 		/* related_cpus should at least include policy->cpus. */
 		cpumask_copy(policy->related_cpus, policy->cpus);
 	}
 
+	down_write(&policy->rwsem);
 	/*
 	 * affected cpus must always be the one, which are online. We aren't
 	 * managing offline cpus here.
@@ -1305,8 +1317,6 @@ static int cpufreq_online(unsigned int cpu)
 		if (ret) {
 			pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
 			       __func__, cpu, ret);
-			/* cpufreq_policy_free() will notify based on this */
-			new_policy = false;
 			goto out_destroy_policy;
 		}
@@ -1318,6 +1328,10 @@ static int cpufreq_online(unsigned int cpu)
 	if (cpufreq_driver->ready)
 		cpufreq_driver->ready(policy);
 
+	if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
+	    cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV)
+		policy->cdev = of_cpufreq_cooling_register(policy);
+
 	pr_debug("initialization complete\n");
 
 	return 0;
@@ -1405,6 +1419,12 @@ static int cpufreq_offline(unsigned int cpu)
 		goto unlock;
 	}
 
+	if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
+	    cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) {
+		cpufreq_cooling_unregister(policy->cdev);
+		policy->cdev = NULL;
+	}
+
 	if (cpufreq_driver->stop_cpu)
 		cpufreq_driver->stop_cpu(policy);
@@ -1412,11 +1432,12 @@ static int cpufreq_offline(unsigned int cpu)
 	cpufreq_exit_governor(policy);
 
 	/*
-	 * Perform the ->exit() even during light-weight tear-down,
-	 * since this is a core component, and is essential for the
-	 * subsequent light-weight ->init() to succeed.
+	 * Perform the ->offline() during light-weight tear-down, as
+	 * that allows fast recovery when the CPU comes back.
 	 */
-	if (cpufreq_driver->exit) {
+	if (cpufreq_driver->offline) {
+		cpufreq_driver->offline(policy);
+	} else if (cpufreq_driver->exit) {
 		cpufreq_driver->exit(policy);
 		policy->freq_table = NULL;
 	}
@@ -1445,8 +1466,13 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	cpumask_clear_cpu(cpu, policy->real_cpus);
 	remove_cpu_dev_symlink(policy, dev);
 
-	if (cpumask_empty(policy->real_cpus))
+	if (cpumask_empty(policy->real_cpus)) {
+		/* We did light-weight exit earlier, do full tear down now */
+		if (cpufreq_driver->offline)
+			cpufreq_driver->exit(policy);
+
 		cpufreq_policy_free(policy);
+	}
 }
 
 /**
@@ -2192,12 +2218,25 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_get_policy);
 
-/*
- * policy : current policy.
- * new_policy: policy to be set.
+/**
+ * cpufreq_set_policy - Modify cpufreq policy parameters.
+ * @policy: Policy object to modify.
+ * @new_policy: New policy data.
+ *
+ * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
+ * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
+ * the driver's ->verify() callback again and run the notifiers for it again
+ * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters
+ * of @new_policy to @policy and either invoke the driver's ->setpolicy()
+ * callback (if present) or carry out a governor update for @policy. That is,
+ * run the current governor's ->limits() callback (if the governor field in
+ * @new_policy points to the same object as the one in @policy) or replace the
+ * governor for @policy with the new one stored in @new_policy.
+ *
+ * The cpuinfo part of @policy is not updated by this function.
  */
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
 			      struct cpufreq_policy *new_policy)
 {
 	struct cpufreq_governor *old_gov;
 	int ret;
@@ -2247,11 +2286,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (cpufreq_driver->setpolicy) {
 		policy->policy = new_policy->policy;
 		pr_debug("setting range\n");
-		return cpufreq_driver->setpolicy(new_policy);
+		return cpufreq_driver->setpolicy(policy);
 	}
 
 	if (new_policy->governor == policy->governor) {
-		pr_debug("cpufreq: governor limits update\n");
+		pr_debug("governor limits update\n");
 		cpufreq_governor_limits(policy);
 		return 0;
 	}
@@ -2272,7 +2311,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (!ret) {
 		ret = cpufreq_start_governor(policy);
 		if (!ret) {
-			pr_debug("cpufreq: governor change\n");
+			pr_debug("governor change\n");
 			sched_cpufreq_governor_change(policy, old_gov);
 			return 0;
 		}
@@ -2293,11 +2332,14 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 }
 
 /**
- * cpufreq_update_policy - re-evaluate an existing cpufreq policy
- * @cpu: CPU which shall be re-evaluated
+ * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
+ * @cpu: CPU to re-evaluate the policy for.
  *
- * Useful for policy notifiers which have different necessities
- * at different times.
+ * Update the current frequency for the cpufreq policy of @cpu and use
+ * cpufreq_set_policy() to re-apply the min and max limits saved in the
+ * user_policy sub-structure of that policy, which triggers the evaluation
+ * of policy notifiers and the cpufreq driver's ->verify() callback for the
+ * policy in question, among other things.
  */
 void cpufreq_update_policy(unsigned int cpu)
 {
@@ -2312,23 +2354,18 @@ void cpufreq_update_policy(unsigned int cpu)
 	if (policy_is_inactive(policy))
 		goto unlock;
 
-	pr_debug("updating policy for CPU %u\n", cpu);
-	memcpy(&new_policy, policy, sizeof(*policy));
-	new_policy.min = policy->user_policy.min;
-	new_policy.max = policy->user_policy.max;
-
 	/*
 	 * BIOS might change freq behind our back
 	 * -> ask driver for current freq and notify governors about a change
 	 */
-	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
-		if (cpufreq_suspended)
-			goto unlock;
+	if (cpufreq_driver->get && !cpufreq_driver->setpolicy &&
+	    (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy))))
+		goto unlock;
 
-		new_policy.cur = cpufreq_update_current_freq(policy);
-		if (WARN_ON(!new_policy.cur))
-			goto unlock;
-	}
+	pr_debug("updating policy for CPU %u\n", cpu);
+	memcpy(&new_policy, policy, sizeof(*policy));
+	new_policy.min = policy->user_policy.min;
+	new_policy.max = policy->user_policy.max;
 
 	cpufreq_set_policy(policy, &new_policy);
@@ -2479,7 +2516,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	     driver_data->target) ||
 	    (driver_data->setpolicy && (driver_data->target_index ||
 		    driver_data->target)) ||
-	    (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
+	    (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
+	    (!driver_data->online != !driver_data->offline))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);

View File

@@ -31,26 +31,27 @@ static void cpufreq_stats_update(struct cpufreq_stats *stats)
 {
 	unsigned long long cur_time = get_jiffies_64();
 
-	spin_lock(&cpufreq_stats_lock);
 	stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
 	stats->last_time = cur_time;
-	spin_unlock(&cpufreq_stats_lock);
 }
 
 static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
 {
 	unsigned int count = stats->max_state;
 
+	spin_lock(&cpufreq_stats_lock);
 	memset(stats->time_in_state, 0, count * sizeof(u64));
 	memset(stats->trans_table, 0, count * count * sizeof(int));
 	stats->last_time = get_jiffies_64();
 	stats->total_trans = 0;
+	spin_unlock(&cpufreq_stats_lock);
 }
 
 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
 {
 	return sprintf(buf, "%d\n", policy->stats->total_trans);
 }
+cpufreq_freq_attr_ro(total_trans);
 
 static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 {
@@ -61,7 +62,10 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	if (policy->fast_switch_enabled)
 		return 0;
 
+	spin_lock(&cpufreq_stats_lock);
 	cpufreq_stats_update(stats);
+	spin_unlock(&cpufreq_stats_lock);
+
 	for (i = 0; i < stats->state_num; i++) {
 		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
 			(unsigned long long)
@@ -69,6 +73,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	}
 	return len;
 }
+cpufreq_freq_attr_ro(time_in_state);
 
 static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
 			   size_t count)
@@ -77,6 +82,7 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
 	cpufreq_stats_clear_table(policy->stats);
 	return count;
 }
+cpufreq_freq_attr_wo(reset);
 
 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 {
@@ -126,10 +132,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 }
 cpufreq_freq_attr_ro(trans_table);
 
-cpufreq_freq_attr_ro(total_trans);
-cpufreq_freq_attr_ro(time_in_state);
-cpufreq_freq_attr_wo(reset);
-
 static struct attribute *default_attrs[] = {
 	&total_trans.attr,
 	&time_in_state.attr,
@@ -240,9 +242,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
 	if (old_index == -1 || new_index == -1 || old_index == new_index)
 		return;
 
+	spin_lock(&cpufreq_stats_lock);
 	cpufreq_stats_update(stats);
 	stats->last_index = new_index;
 	stats->trans_table[old_index * stats->max_state + new_index]++;
 	stats->total_trans++;
+	spin_unlock(&cpufreq_stats_lock);
 }

View File

@@ -23,13 +23,10 @@
 #include <linux/init.h>
 #include <linux/err.h>
 #include <linux/clk.h>
+#include <linux/platform_data/davinci-cpufreq.h>
 #include <linux/platform_device.h>
 #include <linux/export.h>
 
-#include <mach/hardware.h>
-#include <mach/cpufreq.h>
-#include <mach/common.h>
-
 struct davinci_cpufreq {
 	struct device *dev;
 	struct clk *armclk;

View File

@@ -323,9 +323,8 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 		states = 2;
 
 	/* Allocate private data and frequency table for current cpu */
-	centaur = kzalloc(sizeof(*centaur)
-		    + (states + 1) * sizeof(struct cpufreq_frequency_table),
-		    GFP_KERNEL);
+	centaur = kzalloc(struct_size(centaur, freq_table, states + 1),
+			  GFP_KERNEL);
 	if (!centaur)
 		return -ENOMEM;
 	eps_cpu[0] = centaur;
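Note: struct_size() (from <linux/overflow.h>) computes the size of a structure plus n trailing elements of its flexible array member in one overflow-safe expression, which is why the open-coded addition and multiplication go away. A hedged sketch of the shape this relies on (illustrative struct, not the driver's real definition):

	#include <linux/overflow.h>	/* struct_size() */
	#include <linux/slab.h>

	struct example_cpu_data {
		unsigned int n_states;
		struct cpufreq_frequency_table freq_table[];	/* flexible array member */
	};

	static struct example_cpu_data *example_alloc(unsigned int n)
	{
		struct example_cpu_data *p;

		/* sizeof(*p) + (n + 1) * sizeof(p->freq_table[0]), saturating on overflow */
		p = kzalloc(struct_size(p, freq_table, n + 1), GFP_KERNEL);
		return p;
	}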

View File

@@ -9,7 +9,6 @@
 #include <linux/clk.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/nvmem-consumer.h>
@@ -52,7 +51,6 @@ static struct clk_bulk_data clks[] = {
 };
 
 static struct device *cpu_dev;
-static struct thermal_cooling_device *cdev;
 static bool free_opp;
 static struct cpufreq_frequency_table *freq_table;
 static unsigned int max_freq;
@@ -193,16 +191,6 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	return 0;
 }
 
-static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	cdev = of_cpufreq_cooling_register(policy);
-
-	if (!cdev)
-		dev_err(cpu_dev,
-			"running cpufreq without cooling device: %ld\n",
-			PTR_ERR(cdev));
-}
-
 static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 {
 	int ret;
@@ -214,22 +202,14 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 	return ret;
 }
 
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	cpufreq_cooling_unregister(cdev);
-
-	return 0;
-}
-
 static struct cpufreq_driver imx6q_cpufreq_driver = {
-	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = imx6q_set_target,
 	.get = cpufreq_generic_get,
 	.init = imx6q_cpufreq_init,
-	.exit = imx6q_cpufreq_exit,
 	.name = "imx6q-cpufreq",
-	.ready = imx6q_cpufreq_ready,
 	.attr = cpufreq_generic_attr,
 	.suspend = cpufreq_generic_suspend,
 };

View File

@@ -50,6 +50,8 @@
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
+#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
+
 #define EXT_BITS 6
 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
@@ -895,7 +897,7 @@ static void intel_pstate_update_policies(void)
 /************************** sysfs begin ************************/
 #define show_one(file_name, object)					\
 	static ssize_t show_##file_name					\
-	(struct kobject *kobj, struct attribute *attr, char *buf)	\
+	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
 	{								\
 		return sprintf(buf, "%u\n", global.object);		\
 	}
@@ -904,7 +906,7 @@ static ssize_t intel_pstate_show_status(char *buf);
 static int intel_pstate_update_status(const char *buf, size_t size);
 
 static ssize_t show_status(struct kobject *kobj,
-			   struct attribute *attr, char *buf)
+			   struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -915,7 +917,7 @@ static ssize_t show_status(struct kobject *kobj,
 	return ret;
 }
 
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
 			    const char *buf, size_t count)
 {
 	char *p = memchr(buf, '\n', count);
@@ -929,7 +931,7 @@ static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
 }
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
-			      struct attribute *attr, char *buf)
+			      struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total, no_turbo, turbo_pct;
@@ -955,7 +957,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
 }
 
 static ssize_t show_num_pstates(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	struct cpudata *cpu;
 	int total;
@@ -976,7 +978,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
 }
 
 static ssize_t show_no_turbo(struct kobject *kobj,
-			     struct attribute *attr, char *buf)
+			     struct kobj_attribute *attr, char *buf)
 {
 	ssize_t ret;
 
@@ -998,7 +1000,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 	return ret;
 }
 
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 			      const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1045,7 +1047,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 	return count;
 }
 
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1075,7 +1077,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
 	return count;
 }
 
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 				  const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1107,12 +1109,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
 }
 
 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
-				struct attribute *attr, char *buf)
+				struct kobj_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", hwp_boost);
 }
 
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+				       struct kobj_attribute *b,
 				       const char *buf, size_t count)
 {
 	unsigned int input;
@@ -1444,12 +1447,6 @@ static int knl_get_turbo_pstate(void)
 	return ret;
 }
 
-static int intel_pstate_get_base_pstate(struct cpudata *cpu)
-{
-	return global.no_turbo || global.turbo_disabled ?
-			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
-}
-
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1470,11 +1467,9 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 
 static void intel_pstate_max_within_limits(struct cpudata *cpu)
 {
-	int pstate;
+	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 
 	update_turbo_state();
-	pstate = intel_pstate_get_base_pstate(cpu);
-	pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 	intel_pstate_set_pstate(cpu, pstate);
 }
 
@@ -1678,17 +1673,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
 static inline int32_t get_target_pstate(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
-	int32_t busy_frac, boost;
+	int32_t busy_frac;
 	int target, avg_pstate;
 
 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
 			   sample->tsc);
 
-	boost = cpu->iowait_boost;
-	cpu->iowait_boost >>= 1;
-
-	if (busy_frac < boost)
-		busy_frac = boost;
+	if (busy_frac < cpu->iowait_boost)
+		busy_frac = cpu->iowait_boost;
 
 	sample->busy_scaled = busy_frac * 100;
 
@@ -1715,11 +1707,9 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
 
 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 {
-	int max_pstate = intel_pstate_get_base_pstate(cpu);
-	int min_pstate;
-
-	min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
-	max_pstate = max(min_pstate, cpu->max_perf_ratio);
+	int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+	int max_pstate = max(min_pstate, cpu->max_perf_ratio);
+
 	return clamp_t(int, pstate, min_pstate, max_pstate);
 }
 
@@ -1767,29 +1757,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 	if (smp_processor_id() != cpu->cpu)
 		return;
 
+	delta_ns = time - cpu->last_update;
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		cpu->iowait_boost = int_tofp(1);
-		cpu->last_update = time;
-		/*
-		 * The last time the busy was 100% so P-state was max anyway
-		 * so avoid overhead of computation.
-		 */
-		if (fp_toint(cpu->sample.busy_scaled) == 100)
-			return;
-
-		goto set_pstate;
+		/* Start over if the CPU may have been idle. */
+		if (delta_ns > TICK_NSEC) {
+			cpu->iowait_boost = ONE_EIGHTH_FP;
+		} else if (cpu->iowait_boost) {
+			cpu->iowait_boost <<= 1;
+			if (cpu->iowait_boost > int_tofp(1))
+				cpu->iowait_boost = int_tofp(1);
+		} else {
+			cpu->iowait_boost = ONE_EIGHTH_FP;
+		}
 	} else if (cpu->iowait_boost) {
 		/* Clear iowait_boost if the CPU may have been idle. */
-		delta_ns = time - cpu->last_update;
 		if (delta_ns > TICK_NSEC)
 			cpu->iowait_boost = 0;
+		else
+			cpu->iowait_boost >>= 1;
 	}
 	cpu->last_update = time;
 	delta_ns = time - cpu->sample.time;
 	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
 		return;
 
-set_pstate:
 	if (intel_pstate_sample(cpu, time))
 		intel_pstate_adjust_pstate(cpu);
 }
@@ -1976,7 +1967,8 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 	if (hwp_active) {
 		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
 	} else {
-		max_state = intel_pstate_get_base_pstate(cpu);
+		max_state = global.no_turbo || global.turbo_disabled ?
+			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 		turbo_max = cpu->pstate.turbo_pstate;
 	}
 
@@ -2475,6 +2467,7 @@ static bool __init intel_pstate_no_acpi_pss(void)
 		kfree(pss);
 	}
 
+	pr_debug("ACPI _PSS not found\n");
 	return true;
 }
 
@@ -2485,9 +2478,14 @@ static bool __init intel_pstate_no_acpi_pcch(void)
 
 	status = acpi_get_handle(NULL, "\\_SB", &handle);
 	if (ACPI_FAILURE(status))
-		return true;
+		goto not_found;
+
+	if (acpi_has_method(handle, "PCCH"))
+		return false;
 
-	return !acpi_has_method(handle, "PCCH");
+not_found:
+	pr_debug("ACPI PCCH not found\n");
+	return true;
 }
 
 static bool __init intel_pstate_has_acpi_ppc(void)
@@ -2502,6 +2500,7 @@ static bool __init intel_pstate_has_acpi_ppc(void)
 		if (acpi_has_method(pr->handle, "_PPC"))
 			return true;
 	}
+	pr_debug("ACPI _PPC not found\n");
 	return false;
 }
 
@@ -2539,8 +2538,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
 	if (id) {
 		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
-		if ( misc_pwr & (1 << 8))
+		if (misc_pwr & (1 << 8)) {
+			pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
 			return true;
+		}
 	}
 
 	idx = acpi_match_platform_list(plat_info);
@@ -2606,22 +2607,28 @@ static int __init intel_pstate_init(void)
 		}
 	} else {
 		id = x86_match_cpu(intel_pstate_cpu_ids);
-		if (!id)
+		if (!id) {
+			pr_info("CPU ID not supported\n");
 			return -ENODEV;
+		}
 
 		copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
 	}
 
-	if (intel_pstate_msrs_not_valid())
+	if (intel_pstate_msrs_not_valid()) {
+		pr_info("Invalid MSRs\n");
 		return -ENODEV;
+	}
 
 hwp_cpu_matched:
 	/*
 	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
 	 */
-	if (intel_pstate_platform_pwr_mgmt_exists())
+	if (intel_pstate_platform_pwr_mgmt_exists()) {
+		pr_info("P-states controlled by the platform\n");
 		return -ENODEV;
+	}
 
 	if (!hwp_active && hwp_only)
 		return -ENOTSUPP;
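Note: the reworked iowait boost is a geometric ramp rather than an immediate jump to maximum. Assuming FRAC_BITS is 8 in this driver (so int_tofp(1) == 256), the fixed-point sequence works out as follows; a gap longer than TICK_NSEC restarts the ramp at one eighth:

	ONE_EIGHTH_FP = 1 << (8 - 3) = 32			/* 0.125 in fixed point */
	consecutive IOWAIT wakeups: 32 -> 64 -> 128 -> 256	/* capped at int_tofp(1) */
	no IOWAIT flag: the boost is halved (>>= 1) each update until it reaches 0

get_target_pstate() then uses the boost only as a floor for busy_frac, so boosted utilization decays smoothly instead of dropping from 100% to the measured value in a single step.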

View File

@@ -851,7 +851,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
 	case TYPE_POWERSAVER:
 		pr_cont("Powersaver supported\n");
 		break;
-	};
+	}
 
 	/* Doesn't hurt */
 	longhaul_setup_southbridge();

View File

@@ -14,7 +14,6 @@
 #include <linux/clk.h>
 #include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
@@ -48,7 +47,6 @@ struct mtk_cpu_dvfs_info {
 	struct regulator *sram_reg;
 	struct clk *cpu_clk;
 	struct clk *inter_clk;
-	struct thermal_cooling_device *cdev;
 	struct list_head list_head;
 	int intermediate_voltage;
 	bool need_voltage_tracking;
@@ -307,13 +305,6 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
 
 #define DYNAMIC_POWER "dynamic-power-coefficient"
 
-static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct mtk_cpu_dvfs_info *info = policy->driver_data;
-
-	info->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
 {
 	struct device *cpu_dev;
@@ -472,7 +463,6 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct mtk_cpu_dvfs_info *info = policy->driver_data;
 
-	cpufreq_cooling_unregister(info->cdev);
 	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
 
 	return 0;
@@ -480,13 +470,13 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
 static struct cpufreq_driver mtk_cpufreq_driver = {
 	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
-		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = mtk_cpufreq_set_target,
 	.get = cpufreq_generic_get,
 	.init = mtk_cpufreq_init,
 	.exit = mtk_cpufreq_exit,
-	.ready = mtk_cpufreq_ready,
 	.name = "mtk-cpufreq",
 	.attr = cpufreq_generic_attr,
 };

View File

@@ -268,7 +268,7 @@ static int pcc_get_offset(int cpu)
 	if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
 		ret = -ENODEV;
 		goto out_free;
-	};
+	}
 
 	offset = &(pccp->package.elements[0]);
 	if (!offset || offset->type != ACPI_TYPE_INTEGER) {

View File

@@ -244,6 +244,7 @@ static int init_powernv_pstates(void)
 	u32 len_ids, len_freqs;
 	u32 pstate_min, pstate_max, pstate_nominal;
 	u32 pstate_turbo, pstate_ultra_turbo;
+	int rc = -ENODEV;
 
 	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
 	if (!power_mgt) {
@@ -327,8 +328,11 @@ next:
 		powernv_freqs[i].frequency = freq * 1000; /* kHz */
 		powernv_freqs[i].driver_data = id & 0xFF;
 
-		revmap_data = (struct pstate_idx_revmap_data *)
-			kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+		revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+		if (!revmap_data) {
+			rc = -ENOMEM;
+			goto out;
+		}
 
 		revmap_data->pstate_id = id & 0xFF;
 		revmap_data->cpufreq_table_idx = i;
@@ -357,7 +361,7 @@ next:
 	return 0;
 out:
 	of_node_put(power_mgt);
-	return -ENODEV;
+	return rc;
 }
 
 /* Returns the CPU frequency corresponding to the pstate_id. */

View File

@@ -10,18 +10,21 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
+#include <linux/pm_opp.h>
 #include <linux/slab.h>
 
 #define LUT_MAX_ENTRIES			40U
 #define LUT_SRC				GENMASK(31, 30)
 #define LUT_L_VAL			GENMASK(7, 0)
 #define LUT_CORE_COUNT			GENMASK(18, 16)
+#define LUT_VOLT			GENMASK(11, 0)
 #define LUT_ROW_SIZE			32
 #define CLK_HW_DIV			2
 
 /* Register offsets */
 #define REG_ENABLE			0x0
-#define REG_LUT_TABLE			0x110
+#define REG_FREQ_LUT			0x110
+#define REG_VOLT_LUT			0x114
 #define REG_PERF_STATE			0x920
 
 static unsigned long cpu_hw_rate, xo_rate;
@@ -70,11 +73,12 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
 	return policy->freq_table[index].frequency;
 }
 
-static int qcom_cpufreq_hw_read_lut(struct device *dev,
+static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
 				    struct cpufreq_policy *policy,
 				    void __iomem *base)
 {
 	u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq;
+	u32 volt;
 	unsigned int max_cores = cpumask_weight(policy->cpus);
 	struct cpufreq_frequency_table	*table;
 
@@ -83,23 +87,28 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
 		return -ENOMEM;
 
 	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
-		data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE);
+		data = readl_relaxed(base + REG_FREQ_LUT +
+				     i * LUT_ROW_SIZE);
 		src = FIELD_GET(LUT_SRC, data);
 		lval = FIELD_GET(LUT_L_VAL, data);
 		core_count = FIELD_GET(LUT_CORE_COUNT, data);
 
+		data = readl_relaxed(base + REG_VOLT_LUT +
+				     i * LUT_ROW_SIZE);
+		volt = FIELD_GET(LUT_VOLT, data) * 1000;
+
 		if (src)
 			freq = xo_rate * lval / 1000;
 		else
 			freq = cpu_hw_rate / 1000;
 
-		/* Ignore boosts in the middle of the table */
-		if (core_count != max_cores) {
-			table[i].frequency = CPUFREQ_ENTRY_INVALID;
-		} else {
+		if (freq != prev_freq && core_count == max_cores) {
 			table[i].frequency = freq;
-			dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i,
+			dev_pm_opp_add(cpu_dev, freq * 1000, volt);
+			dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
 				freq, core_count);
+		} else {
+			table[i].frequency = CPUFREQ_ENTRY_INVALID;
 		}
 
 		/*
@@ -116,6 +125,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
 			if (prev_cc != max_cores) {
 				prev->frequency = prev_freq;
 				prev->flags = CPUFREQ_BOOST_FREQ;
+				dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt);
 			}
 
 			break;
@@ -127,6 +137,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
 	table[i].frequency = CPUFREQ_TABLE_END;
 	policy->freq_table = table;
+	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
 
 	return 0;
 }
@@ -159,10 +170,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 	struct device *dev = &global_pdev->dev;
 	struct of_phandle_args args;
 	struct device_node *cpu_np;
+	struct device *cpu_dev;
 	struct resource *res;
 	void __iomem *base;
 	int ret, index;
 
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__,
+		       policy->cpu);
+		return -ENODEV;
+	}
+
 	cpu_np = of_cpu_device_node_get(policy->cpu);
 	if (!cpu_np)
 		return -EINVAL;
@@ -199,12 +218,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 
 	policy->driver_data = base + REG_PERF_STATE;
 
-	ret = qcom_cpufreq_hw_read_lut(dev, policy, base);
+	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base);
 	if (ret) {
 		dev_err(dev, "Domain-%d failed to read LUT\n", index);
 		goto error;
 	}
 
+	ret = dev_pm_opp_get_opp_count(cpu_dev);
+	if (ret <= 0) {
+		dev_err(cpu_dev, "Failed to add OPPs\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	policy->fast_switch_possible = true;
 
 	return 0;
@@ -215,8 +243,10 @@ error:
 static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
 {
+	struct device *cpu_dev = get_cpu_device(policy->cpu);
 	void __iomem *base = policy->driver_data - REG_PERF_STATE;
 
+	dev_pm_opp_remove_all_dynamic(cpu_dev);
 	kfree(policy->freq_table);
 	devm_iounmap(&global_pdev->dev, base);
 
@@ -231,7 +261,8 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = {
 static struct cpufreq_driver cpufreq_qcom_hw_driver = {
 	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
-		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+		 CPUFREQ_IS_COOLING_DEV,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = qcom_cpufreq_hw_target_index,
 	.get = qcom_cpufreq_hw_get,
@@ -296,7 +327,7 @@ static int __init qcom_cpufreq_hw_init(void)
 {
 	return platform_driver_register(&qcom_cpufreq_hw_driver);
 }
-subsys_initcall(qcom_cpufreq_hw_init);
+device_initcall(qcom_cpufreq_hw_init);
 
 static void __exit qcom_cpufreq_hw_exit(void)
 {


@@ -42,7 +42,7 @@ enum _msm8996_version {
 	NUM_OF_MSM8996_VERSIONS,
 };
 
-struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
 
 static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {


@@ -13,7 +13,6 @@
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -31,7 +30,6 @@
 struct cpu_data {
 	struct clk **pclk;
 	struct cpufreq_frequency_table *table;
-	struct thermal_cooling_device *cdev;
 };
 
 /*
@@ -239,7 +237,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct cpu_data *data = policy->driver_data;
 
-	cpufreq_cooling_unregister(data->cdev);
 	kfree(data->pclk);
 	kfree(data->table);
 	kfree(data);
@@ -258,23 +255,15 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
 	return clk_set_parent(policy->clk, parent);
 }
 
-static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct cpu_data *cpud = policy->driver_data;
-
-	cpud->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static struct cpufreq_driver qoriq_cpufreq_driver = {
 	.name		= "qoriq_cpufreq",
-	.flags		= CPUFREQ_CONST_LOOPS,
+	.flags		= CPUFREQ_CONST_LOOPS |
+			  CPUFREQ_IS_COOLING_DEV,
 	.init		= qoriq_cpufreq_cpu_init,
 	.exit		= qoriq_cpufreq_cpu_exit,
 	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= qoriq_cpufreq_target,
 	.get		= cpufreq_generic_get,
-	.ready		= qoriq_cpufreq_ready,
 	.attr		= cpufreq_generic_attr,
 };


@@ -584,7 +584,7 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
 static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 {
 	struct device_node *np;
-	int id;
+	int id, result = 0;
 
 	/*
 	 * HACK: This is a temporary workaround to get access to clock
@@ -594,18 +594,39 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 	 * this whole driver as soon as S5PV210 gets migrated to use
 	 * cpufreq-dt driver.
 	 */
+	arm_regulator = regulator_get(NULL, "vddarm");
+	if (IS_ERR(arm_regulator)) {
+		if (PTR_ERR(arm_regulator) == -EPROBE_DEFER)
+			pr_debug("vddarm regulator not ready, defer\n");
+		else
+			pr_err("failed to get regulator vddarm\n");
+		return PTR_ERR(arm_regulator);
+	}
+
+	int_regulator = regulator_get(NULL, "vddint");
+	if (IS_ERR(int_regulator)) {
+		if (PTR_ERR(int_regulator) == -EPROBE_DEFER)
+			pr_debug("vddint regulator not ready, defer\n");
+		else
+			pr_err("failed to get regulator vddint\n");
+		result = PTR_ERR(int_regulator);
+		goto err_int_regulator;
+	}
+
 	np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
 	if (!np) {
 		pr_err("%s: failed to find clock controller DT node\n",
 			__func__);
-		return -ENODEV;
+		result = -ENODEV;
+		goto err_clock;
 	}
 
 	clk_base = of_iomap(np, 0);
 	of_node_put(np);
 	if (!clk_base) {
 		pr_err("%s: failed to map clock registers\n", __func__);
-		return -EFAULT;
+		result = -EFAULT;
+		goto err_clock;
 	}
 
 	for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
@@ -614,7 +635,8 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 			pr_err("%s: failed to get alias of dmc node '%pOFn'\n",
 				__func__, np);
 			of_node_put(np);
-			return id;
+			result = id;
+			goto err_clk_base;
 		}
 
 		dmc_base[id] = of_iomap(np, 0);
@@ -622,33 +644,40 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 			pr_err("%s: failed to map dmc%d registers\n",
 				__func__, id);
 			of_node_put(np);
-			return -EFAULT;
+			result = -EFAULT;
+			goto err_dmc;
 		}
 	}
 
 	for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
 		if (!dmc_base[id]) {
 			pr_err("%s: failed to find dmc%d node\n", __func__, id);
-			return -ENODEV;
+			result = -ENODEV;
+			goto err_dmc;
 		}
 	}
 
-	arm_regulator = regulator_get(NULL, "vddarm");
-	if (IS_ERR(arm_regulator)) {
-		pr_err("failed to get regulator vddarm\n");
-		return PTR_ERR(arm_regulator);
-	}
-
-	int_regulator = regulator_get(NULL, "vddint");
-	if (IS_ERR(int_regulator)) {
-		pr_err("failed to get regulator vddint\n");
-		regulator_put(arm_regulator);
-		return PTR_ERR(int_regulator);
-	}
-
 	register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
 
 	return cpufreq_register_driver(&s5pv210_driver);
+
+err_dmc:
+	for (id = 0; id < ARRAY_SIZE(dmc_base); ++id)
+		if (dmc_base[id]) {
+			iounmap(dmc_base[id]);
+			dmc_base[id] = NULL;
+		}
+
+err_clk_base:
+	iounmap(clk_base);
+
+err_clock:
+	regulator_put(int_regulator);
+
+err_int_regulator:
+	regulator_put(arm_regulator);
+
+	return result;
 }
 
 static struct platform_driver s5pv210_cpufreq_platdrv = {


@@ -11,7 +11,6 @@
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/pm_opp.h>
@@ -22,7 +21,6 @@
 struct scmi_data {
 	int domain_id;
 	struct device *cpu_dev;
-	struct thermal_cooling_device *cdev;
 };
 
 static const struct scmi_handle *handle;
@@ -185,7 +183,6 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct scmi_data *priv = policy->driver_data;
 
-	cpufreq_cooling_unregister(priv->cdev);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 	kfree(priv);
@@ -193,17 +190,11 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct scmi_data *priv = policy->driver_data;
-
-	priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static struct cpufreq_driver scmi_cpufreq_driver = {
 	.name	= "scmi",
 	.flags	= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
-		  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+		  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		  CPUFREQ_IS_COOLING_DEV,
 	.verify	= cpufreq_generic_frequency_table_verify,
 	.attr	= cpufreq_generic_attr,
 	.target_index	= scmi_cpufreq_set_target,
@@ -211,7 +202,6 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
 	.get	= scmi_cpufreq_get_rate,
 	.init	= scmi_cpufreq_init,
 	.exit	= scmi_cpufreq_exit,
-	.ready	= scmi_cpufreq_ready,
 };
 
 static int scmi_cpufreq_probe(struct scmi_device *sdev)


@@ -22,7 +22,6 @@
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
@@ -34,7 +33,6 @@
 struct scpi_data {
 	struct clk *clk;
 	struct device *cpu_dev;
-	struct thermal_cooling_device *cdev;
 };
 
 static struct scpi_ops *scpi_ops;
@@ -186,7 +184,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct scpi_data *priv = policy->driver_data;
 
-	cpufreq_cooling_unregister(priv->cdev);
 	clk_put(priv->clk);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	kfree(priv);
@@ -195,23 +192,16 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct scpi_data *priv = policy->driver_data;
-
-	priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
 static struct cpufreq_driver scpi_cpufreq_driver = {
 	.name	= "scpi-cpufreq",
 	.flags	= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
-		  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+		  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		  CPUFREQ_IS_COOLING_DEV,
 	.verify	= cpufreq_generic_frequency_table_verify,
 	.attr	= cpufreq_generic_attr,
 	.get	= scpi_cpufreq_get_rate,
 	.init	= scpi_cpufreq_init,
 	.exit	= scpi_cpufreq_exit,
-	.ready	= scpi_cpufreq_ready,
 	.target_index	= scpi_cpufreq_set_target,
 };


@@ -243,8 +243,7 @@ static unsigned int speedstep_get(unsigned int cpu)
 	unsigned int speed;
 
 	/* You're supposed to ensure CPU is online. */
-	if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
-		BUG();
+	BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
 
 	pr_debug("detected %u kHz as current frequency\n", speed);
 	return speed;


@@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, priv);
 
+	of_node_put(np);
+
 	return 0;
 
 out_switch_to_pllx:


@@ -533,9 +533,8 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
 	return ret;
 }
 
-static inline int
-_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
-			  unsigned long old_freq, unsigned long freq)
+static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+					    unsigned long freq)
 {
 	int ret;
 
@@ -572,7 +571,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
 	}
 
 	/* Change frequency */
-	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
+	ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
 	if (ret)
 		goto restore_voltage;
 
@@ -586,7 +585,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
 	return 0;
 
 restore_freq:
-	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
+	if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
 		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
 			__func__, old_freq);
 restore_voltage:
@@ -759,7 +758,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 				       opp->supplies);
 	} else {
 		/* Only frequency scaling */
-		ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+		ret = _generic_set_opp_clk_only(dev, clk, freq);
 	}
 
 	/* Scaling down? Configure required OPPs after frequency */


@@ -20,6 +20,7 @@
 #include <linux/pm_domain.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/energy_model.h>
 
 #include "opp.h"
 
@@ -1047,3 +1048,101 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
 	return of_node_get(opp->np);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
+
+/*
+ * Callback function provided to the Energy Model framework upon registration.
+ * This computes the power estimated by @CPU at @kHz if it is the frequency
+ * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
+ * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
+ * frequency and @mW to the associated power. The power is estimated as
+ * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
+ * the voltage and frequency of the OPP.
+ *
+ * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
+ * calculation failed because of missing parameters, 0 otherwise.
+ */
+static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
+					 int cpu)
+{
+	struct device *cpu_dev;
+	struct dev_pm_opp *opp;
+	struct device_node *np;
+	unsigned long mV, Hz;
+	u32 cap;
+	u64 tmp;
+	int ret;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return -ENODEV;
+
+	np = of_node_get(cpu_dev->of_node);
+	if (!np)
+		return -EINVAL;
+
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret)
+		return -EINVAL;
+
+	Hz = *kHz * 1000;
+	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+	if (IS_ERR(opp))
+		return -EINVAL;
+
+	mV = dev_pm_opp_get_voltage(opp) / 1000;
+	dev_pm_opp_put(opp);
+	if (!mV)
+		return -EINVAL;
+
+	tmp = (u64)cap * mV * mV * (Hz / 1000000);
+	do_div(tmp, 1000000000);
+
+	*mW = (unsigned long)tmp;
+	*kHz = Hz / 1000;
+
+	return 0;
+}
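
[Editor's note] For reference, the fixed-point arithmetic in _get_cpu_power() above works out as follows: with a dynamic-power-coefficient of 100, a 1000 mV OPP and a 1.5 GHz OPP (values chosen purely for illustration, not taken from the patch), P = C * V^2 * f scales down to 150 mW. A minimal standalone user-space sketch of the same computation, not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cap = 100;            /* hypothetical dynamic-power-coefficient */
	uint64_t mV = 1000;            /* hypothetical OPP voltage in millivolts */
	uint64_t Hz = 1500000000ULL;   /* hypothetical OPP frequency, 1.5 GHz */

	/* Same scaling as _get_cpu_power(): C * V^2 * f, reduced to milliwatts */
	uint64_t tmp = cap * mV * mV * (Hz / 1000000);
	uint64_t mW = tmp / 1000000000ULL;

	printf("estimated power: %llu mW\n", (unsigned long long)mW); /* prints 150 */
	return 0;
}
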
+/**
+ * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
+ * @cpus	: CPUs for which an Energy Model has to be registered
+ *
+ * This checks whether the "dynamic-power-coefficient" devicetree property has
+ * been specified, and tries to register an Energy Model with it if it has.
+ */
+void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+	struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
+	int ret, nr_opp, cpu = cpumask_first(cpus);
+	struct device *cpu_dev;
+	struct device_node *np;
+	u32 cap;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return;
+
+	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+	if (nr_opp <= 0)
+		return;
+
+	np = of_node_get(cpu_dev->of_node);
+	if (!np)
+		return;
+
+	/*
+	 * Register an EM only if the 'dynamic-power-coefficient' property is
+	 * set in devicetree. It is assumed the voltage values are known if that
+	 * property is set since it is useless otherwise. If voltages are not
+	 * known, just let the EM registration fail with an error to alert the
+	 * user about the inconsistent configuration.
+	 */
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret || !cap)
+		return;
+
+	em_register_perf_domain(cpus, nr_opp, &em_cb);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
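
[Editor's note] The expected caller of dev_pm_opp_of_register_em() is a cpufreq driver's ->init() path, once the policy's OPPs have been populated; the qcom-cpufreq-hw hunk earlier in this merge does exactly that. Below is a hedged sketch of such a call site; the driver name is invented for illustration and error unwinding is trimmed:

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

/* Hypothetical driver init, not taken from the patch. */
static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Populate the OPP table (here from devicetree) before registering an EM. */
	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		return ret;

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &policy->freq_table);
	if (ret)
		return ret;

	/* No-op unless "dynamic-power-coefficient" is present in DT. */
	dev_pm_opp_of_register_em(policy->cpus);

	return 0;
}
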


@@ -152,6 +152,7 @@ config CPU_THERMAL
 	bool "generic cpu cooling support"
 	depends on CPU_FREQ
 	depends on THERMAL_OF
+	depends on THERMAL=y
 	help
 	  This implements the generic cpu cooling mechanism through frequency
 	  reduction. An ACPI version of this already exists


@@ -137,6 +137,7 @@ struct cppc_cpudata {
 	cpumask_var_t shared_cpu_map;
 };
 
+extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
 extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
 extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
 extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);


@@ -151,6 +151,9 @@ struct cpufreq_policy {
 
 	/* For cpufreq driver's internal use */
 	void			*driver_data;
+
+	/* Pointer to the cooling device if used for thermal mitigation */
+	struct thermal_cooling_device *cdev;
 };
 
 /* Only for ACPI */
@@ -254,20 +257,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 static struct freq_attr _name =					\
 __ATTR(_name, 0200, NULL, store_##_name)
 
-struct global_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct kobject *kobj,
-			struct attribute *attr, char *buf);
-	ssize_t (*store)(struct kobject *a, struct attribute *b,
-			 const char *c, size_t count);
-};
-
 #define define_one_global_ro(_name)		\
-static struct global_attr _name =		\
+static struct kobj_attribute _name =		\
 __ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_one_global_rw(_name)		\
-static struct global_attr _name =		\
+static struct kobj_attribute _name =		\
 __ATTR(_name, 0644, show_##_name, store_##_name)
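
[Editor's note] With struct global_attr removed, the show/store callbacks behind define_one_global_ro()/define_one_global_rw() follow the struct kobj_attribute prototype. A hedged sketch of what a converted read-only attribute might look like; the attribute name and backing variable are invented for illustration and are not part of the patch:

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/kobject.h>

/* Hypothetical global attribute exposed under a driver's sysfs kobject. */
static unsigned int example_khz;

static ssize_t show_example_khz(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", example_khz);
}

define_one_global_ro(example_khz);
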
@@ -330,6 +325,8 @@ struct cpufreq_driver {
 	/* optional */
 	int		(*bios_limit)(int cpu, unsigned int *limit);
 
+	int		(*online)(struct cpufreq_policy *policy);
+	int		(*offline)(struct cpufreq_policy *policy);
 	int		(*exit)(struct cpufreq_policy *policy);
 	void		(*stop_cpu)(struct cpufreq_policy *policy);
 	int		(*suspend)(struct cpufreq_policy *policy);
@@ -346,14 +343,15 @@ struct cpufreq_driver {
 };
 
 /* flags */
-#define CPUFREQ_STICKY		(1 << 0)	/* driver isn't removed even if
-						   all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS	(1 << 1)	/* loops_per_jiffy or other
-						   kernel "constants" aren't
-						   affected by frequency
-						   transitions */
-#define CPUFREQ_PM_NO_WARN	(1 << 2)	/* don't warn on suspend/resume
-						   speed mismatches */
+
+/* driver isn't removed even if all ->init() calls failed */
+#define CPUFREQ_STICKY				BIT(0)
+
+/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
+#define CPUFREQ_CONST_LOOPS			BIT(1)
+
+/* don't warn on suspend/resume speed mismatches */
+#define CPUFREQ_PM_NO_WARN			BIT(2)
 
 /*
  * This should be set by platforms having multiple clock-domains, i.e.
@@ -361,14 +359,14 @@ struct cpufreq_driver {
  * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
  * governor with different tunables for different clusters.
  */
-#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY	BIT(3)
 
 /*
  * Driver will do POSTCHANGE notifications from outside of their ->target()
  * routine and so must set cpufreq_driver->flags with this flag, so that core
  * can handle them specially.
  */
-#define CPUFREQ_ASYNC_NOTIFICATION	(1 << 4)
+#define CPUFREQ_ASYNC_NOTIFICATION		BIT(4)
 
 /*
  * Set by drivers which want cpufreq core to check if CPU is running at a
@@ -377,13 +375,19 @@ struct cpufreq_driver {
  * from the table. And if that fails, we will stop further boot process by
  * issuing a BUG_ON().
  */
-#define CPUFREQ_NEED_INITIAL_FREQ_CHECK	(1 << 5)
+#define CPUFREQ_NEED_INITIAL_FREQ_CHECK		BIT(5)
 
 /*
  * Set by drivers to disallow use of governors with "dynamic_switching" flag
 * set.
 */
-#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING	(1 << 6)
+#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING	BIT(6)
+
+/*
+ * Set by drivers that want the core to automatically register the cpufreq
+ * driver as a thermal cooling device.
+ */
+#define CPUFREQ_IS_COOLING_DEV			BIT(7)
 
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);


@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI DaVinci CPUFreq platform support.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ */
+
+#ifndef _MACH_DAVINCI_CPUFREQ_H
+#define _MACH_DAVINCI_CPUFREQ_H
+
+#include <linux/cpufreq.h>
+
+struct davinci_cpufreq_config {
+	struct cpufreq_frequency_table *freq_table;
+	int (*set_voltage)(unsigned int index);
+	int (*init)(void);
+};
+
+#endif /* _MACH_DAVINCI_CPUFREQ_H */


@@ -327,6 +327,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
 struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
 struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
 int of_get_required_opp_performance_state(struct device_node *np, int index);
+void dev_pm_opp_of_register_em(struct cpumask *cpus);
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
 {
@@ -365,6 +366,11 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
 {
 	return NULL;
 }
+
+static inline void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+}
+
 static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
 {
 	return -ENOTSUPP;