Merge branch 'pm-cpufreq'

* pm-cpufreq: (22 commits)
  cpufreq: stats: Handle the case when trans_table goes beyond PAGE_SIZE
  cpufreq: arm_big_little: make cpufreq_arm_bL_ops structures const
  cpufreq: arm_big_little: make function arguments and structure pointer const
  cpufreq: pxa: convert to clock API
  cpufreq: speedstep-lib: mark expected switch fall-through
  cpufreq: ti-cpufreq: add missing of_node_put()
  cpufreq: dt: Remove support for Exynos4212 SoCs
  cpufreq: imx6q: Move speed grading check to cpufreq driver
  cpufreq: ti-cpufreq: kfree opp_data when failure
  cpufreq: SPEAr: pr_err() strings should end with newlines
  cpufreq: powernow-k8: pr_err() strings should end with newlines
  cpufreq: dt-platdev: drop socionext,uniphier-ld6b from whitelist
  arm64: wire cpu-invariant accounting support up to the task scheduler
  arm64: wire frequency-invariant accounting support up to the task scheduler
  arm: wire cpu-invariant accounting support up to the task scheduler
  arm: wire frequency-invariant accounting support up to the task scheduler
  drivers base/arch_topology: allow inlining cpu-invariant accounting support
  drivers base/arch_topology: provide frequency-invariant accounting support
  cpufreq: dt: invoke frequency-invariance setter function
  cpufreq: arm_big_little: invoke frequency-invariance setter function
  ...
commit 60af981c78
Author: Rafael J. Wysocki
Date:   2017-11-13 01:34:49 +01:00

22 changed files with 214 additions and 286 deletions

View file

@ -90,6 +90,9 @@ Freq_i to Freq_j. Freq_i is in descending order with increasing rows and
Freq_j is in descending order with increasing columns. The output here also
contains the actual freq values for each row and column for better readability.
If the transition table is bigger than PAGE_SIZE, reading this will
return an -EFBIG error.
--------------------------------------------------------------------------------
<mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # cat trans_table
From : To
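
For illustration only (not part of the diff): a minimal userspace sketch of how the new -EFBIG return surfaces when the transition table no longer fits in one page. The sysfs path and buffer size are assumptions.

/* Illustration only -- userspace reader for the behaviour described above. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path; adjust for the CPU of interest */
	const char *path =
		"/sys/devices/system/cpu/cpu0/cpufreq/stats/trans_table";
	char buf[65536];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0) {
		if (errno == EFBIG)
			fprintf(stderr, "trans_table exceeds PAGE_SIZE\n");
		else
			perror("read");
		close(fd);
		return 1;
	}
	buf[n] = '\0';
	fputs(buf, stdout);
	close(fd);
	return 0;
}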

View file

@ -25,6 +25,14 @@ void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
#include <linux/arch_topology.h>
/* Replace task scheduler's default frequency-invariant accounting */
#define arch_scale_freq_capacity topology_get_freq_scale
/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
#else
static inline void init_cpu_topology(void) { }
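
As a rough sketch (not the scheduler's actual code), the two macros above let PELT-style accounting discount time spent at reduced frequency or on a lower-capacity CPU; both scale factors are fixed point against SCHED_CAPACITY_SCALE (1024).

#include <stdio.h>

#define DEMO_CAPACITY_SHIFT	10	/* mirrors SCHED_CAPACITY_SHIFT */

/* Sketch only -- how frequency- and cpu-invariant scaling composes. */
static unsigned long demo_scale_invariant(unsigned long delta_us,
					   unsigned long freq_scale,
					   unsigned long cpu_scale)
{
	/* time at half the max frequency contributes half the load ... */
	delta_us = (delta_us * freq_scale) >> DEMO_CAPACITY_SHIFT;
	/* ... and a CPU with half the peak capacity halves it again */
	return (delta_us * cpu_scale) >> DEMO_CAPACITY_SHIFT;
}

int main(void)
{
	/* 1000 us at 50% of max frequency on a full-capacity CPU -> 500 us */
	printf("%lu\n", demo_scale_invariant(1000, 512, 1024));
	return 0;
}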

View file

@ -286,88 +286,6 @@ static void __init imx6q_init_machine(void)
imx6q_axi_init();
}
#define OCOTP_CFG3 0x440
#define OCOTP_CFG3_SPEED_SHIFT 16
#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
#define OCOTP_CFG3_SPEED_996MHZ 0x2
#define OCOTP_CFG3_SPEED_852MHZ 0x1
static void __init imx6q_opp_check_speed_grading(struct device *cpu_dev)
{
struct device_node *np;
void __iomem *base;
u32 val;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
if (!np) {
pr_warn("failed to find ocotp node\n");
return;
}
base = of_iomap(np, 0);
if (!base) {
pr_warn("failed to map ocotp\n");
goto put_node;
}
/*
* SPEED_GRADING[1:0] defines the max speed of ARM:
* 2b'11: 1200000000Hz;
* 2b'10: 996000000Hz;
* 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
* 2b'00: 792000000Hz;
* We need to set the max speed of ARM according to fuse map.
*/
val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
if ((val != OCOTP_CFG3_SPEED_1P2GHZ) && cpu_is_imx6q())
if (dev_pm_opp_disable(cpu_dev, 1200000000))
pr_warn("failed to disable 1.2 GHz OPP\n");
if (val < OCOTP_CFG3_SPEED_996MHZ)
if (dev_pm_opp_disable(cpu_dev, 996000000))
pr_warn("failed to disable 996 MHz OPP\n");
if (cpu_is_imx6q()) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
if (dev_pm_opp_disable(cpu_dev, 852000000))
pr_warn("failed to disable 852 MHz OPP\n");
}
iounmap(base);
put_node:
of_node_put(np);
}
static void __init imx6q_opp_init(void)
{
struct device_node *np;
struct device *cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
pr_warn("failed to get cpu0 device\n");
return;
}
np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_warn("failed to find cpu0 node\n");
return;
}
if (dev_pm_opp_of_add_table(cpu_dev)) {
pr_warn("failed to init OPP table\n");
goto put_node;
}
imx6q_opp_check_speed_grading(cpu_dev);
put_node:
of_node_put(np);
}
static struct platform_device imx6q_cpufreq_pdev = {
.name = "imx6q-cpufreq",
};
static void __init imx6q_init_late(void)
{
/*
@ -377,10 +295,8 @@ static void __init imx6q_init_late(void)
if (imx_get_soc_revision() > IMX_CHIP_REVISION_1_1)
imx6q_cpuidle_init();
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
imx6q_opp_init();
platform_device_register(&imx6q_cpufreq_pdev);
}
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ))
platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0);
}
static void __init imx6q_map_io(void)

View file

@ -33,6 +33,14 @@ int pcibus_to_node(struct pci_bus *bus);
#endif /* CONFIG_NUMA */
#include <linux/arch_topology.h>
/* Replace task scheduler's default frequency-invariant accounting */
#define arch_scale_freq_capacity topology_get_freq_scale
/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */

View file

@ -22,14 +22,23 @@
#include <linux/string.h>
#include <linux/sched/topology.h>
static DEFINE_MUTEX(cpu_scale_mutex);
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
unsigned long max_freq)
{
return per_cpu(cpu_scale, cpu);
unsigned long scale;
int i;
scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
for_each_cpu(i, cpus)
per_cpu(freq_scale, i) = scale;
}
static DEFINE_MUTEX(cpu_scale_mutex);
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
per_cpu(cpu_scale, cpu) = capacity;
@ -212,6 +221,8 @@ static struct notifier_block init_cpu_capacity_notifier __initdata = {
static int __init register_cpufreq_notifier(void)
{
int ret;
/*
* on ACPI-based systems we need to use the default cpu capacity
* until we have the necessary code to parse the cpu capacity, so
@ -227,8 +238,13 @@ static int __init register_cpufreq_notifier(void)
cpumask_copy(cpus_to_visit, cpu_possible_mask);
return cpufreq_register_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
free_cpumask_var(cpus_to_visit);
return ret;
}
core_initcall(register_cpufreq_notifier);
@ -236,6 +252,7 @@ static void __init parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
free_cpumask_var(cpus_to_visit);
}
#else
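
A worked example of the scale value computed above, assuming a policy currently running at 1.0 GHz with a 2.0 GHz maximum (values are illustrative, not from the commit):

#include <stdio.h>

#define DEMO_CAPACITY_SHIFT	10	/* mirrors SCHED_CAPACITY_SHIFT */

int main(void)
{
	unsigned long cur_freq = 1000000;	/* kHz, assumed example */
	unsigned long max_freq = 2000000;	/* kHz, assumed example */
	unsigned long scale = (cur_freq << DEMO_CAPACITY_SHIFT) / max_freq;

	/* prints 512: every CPU in the policy is credited with half capacity */
	printf("freq_scale = %lu\n", scale);
	return 0;
}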

View file

@ -57,7 +57,7 @@ static bool bL_switching_enabled;
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static const struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
@ -213,6 +213,7 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
{
u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
unsigned int freqs_new;
int ret;
cur_cluster = cpu_to_cluster(cpu);
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
@ -229,7 +230,14 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
}
}
return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
if (!ret) {
arch_set_freq_scale(policy->related_cpus, freqs_new,
policy->cpuinfo.max_freq);
}
return ret;
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
@ -609,7 +617,7 @@ static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
{
int ret, i;
@ -653,7 +661,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops)
{
if (arm_bL_ops != ops) {
pr_err("%s: Registered with: %s, can't unregister, exiting\n",

View file

@ -37,7 +37,7 @@ struct cpufreq_arm_bL_ops {
void (*free_opp_table)(const struct cpumask *cpumask);
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops);
#endif /* CPUFREQ_ARM_BIG_LITTLE_H */

View file

@ -61,7 +61,7 @@ static int dt_get_transition_latency(struct device *cpu_dev)
return transition_latency;
}
static struct cpufreq_arm_bL_ops dt_bL_ops = {
static const struct cpufreq_arm_bL_ops dt_bL_ops = {
.name = "dt-bl",
.get_transition_latency = dt_get_transition_latency,
.init_opp_table = dev_pm_opp_of_cpumask_add_table,

View file

@ -48,7 +48,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "samsung,exynos3250", },
{ .compatible = "samsung,exynos4210", },
{ .compatible = "samsung,exynos4212", },
{ .compatible = "samsung,exynos5250", },
#ifndef CONFIG_BL_SWITCHER
{ .compatible = "samsung,exynos5800", },
@ -83,8 +82,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "rockchip,rk3368", },
{ .compatible = "rockchip,rk3399", },
{ .compatible = "socionext,uniphier-ld6b", },
{ .compatible = "st-ericsson,u8500", },
{ .compatible = "st-ericsson,u8540", },
{ .compatible = "st-ericsson,u9500", },

View file

@ -43,9 +43,17 @@ static struct freq_attr *cpufreq_dt_attr[] = {
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct private_data *priv = policy->driver_data;
unsigned long freq = policy->freq_table[index].frequency;
int ret;
return dev_pm_opp_set_rate(priv->cpu_dev,
policy->freq_table[index].frequency * 1000);
ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
if (!ret) {
arch_set_freq_scale(policy->related_cpus, freq,
policy->cpuinfo.max_freq);
}
return ret;
}
/*

View file

@ -161,6 +161,12 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);
/*
* This is a generic cpufreq init() routine which can be used by cpufreq
* drivers of SMP systems. It will do following:

View file

@ -118,8 +118,11 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
}
if (len >= PAGE_SIZE)
return PAGE_SIZE;
if (len >= PAGE_SIZE) {
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
return -EFBIG;
}
return len;
}
cpufreq_freq_attr_ro(trans_table);

View file

@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@ -191,6 +192,57 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.suspend = cpufreq_generic_suspend,
};
#define OCOTP_CFG3 0x440
#define OCOTP_CFG3_SPEED_SHIFT 16
#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
#define OCOTP_CFG3_SPEED_996MHZ 0x2
#define OCOTP_CFG3_SPEED_852MHZ 0x1
static void imx6q_opp_check_speed_grading(struct device *dev)
{
struct device_node *np;
void __iomem *base;
u32 val;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
if (!np)
return;
base = of_iomap(np, 0);
if (!base) {
dev_err(dev, "failed to map ocotp\n");
goto put_node;
}
/*
* SPEED_GRADING[1:0] defines the max speed of ARM:
* 2b'11: 1200000000Hz;
* 2b'10: 996000000Hz;
* 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
* 2b'00: 792000000Hz;
* We need to set the max speed of ARM according to fuse map.
*/
val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
of_machine_is_compatible("fsl,imx6q"))
if (dev_pm_opp_disable(dev, 1200000000))
dev_warn(dev, "failed to disable 1.2GHz OPP\n");
if (val < OCOTP_CFG3_SPEED_996MHZ)
if (dev_pm_opp_disable(dev, 996000000))
dev_warn(dev, "failed to disable 996MHz OPP\n");
if (of_machine_is_compatible("fsl,imx6q")) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
if (dev_pm_opp_disable(dev, 852000000))
dev_warn(dev, "failed to disable 852MHz OPP\n");
}
iounmap(base);
put_node:
of_node_put(np);
}
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
@ -252,28 +304,21 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
/*
* We expect an OPP table supplied by platform.
* Just, incase the platform did not supply the OPP
* table, it will try to get it.
*/
ret = dev_pm_opp_of_add_table(cpu_dev);
if (ret < 0) {
dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
goto put_reg;
}
imx6q_opp_check_speed_grading(cpu_dev);
/* Because we have added the OPPs here, we must free them */
free_opp = true;
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = dev_pm_opp_of_add_table(cpu_dev);
if (ret < 0) {
dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
goto put_reg;
}
/* Because we have added the OPPs here, we must free them */
free_opp = true;
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
goto out_free_opp;
}
ret = num;
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
goto out_free_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
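
For illustration (with an assumed sample fuse value, not taken from the commit): decoding the SPEED_GRADING[1:0] field that imx6q_opp_check_speed_grading() reads from OCOTP_CFG3.

#include <stdio.h>

#define OCOTP_CFG3_SPEED_SHIFT	16
#define OCOTP_CFG3_SPEED_1P2GHZ	0x3
#define OCOTP_CFG3_SPEED_996MHZ	0x2
#define OCOTP_CFG3_SPEED_852MHZ	0x1

int main(void)
{
	unsigned int cfg3 = 0x00020000;	/* assumed sample fuse value */
	unsigned int grade = (cfg3 >> OCOTP_CFG3_SPEED_SHIFT) & 0x3;

	switch (grade) {
	case OCOTP_CFG3_SPEED_1P2GHZ:
		puts("fused for 1.2 GHz");
		break;
	case OCOTP_CFG3_SPEED_996MHZ:
		puts("fused for 996 MHz");	/* result for 0x00020000 */
		break;
	case OCOTP_CFG3_SPEED_852MHZ:
		puts("fused for 852 MHz (i.MX6Q only)");
		break;
	default:
		puts("fused for 792 MHz");
		break;
	}
	return 0;
}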

View file

@ -1043,7 +1043,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
pr_err("unable to alloc powernow_k8_data");
pr_err("unable to alloc powernow_k8_data\n");
return -ENOMEM;
}

View file

@ -58,56 +58,40 @@ module_param(pxa27x_maxfreq, uint, 0);
MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
"(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
struct pxa_cpufreq_data {
struct clk *clk_core;
};
static struct pxa_cpufreq_data pxa_cpufreq_data;
struct pxa_freqs {
unsigned int khz;
unsigned int membus;
unsigned int cccr;
unsigned int div2;
unsigned int cclkcfg;
int vmin;
int vmax;
};
/* Define the refresh period in mSec for the SDRAM and the number of rows */
#define SDRAM_TREF 64 /* standard 64ms SDRAM */
static unsigned int sdram_rows;
#define CCLKCFG_TURBO 0x1
#define CCLKCFG_FCS 0x2
#define CCLKCFG_HALFTURBO 0x4
#define CCLKCFG_FASTBUS 0x8
#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
#define MDREFR_DRI_MASK 0xFFF
#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
/*
* PXA255 definitions
*/
/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
static const struct pxa_freqs pxa255_run_freqs[] =
{
/* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
{ 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
{132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
{199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
{265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
{331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
{398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
/* CPU MEMBUS run turbo PXbus SDRAM */
{ 99500, -1, -1}, /* 99, 99, 50, 50 */
{132700, -1, -1}, /* 133, 133, 66, 66 */
{199100, -1, -1}, /* 199, 199, 99, 99 */
{265400, -1, -1}, /* 265, 265, 133, 66 */
{331800, -1, -1}, /* 331, 331, 166, 83 */
{398100, -1, -1}, /* 398, 398, 196, 99 */
};
/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
static const struct pxa_freqs pxa255_turbo_freqs[] =
{
/* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
{ 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
{199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
{298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
{298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
{398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
/* CPU run turbo PXbus SDRAM */
{ 99500, -1, -1}, /* 99, 99, 50, 50 */
{199100, -1, -1}, /* 99, 199, 50, 99 */
{298500, -1, -1}, /* 99, 287, 50, 99 */
{298600, -1, -1}, /* 199, 287, 99, 99 */
{398100, -1, -1}, /* 199, 398, 99, 99 */
};
#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
@ -122,47 +106,14 @@ static unsigned int pxa255_turbo_table;
module_param(pxa255_turbo_table, uint, 0);
MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
/*
* PXA270 definitions
*
* For the PXA27x:
* Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
*
* A = 0 => memory controller clock from table 3-7,
* A = 1 => memory controller clock = system bus clock
* Run mode frequency = 13 MHz * L
* Turbo mode frequency = 13 MHz * L * N
* System bus frequency = 13 MHz * L / (B + 1)
*
* In CCCR:
* A = 1
* L = 16 oscillator to run mode ratio
* 2N = 6 2 * (turbo mode to run mode ratio)
*
* In CCLKCFG:
* B = 1 Fast bus mode
* HT = 0 Half-Turbo mode
* T = 1 Turbo mode
*
* For now, just support some of the combinations in table 3-7 of
* PXA27x Processor Family Developer's Manual to simplify frequency
* change sequences.
*/
#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
#define CCLKCFG2(B, HT, T) \
(CCLKCFG_FCS | \
((B) ? CCLKCFG_FASTBUS : 0) | \
((HT) ? CCLKCFG_HALFTURBO : 0) | \
((T) ? CCLKCFG_TURBO : 0))
static struct pxa_freqs pxa27x_freqs[] = {
{104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
{156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
{208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
{312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
{416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
{520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
{624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
{104000, 900000, 1705000 },
{156000, 1000000, 1705000 },
{208000, 1180000, 1705000 },
{312000, 1250000, 1705000 },
{416000, 1350000, 1705000 },
{520000, 1450000, 1705000 },
{624000, 1550000, 1705000 }
};
#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
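
A worked example of the PXA27x clock relations quoted in the removed comment (run = 13 MHz * L, turbo = 13 MHz * L * N, with 2N stored in the table): the old 624000 kHz entry used PXA27x_CCCR(1, 16, 6), i.e. L = 16 and 2N = 6.

#include <stdio.h>

int main(void)
{
	unsigned int l = 16, n2 = 6;	/* from the removed 624 MHz entry */
	unsigned int run_khz = 13000 * l;		/* 208000 kHz */
	unsigned int turbo_khz = 13000 * l * n2 / 2;	/* 624000 kHz */

	printf("run %u kHz, turbo %u kHz\n", run_khz, turbo_khz);
	return 0;
}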
@ -241,51 +192,29 @@ static void pxa27x_guess_max_freq(void)
}
}
static void init_sdram_rows(void)
{
uint32_t mdcnfg = __raw_readl(MDCNFG);
unsigned int drac2 = 0, drac0 = 0;
if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
drac2 = MDCNFG_DRAC2(mdcnfg);
if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
drac0 = MDCNFG_DRAC0(mdcnfg);
sdram_rows = 1 << (11 + max(drac0, drac2));
}
static u32 mdrefr_dri(unsigned int freq)
{
u32 interval = freq * SDRAM_TREF / sdram_rows;
return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
}
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
return get_clk_frequency_khz(0);
struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
return (unsigned int) clk_get_rate(data->clk_core) / 1000;
}
static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
const struct pxa_freqs *pxa_freq_settings;
unsigned long flags;
unsigned int new_freq_cpu, new_freq_mem;
unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int new_freq_cpu;
int ret = 0;
/* Get the current policy */
find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
new_freq_cpu = pxa_freq_settings[idx].khz;
new_freq_mem = pxa_freq_settings[idx].membus;
if (freq_debug)
pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
pr_debug("Changing CPU frequency from %d Mhz to %d Mhz\n",
policy->cur / 1000, new_freq_cpu / 1000);
if (vcc_core && new_freq_cpu > policy->cur) {
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
@ -293,53 +222,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
return ret;
}
/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
* we need to preset the smaller DRI before the change. If we're
* speeding up we need to set the larger DRI value after the change.
*/
preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
preset_mdrefr |= mdrefr_dri(new_freq_mem);
}
postset_mdrefr =
(postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);
/* If we're dividing the memory clock by two for the SDRAM clock, this
* must be set prior to the change. Clearing the divide must be done
* after the change.
*/
if (pxa_freq_settings[idx].div2) {
preset_mdrefr |= MDREFR_DB2_MASK;
postset_mdrefr |= MDREFR_DB2_MASK;
} else {
postset_mdrefr &= ~MDREFR_DB2_MASK;
}
local_irq_save(flags);
/* Set new the CCCR and prepare CCLKCFG */
writel(pxa_freq_settings[idx].cccr, CCCR);
cclkcfg = pxa_freq_settings[idx].cclkcfg;
asm volatile(" \n\
ldr r4, [%1] /* load MDREFR */ \n\
b 2f \n\
.align 5 \n\
1: \n\
str %3, [%1] /* preset the MDREFR */ \n\
mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\
str %4, [%1] /* postset the MDREFR */ \n\
\n\
b 3f \n\
2: b 1b \n\
3: nop \n\
"
: "=&r" (unused)
: "r" (MDREFR), "r" (cclkcfg),
"r" (preset_mdrefr), "r" (postset_mdrefr)
: "r4", "r5");
local_irq_restore(flags);
clk_set_rate(data->clk_core, new_freq_cpu * 1000);
/*
* Even if voltage setting fails, we don't report it, as the frequency
@ -369,8 +252,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
pxa_cpufreq_init_voltages();
init_sdram_rows();
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
@ -429,11 +310,17 @@ static struct cpufreq_driver pxa_cpufreq_driver = {
.init = pxa_cpufreq_init,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
.driver_data = &pxa_cpufreq_data,
};
static int __init pxa_cpu_init(void)
{
int ret = -ENODEV;
pxa_cpufreq_data.clk_core = clk_get_sys(NULL, "core");
if (IS_ERR(pxa_cpufreq_data.clk_core))
return PTR_ERR(pxa_cpufreq_data.clk_core);
if (cpu_is_pxa25x() || cpu_is_pxa27x())
ret = cpufreq_register_driver(&pxa_cpufreq_driver);
return ret;

View file

@ -53,7 +53,7 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
return ret;
}
static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
.name = "scpi",
.get_transition_latency = scpi_get_transition_latency,
.init_opp_table = scpi_init_opp_table,

View file

@ -177,7 +177,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
np = of_cpu_device_node_get(0);
if (!np) {
pr_err("No cpu node found");
pr_err("No cpu node found\n");
return -ENODEV;
}
@ -187,7 +187,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
prop = of_find_property(np, "cpufreq_tbl", NULL);
if (!prop || !prop->value) {
pr_err("Invalid cpufreq_tbl");
pr_err("Invalid cpufreq_tbl\n");
ret = -ENODEV;
goto out_put_node;
}

View file

@ -367,7 +367,7 @@ unsigned int speedstep_detect_processor(void)
} else
return SPEEDSTEP_CPU_PIII_C;
}
/* fall through */
default:
return 0;
}

View file

@ -205,6 +205,7 @@ static int ti_cpufreq_init(void)
np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
of_node_put(np);
if (!match)
return -ENODEV;
@ -217,7 +218,8 @@ static int ti_cpufreq_init(void)
opp_data->cpu_dev = get_cpu_device(0);
if (!opp_data->cpu_dev) {
pr_err("%s: Failed to get device for CPU0\n", __func__);
return -ENODEV;
ret = ENODEV;
goto free_opp_data;
}
opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev);
@ -262,6 +264,8 @@ static int ti_cpufreq_init(void)
fail_put_node:
of_node_put(opp_data->opp_node);
free_opp_data:
kfree(opp_data);
return ret;
}

View file

@ -42,7 +42,7 @@ static int ve_spc_get_transition_latency(struct device *cpu_dev)
return 1000000; /* 1 ms */
}
static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
.name = "vexpress-spc",
.get_transition_latency = ve_spc_get_transition_latency,
.init_opp_table = ve_spc_init_opp_table,

View file

@ -6,15 +6,30 @@
#define _LINUX_ARCH_TOPOLOGY_H_
#include <linux/types.h>
#include <linux/percpu.h>
void topology_normalize_cpu_scale(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
DECLARE_PER_CPU(unsigned long, cpu_scale);
struct sched_domain;
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
static inline
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
{
return per_cpu(cpu_scale, cpu);
}
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, freq_scale);
static inline
unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
{
return per_cpu(freq_scale, cpu);
}
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */

View file

@ -919,6 +919,9 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
extern unsigned int arch_freq_get_on_cpu(int cpu);
extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
unsigned long max_freq);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;