mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-29 05:44:11 +00:00
Merge branch 'pm-cpufreq'
Merge cpufreq updates for 6.7-rc1: - Add support for several Qualcomm SoC versions and other similar changes (Christian Marangi, Dmitry Baryshkov, Luca Weiss, Neil Armstrong, Richard Acayan, Robert Marko, Rohit Agarwal, Stephan Gerhold and Varadarajan Narayanan). - Clean up the tegra cpufreq driver (Sumit Gupta). - Use of_property_read_reg() to parse "reg" in pmac32 driver (Rob Herring). - Add support for TI's am62p5 SoC (Bryan Brattlof). - Make ARM_BRCMSTB_AVS_CPUFREQ depend on !ARM_SCMI_CPUFREQ (Florian Fainelli). - Update Kconfig to mention i.MX7 as well (Alexander Stein). - Revise global turbo disable check in intel_pstate (Srinivas Pandruvada). - Carry out initialization of sg_cpu in the schedutil cpufreq governor in one loop (Liao Chang). - Simplify the condition for storing 'down_threshold' in the conservative cpufreq governor (Liao Chang). - Use fine-grained mutex in the userspace cpufreq governor (Liao Chang). - Move is_managed indicator in the userspace cpufreq governor into a per-policy structure (Liao Chang). - Rebuild sched-domains when removing cpufreq driver (Pierre Gondois). - Fix buffer overflow detection in trans_stats() (Christian Marangi).
* pm-cpufreq: (32 commits) dt-bindings: cpufreq: qcom-hw: document SM8650 CPUFREQ Hardware cpufreq: arm: Kconfig: Add i.MX7 to supported SoC for ARM_IMX_CPUFREQ_DT cpufreq: qcom-nvmem: add support for IPQ8064 cpufreq: qcom-nvmem: also accept operating-points-v2-krait-cpu cpufreq: qcom-nvmem: drop pvs_ver for format a fuses dt-bindings: cpufreq: qcom-cpufreq-nvmem: Document krait-cpu cpufreq: qcom-nvmem: add support for IPQ6018 dt-bindings: cpufreq: qcom-cpufreq-nvmem: document IPQ6018 cpufreq: qcom-nvmem: Add MSM8909 cpufreq: qcom-nvmem: Simplify driver data allocation cpufreq: stats: Fix buffer overflow detection in trans_stats() dt-bindings: cpufreq: cpufreq-qcom-hw: Add SDX75 compatible cpufreq: ARM_BRCMSTB_AVS_CPUFREQ cannot be used with ARM_SCMI_CPUFREQ cpufreq: ti-cpufreq: Add opp support for am62p5 SoCs cpufreq: dt-platdev: add am62p5 to blocklist cpufreq: tegra194: remove redundant AND with cpu_online_mask cpufreq: tegra194: use refclk delta based loop instead of udelay cpufreq: tegra194: save CPU data to avoid repeated SMP calls cpufreq: Rebuild sched-domains when removing cpufreq driver cpufreq: userspace: Move is_managed indicator into per-policy structure ...
This commit is contained in:
commit
78b1f56a6f
15 changed files with 386 additions and 179 deletions
|
@ -23,6 +23,7 @@ properties:
|
||||||
- enum:
|
- enum:
|
||||||
- qcom,qcm2290-cpufreq-hw
|
- qcom,qcm2290-cpufreq-hw
|
||||||
- qcom,sc7180-cpufreq-hw
|
- qcom,sc7180-cpufreq-hw
|
||||||
|
- qcom,sdm670-cpufreq-hw
|
||||||
- qcom,sdm845-cpufreq-hw
|
- qcom,sdm845-cpufreq-hw
|
||||||
- qcom,sm6115-cpufreq-hw
|
- qcom,sm6115-cpufreq-hw
|
||||||
- qcom,sm6350-cpufreq-hw
|
- qcom,sm6350-cpufreq-hw
|
||||||
|
@ -36,11 +37,13 @@ properties:
|
||||||
- qcom,sa8775p-cpufreq-epss
|
- qcom,sa8775p-cpufreq-epss
|
||||||
- qcom,sc7280-cpufreq-epss
|
- qcom,sc7280-cpufreq-epss
|
||||||
- qcom,sc8280xp-cpufreq-epss
|
- qcom,sc8280xp-cpufreq-epss
|
||||||
|
- qcom,sdx75-cpufreq-epss
|
||||||
- qcom,sm6375-cpufreq-epss
|
- qcom,sm6375-cpufreq-epss
|
||||||
- qcom,sm8250-cpufreq-epss
|
- qcom,sm8250-cpufreq-epss
|
||||||
- qcom,sm8350-cpufreq-epss
|
- qcom,sm8350-cpufreq-epss
|
||||||
- qcom,sm8450-cpufreq-epss
|
- qcom,sm8450-cpufreq-epss
|
||||||
- qcom,sm8550-cpufreq-epss
|
- qcom,sm8550-cpufreq-epss
|
||||||
|
- qcom,sm8650-cpufreq-epss
|
||||||
- const: qcom,cpufreq-epss
|
- const: qcom,cpufreq-epss
|
||||||
|
|
||||||
reg:
|
reg:
|
||||||
|
@ -128,6 +131,7 @@ allOf:
|
||||||
- qcom,qdu1000-cpufreq-epss
|
- qcom,qdu1000-cpufreq-epss
|
||||||
- qcom,sc7180-cpufreq-hw
|
- qcom,sc7180-cpufreq-hw
|
||||||
- qcom,sc8280xp-cpufreq-epss
|
- qcom,sc8280xp-cpufreq-epss
|
||||||
|
- qcom,sdm670-cpufreq-hw
|
||||||
- qcom,sdm845-cpufreq-hw
|
- qcom,sdm845-cpufreq-hw
|
||||||
- qcom,sm6115-cpufreq-hw
|
- qcom,sm6115-cpufreq-hw
|
||||||
- qcom,sm6350-cpufreq-hw
|
- qcom,sm6350-cpufreq-hw
|
||||||
|
|
|
@ -27,8 +27,12 @@ select:
|
||||||
enum:
|
enum:
|
||||||
- qcom,apq8064
|
- qcom,apq8064
|
||||||
- qcom,apq8096
|
- qcom,apq8096
|
||||||
|
- qcom,ipq5332
|
||||||
|
- qcom,ipq6018
|
||||||
- qcom,ipq8064
|
- qcom,ipq8064
|
||||||
- qcom,ipq8074
|
- qcom,ipq8074
|
||||||
|
- qcom,ipq9574
|
||||||
|
- qcom,msm8909
|
||||||
- qcom,msm8939
|
- qcom,msm8939
|
||||||
- qcom,msm8960
|
- qcom,msm8960
|
||||||
- qcom,msm8974
|
- qcom,msm8974
|
||||||
|
@ -43,7 +47,9 @@ patternProperties:
|
||||||
- if:
|
- if:
|
||||||
properties:
|
properties:
|
||||||
compatible:
|
compatible:
|
||||||
const: operating-points-v2-kryo-cpu
|
enum:
|
||||||
|
- operating-points-v2-krait-cpu
|
||||||
|
- operating-points-v2-kryo-cpu
|
||||||
then:
|
then:
|
||||||
$ref: /schemas/opp/opp-v2-kryo-cpu.yaml#
|
$ref: /schemas/opp/opp-v2-kryo-cpu.yaml#
|
||||||
|
|
||||||
|
|
|
@ -90,7 +90,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
|
||||||
|
|
||||||
config ARM_BRCMSTB_AVS_CPUFREQ
|
config ARM_BRCMSTB_AVS_CPUFREQ
|
||||||
tristate "Broadcom STB AVS CPUfreq driver"
|
tristate "Broadcom STB AVS CPUfreq driver"
|
||||||
depends on ARCH_BRCMSTB || COMPILE_TEST
|
depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
|
||||||
default y
|
default y
|
||||||
help
|
help
|
||||||
Some Broadcom STB SoCs use a co-processor running proprietary firmware
|
Some Broadcom STB SoCs use a co-processor running proprietary firmware
|
||||||
|
@ -124,8 +124,8 @@ config ARM_IMX_CPUFREQ_DT
|
||||||
tristate "Freescale i.MX8M cpufreq support"
|
tristate "Freescale i.MX8M cpufreq support"
|
||||||
depends on ARCH_MXC && CPUFREQ_DT
|
depends on ARCH_MXC && CPUFREQ_DT
|
||||||
help
|
help
|
||||||
This adds cpufreq driver support for Freescale i.MX8M series SoCs,
|
This adds cpufreq driver support for Freescale i.MX7/i.MX8M
|
||||||
based on cpufreq-dt.
|
series SoCs, based on cpufreq-dt.
|
||||||
|
|
||||||
If in doubt, say N.
|
If in doubt, say N.
|
||||||
|
|
||||||
|
|
|
@ -142,9 +142,11 @@ static const struct of_device_id blocklist[] __initconst = {
|
||||||
{ .compatible = "nvidia,tegra234", },
|
{ .compatible = "nvidia,tegra234", },
|
||||||
|
|
||||||
{ .compatible = "qcom,apq8096", },
|
{ .compatible = "qcom,apq8096", },
|
||||||
|
{ .compatible = "qcom,msm8909", },
|
||||||
{ .compatible = "qcom,msm8996", },
|
{ .compatible = "qcom,msm8996", },
|
||||||
{ .compatible = "qcom,msm8998", },
|
{ .compatible = "qcom,msm8998", },
|
||||||
{ .compatible = "qcom,qcm2290", },
|
{ .compatible = "qcom,qcm2290", },
|
||||||
|
{ .compatible = "qcom,qcm6490", },
|
||||||
{ .compatible = "qcom,qcs404", },
|
{ .compatible = "qcom,qcs404", },
|
||||||
{ .compatible = "qcom,qdu1000", },
|
{ .compatible = "qcom,qdu1000", },
|
||||||
{ .compatible = "qcom,sa8155p" },
|
{ .compatible = "qcom,sa8155p" },
|
||||||
|
@ -176,7 +178,9 @@ static const struct of_device_id blocklist[] __initconst = {
|
||||||
{ .compatible = "ti,omap3", },
|
{ .compatible = "ti,omap3", },
|
||||||
{ .compatible = "ti,am625", },
|
{ .compatible = "ti,am625", },
|
||||||
{ .compatible = "ti,am62a7", },
|
{ .compatible = "ti,am62a7", },
|
||||||
|
{ .compatible = "ti,am62p5", },
|
||||||
|
|
||||||
|
{ .compatible = "qcom,ipq6018", },
|
||||||
{ .compatible = "qcom,ipq8064", },
|
{ .compatible = "qcom,ipq8064", },
|
||||||
{ .compatible = "qcom,apq8064", },
|
{ .compatible = "qcom,apq8064", },
|
||||||
{ .compatible = "qcom,msm8974", },
|
{ .compatible = "qcom,msm8974", },
|
||||||
|
|
|
@ -1544,7 +1544,7 @@ static int cpufreq_online(unsigned int cpu)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Register with the energy model before
|
* Register with the energy model before
|
||||||
* sched_cpufreq_governor_change() is called, which will result
|
* sugov_eas_rebuild_sd() is called, which will result
|
||||||
* in rebuilding of the sched domains, which should only be done
|
* in rebuilding of the sched domains, which should only be done
|
||||||
* once the energy model is properly initialized for the policy
|
* once the energy model is properly initialized for the policy
|
||||||
* first.
|
* first.
|
||||||
|
@ -2652,7 +2652,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
|
||||||
ret = cpufreq_start_governor(policy);
|
ret = cpufreq_start_governor(policy);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
pr_debug("governor change\n");
|
pr_debug("governor change\n");
|
||||||
sched_cpufreq_governor_change(policy, old_gov);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
cpufreq_exit_governor(policy);
|
cpufreq_exit_governor(policy);
|
||||||
|
|
|
@ -187,8 +187,7 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
|
||||||
ret = sscanf(buf, "%u", &input);
|
ret = sscanf(buf, "%u", &input);
|
||||||
|
|
||||||
/* cannot be lower than 1 otherwise freq will not fall */
|
/* cannot be lower than 1 otherwise freq will not fall */
|
||||||
if (ret != 1 || input < 1 || input > 100 ||
|
if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
|
||||||
input >= dbs_data->up_threshold)
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
cs_tuners->down_threshold = input;
|
cs_tuners->down_threshold = input;
|
||||||
|
|
|
@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
|
||||||
len += sysfs_emit_at(buf, len, " From : To\n");
|
len += sysfs_emit_at(buf, len, " From : To\n");
|
||||||
len += sysfs_emit_at(buf, len, " : ");
|
len += sysfs_emit_at(buf, len, " : ");
|
||||||
for (i = 0; i < stats->state_num; i++) {
|
for (i = 0; i < stats->state_num; i++) {
|
||||||
if (len >= PAGE_SIZE)
|
if (len >= PAGE_SIZE - 1)
|
||||||
break;
|
break;
|
||||||
len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
|
len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
|
||||||
}
|
}
|
||||||
if (len >= PAGE_SIZE)
|
if (len >= PAGE_SIZE - 1)
|
||||||
return PAGE_SIZE;
|
return PAGE_SIZE - 1;
|
||||||
|
|
||||||
len += sysfs_emit_at(buf, len, "\n");
|
len += sysfs_emit_at(buf, len, "\n");
|
||||||
|
|
||||||
for (i = 0; i < stats->state_num; i++) {
|
for (i = 0; i < stats->state_num; i++) {
|
||||||
if (len >= PAGE_SIZE)
|
if (len >= PAGE_SIZE - 1)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
|
len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
|
||||||
|
|
||||||
for (j = 0; j < stats->state_num; j++) {
|
for (j = 0; j < stats->state_num; j++) {
|
||||||
if (len >= PAGE_SIZE)
|
if (len >= PAGE_SIZE - 1)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
if (pending)
|
if (pending)
|
||||||
|
@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
|
||||||
|
|
||||||
len += sysfs_emit_at(buf, len, "%9u ", count);
|
len += sysfs_emit_at(buf, len, "%9u ", count);
|
||||||
}
|
}
|
||||||
if (len >= PAGE_SIZE)
|
if (len >= PAGE_SIZE - 1)
|
||||||
break;
|
break;
|
||||||
len += sysfs_emit_at(buf, len, "\n");
|
len += sysfs_emit_at(buf, len, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (len >= PAGE_SIZE) {
|
if (len >= PAGE_SIZE - 1) {
|
||||||
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
|
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
|
||||||
return -EFBIG;
|
return -EFBIG;
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,8 +15,11 @@
|
||||||
#include <linux/mutex.h>
|
#include <linux/mutex.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
|
|
||||||
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
|
struct userspace_policy {
|
||||||
static DEFINE_MUTEX(userspace_mutex);
|
unsigned int is_managed;
|
||||||
|
unsigned int setspeed;
|
||||||
|
struct mutex mutex;
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cpufreq_set - set the CPU frequency
|
* cpufreq_set - set the CPU frequency
|
||||||
|
@ -28,19 +31,19 @@ static DEFINE_MUTEX(userspace_mutex);
|
||||||
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
|
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
|
||||||
{
|
{
|
||||||
int ret = -EINVAL;
|
int ret = -EINVAL;
|
||||||
unsigned int *setspeed = policy->governor_data;
|
struct userspace_policy *userspace = policy->governor_data;
|
||||||
|
|
||||||
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
|
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
|
||||||
|
|
||||||
mutex_lock(&userspace_mutex);
|
mutex_lock(&userspace->mutex);
|
||||||
if (!per_cpu(cpu_is_managed, policy->cpu))
|
if (!userspace->is_managed)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
*setspeed = freq;
|
userspace->setspeed = freq;
|
||||||
|
|
||||||
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
|
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
|
||||||
err:
|
err:
|
||||||
mutex_unlock(&userspace_mutex);
|
mutex_unlock(&userspace->mutex);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -51,67 +54,74 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
|
||||||
|
|
||||||
static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
|
static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
unsigned int *setspeed;
|
struct userspace_policy *userspace;
|
||||||
|
|
||||||
setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
|
userspace = kzalloc(sizeof(*userspace), GFP_KERNEL);
|
||||||
if (!setspeed)
|
if (!userspace)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
policy->governor_data = setspeed;
|
mutex_init(&userspace->mutex);
|
||||||
|
|
||||||
|
policy->governor_data = userspace;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Any routine that writes to the policy struct will hold the "rwsem" of
|
||||||
|
* policy struct that means it is free to free "governor_data" here.
|
||||||
|
*/
|
||||||
static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
|
static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
mutex_lock(&userspace_mutex);
|
|
||||||
kfree(policy->governor_data);
|
kfree(policy->governor_data);
|
||||||
policy->governor_data = NULL;
|
policy->governor_data = NULL;
|
||||||
mutex_unlock(&userspace_mutex);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
|
static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
unsigned int *setspeed = policy->governor_data;
|
struct userspace_policy *userspace = policy->governor_data;
|
||||||
|
|
||||||
BUG_ON(!policy->cur);
|
BUG_ON(!policy->cur);
|
||||||
pr_debug("started managing cpu %u\n", policy->cpu);
|
pr_debug("started managing cpu %u\n", policy->cpu);
|
||||||
|
|
||||||
mutex_lock(&userspace_mutex);
|
mutex_lock(&userspace->mutex);
|
||||||
per_cpu(cpu_is_managed, policy->cpu) = 1;
|
userspace->is_managed = 1;
|
||||||
*setspeed = policy->cur;
|
userspace->setspeed = policy->cur;
|
||||||
mutex_unlock(&userspace_mutex);
|
mutex_unlock(&userspace->mutex);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
|
static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
unsigned int *setspeed = policy->governor_data;
|
struct userspace_policy *userspace = policy->governor_data;
|
||||||
|
|
||||||
pr_debug("managing cpu %u stopped\n", policy->cpu);
|
pr_debug("managing cpu %u stopped\n", policy->cpu);
|
||||||
|
|
||||||
mutex_lock(&userspace_mutex);
|
mutex_lock(&userspace->mutex);
|
||||||
per_cpu(cpu_is_managed, policy->cpu) = 0;
|
userspace->is_managed = 0;
|
||||||
*setspeed = 0;
|
userspace->setspeed = 0;
|
||||||
mutex_unlock(&userspace_mutex);
|
mutex_unlock(&userspace->mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
|
static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
unsigned int *setspeed = policy->governor_data;
|
struct userspace_policy *userspace = policy->governor_data;
|
||||||
|
|
||||||
mutex_lock(&userspace_mutex);
|
mutex_lock(&userspace->mutex);
|
||||||
|
|
||||||
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
|
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
|
||||||
policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
|
policy->cpu, policy->min, policy->max, policy->cur, userspace->setspeed);
|
||||||
|
|
||||||
if (policy->max < *setspeed)
|
if (policy->max < userspace->setspeed)
|
||||||
__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
|
__cpufreq_driver_target(policy, policy->max,
|
||||||
else if (policy->min > *setspeed)
|
CPUFREQ_RELATION_H);
|
||||||
__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
|
else if (policy->min > userspace->setspeed)
|
||||||
|
__cpufreq_driver_target(policy, policy->min,
|
||||||
|
CPUFREQ_RELATION_L);
|
||||||
else
|
else
|
||||||
__cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
|
__cpufreq_driver_target(policy, userspace->setspeed,
|
||||||
|
CPUFREQ_RELATION_L);
|
||||||
|
|
||||||
mutex_unlock(&userspace_mutex);
|
mutex_unlock(&userspace->mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct cpufreq_governor cpufreq_gov_userspace = {
|
static struct cpufreq_governor cpufreq_gov_userspace = {
|
||||||
|
|
|
@ -571,13 +571,9 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
|
||||||
static inline void update_turbo_state(void)
|
static inline void update_turbo_state(void)
|
||||||
{
|
{
|
||||||
u64 misc_en;
|
u64 misc_en;
|
||||||
struct cpudata *cpu;
|
|
||||||
|
|
||||||
cpu = all_cpu_data[0];
|
|
||||||
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
|
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
|
||||||
global.turbo_disabled =
|
global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
|
||||||
(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
|
|
||||||
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int min_perf_pct_min(void)
|
static int min_perf_pct_min(void)
|
||||||
|
|
|
@ -24,6 +24,7 @@
|
||||||
#include <linux/device.h>
|
#include <linux/device.h>
|
||||||
#include <linux/hardirq.h>
|
#include <linux/hardirq.h>
|
||||||
#include <linux/of.h>
|
#include <linux/of.h>
|
||||||
|
#include <linux/of_address.h>
|
||||||
|
|
||||||
#include <asm/machdep.h>
|
#include <asm/machdep.h>
|
||||||
#include <asm/irq.h>
|
#include <asm/irq.h>
|
||||||
|
@ -378,10 +379,9 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||||
|
|
||||||
static u32 read_gpio(struct device_node *np)
|
static u32 read_gpio(struct device_node *np)
|
||||||
{
|
{
|
||||||
const u32 *reg = of_get_property(np, "reg", NULL);
|
u64 offset;
|
||||||
u32 offset;
|
|
||||||
|
|
||||||
if (reg == NULL)
|
if (of_property_read_reg(np, 0, &offset, NULL) < 0)
|
||||||
return 0;
|
return 0;
|
||||||
/* That works for all keylargos but shall be fixed properly
|
/* That works for all keylargos but shall be fixed properly
|
||||||
* some day... The problem is that it seems we can't rely
|
* some day... The problem is that it seems we can't rely
|
||||||
|
@ -389,7 +389,6 @@ static u32 read_gpio(struct device_node *np)
|
||||||
* relative to the base of KeyLargo or to the base of the
|
* relative to the base of KeyLargo or to the base of the
|
||||||
* GPIO space, and the device-tree doesn't help.
|
* GPIO space, and the device-tree doesn't help.
|
||||||
*/
|
*/
|
||||||
offset = *reg;
|
|
||||||
if (offset < KEYLARGO_GPIO_LEVELS0)
|
if (offset < KEYLARGO_GPIO_LEVELS0)
|
||||||
offset += KEYLARGO_GPIO_LEVELS0;
|
offset += KEYLARGO_GPIO_LEVELS0;
|
||||||
return offset;
|
return offset;
|
||||||
|
|
|
@ -30,6 +30,14 @@
|
||||||
|
|
||||||
#include <dt-bindings/arm/qcom,ids.h>
|
#include <dt-bindings/arm/qcom,ids.h>
|
||||||
|
|
||||||
|
enum ipq806x_versions {
|
||||||
|
IPQ8062_VERSION = 0,
|
||||||
|
IPQ8064_VERSION,
|
||||||
|
IPQ8065_VERSION,
|
||||||
|
};
|
||||||
|
|
||||||
|
#define IPQ6000_VERSION BIT(2)
|
||||||
|
|
||||||
struct qcom_cpufreq_drv;
|
struct qcom_cpufreq_drv;
|
||||||
|
|
||||||
struct qcom_cpufreq_match_data {
|
struct qcom_cpufreq_match_data {
|
||||||
|
@ -40,16 +48,38 @@ struct qcom_cpufreq_match_data {
|
||||||
const char **genpd_names;
|
const char **genpd_names;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct qcom_cpufreq_drv_cpu {
|
||||||
|
int opp_token;
|
||||||
|
};
|
||||||
|
|
||||||
struct qcom_cpufreq_drv {
|
struct qcom_cpufreq_drv {
|
||||||
int *opp_tokens;
|
|
||||||
u32 versions;
|
u32 versions;
|
||||||
const struct qcom_cpufreq_match_data *data;
|
const struct qcom_cpufreq_match_data *data;
|
||||||
|
struct qcom_cpufreq_drv_cpu cpus[];
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
|
static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
|
||||||
|
|
||||||
|
static int qcom_cpufreq_simple_get_version(struct device *cpu_dev,
|
||||||
|
struct nvmem_cell *speedbin_nvmem,
|
||||||
|
char **pvs_name,
|
||||||
|
struct qcom_cpufreq_drv *drv)
|
||||||
|
{
|
||||||
|
u8 *speedbin;
|
||||||
|
|
||||||
|
*pvs_name = NULL;
|
||||||
|
speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
|
||||||
|
if (IS_ERR(speedbin))
|
||||||
|
return PTR_ERR(speedbin);
|
||||||
|
|
||||||
|
dev_dbg(cpu_dev, "speedbin: %d\n", *speedbin);
|
||||||
|
drv->versions = 1 << *speedbin;
|
||||||
|
kfree(speedbin);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static void get_krait_bin_format_a(struct device *cpu_dev,
|
static void get_krait_bin_format_a(struct device *cpu_dev,
|
||||||
int *speed, int *pvs, int *pvs_ver,
|
int *speed, int *pvs,
|
||||||
u8 *buf)
|
u8 *buf)
|
||||||
{
|
{
|
||||||
u32 pte_efuse;
|
u32 pte_efuse;
|
||||||
|
@ -180,8 +210,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
|
||||||
|
|
||||||
switch (len) {
|
switch (len) {
|
||||||
case 4:
|
case 4:
|
||||||
get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
|
get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
|
||||||
speedbin);
|
|
||||||
break;
|
break;
|
||||||
case 8:
|
case 8:
|
||||||
get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
|
get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
|
||||||
|
@ -203,6 +232,114 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
|
||||||
|
struct nvmem_cell *speedbin_nvmem,
|
||||||
|
char **pvs_name,
|
||||||
|
struct qcom_cpufreq_drv *drv)
|
||||||
|
{
|
||||||
|
int speed = 0, pvs = 0;
|
||||||
|
int msm_id, ret = 0;
|
||||||
|
u8 *speedbin;
|
||||||
|
size_t len;
|
||||||
|
|
||||||
|
speedbin = nvmem_cell_read(speedbin_nvmem, &len);
|
||||||
|
if (IS_ERR(speedbin))
|
||||||
|
return PTR_ERR(speedbin);
|
||||||
|
|
||||||
|
if (len != 4) {
|
||||||
|
dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
|
||||||
|
ret = -ENODEV;
|
||||||
|
goto exit;
|
||||||
|
}
|
||||||
|
|
||||||
|
get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
|
||||||
|
|
||||||
|
ret = qcom_smem_get_soc_id(&msm_id);
|
||||||
|
if (ret)
|
||||||
|
goto exit;
|
||||||
|
|
||||||
|
switch (msm_id) {
|
||||||
|
case QCOM_ID_IPQ8062:
|
||||||
|
drv->versions = BIT(IPQ8062_VERSION);
|
||||||
|
break;
|
||||||
|
case QCOM_ID_IPQ8064:
|
||||||
|
case QCOM_ID_IPQ8066:
|
||||||
|
case QCOM_ID_IPQ8068:
|
||||||
|
drv->versions = BIT(IPQ8064_VERSION);
|
||||||
|
break;
|
||||||
|
case QCOM_ID_IPQ8065:
|
||||||
|
case QCOM_ID_IPQ8069:
|
||||||
|
drv->versions = BIT(IPQ8065_VERSION);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
dev_err(cpu_dev,
|
||||||
|
"SoC ID %u is not part of IPQ8064 family, limiting to 1.0GHz!\n",
|
||||||
|
msm_id);
|
||||||
|
drv->versions = BIT(IPQ8062_VERSION);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* IPQ8064 speed is never fused. Only pvs values are fused. */
|
||||||
|
snprintf(*pvs_name, sizeof("speed0-pvsXX"), "speed0-pvs%d", pvs);
|
||||||
|
|
||||||
|
exit:
|
||||||
|
kfree(speedbin);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int qcom_cpufreq_ipq6018_name_version(struct device *cpu_dev,
|
||||||
|
struct nvmem_cell *speedbin_nvmem,
|
||||||
|
char **pvs_name,
|
||||||
|
struct qcom_cpufreq_drv *drv)
|
||||||
|
{
|
||||||
|
u32 msm_id;
|
||||||
|
int ret;
|
||||||
|
u8 *speedbin;
|
||||||
|
*pvs_name = NULL;
|
||||||
|
|
||||||
|
ret = qcom_smem_get_soc_id(&msm_id);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
|
||||||
|
if (IS_ERR(speedbin))
|
||||||
|
return PTR_ERR(speedbin);
|
||||||
|
|
||||||
|
switch (msm_id) {
|
||||||
|
case QCOM_ID_IPQ6005:
|
||||||
|
case QCOM_ID_IPQ6010:
|
||||||
|
case QCOM_ID_IPQ6018:
|
||||||
|
case QCOM_ID_IPQ6028:
|
||||||
|
/* Fuse Value Freq BIT to set
|
||||||
|
* ---------------------------------
|
||||||
|
* 2’b0 No Limit BIT(0)
|
||||||
|
* 2’b1 1.5 GHz BIT(1)
|
||||||
|
*/
|
||||||
|
drv->versions = 1 << (unsigned int)(*speedbin);
|
||||||
|
break;
|
||||||
|
case QCOM_ID_IPQ6000:
|
||||||
|
/*
|
||||||
|
* IPQ6018 family only has one bit to advertise the CPU
|
||||||
|
* speed-bin, but that is not enough for IPQ6000 which
|
||||||
|
* is only rated up to 1.2GHz.
|
||||||
|
* So for IPQ6000 manually set BIT(2) based on SMEM ID.
|
||||||
|
*/
|
||||||
|
drv->versions = IPQ6000_VERSION;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
dev_err(cpu_dev,
|
||||||
|
"SoC ID %u is not part of IPQ6018 family, limiting to 1.2GHz!\n",
|
||||||
|
msm_id);
|
||||||
|
drv->versions = IPQ6000_VERSION;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
kfree(speedbin);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static const char *generic_genpd_names[] = { "perf", NULL };
|
||||||
|
|
||||||
static const struct qcom_cpufreq_match_data match_data_kryo = {
|
static const struct qcom_cpufreq_match_data match_data_kryo = {
|
||||||
.get_version = qcom_cpufreq_kryo_name_version,
|
.get_version = qcom_cpufreq_kryo_name_version,
|
||||||
};
|
};
|
||||||
|
@ -211,12 +348,25 @@ static const struct qcom_cpufreq_match_data match_data_krait = {
|
||||||
.get_version = qcom_cpufreq_krait_name_version,
|
.get_version = qcom_cpufreq_krait_name_version,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const struct qcom_cpufreq_match_data match_data_msm8909 = {
|
||||||
|
.get_version = qcom_cpufreq_simple_get_version,
|
||||||
|
.genpd_names = generic_genpd_names,
|
||||||
|
};
|
||||||
|
|
||||||
static const char *qcs404_genpd_names[] = { "cpr", NULL };
|
static const char *qcs404_genpd_names[] = { "cpr", NULL };
|
||||||
|
|
||||||
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
|
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
|
||||||
.genpd_names = qcs404_genpd_names,
|
.genpd_names = qcs404_genpd_names,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const struct qcom_cpufreq_match_data match_data_ipq6018 = {
|
||||||
|
.get_version = qcom_cpufreq_ipq6018_name_version,
|
||||||
|
};
|
||||||
|
|
||||||
|
static const struct qcom_cpufreq_match_data match_data_ipq8064 = {
|
||||||
|
.get_version = qcom_cpufreq_ipq8064_name_version,
|
||||||
|
};
|
||||||
|
|
||||||
static int qcom_cpufreq_probe(struct platform_device *pdev)
|
static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||||
{
|
{
|
||||||
struct qcom_cpufreq_drv *drv;
|
struct qcom_cpufreq_drv *drv;
|
||||||
|
@ -237,48 +387,39 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||||
if (!np)
|
if (!np)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
|
ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") ||
|
||||||
|
of_device_is_compatible(np, "operating-points-v2-krait-cpu");
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
of_node_put(np);
|
of_node_put(np);
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
}
|
}
|
||||||
|
|
||||||
drv = kzalloc(sizeof(*drv), GFP_KERNEL);
|
drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
|
||||||
|
GFP_KERNEL);
|
||||||
if (!drv)
|
if (!drv)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
match = pdev->dev.platform_data;
|
match = pdev->dev.platform_data;
|
||||||
drv->data = match->data;
|
drv->data = match->data;
|
||||||
if (!drv->data) {
|
if (!drv->data)
|
||||||
ret = -ENODEV;
|
return -ENODEV;
|
||||||
goto free_drv;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (drv->data->get_version) {
|
if (drv->data->get_version) {
|
||||||
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
|
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
|
||||||
if (IS_ERR(speedbin_nvmem)) {
|
if (IS_ERR(speedbin_nvmem))
|
||||||
ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
|
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
|
||||||
"Could not get nvmem cell\n");
|
"Could not get nvmem cell\n");
|
||||||
goto free_drv;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = drv->data->get_version(cpu_dev,
|
ret = drv->data->get_version(cpu_dev,
|
||||||
speedbin_nvmem, &pvs_name, drv);
|
speedbin_nvmem, &pvs_name, drv);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
nvmem_cell_put(speedbin_nvmem);
|
nvmem_cell_put(speedbin_nvmem);
|
||||||
goto free_drv;
|
return ret;
|
||||||
}
|
}
|
||||||
nvmem_cell_put(speedbin_nvmem);
|
nvmem_cell_put(speedbin_nvmem);
|
||||||
}
|
}
|
||||||
of_node_put(np);
|
of_node_put(np);
|
||||||
|
|
||||||
drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
|
|
||||||
GFP_KERNEL);
|
|
||||||
if (!drv->opp_tokens) {
|
|
||||||
ret = -ENOMEM;
|
|
||||||
goto free_drv;
|
|
||||||
}
|
|
||||||
|
|
||||||
for_each_possible_cpu(cpu) {
|
for_each_possible_cpu(cpu) {
|
||||||
struct dev_pm_opp_config config = {
|
struct dev_pm_opp_config config = {
|
||||||
.supported_hw = NULL,
|
.supported_hw = NULL,
|
||||||
|
@ -304,9 +445,9 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.supported_hw || config.genpd_names) {
|
if (config.supported_hw || config.genpd_names) {
|
||||||
drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
|
drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
|
||||||
if (drv->opp_tokens[cpu] < 0) {
|
if (drv->cpus[cpu].opp_token < 0) {
|
||||||
ret = drv->opp_tokens[cpu];
|
ret = drv->cpus[cpu].opp_token;
|
||||||
dev_err(cpu_dev, "Failed to set OPP config\n");
|
dev_err(cpu_dev, "Failed to set OPP config\n");
|
||||||
goto free_opp;
|
goto free_opp;
|
||||||
}
|
}
|
||||||
|
@ -325,11 +466,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||||
|
|
||||||
free_opp:
|
free_opp:
|
||||||
for_each_possible_cpu(cpu)
|
for_each_possible_cpu(cpu)
|
||||||
dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
|
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
|
||||||
kfree(drv->opp_tokens);
|
|
||||||
free_drv:
|
|
||||||
kfree(drv);
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -341,10 +478,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
|
||||||
platform_device_unregister(cpufreq_dt_pdev);
|
platform_device_unregister(cpufreq_dt_pdev);
|
||||||
|
|
||||||
for_each_possible_cpu(cpu)
|
for_each_possible_cpu(cpu)
|
||||||
dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
|
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
|
||||||
|
|
||||||
kfree(drv->opp_tokens);
|
|
||||||
kfree(drv);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct platform_driver qcom_cpufreq_driver = {
|
static struct platform_driver qcom_cpufreq_driver = {
|
||||||
|
@ -357,9 +491,11 @@ static struct platform_driver qcom_cpufreq_driver = {
|
||||||
|
|
||||||
static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
|
static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
|
||||||
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
|
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
|
||||||
|
{ .compatible = "qcom,msm8909", .data = &match_data_msm8909 },
|
||||||
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
|
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
|
||||||
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
|
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
|
||||||
{ .compatible = "qcom,ipq8064", .data = &match_data_krait },
|
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
|
||||||
|
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
|
||||||
{ .compatible = "qcom,apq8064", .data = &match_data_krait },
|
{ .compatible = "qcom,apq8064", .data = &match_data_krait },
|
||||||
{ .compatible = "qcom,msm8974", .data = &match_data_krait },
|
{ .compatible = "qcom,msm8974", .data = &match_data_krait },
|
||||||
{ .compatible = "qcom,msm8960", .data = &match_data_krait },
|
{ .compatible = "qcom,msm8960", .data = &match_data_krait },
|
||||||
|
|
|
@ -5,7 +5,6 @@
|
||||||
|
|
||||||
#include <linux/cpu.h>
|
#include <linux/cpu.h>
|
||||||
#include <linux/cpufreq.h>
|
#include <linux/cpufreq.h>
|
||||||
#include <linux/delay.h>
|
|
||||||
#include <linux/dma-mapping.h>
|
#include <linux/dma-mapping.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/of.h>
|
#include <linux/of.h>
|
||||||
|
@ -21,10 +20,11 @@
|
||||||
|
|
||||||
#define KHZ 1000
|
#define KHZ 1000
|
||||||
#define REF_CLK_MHZ 408 /* 408 MHz */
|
#define REF_CLK_MHZ 408 /* 408 MHz */
|
||||||
#define US_DELAY 500
|
|
||||||
#define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ)
|
#define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ)
|
||||||
#define MAX_CNT ~0U
|
#define MAX_CNT ~0U
|
||||||
|
|
||||||
|
#define MAX_DELTA_KHZ 115200
|
||||||
|
|
||||||
#define NDIV_MASK 0x1FF
|
#define NDIV_MASK 0x1FF
|
||||||
|
|
||||||
#define CORE_OFFSET(cpu) (cpu * 8)
|
#define CORE_OFFSET(cpu) (cpu * 8)
|
||||||
|
@ -39,6 +39,12 @@
|
||||||
/* cpufreq transisition latency */
|
/* cpufreq transisition latency */
|
||||||
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
|
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
|
||||||
|
|
||||||
|
struct tegra_cpu_data {
|
||||||
|
u32 cpuid;
|
||||||
|
u32 clusterid;
|
||||||
|
void __iomem *freq_core_reg;
|
||||||
|
};
|
||||||
|
|
||||||
struct tegra_cpu_ctr {
|
struct tegra_cpu_ctr {
|
||||||
u32 cpu;
|
u32 cpu;
|
||||||
u32 coreclk_cnt, last_coreclk_cnt;
|
u32 coreclk_cnt, last_coreclk_cnt;
|
||||||
|
@ -62,6 +68,7 @@ struct tegra_cpufreq_soc {
|
||||||
int maxcpus_per_cluster;
|
int maxcpus_per_cluster;
|
||||||
unsigned int num_clusters;
|
unsigned int num_clusters;
|
||||||
phys_addr_t actmon_cntr_base;
|
phys_addr_t actmon_cntr_base;
|
||||||
|
u32 refclk_delta_min;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct tegra194_cpufreq_data {
|
struct tegra194_cpufreq_data {
|
||||||
|
@ -69,6 +76,7 @@ struct tegra194_cpufreq_data {
|
||||||
struct cpufreq_frequency_table **bpmp_luts;
|
struct cpufreq_frequency_table **bpmp_luts;
|
||||||
const struct tegra_cpufreq_soc *soc;
|
const struct tegra_cpufreq_soc *soc;
|
||||||
bool icc_dram_bw_scaling;
|
bool icc_dram_bw_scaling;
|
||||||
|
struct tegra_cpu_data *cpu_data;
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct workqueue_struct *read_counters_wq;
|
static struct workqueue_struct *read_counters_wq;
|
||||||
|
@ -116,14 +124,8 @@ static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
|
||||||
static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
|
static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
|
||||||
{
|
{
|
||||||
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
||||||
void __iomem *freq_core_reg;
|
|
||||||
u64 mpidr_id;
|
|
||||||
|
|
||||||
/* use physical id to get address of per core frequency register */
|
*ndiv = readl(data->cpu_data[cpu].freq_core_reg) & NDIV_MASK;
|
||||||
mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
|
|
||||||
freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
|
|
||||||
|
|
||||||
*ndiv = readl(freq_core_reg) & NDIV_MASK;
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -131,19 +133,10 @@ static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
|
||||||
static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
|
static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
|
||||||
{
|
{
|
||||||
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
||||||
void __iomem *freq_core_reg;
|
u32 cpu;
|
||||||
u32 cpu, cpuid, clusterid;
|
|
||||||
u64 mpidr_id;
|
|
||||||
|
|
||||||
for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
|
for_each_cpu(cpu, policy->cpus)
|
||||||
data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
|
writel(ndiv, data->cpu_data[cpu].freq_core_reg);
|
||||||
|
|
||||||
/* use physical id to get address of per core frequency register */
|
|
||||||
mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
|
|
||||||
freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
|
|
||||||
|
|
||||||
writel(ndiv, freq_core_reg);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -157,19 +150,35 @@ static void tegra234_read_counters(struct tegra_cpu_ctr *c)
|
||||||
{
|
{
|
||||||
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
||||||
void __iomem *actmon_reg;
|
void __iomem *actmon_reg;
|
||||||
u32 cpuid, clusterid;
|
u32 delta_refcnt;
|
||||||
|
int cnt = 0;
|
||||||
u64 val;
|
u64 val;
|
||||||
|
|
||||||
data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
|
actmon_reg = CORE_ACTMON_CNTR_REG(data, data->cpu_data[c->cpu].clusterid,
|
||||||
actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);
|
data->cpu_data[c->cpu].cpuid);
|
||||||
|
|
||||||
val = readq(actmon_reg);
|
val = readq(actmon_reg);
|
||||||
c->last_refclk_cnt = upper_32_bits(val);
|
c->last_refclk_cnt = upper_32_bits(val);
|
||||||
c->last_coreclk_cnt = lower_32_bits(val);
|
c->last_coreclk_cnt = lower_32_bits(val);
|
||||||
udelay(US_DELAY);
|
|
||||||
val = readq(actmon_reg);
|
/*
|
||||||
c->refclk_cnt = upper_32_bits(val);
|
* The sampling window is based on the minimum number of reference
|
||||||
c->coreclk_cnt = lower_32_bits(val);
|
* clock cycles which is known to give a stable value of CPU frequency.
|
||||||
|
*/
|
||||||
|
do {
|
||||||
|
val = readq(actmon_reg);
|
||||||
|
c->refclk_cnt = upper_32_bits(val);
|
||||||
|
c->coreclk_cnt = lower_32_bits(val);
|
||||||
|
if (c->refclk_cnt < c->last_refclk_cnt)
|
||||||
|
delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt);
|
||||||
|
else
|
||||||
|
delta_refcnt = c->refclk_cnt - c->last_refclk_cnt;
|
||||||
|
if (++cnt >= 0xFFFF) {
|
||||||
|
pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n",
|
||||||
|
c->cpu, delta_refcnt, cnt);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} while (delta_refcnt < data->soc->refclk_delta_min);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
|
static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
|
||||||
|
@ -184,6 +193,7 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
|
||||||
.actmon_cntr_base = 0x9000,
|
.actmon_cntr_base = 0x9000,
|
||||||
.maxcpus_per_cluster = 4,
|
.maxcpus_per_cluster = 4,
|
||||||
.num_clusters = 3,
|
.num_clusters = 3,
|
||||||
|
.refclk_delta_min = 16000,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
|
static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
|
||||||
|
@ -191,6 +201,7 @@ static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
|
||||||
.actmon_cntr_base = 0x4000,
|
.actmon_cntr_base = 0x4000,
|
||||||
.maxcpus_per_cluster = 8,
|
.maxcpus_per_cluster = 8,
|
||||||
.num_clusters = 1,
|
.num_clusters = 1,
|
||||||
|
.refclk_delta_min = 16000,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
|
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
|
||||||
|
@ -231,15 +242,33 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
|
||||||
|
|
||||||
static void tegra194_read_counters(struct tegra_cpu_ctr *c)
|
static void tegra194_read_counters(struct tegra_cpu_ctr *c)
|
||||||
{
|
{
|
||||||
|
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
||||||
|
u32 delta_refcnt;
|
||||||
|
int cnt = 0;
|
||||||
u64 val;
|
u64 val;
|
||||||
|
|
||||||
val = read_freq_feedback();
|
val = read_freq_feedback();
|
||||||
c->last_refclk_cnt = lower_32_bits(val);
|
c->last_refclk_cnt = lower_32_bits(val);
|
||||||
c->last_coreclk_cnt = upper_32_bits(val);
|
c->last_coreclk_cnt = upper_32_bits(val);
|
||||||
udelay(US_DELAY);
|
|
||||||
val = read_freq_feedback();
|
/*
|
||||||
c->refclk_cnt = lower_32_bits(val);
|
* The sampling window is based on the minimum number of reference
|
||||||
c->coreclk_cnt = upper_32_bits(val);
|
* clock cycles which is known to give a stable value of CPU frequency.
|
||||||
|
*/
|
||||||
|
do {
|
||||||
|
val = read_freq_feedback();
|
||||||
|
c->refclk_cnt = lower_32_bits(val);
|
||||||
|
c->coreclk_cnt = upper_32_bits(val);
|
||||||
|
if (c->refclk_cnt < c->last_refclk_cnt)
|
||||||
|
delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt);
|
||||||
|
else
|
||||||
|
delta_refcnt = c->refclk_cnt - c->last_refclk_cnt;
|
||||||
|
if (++cnt >= 0xFFFF) {
|
||||||
|
pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n",
|
||||||
|
c->cpu, delta_refcnt, cnt);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} while (delta_refcnt < data->soc->refclk_delta_min);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void tegra_read_counters(struct work_struct *work)
|
static void tegra_read_counters(struct work_struct *work)
|
||||||
|
@ -297,9 +326,8 @@ static unsigned int tegra194_calculate_speed(u32 cpu)
|
||||||
u32 rate_mhz;
|
u32 rate_mhz;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* udelay() is required to reconstruct cpu frequency over an
|
* Reconstruct cpu frequency over an observation/sampling window.
|
||||||
* observation window. Using workqueue to call udelay() with
|
* Using workqueue to keep interrupts enabled during the interval.
|
||||||
* interrupts enabled.
|
|
||||||
*/
|
*/
|
||||||
read_counters_work.c.cpu = cpu;
|
read_counters_work.c.cpu = cpu;
|
||||||
INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
|
INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
|
||||||
|
@ -357,19 +385,17 @@ static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
|
||||||
static unsigned int tegra194_get_speed(u32 cpu)
|
static unsigned int tegra194_get_speed(u32 cpu)
|
||||||
{
|
{
|
||||||
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
||||||
|
u32 clusterid = data->cpu_data[cpu].clusterid;
|
||||||
struct cpufreq_frequency_table *pos;
|
struct cpufreq_frequency_table *pos;
|
||||||
u32 cpuid, clusterid;
|
|
||||||
unsigned int rate;
|
unsigned int rate;
|
||||||
u64 ndiv;
|
u64 ndiv;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
|
|
||||||
|
|
||||||
/* reconstruct actual cpu freq using counters */
|
/* reconstruct actual cpu freq using counters */
|
||||||
rate = tegra194_calculate_speed(cpu);
|
rate = tegra194_calculate_speed(cpu);
|
||||||
|
|
||||||
/* get last written ndiv value */
|
/* get last written ndiv value */
|
||||||
ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
|
ret = data->soc->ops->get_cpu_ndiv(cpu, data->cpu_data[cpu].cpuid, clusterid, &ndiv);
|
||||||
if (WARN_ON_ONCE(ret))
|
if (WARN_ON_ONCE(ret))
|
||||||
return rate;
|
return rate;
|
||||||
|
|
||||||
|
@ -383,9 +409,9 @@ static unsigned int tegra194_get_speed(u32 cpu)
|
||||||
if (pos->driver_data != ndiv)
|
if (pos->driver_data != ndiv)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (abs(pos->frequency - rate) > 115200) {
|
if (abs(pos->frequency - rate) > MAX_DELTA_KHZ) {
|
||||||
pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n",
|
pr_warn("cpufreq: cpu%d,cur:%u,set:%u,delta:%d,set ndiv:%llu\n",
|
||||||
cpu, rate, pos->frequency, ndiv);
|
cpu, rate, pos->frequency, abs(rate - pos->frequency), ndiv);
|
||||||
} else {
|
} else {
|
||||||
rate = pos->frequency;
|
rate = pos->frequency;
|
||||||
}
|
}
|
||||||
|
@ -450,6 +476,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
|
||||||
if (IS_ERR(opp))
|
if (IS_ERR(opp))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
dev_pm_opp_put(opp);
|
||||||
|
|
||||||
ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
|
ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -473,13 +501,12 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
|
||||||
int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
|
int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
|
||||||
|
u32 clusterid = data->cpu_data[policy->cpu].clusterid;
|
||||||
struct cpufreq_frequency_table *freq_table;
|
struct cpufreq_frequency_table *freq_table;
|
||||||
struct cpufreq_frequency_table *bpmp_lut;
|
struct cpufreq_frequency_table *bpmp_lut;
|
||||||
u32 start_cpu, cpu;
|
u32 start_cpu, cpu;
|
||||||
u32 clusterid;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
|
|
||||||
if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid])
|
if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid])
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
@ -578,6 +605,7 @@ static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
|
||||||
.ops = &tegra194_cpufreq_ops,
|
.ops = &tegra194_cpufreq_ops,
|
||||||
.maxcpus_per_cluster = 2,
|
.maxcpus_per_cluster = 2,
|
||||||
.num_clusters = 4,
|
.num_clusters = 4,
|
||||||
|
.refclk_delta_min = 16000,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void tegra194_cpufreq_free_resources(void)
|
static void tegra194_cpufreq_free_resources(void)
|
||||||
|
@ -657,6 +685,28 @@ tegra_cpufreq_bpmp_read_lut(struct platform_device *pdev, struct tegra_bpmp *bpm
|
||||||
return freq_table;
|
return freq_table;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int tegra194_cpufreq_store_physids(unsigned int cpu, struct tegra194_cpufreq_data *data)
|
||||||
|
{
|
||||||
|
int num_cpus = data->soc->maxcpus_per_cluster * data->soc->num_clusters;
|
||||||
|
u32 cpuid, clusterid;
|
||||||
|
u64 mpidr_id;
|
||||||
|
|
||||||
|
if (cpu > (num_cpus - 1)) {
|
||||||
|
pr_err("cpufreq: wrong num of cpus or clusters in soc data\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
|
||||||
|
|
||||||
|
mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
|
||||||
|
|
||||||
|
data->cpu_data[cpu].cpuid = cpuid;
|
||||||
|
data->cpu_data[cpu].clusterid = clusterid;
|
||||||
|
data->cpu_data[cpu].freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int tegra194_cpufreq_probe(struct platform_device *pdev)
|
static int tegra194_cpufreq_probe(struct platform_device *pdev)
|
||||||
{
|
{
|
||||||
const struct tegra_cpufreq_soc *soc;
|
const struct tegra_cpufreq_soc *soc;
|
||||||
|
@ -664,6 +714,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
|
||||||
struct tegra_bpmp *bpmp;
|
struct tegra_bpmp *bpmp;
|
||||||
struct device *cpu_dev;
|
struct device *cpu_dev;
|
||||||
int err, i;
|
int err, i;
|
||||||
|
u32 cpu;
|
||||||
|
|
||||||
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
|
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
|
||||||
if (!data)
|
if (!data)
|
||||||
|
@ -671,7 +722,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
|
||||||
|
|
||||||
soc = of_device_get_match_data(&pdev->dev);
|
soc = of_device_get_match_data(&pdev->dev);
|
||||||
|
|
||||||
if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
|
if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters && soc->refclk_delta_min) {
|
||||||
data->soc = soc;
|
data->soc = soc;
|
||||||
} else {
|
} else {
|
||||||
dev_err(&pdev->dev, "soc data missing\n");
|
dev_err(&pdev->dev, "soc data missing\n");
|
||||||
|
@ -690,6 +741,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
|
||||||
return PTR_ERR(data->regs);
|
return PTR_ERR(data->regs);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
data->cpu_data = devm_kcalloc(&pdev->dev, data->soc->num_clusters *
|
||||||
|
data->soc->maxcpus_per_cluster,
|
||||||
|
sizeof(*data->cpu_data), GFP_KERNEL);
|
||||||
|
if (!data->cpu_data)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
platform_set_drvdata(pdev, data);
|
platform_set_drvdata(pdev, data);
|
||||||
|
|
||||||
bpmp = tegra_bpmp_get(&pdev->dev);
|
bpmp = tegra_bpmp_get(&pdev->dev);
|
||||||
|
@ -711,6 +768,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for_each_possible_cpu(cpu) {
|
||||||
|
err = tegra194_cpufreq_store_physids(cpu, data);
|
||||||
|
if (err)
|
||||||
|
goto err_free_res;
|
||||||
|
}
|
||||||
|
|
||||||
tegra194_cpufreq_driver.driver_data = data;
|
tegra194_cpufreq_driver.driver_data = data;
|
||||||
|
|
||||||
/* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
|
/* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
|
||||||
|
|
|
@ -338,6 +338,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
|
||||||
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
|
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
|
||||||
{ .compatible = "ti,am625", .data = &am625_soc_data, },
|
{ .compatible = "ti,am625", .data = &am625_soc_data, },
|
||||||
{ .compatible = "ti,am62a7", .data = &am625_soc_data, },
|
{ .compatible = "ti,am62a7", .data = &am625_soc_data, },
|
||||||
|
{ .compatible = "ti,am62p5", .data = &am625_soc_data, },
|
||||||
/* legacy */
|
/* legacy */
|
||||||
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
|
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
|
||||||
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
|
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
|
||||||
|
|
|
@ -1193,14 +1193,6 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
|
|
||||||
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
|
|
||||||
struct cpufreq_governor *old_gov);
|
|
||||||
#else
|
|
||||||
static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
|
|
||||||
struct cpufreq_governor *old_gov) { }
|
|
||||||
#endif
|
|
||||||
|
|
||||||
extern unsigned int arch_freq_get_on_cpu(int cpu);
|
extern unsigned int arch_freq_get_on_cpu(int cpu);
|
||||||
|
|
||||||
#ifndef arch_set_freq_scale
|
#ifndef arch_set_freq_scale
|
||||||
|
|
|
@ -556,6 +556,31 @@ static const struct kobj_type sugov_tunables_ktype = {
|
||||||
|
|
||||||
/********************** cpufreq governor interface *********************/
|
/********************** cpufreq governor interface *********************/
|
||||||
|
|
||||||
|
#ifdef CONFIG_ENERGY_MODEL
|
||||||
|
static void rebuild_sd_workfn(struct work_struct *work)
|
||||||
|
{
|
||||||
|
rebuild_sched_domains_energy();
|
||||||
|
}
|
||||||
|
|
||||||
|
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* EAS shouldn't be attempted without sugov, so rebuild the sched_domains
|
||||||
|
* on governor changes to make sure the scheduler knows about it.
|
||||||
|
*/
|
||||||
|
static void sugov_eas_rebuild_sd(void)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* When called from the cpufreq_register_driver() path, the
|
||||||
|
* cpu_hotplug_lock is already held, so use a work item to
|
||||||
|
* avoid nested locking in rebuild_sched_domains().
|
||||||
|
*/
|
||||||
|
schedule_work(&rebuild_sd_work);
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static inline void sugov_eas_rebuild_sd(void) { };
|
||||||
|
#endif
|
||||||
|
|
||||||
struct cpufreq_governor schedutil_gov;
|
struct cpufreq_governor schedutil_gov;
|
||||||
|
|
||||||
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
|
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
|
||||||
|
@ -710,6 +735,8 @@ static int sugov_init(struct cpufreq_policy *policy)
|
||||||
if (ret)
|
if (ret)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
|
sugov_eas_rebuild_sd();
|
||||||
|
|
||||||
out:
|
out:
|
||||||
mutex_unlock(&global_tunables_lock);
|
mutex_unlock(&global_tunables_lock);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -751,6 +778,8 @@ static void sugov_exit(struct cpufreq_policy *policy)
|
||||||
sugov_kthread_stop(sg_policy);
|
sugov_kthread_stop(sg_policy);
|
||||||
sugov_policy_free(sg_policy);
|
sugov_policy_free(sg_policy);
|
||||||
cpufreq_disable_fast_switch(policy);
|
cpufreq_disable_fast_switch(policy);
|
||||||
|
|
||||||
|
sugov_eas_rebuild_sd();
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sugov_start(struct cpufreq_policy *policy)
|
static int sugov_start(struct cpufreq_policy *policy)
|
||||||
|
@ -768,14 +797,6 @@ static int sugov_start(struct cpufreq_policy *policy)
|
||||||
|
|
||||||
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
|
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
|
||||||
|
|
||||||
for_each_cpu(cpu, policy->cpus) {
|
|
||||||
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
|
|
||||||
|
|
||||||
memset(sg_cpu, 0, sizeof(*sg_cpu));
|
|
||||||
sg_cpu->cpu = cpu;
|
|
||||||
sg_cpu->sg_policy = sg_policy;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (policy_is_shared(policy))
|
if (policy_is_shared(policy))
|
||||||
uu = sugov_update_shared;
|
uu = sugov_update_shared;
|
||||||
else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
|
else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
|
||||||
|
@ -786,6 +807,9 @@ static int sugov_start(struct cpufreq_policy *policy)
|
||||||
for_each_cpu(cpu, policy->cpus) {
|
for_each_cpu(cpu, policy->cpus) {
|
||||||
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
|
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
|
||||||
|
|
||||||
|
memset(sg_cpu, 0, sizeof(*sg_cpu));
|
||||||
|
sg_cpu->cpu = cpu;
|
||||||
|
sg_cpu->sg_policy = sg_policy;
|
||||||
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
|
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -839,29 +863,3 @@ struct cpufreq_governor *cpufreq_default_governor(void)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
cpufreq_governor_init(schedutil_gov);
|
cpufreq_governor_init(schedutil_gov);
|
||||||
|
|
||||||
#ifdef CONFIG_ENERGY_MODEL
|
|
||||||
static void rebuild_sd_workfn(struct work_struct *work)
|
|
||||||
{
|
|
||||||
rebuild_sched_domains_energy();
|
|
||||||
}
|
|
||||||
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* EAS shouldn't be attempted without sugov, so rebuild the sched_domains
|
|
||||||
* on governor changes to make sure the scheduler knows about it.
|
|
||||||
*/
|
|
||||||
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
|
|
||||||
struct cpufreq_governor *old_gov)
|
|
||||||
{
|
|
||||||
if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
|
|
||||||
/*
|
|
||||||
* When called from the cpufreq_register_driver() path, the
|
|
||||||
* cpu_hotplug_lock is already held, so use a work item to
|
|
||||||
* avoid nested locking in rebuild_sched_domains().
|
|
||||||
*/
|
|
||||||
schedule_work(&rebuild_sd_work);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
Loading…
Reference in a new issue