Merge tag 'scmi-updates-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into soc/drivers

Arm SCMI updates for v6.7

Main additions this time include:

1. SCMI v3.2 clock configuration support:
   This adds support for retrieving the enabled state of a clock as
   well as for setting OEM-specific clock configurations (see the first
   sketch after this list).

2. Support for generic performance scaling (DVFS):
   The current SCMI DVFS support in the kernel is limited to CPUs. This
   extension enables it to be used for all kinds of devices, not only
   CPUs. It updates the SCMI cpufreq driver to utilise the power domain
   bindings, and it adds a more generic SCMI performance domain based on
   the genpd framework that can be used for all the non-CPU devices (see
   the second sketch after this list).

3. Extend the generic performance scaling (DVFS) support to firmware
   driven OPPs:
   Consumer drivers for devices that are attached to the SCMI
   performance domain can't make use of the current OPP library to
   scale performance, as the OPPs are firmware driven and often obtained
   from the firmware rather than the device tree. These changes extend
   the generic OPP and genpd PM domain frameworks to identify and
   utilise these firmware driven OPPs (see the third sketch after this
   list).

4. SCMI v3.2 clock parent support:
   This adds support for discovering and changing parent clocks and
   extends the SCMI clk driver to use the same (see the fourth sketch
   after this list).

5. Qualcomm SMC/HVC transport support:
   The Qualcomm virtual platforms require a capability id in the
   hypervisor call to identify which doorbell to assert when supporting
   multiple SMC/HVC based SCMI transport channels. An extra parameter is
   added to the call to support this; its value is read from a fixed
   location in the shared memory, which is initialised by the firmware
   (see the last sketch after this list).

6. Move the existing SCMI power domain driver under drivers/pmdomain

Apart from the above main changes, it also includes a couple of minor
fixes and cosmetic reworks.
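
A minimal sketch of item 1, assuming a hypothetical SCMI driver probe;
only the ops added in this series (state_get, config_oem_set) and the
existing devm_protocol_get() are used, and the clock id and OEM
type/value are made up for illustration:

    #include <linux/scmi_protocol.h>

    static int example_clk_query(struct scmi_device *sdev)
    {
        const struct scmi_clk_proto_ops *ops;
        struct scmi_protocol_handle *ph;
        bool enabled = false;
        int ret;

        ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
        if (IS_ERR(ops))
            return PTR_ERR(ops);

        /* Retrieve the enabled state of clock 0 (non-atomic context) */
        ret = ops->state_get(ph, 0, &enabled, false);
        if (ret)
            return ret;

        /* Set a platform-defined OEM config; type and value are made up */
        return ops->config_oem_set(ph, 0, 0x1, 0xA5, false);
    }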
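
A sketch for item 2: a consumer attached to an SCMI performance domain
can request a level through the dev_pm_domain_set_performance_state()
helper introduced by this series; the device and the level value here
are hypothetical:

    #include <linux/pm_domain.h>

    static int example_boost(struct device *dev)
    {
        /*
         * Routed via dev->pm_domain->set_performance_state(), which
         * genpd now implements and forwards to the SCMI perf domain.
         */
        return dev_pm_domain_set_performance_state(dev, 3);
    }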
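
A sketch for item 3, using the reworked dev_pm_opp_add_dynamic() and
struct dev_pm_opp_data from this series; the frequency and level values
are illustrative:

    #include <linux/pm_opp.h>

    static int example_add_fw_opp(struct device *dev)
    {
        struct dev_pm_opp_data data = {
            .freq = 500000000,  /* 500 MHz, made-up value */
            .level = 3,         /* perf level reported by firmware */
        };

        /*
         * Replaces dev_pm_opp_add(dev, freq, u_volt); the level is what
         * _set_opp_level() later hands to the device's PM domain.
         */
        return dev_pm_opp_add_dynamic(dev, &data);
    }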
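
A sketch for item 4: with ->set_parent()/->get_parent() wired up in the
SCMI clk driver, re-parenting goes through the common clock framework;
the clock consumer names are hypothetical:

    #include <linux/clk.h>

    static int example_reparent(struct device *dev)
    {
        struct clk *clk = devm_clk_get(dev, "scmi-clk");        /* made-up name */
        struct clk *parent = devm_clk_get(dev, "scmi-parent");  /* made-up name */

        if (IS_ERR(clk) || IS_ERR(parent))
            return -ENODEV;

        /* Ends up in scmi_clk_set_parent() -> CLOCK_PARENT_SET */
        return clk_set_parent(clk, parent);
    }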
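
A sketch for item 5: the capability id handling amounts to reading the
last eight bytes of the channel's shared memory (see the smc transport
hunks below); the shmem pointer and size are assumed to come from the
channel setup:

    #include <linux/io.h>

    static unsigned long example_read_cap_id(void __iomem *shmem, size_t size)
    {
        unsigned long cap_id;

        /*
         * The firmware publishes the doorbell capability id in the
         * last 8 bytes of the shared memory region.
         */
        memcpy_fromio(&cap_id, shmem + size - 8, sizeof(cap_id));

        /*
         * It is then passed as the first SMC/HVC argument, instead of
         * the page/offset pair used by "arm,scmi-smc-param".
         */
        return cap_id;
    }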

* tag 'scmi-updates-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux: (37 commits)
  firmware: arm_scmi: Add qcom smc/hvc transport support
  dt-bindings: arm: Add new compatible for smc/hvc transport for SCMI
  firmware: arm_scmi: Convert u32 to unsigned long to align with arm_smccc_1_1_invoke()
  clk: scmi: Add support for clock {set,get}_parent
  firmware: arm_scmi: Add support for clock parents
  clk: scmi: Free scmi_clk allocated when the clocks with invalid info are skipped
  firmware: arm_scpi: Use device_get_match_data()
  firmware: arm_scmi: Add generic OPP support to the SCMI performance domain
  firmware: arm_scmi: Specify the performance level when adding an OPP
  firmware: arm_scmi: Simplify error path in scmi_dvfs_device_opps_add()
  OPP: Extend support for the opp-level beyond required-opps
  OPP: Switch to use dev_pm_domain_set_performance_state()
  OPP: Extend dev_pm_opp_data with a level
  OPP: Add dev_pm_opp_add_dynamic() to allow more flexibility
  PM: domains: Implement the ->set_performance_state() callback for genpd
  PM: domains: Introduce dev_pm_domain_set_performance_state()
  firmware: arm_scmi: Rename scmi_{msg_,}clock_config_{get,set}_{2,21}
  firmware: arm_scmi: Do not use !! on boolean when setting msg->flags
  firmware: arm_scmi: Move power-domain driver to the pmdomain dir
  pmdomain: arm: Add the SCMI performance domain
  ...

Link: https://lore.kernel.org/r/20231010124347.1620040-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit d1debb7b32
Arnd Bergmann, 2023-10-16 22:54:08 +02:00
27 changed files with 995 additions and 185 deletions

Documentation/devicetree/bindings/arm/cpus.yaml

@@ -308,7 +308,9 @@ properties:
power-domains property.
For PSCI based platforms, the name corresponding to the index of the PSCI
PM domain provider, must be "psci".
PM domain provider, must be "psci". For SCMI based platforms, the name
corresponding to the index of an SCMI performance domain provider, must be
"perf".
qcom,saw:
$ref: /schemas/types.yaml#/definitions/phandle

Documentation/devicetree/bindings/firmware/arm,scmi.yaml

@@ -38,6 +38,9 @@ properties:
with shmem address(4KB-page, offset) as parameters
items:
- const: arm,scmi-smc-param
- description: SCMI compliant firmware with Qualcomm SMC/HVC transport
items:
- const: qcom,scmi-smc
- description: SCMI compliant firmware with SCMI Virtio transport.
The virtio transport only supports a single device.
items:
@@ -149,8 +152,15 @@ properties:
'#clock-cells':
const: 1
required:
- '#clock-cells'
'#power-domain-cells':
const: 1
oneOf:
- required:
- '#clock-cells'
- required:
- '#power-domain-cells'
protocol@14:
$ref: '#/$defs/protocol-node'
@@ -306,6 +316,7 @@ else:
enum:
- arm,scmi-smc
- arm,scmi-smc-param
- qcom,scmi-smc
then:
required:
- arm,smc-id

Documentation/devicetree/bindings/power/power-domain.yaml

@@ -13,8 +13,9 @@ maintainers:
description: |+
System on chip designs are often divided into multiple PM domains that can be
used for power gating of selected IP blocks for power saving by reduced leakage
current.
used for power gating of selected IP blocks for power saving by reduced
leakage current. Moreover, in some cases the similar PM domains may also be
capable of scaling performance for a group of IP blocks.
This device tree binding can be used to bind PM domain consumer devices with
their PM domains provided by PM domain providers. A PM domain provider can be
@@ -25,7 +26,7 @@ description: |+
properties:
$nodename:
pattern: "^(power-controller|power-domain)([@-].*)?$"
pattern: "^(power-controller|power-domain|performance-domain)([@-].*)?$"
domain-idle-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
@@ -44,11 +45,11 @@ properties:
operating-points-v2:
description:
Phandles to the OPP tables of power domains provided by a power domain
provider. If the provider provides a single power domain only or all
the power domains provided by the provider have identical OPP tables,
then this shall contain a single phandle. Refer to ../opp/opp-v2-base.yaml
for more information.
Phandles to the OPP tables of power domains that are capable of scaling
performance, provided by a power domain provider. If the provider provides
a single power domain only or all the power domains provided by the
provider have identical OPP tables, then this shall contain a single
phandle. Refer to ../opp/opp-v2-base.yaml for more information.
"#power-domain-cells":
description:

MAINTAINERS

@@ -20904,6 +20904,7 @@ F: drivers/clk/clk-sc[mp]i.c
F: drivers/cpufreq/sc[mp]i-cpufreq.c
F: drivers/firmware/arm_scmi/
F: drivers/firmware/arm_scpi.c
F: drivers/pmdomain/arm/
F: drivers/powercap/arm_scmi_powercap.c
F: drivers/regulator/scmi-regulator.c
F: drivers/reset/reset-scmi.c

drivers/base/power/common.c

@@ -228,3 +228,24 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
device_pm_check_callbacks(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_set);
/**
* dev_pm_domain_set_performance_state - Request a new performance state.
* @dev: The device to make the request for.
* @state: Target performance state for the device.
*
* This function should be called when a new performance state needs to be
* requested for a device that is attached to a PM domain. Note that, the
* support for performance scaling for PM domains is optional.
*
* Returns 0 on success and when performance scaling isn't supported, negative
* error code on failure.
*/
int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state)
{
if (dev->pm_domain && dev->pm_domain->set_performance_state)
return dev->pm_domain->set_performance_state(dev, state);
return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_set_performance_state);

drivers/base/power/domain.c

@@ -130,6 +130,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd) (genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -419,6 +420,25 @@ static void genpd_restore_performance_state(struct device *dev,
genpd_set_performance_state(dev, state);
}
static int genpd_dev_pm_set_performance_state(struct device *dev,
unsigned int state)
{
struct generic_pm_domain *genpd = dev_to_genpd(dev);
int ret = 0;
genpd_lock(genpd);
if (pm_runtime_suspended(dev)) {
dev_gpd_data(dev)->rpm_pstate = state;
} else {
ret = genpd_set_performance_state(dev, state);
if (!ret)
dev_gpd_data(dev)->rpm_pstate = 0;
}
genpd_unlock(genpd);
return ret;
}
/**
* dev_pm_genpd_set_performance_state- Set performance state of device's power
* domain.
@@ -437,7 +457,6 @@ static void genpd_restore_performance_state(struct device *dev,
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
int ret = 0;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
@@ -447,17 +466,7 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
!dev->power.subsys_data->domain_data))
return -EINVAL;
genpd_lock(genpd);
if (pm_runtime_suspended(dev)) {
dev_gpd_data(dev)->rpm_pstate = state;
} else {
ret = genpd_set_performance_state(dev, state);
if (!ret)
dev_gpd_data(dev)->rpm_pstate = 0;
}
genpd_unlock(genpd);
return ret;
return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
@@ -2079,6 +2088,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.restore_noirq = genpd_restore_noirq;
genpd->domain.ops.complete = genpd_complete;
genpd->domain.start = genpd_dev_pm_start;
genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -2328,7 +2338,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
genpd->dev.of_node = np;
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
if (ret)
return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
@@ -2343,7 +2353,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
if (genpd->set_performance_state) {
if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
@@ -2387,7 +2397,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
genpd->dev.of_node = np;
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
dev_err_probe(&genpd->dev, ret,
@@ -2423,7 +2433,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
genpd->provider = NULL;
genpd->has_provider = false;
if (genpd->set_performance_state) {
if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
@@ -2455,7 +2465,7 @@ void of_genpd_del_provider(struct device_node *np)
if (gpd->provider == &np->fwnode) {
gpd->has_provider = false;
if (!gpd->set_performance_state)
if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
continue;
dev_pm_opp_put_opp_table(gpd->opp_table);

drivers/clk/clk-scmi.c

@@ -13,13 +13,18 @@
#include <linux/scmi_protocol.h>
#include <asm/div64.h>
#define NOT_ATOMIC false
#define ATOMIC true
static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
struct scmi_clk {
u32 id;
struct device *dev;
struct clk_hw hw;
const struct scmi_clock_info *info;
const struct scmi_protocol_handle *ph;
struct clk_parent_data *parent_data;
};
#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
@@ -74,38 +79,89 @@ static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
}
static int scmi_clk_set_parent(struct clk_hw *hw, u8 parent_index)
{
struct scmi_clk *clk = to_scmi_clk(hw);
return scmi_proto_clk_ops->parent_set(clk->ph, clk->id, parent_index);
}
static u8 scmi_clk_get_parent(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
u32 parent_id, p_idx;
int ret;
ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id);
if (ret)
return 0;
for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) {
if (clk->parent_data[p_idx].index == parent_id)
break;
}
if (p_idx == clk->info->num_parents)
return 0;
return p_idx;
}
static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
/*
* Assume all the requested rates are supported, and let the firmware
* handle the rest of the work.
*/
return 0;
}
static int scmi_clk_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
return scmi_proto_clk_ops->enable(clk->ph, clk->id);
return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
}
static void scmi_clk_disable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
scmi_proto_clk_ops->disable(clk->ph, clk->id);
scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
}
static int scmi_clk_atomic_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
return scmi_proto_clk_ops->enable_atomic(clk->ph, clk->id);
return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
}
static void scmi_clk_atomic_disable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
scmi_proto_clk_ops->disable_atomic(clk->ph, clk->id);
scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
}
static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
{
int ret;
bool enabled = false;
struct scmi_clk *clk = to_scmi_clk(hw);
ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
if (ret)
dev_warn(clk->dev,
"Failed to get state for clock ID %d\n", clk->id);
return !!enabled;
}
/*
* We can provide enable/disable atomic callbacks only if the underlying SCMI
* transport for an SCMI instance is configured to handle SCMI commands in an
* atomic manner.
* We can provide enable/disable/is_enabled atomic callbacks only if the
* underlying SCMI transport for an SCMI instance is configured to handle
* SCMI commands in an atomic manner.
*
* When no SCMI atomic transport support is available we instead provide only
* the prepare/unprepare API, as allowed by the clock framework when atomic
@@ -121,6 +177,9 @@ static const struct clk_ops scmi_clk_ops = {
.set_rate = scmi_clk_set_rate,
.prepare = scmi_clk_enable,
.unprepare = scmi_clk_disable,
.set_parent = scmi_clk_set_parent,
.get_parent = scmi_clk_get_parent,
.determine_rate = scmi_clk_determine_rate,
};
static const struct clk_ops scmi_atomic_clk_ops = {
@@ -129,6 +188,10 @@ static const struct clk_ops scmi_atomic_clk_ops = {
.set_rate = scmi_clk_set_rate,
.enable = scmi_clk_atomic_enable,
.disable = scmi_clk_atomic_disable,
.is_enabled = scmi_clk_atomic_is_enabled,
.set_parent = scmi_clk_set_parent,
.get_parent = scmi_clk_get_parent,
.determine_rate = scmi_clk_determine_rate,
};
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
@@ -139,9 +202,10 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
struct clk_init_data init = {
.flags = CLK_GET_RATE_NOCACHE,
.num_parents = 0,
.num_parents = sclk->info->num_parents,
.ops = scmi_ops,
.name = sclk->info->name,
.parent_data = sclk->parent_data,
};
sclk->hw.init = &init;
@@ -213,11 +277,13 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
if (!sclk->info) {
dev_dbg(dev, "invalid clock info for idx %d\n", idx);
devm_kfree(dev, sclk);
continue;
}
sclk->id = idx;
sclk->ph = ph;
sclk->dev = dev;
/*
* Note that when transport is atomic but SCMI protocol did not
@@ -230,9 +296,23 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
else
scmi_ops = &scmi_clk_ops;
/* Initialize clock parent data. */
if (sclk->info->num_parents > 0) {
sclk->parent_data = devm_kcalloc(dev, sclk->info->num_parents,
sizeof(*sclk->parent_data), GFP_KERNEL);
if (!sclk->parent_data)
return -ENOMEM;
for (int i = 0; i < sclk->info->num_parents; i++) {
sclk->parent_data[i].index = sclk->info->parents[i];
sclk->parent_data[i].hw = hws[sclk->info->parents[i]];
}
}
err = scmi_clk_ops_init(dev, sclk, scmi_ops);
if (err) {
dev_err(dev, "failed to register clock %d\n", idx);
devm_kfree(dev, sclk->parent_data);
devm_kfree(dev, sclk);
hws[idx] = NULL;
} else {

drivers/cpufreq/scmi-cpufreq.c

@@ -70,15 +70,35 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
return 0;
}
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
static int scmi_cpu_domain_id(struct device *cpu_dev)
{
int cpu, domain, tdomain;
struct device *tcpu_dev;
struct device_node *np = cpu_dev->of_node;
struct of_phandle_args domain_id;
int index;
domain = perf_ops->device_domain_id(cpu_dev);
if (domain < 0)
return domain;
if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
&domain_id)) {
/* Find the corresponding index for power-domain "perf". */
index = of_property_match_string(np, "power-domain-names",
"perf");
if (index < 0)
return -EINVAL;
if (of_parse_phandle_with_args(np, "power-domains",
"#power-domain-cells", index,
&domain_id))
return -EINVAL;
}
return domain_id.args[0];
}
static int
scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
struct cpumask *cpumask)
{
int cpu, tdomain;
struct device *tcpu_dev;
for_each_possible_cpu(cpu) {
if (cpu == cpu_dev->id)
@@ -88,7 +108,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
if (!tcpu_dev)
continue;
tdomain = perf_ops->device_domain_id(tcpu_dev);
tdomain = scmi_cpu_domain_id(tcpu_dev);
if (tdomain == domain)
cpumask_set_cpu(cpu, cpumask);
}
@@ -104,7 +124,7 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
unsigned long Hz;
int ret, domain;
domain = perf_ops->device_domain_id(cpu_dev);
domain = scmi_cpu_domain_id(cpu_dev);
if (domain < 0)
return domain;
@@ -126,7 +146,7 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp;
int ret, nr_opp, domain;
unsigned int latency;
struct device *cpu_dev;
struct scmi_data *priv;
@@ -138,6 +158,10 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
domain = scmi_cpu_domain_id(cpu_dev);
if (domain < 0)
return domain;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -148,7 +172,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
ret = scmi_get_sharing_cpus(cpu_dev, domain, policy->cpus);
if (ret) {
dev_warn(cpu_dev, "failed to get sharing cpumask\n");
goto out_free_cpumask;
@@ -176,7 +200,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
*/
nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
if (nr_opp <= 0) {
ret = perf_ops->device_opps_add(ph, cpu_dev);
ret = perf_ops->device_opps_add(ph, cpu_dev, domain);
if (ret) {
dev_warn(cpu_dev, "failed to add opps to the device\n");
goto out_free_cpumask;
@@ -209,7 +233,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
priv->cpu_dev = cpu_dev;
priv->domain_id = perf_ops->device_domain_id(cpu_dev);
priv->domain_id = domain;
policy->driver_data = priv;
policy->freq_table = freq_table;
@@ -217,14 +241,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
/* SCMI allows DVFS request for any domain from any CPU */
policy->dvfs_possible_from_any_cpu = true;
latency = perf_ops->transition_latency_get(ph, cpu_dev);
latency = perf_ops->transition_latency_get(ph, domain);
if (!latency)
latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, cpu_dev);
perf_ops->fast_switch_possible(ph, domain);
return 0;

drivers/firmware/arm_scmi/Kconfig

@@ -181,6 +181,18 @@ config ARM_SCMI_POWER_DOMAIN
will be called scmi_pm_domain. Note this may be needed early in boot
before rootfs may be available.
config ARM_SCMI_PERF_DOMAIN
tristate "SCMI performance domain driver"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
default y
select PM_GENERIC_DOMAINS if PM
help
This enables support for the SCMI performance domains which can be
enabled or disabled via the SCP firmware.
This driver can also be built as a module. If so, the module will be
called scmi_perf_domain.
config ARM_SCMI_POWER_CONTROL
tristate "SCMI system power control driver"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)

drivers/firmware/arm_scmi/Makefile

@@ -16,7 +16,6 @@ scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)

drivers/firmware/arm_scmi/clock.c

@@ -21,6 +21,17 @@ enum scmi_clock_protocol_cmd {
CLOCK_NAME_GET = 0x8,
CLOCK_RATE_NOTIFY = 0x9,
CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
CLOCK_CONFIG_GET = 0xB,
CLOCK_POSSIBLE_PARENTS_GET = 0xC,
CLOCK_PARENT_SET = 0xD,
CLOCK_PARENT_GET = 0xE,
};
enum clk_state {
CLK_STATE_DISABLE,
CLK_STATE_ENABLE,
CLK_STATE_RESERVED,
CLK_STATE_UNCHANGED,
};
struct scmi_msg_resp_clock_protocol_attributes {
@@ -31,19 +42,59 @@ struct scmi_msg_resp_clock_protocol_attributes {
struct scmi_msg_resp_clock_attributes {
__le32 attributes;
#define CLOCK_ENABLE BIT(0)
#define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x) ((x) & BIT(28))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
__le32 clock_enable_latency;
};
struct scmi_clock_set_config {
struct scmi_msg_clock_possible_parents {
__le32 id;
__le32 skip_parents;
};
struct scmi_msg_resp_clock_possible_parents {
__le32 num_parent_flags;
#define NUM_PARENTS_RETURNED(x) ((x) & 0xff)
#define NUM_PARENTS_REMAINING(x) ((x) >> 24)
__le32 possible_parents[];
};
struct scmi_msg_clock_set_parent {
__le32 id;
__le32 parent_id;
};
struct scmi_msg_clock_config_set {
__le32 id;
__le32 attributes;
};
/* Valid only from SCMI clock v2.1 */
struct scmi_msg_clock_config_set_v2 {
__le32 id;
__le32 attributes;
#define NULL_OEM_TYPE 0
#define REGMASK_OEM_TYPE_SET GENMASK(23, 16)
#define REGMASK_CLK_STATE GENMASK(1, 0)
__le32 oem_config_val;
};
struct scmi_msg_clock_config_get {
__le32 id;
__le32 flags;
#define REGMASK_OEM_TYPE_GET GENMASK(7, 0)
};
struct scmi_msg_resp_clock_config_get {
__le32 attributes;
__le32 config;
#define IS_CLK_ENABLED(x) le32_get_bits((x), BIT(0))
__le32 oem_config_val;
};
struct scmi_msg_clock_describe_rates {
__le32 id;
__le32 rate_index;
@@ -100,6 +151,12 @@ struct clock_info {
int max_async_req;
atomic_t cur_async_req;
struct scmi_clock_info *clk;
int (*clock_config_set)(const struct scmi_protocol_handle *ph,
u32 clk_id, enum clk_state state,
u8 oem_type, u32 oem_val, bool atomic);
int (*clock_config_get)(const struct scmi_protocol_handle *ph,
u32 clk_id, u8 oem_type, u32 *attributes,
bool *enabled, u32 *oem_val, bool atomic);
};
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
@@ -132,6 +189,98 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
return ret;
}
struct scmi_clk_ipriv {
struct device *dev;
u32 clk_id;
struct scmi_clock_info *clk;
};
static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index,
const void *priv)
{
struct scmi_msg_clock_possible_parents *msg = message;
const struct scmi_clk_ipriv *p = priv;
msg->id = cpu_to_le32(p->clk_id);
/* Set the number of parents to be skipped/already read */
msg->skip_parents = cpu_to_le32(desc_index);
}
static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
const struct scmi_msg_resp_clock_possible_parents *r = response;
struct scmi_clk_ipriv *p = priv;
struct device *dev = ((struct scmi_clk_ipriv *)p)->dev;
u32 flags;
flags = le32_to_cpu(r->num_parent_flags);
st->num_returned = NUM_PARENTS_RETURNED(flags);
st->num_remaining = NUM_PARENTS_REMAINING(flags);
/*
* The number of parents is not declared anywhere in advance, so we
* assume it is returned+remaining on the first call.
*/
if (!st->max_resources) {
p->clk->num_parents = st->num_returned + st->num_remaining;
p->clk->parents = devm_kcalloc(dev, p->clk->num_parents,
sizeof(*p->clk->parents),
GFP_KERNEL);
if (!p->clk->parents) {
p->clk->num_parents = 0;
return -ENOMEM;
}
st->max_resources = st->num_returned + st->num_remaining;
}
return 0;
}
static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st,
void *priv)
{
const struct scmi_msg_resp_clock_possible_parents *r = response;
struct scmi_clk_ipriv *p = priv;
u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx];
*parent = le32_to_cpu(r->possible_parents[st->loop_idx]);
return 0;
}
static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id,
struct scmi_clock_info *clk)
{
struct scmi_iterator_ops ops = {
.prepare_message = iter_clk_possible_parents_prepare_message,
.update_state = iter_clk_possible_parents_update_state,
.process_response = iter_clk_possible_parents_process_response,
};
struct scmi_clk_ipriv ppriv = {
.clk_id = clk_id,
.clk = clk,
.dev = ph->dev,
};
void *iter;
int ret;
iter = ph->hops->iter_response_init(ph, &ops, 0,
CLOCK_POSSIBLE_PARENTS_GET,
sizeof(struct scmi_msg_clock_possible_parents),
&ppriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
ret = ph->hops->iter_response_run(iter);
return ret;
}
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
u32 clk_id, struct scmi_clock_info *clk,
u32 version)
@@ -176,6 +325,8 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
clk->rate_changed_notifications = true;
if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
clk->rate_change_requested_notifications = true;
if (SUPPORTS_PARENT_CLOCK(attributes))
scmi_clock_possible_parents(ph, clk_id, clk);
}
return ret;
@@ -193,12 +344,6 @@ static int rate_cmp_func(const void *_r1, const void *_r2)
return 1;
}
struct scmi_clk_ipriv {
struct device *dev;
u32 clk_id;
struct scmi_clock_info *clk;
};
static void iter_clk_describe_prepare_message(void *message,
const unsigned int desc_index,
const void *priv)
@@ -395,11 +540,15 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
u32 config, bool atomic)
enum clk_state state, u8 __unused0, u32 __unused1,
bool atomic)
{
int ret;
struct scmi_xfer *t;
struct scmi_clock_set_config *cfg;
struct scmi_msg_clock_config_set *cfg;
if (state >= CLK_STATE_RESERVED)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
sizeof(*cfg), 0, &t);
@@ -410,7 +559,7 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(config);
cfg->attributes = cpu_to_le32(state);
ret = ph->xops->do_xfer(ph, t);
@@ -418,26 +567,221 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
return ret;
}
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
static int
scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
u32 parent_id)
{
return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
int ret;
struct scmi_xfer *t;
struct scmi_msg_clock_set_parent *cfg;
struct clock_info *ci = ph->get_priv(ph);
struct scmi_clock_info *clk;
if (clk_id >= ci->num_clocks)
return -EINVAL;
clk = ci->clk + clk_id;
if (parent_id >= clk->num_parents)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
t->hdr.poll_completion = false;
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->parent_id = cpu_to_le32(clk->parents[parent_id]);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
static int
scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
u32 *parent_id)
{
return scmi_clock_config_set(ph, clk_id, 0, false);
int ret;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET,
sizeof(__le32), sizeof(u32), &t);
if (ret)
return ret;
put_unaligned_le32(clk_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*parent_id = get_unaligned_le32(t->rx.buf);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
u32 clk_id)
/* For SCMI clock v2.1 and onwards */
static int
scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
enum clk_state state, u8 oem_type, u32 oem_val,
bool atomic)
{
return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
int ret;
u32 attrs;
struct scmi_xfer *t;
struct scmi_msg_clock_config_set_v2 *cfg;
if (state == CLK_STATE_RESERVED ||
(!oem_type && state == CLK_STATE_UNCHANGED))
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
t->hdr.poll_completion = atomic;
attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) |
FIELD_PREP(REGMASK_CLK_STATE, state);
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(attrs);
/* Clear in any case */
cfg->oem_config_val = cpu_to_le32(0);
if (oem_type)
cfg->oem_config_val = cpu_to_le32(oem_val);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
u32 clk_id)
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
bool atomic)
{
return scmi_clock_config_set(ph, clk_id, 0, true);
struct clock_info *ci = ph->get_priv(ph);
return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
NULL_OEM_TYPE, 0, atomic);
}
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
NULL_OEM_TYPE, 0, atomic);
}
/* For SCMI clock v2.1 and onwards */
static int
scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
u8 oem_type, u32 *attributes, bool *enabled,
u32 *oem_val, bool atomic)
{
int ret;
u32 flags;
struct scmi_xfer *t;
struct scmi_msg_clock_config_get *cfg;
ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
t->hdr.poll_completion = atomic;
flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type);
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->flags = cpu_to_le32(flags);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
struct scmi_msg_resp_clock_config_get *resp = t->rx.buf;
if (attributes)
*attributes = le32_to_cpu(resp->attributes);
if (enabled)
*enabled = IS_CLK_ENABLED(resp->config);
if (oem_val && oem_type)
*oem_val = le32_to_cpu(resp->oem_config_val);
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int
scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
u8 oem_type, u32 *attributes, bool *enabled,
u32 *oem_val, bool atomic)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *resp;
if (!enabled)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
sizeof(clk_id), sizeof(*resp), &t);
if (ret)
return ret;
t->hdr.poll_completion = atomic;
put_unaligned_le32(clk_id, t->tx.buf);
resp = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*enabled = IS_CLK_ENABLED(resp->attributes);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
u32 clk_id, bool *enabled, bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL,
enabled, NULL, atomic);
}
static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
u32 clk_id, u8 oem_type, u32 oem_val,
bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
oem_type, oem_val, atomic);
}
static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
u32 clk_id, u8 oem_type, u32 *oem_val,
u32 *attributes, bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
return ci->clock_config_get(ph, clk_id, oem_type, attributes,
NULL, oem_val, atomic);
}
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
@@ -470,8 +814,11 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
.rate_set = scmi_clock_rate_set,
.enable = scmi_clock_enable,
.disable = scmi_clock_disable,
.enable_atomic = scmi_clock_enable_atomic,
.disable_atomic = scmi_clock_disable_atomic,
.state_get = scmi_clock_state_get,
.config_oem_get = scmi_clock_config_oem_get,
.config_oem_set = scmi_clock_config_oem_set,
.parent_set = scmi_clock_set_parent,
.parent_get = scmi_clock_get_parent,
};
static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
@@ -604,6 +951,15 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
scmi_clock_describe_rates_get(ph, clkid, clk);
}
if (PROTOCOL_REV_MAJOR(version) >= 0x2 &&
PROTOCOL_REV_MINOR(version) >= 0x1) {
cinfo->clock_config_set = scmi_clock_config_set_v2;
cinfo->clock_config_get = scmi_clock_config_get_v2;
} else {
cinfo->clock_config_set = scmi_clock_config_set;
cinfo->clock_config_get = scmi_clock_config_get;
}
cinfo->version = version;
return ph->set_priv(ph, cinfo);
}

drivers/firmware/arm_scmi/driver.c

@@ -2915,6 +2915,7 @@ static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
{ .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
{ .compatible = "qcom,scmi-smc", .data = &scmi_smc_desc},
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},

drivers/firmware/arm_scmi/perf.c

@@ -145,7 +145,6 @@ struct scmi_msg_resp_perf_describe_levels_v4 {
struct perf_dom_info {
u32 id;
bool set_limits;
bool set_perf;
bool perf_limit_notify;
bool perf_level_notify;
bool perf_fastchannels;
@@ -154,7 +153,7 @@ struct perf_dom_info {
u32 sustained_freq_khz;
u32 sustained_perf_level;
u32 mult_factor;
char name[SCMI_MAX_STR_SIZE];
struct scmi_perf_domain_info info;
struct scmi_opp opp[MAX_OPPS];
struct scmi_fc_info *fc_info;
struct xarray opps_by_idx;
@@ -257,7 +256,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
flags = le32_to_cpu(attr->flags);
dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
@@ -276,7 +275,8 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->mult_factor =
(dom_info->sustained_freq_khz * 1000) /
dom_info->sustained_perf_level;
strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
strscpy(dom_info->info.name, attr->name,
SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
@@ -288,7 +288,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(flags))
ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
dom_info->id, dom_info->name,
dom_info->id, dom_info->info.name,
SCMI_MAX_STR_SIZE);
if (dom_info->level_indexing_mode) {
@@ -423,6 +423,36 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph,
return ret;
}
static int scmi_perf_num_domains_get(const struct scmi_protocol_handle *ph)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
return pi->num_domains;
}
static inline struct perf_dom_info *
scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
if (domain >= pi->num_domains)
return ERR_PTR(-EINVAL);
return pi->dom_info + domain;
}
static const struct scmi_perf_domain_info *
scmi_perf_info_get(const struct scmi_protocol_handle *ph, u32 domain)
{
struct perf_dom_info *dom;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return ERR_PTR(-EINVAL);
return &dom->info;
}
static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 max_perf, u32 min_perf)
{
@@ -446,17 +476,6 @@ static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
return ret;
}
static inline struct perf_dom_info *
scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
if (domain >= pi->num_domains)
return ERR_PTR(-EINVAL);
return pi->dom_info + domain;
}
static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
struct perf_dom_info *dom, u32 max_perf,
u32 min_perf)
@@ -763,71 +782,46 @@ static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
*p_fc = fc;
}
/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
struct of_phandle_args clkspec;
if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
0, &clkspec))
return -EINVAL;
return clkspec.args[0];
}
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
struct device *dev)
struct device *dev, u32 domain)
{
int idx, ret, domain;
int idx, ret;
unsigned long freq;
struct scmi_opp *opp;
struct dev_pm_opp_data data = {};
struct perf_dom_info *dom;
domain = scmi_dev_domain_id(dev);
if (domain < 0)
return -EINVAL;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
for (idx = 0; idx < dom->opp_count; idx++) {
if (!dom->level_indexing_mode)
freq = opp->perf * dom->mult_factor;
freq = dom->opp[idx].perf * dom->mult_factor;
else
freq = opp->indicative_freq * 1000;
freq = dom->opp[idx].indicative_freq * 1000;
ret = dev_pm_opp_add(dev, freq, 0);
data.level = dom->opp[idx].perf;
data.freq = freq;
ret = dev_pm_opp_add_dynamic(dev, &data);
if (ret) {
dev_warn(dev, "failed to add opp %luHz\n", freq);
while (idx-- > 0) {
if (!dom->level_indexing_mode)
freq = (--opp)->perf * dom->mult_factor;
else
freq = (--opp)->indicative_freq * 1000;
dev_pm_opp_remove(dev, freq);
}
dev_pm_opp_remove_all_dynamic(dev);
return ret;
}
dev_dbg(dev, "[%d][%s]:: Registered OPP[%d] %lu\n",
domain, dom->name, idx, freq);
domain, dom->info.name, idx, freq);
}
return 0;
}
static int
scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
struct device *dev)
u32 domain)
{
int domain;
struct perf_dom_info *dom;
domain = scmi_dev_domain_id(dev);
if (domain < 0)
return -EINVAL;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
@@ -923,15 +917,10 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
}
static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
struct device *dev)
u32 domain)
{
int domain;
struct perf_dom_info *dom;
domain = scmi_dev_domain_id(dev);
if (domain < 0)
return false;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return false;
@@ -948,11 +937,12 @@
}
static const struct scmi_perf_proto_ops perf_proto_ops = {
.num_domains_get = scmi_perf_num_domains_get,
.info_get = scmi_perf_info_get,
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
.level_set = scmi_perf_level_set,
.level_get = scmi_perf_level_get,
.device_domain_id = scmi_dev_domain_id,
.transition_latency_get = scmi_dvfs_transition_latency_get,
.device_opps_add = scmi_dvfs_device_opps_add,
.freq_set = scmi_dvfs_freq_set,

drivers/firmware/arm_scmi/powercap.c

@@ -360,8 +360,8 @@ static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph,
msg = t->tx.buf;
msg->domain = cpu_to_le32(pc->id);
msg->flags =
cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) |
FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp));
cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, pc->async_powercap_cap_set) |
FIELD_PREP(CAP_SET_IGNORE_DRESP, ignore_dresp));
msg->value = cpu_to_le32(power_cap);
if (!pc->async_powercap_cap_set || ignore_dresp) {

drivers/firmware/arm_scmi/smc.c

@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/limits.h>
#include <linux/processor.h>
#include <linux/slab.h>
@@ -50,6 +51,8 @@
* @func_id: smc/hvc call function id
* @param_page: 4K page number of the shmem channel
* @param_offset: Offset within the 4K page of the shmem channel
* @cap_id: smc/hvc doorbell's capability id to be used on Qualcomm virtual
* platforms
*/
struct scmi_smc {
@@ -60,9 +63,10 @@ struct scmi_smc {
struct mutex shmem_lock;
#define INFLIGHT_NONE MSG_TOKEN_MAX
atomic_t inflight;
u32 func_id;
u32 param_page;
u32 param_offset;
unsigned long func_id;
unsigned long param_page;
unsigned long param_offset;
unsigned long cap_id;
};
static irqreturn_t smc_msg_done_isr(int irq, void *data)
@@ -124,6 +128,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
struct device *cdev = cinfo->dev;
unsigned long cap_id = ULONG_MAX;
struct scmi_smc *scmi_info;
resource_size_t size;
struct resource res;
@@ -162,6 +167,18 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (ret < 0)
return ret;
if (of_device_is_compatible(dev->of_node, "qcom,scmi-smc")) {
void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8;
/* The capability-id is kept in the last 8 bytes of shmem.
* +-------+ <-- 0
* | shmem |
* +-------+ <-- size - 8
* | capId |
* +-------+ <-- size
*/
memcpy_fromio(&cap_id, ptr, sizeof(cap_id));
}
if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) {
scmi_info->param_page = SHMEM_PAGE(res.start);
scmi_info->param_offset = SHMEM_OFFSET(res.start);
@@ -184,6 +201,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
}
scmi_info->func_id = func_id;
scmi_info->cap_id = cap_id;
scmi_info->cinfo = cinfo;
smc_channel_lock_init(scmi_info);
cinfo->transport_info = scmi_info;
@@ -211,8 +229,6 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
{
struct scmi_smc *scmi_info = cinfo->transport_info;
struct arm_smccc_res res;
unsigned long page = scmi_info->param_page;
unsigned long offset = scmi_info->param_offset;
/*
* Channel will be released only once response has been
@@ -222,8 +238,13 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);
arm_smccc_1_1_invoke(scmi_info->func_id, page, offset, 0, 0, 0, 0, 0,
&res);
if (scmi_info->cap_id != ULONG_MAX)
arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0,
0, 0, 0, 0, 0, &res);
else
arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->param_page,
scmi_info->param_offset, 0, 0, 0, 0, 0,
&res);
/* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
if (res.a0) {

drivers/firmware/arm_scpi.c

@@ -26,9 +26,12 @@
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/property.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
@@ -894,11 +897,6 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
return 0;
}
static const struct of_device_id legacy_scpi_of_match[] = {
{.compatible = "arm,scpi-pre-1.0"},
{},
};
static const struct of_device_id shmem_of_match[] __maybe_unused = {
{ .compatible = "amlogic,meson-gxbb-scp-shmem", },
{ .compatible = "amlogic,meson-axg-scp-shmem", },
@@ -919,8 +917,7 @@ static int scpi_probe(struct platform_device *pdev)
if (!scpi_drvinfo)
return -ENOMEM;
if (of_match_device(legacy_scpi_of_match, &pdev->dev))
scpi_drvinfo->is_legacy = true;
scpi_drvinfo->is_legacy = !!device_get_match_data(dev);
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
@@ -1038,7 +1035,7 @@ static int scpi_probe(struct platform_device *pdev)
static const struct of_device_id scpi_of_match[] = {
{.compatible = "arm,scpi"},
{.compatible = "arm,scpi-pre-1.0"},
{.compatible = "arm,scpi-pre-1.0", .data = (void *)1UL },
{},
};

drivers/opp/core.c

@@ -1030,7 +1030,7 @@ static int _set_performance_state(struct device *dev, struct device *pd_dev,
if (!pd_dev)
return 0;
ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
ret = dev_pm_domain_set_performance_state(pd_dev, pstate);
if (ret) {
dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
dev_name(pd_dev), pstate, ret);
@@ -1107,6 +1107,28 @@ void _update_set_required_opps(struct opp_table *opp_table)
opp_table->set_required_opps = _opp_set_required_opps_generic;
}
static int _set_opp_level(struct device *dev, struct opp_table *opp_table,
struct dev_pm_opp *opp)
{
unsigned int level = 0;
int ret = 0;
if (opp) {
if (!opp->level)
return 0;
level = opp->level;
}
/* Request a new performance state through the device's PM domain. */
ret = dev_pm_domain_set_performance_state(dev, level);
if (ret)
dev_err(dev, "Failed to set performance state %u (%d)\n", level,
ret);
return ret;
}
static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
@@ -1154,8 +1176,13 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
if (opp_table->regulators)
regulator_disable(opp_table->regulators[0]);
ret = _set_opp_level(dev, opp_table, NULL);
if (ret)
goto out;
ret = _set_required_opps(dev, opp_table, NULL, false);
out:
opp_table->enabled = false;
return ret;
}
@@ -1198,6 +1225,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
return ret;
}
ret = _set_opp_level(dev, opp_table, opp);
if (ret)
return ret;
ret = _set_opp_bw(opp_table, opp, dev);
if (ret) {
dev_err(dev, "Failed to set bw: %d\n", ret);
@@ -1241,6 +1272,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
return ret;
}
ret = _set_opp_level(dev, opp_table, opp);
if (ret)
return ret;
ret = _set_required_opps(dev, opp_table, opp, false);
if (ret) {
dev_err(dev, "Failed to set required opps: %d\n", ret);
@@ -2002,8 +2037,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* _opp_add_v1() - Allocate a OPP based on v1 bindings.
* @opp_table: OPP table
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
* @data: The OPP data for the OPP to add
* @dynamic: Dynamically added OPPs.
*
* This function adds an opp definition to the opp table and returns status.
@@ -2021,10 +2055,10 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* -ENOMEM Memory allocation failure
*/
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
unsigned long freq, long u_volt, bool dynamic)
struct dev_pm_opp_data *data, bool dynamic)
{
struct dev_pm_opp *new_opp;
unsigned long tol;
unsigned long tol, u_volt = data->u_volt;
int ret;
if (!assert_single_clk(opp_table))
@@ -2035,7 +2069,8 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
return -ENOMEM;
/* populate the opp table */
new_opp->rates[0] = freq;
new_opp->rates[0] = data->freq;
new_opp->level = data->level;
tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->supplies[0].u_volt = u_volt;
new_opp->supplies[0].u_volt_min = u_volt - tol;
@@ -2825,10 +2860,9 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
}
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
* dev_pm_opp_add_dynamic() - Add an OPP table from a table definitions
* @dev: The device for which we do this operation
* @data: The OPP data for the OPP to add
*
* This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
@@ -2841,7 +2875,7 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
*/
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data)
{
struct opp_table *opp_table;
int ret;
@@ -2853,13 +2887,13 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
/* Fix regulator count for dynamic OPPs */
opp_table->regulator_count = 1;
ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
ret = _opp_add_v1(opp_table, dev, data, true);
if (ret)
dev_pm_opp_put_opp_table(opp_table);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic);
/**
* _opp_set_availability() - helper to set the availability of an opp

drivers/opp/of.c

@@ -1077,13 +1077,15 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
val = prop->value;
while (nr) {
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
struct dev_pm_opp_data data = {
.freq = be32_to_cpup(val++) * 1000,
.u_volt = be32_to_cpup(val++),
};
ret = _opp_add_v1(opp_table, dev, freq, volt, false);
ret = _opp_add_v1(opp_table, dev, &data, false);
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
__func__, data.freq, ret);
goto remove_static_opp;
}
nr -= 2;

drivers/opp/opp.h

@@ -251,7 +251,7 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, struct dev_pm_opp_data *data, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
void _put_opp_list_kref(struct opp_table *opp_table);

drivers/pmdomain/Makefile

@@ -2,6 +2,7 @@
obj-y += actions/
obj-y += amlogic/
obj-y += apple/
obj-y += arm/
obj-y += bcm/
obj-y += imx/
obj-y += mediatek/

drivers/pmdomain/arm/Makefile (new file)

@@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_ARM_SCMI_PERF_DOMAIN) += scmi_perf_domain.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o

drivers/pmdomain/arm/scmi_perf_domain.c (new file)

@@ -0,0 +1,184 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SCMI performance domain support.
*
* Copyright (C) 2023 Linaro Ltd.
*/
#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
struct scmi_perf_domain {
struct generic_pm_domain genpd;
const struct scmi_perf_proto_ops *perf_ops;
const struct scmi_protocol_handle *ph;
const struct scmi_perf_domain_info *info;
u32 domain_id;
};
#define to_scmi_pd(pd) container_of(pd, struct scmi_perf_domain, genpd)
static int
scmi_pd_set_perf_state(struct generic_pm_domain *genpd, unsigned int state)
{
struct scmi_perf_domain *pd = to_scmi_pd(genpd);
int ret;
if (!pd->info->set_perf)
return 0;
if (!state)
return -EINVAL;
ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, true);
if (ret)
dev_warn(&genpd->dev, "Failed with %d when trying to set %d perf level",
ret, state);
return ret;
}
static int
scmi_pd_attach_dev(struct generic_pm_domain *genpd, struct device *dev)
{
struct scmi_perf_domain *pd = to_scmi_pd(genpd);
int ret;
/*
* Allow the device to be attached, but don't add the OPP table unless
* the performance level can be changed.
*/
if (!pd->info->set_perf)
return 0;
ret = pd->perf_ops->device_opps_add(pd->ph, dev, pd->domain_id);
if (ret)
dev_warn(dev, "failed to add OPPs for the device\n");
return ret;
}
static void
scmi_pd_detach_dev(struct generic_pm_domain *genpd, struct device *dev)
{
struct scmi_perf_domain *pd = to_scmi_pd(genpd);
if (!pd->info->set_perf)
return;
dev_pm_opp_remove_all_dynamic(dev);
}
static int scmi_perf_domain_probe(struct scmi_device *sdev)
{
struct device *dev = &sdev->dev;
const struct scmi_handle *handle = sdev->handle;
const struct scmi_perf_proto_ops *perf_ops;
struct scmi_protocol_handle *ph;
struct scmi_perf_domain *scmi_pd;
struct genpd_onecell_data *scmi_pd_data;
struct generic_pm_domain **domains;
int num_domains, i, ret = 0;
if (!handle)
return -ENODEV;
/* The OF node must specify us as a power-domain provider. */
if (!of_find_property(dev->of_node, "#power-domain-cells", NULL))
return 0;
perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
if (IS_ERR(perf_ops))
return PTR_ERR(perf_ops);
num_domains = perf_ops->num_domains_get(ph);
if (num_domains < 0) {
dev_warn(dev, "Failed with %d when getting num perf domains\n",
num_domains);
return num_domains;
} else if (!num_domains) {
return 0;
}
scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL);
if (!scmi_pd)
return -ENOMEM;
scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL);
if (!scmi_pd_data)
return -ENOMEM;
domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
if (!domains)
return -ENOMEM;
for (i = 0; i < num_domains; i++, scmi_pd++) {
scmi_pd->info = perf_ops->info_get(ph, i);
scmi_pd->domain_id = i;
scmi_pd->perf_ops = perf_ops;
scmi_pd->ph = ph;
scmi_pd->genpd.name = scmi_pd->info->name;
scmi_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON |
GENPD_FLAG_OPP_TABLE_FW;
scmi_pd->genpd.set_performance_state = scmi_pd_set_perf_state;
scmi_pd->genpd.attach_dev = scmi_pd_attach_dev;
scmi_pd->genpd.detach_dev = scmi_pd_detach_dev;
ret = pm_genpd_init(&scmi_pd->genpd, NULL, false);
if (ret)
goto err;
domains[i] = &scmi_pd->genpd;
}
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
ret = of_genpd_add_provider_onecell(dev->of_node, scmi_pd_data);
if (ret)
goto err;
dev_set_drvdata(dev, scmi_pd_data);
dev_info(dev, "Initialized %d performance domains", num_domains);
return 0;
err:
for (i--; i >= 0; i--)
pm_genpd_remove(domains[i]);
return ret;
}
static void scmi_perf_domain_remove(struct scmi_device *sdev)
{
struct device *dev = &sdev->dev;
struct genpd_onecell_data *scmi_pd_data = dev_get_drvdata(dev);
int i;
of_genpd_del_provider(dev->of_node);
for (i = 0; i < scmi_pd_data->num_domains; i++)
pm_genpd_remove(scmi_pd_data->domains[i]);
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_PERF, "perf" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_perf_domain_driver = {
.name = "scmi-perf-domain",
.probe = scmi_perf_domain_probe,
.remove = scmi_perf_domain_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_perf_domain_driver);
MODULE_AUTHOR("Ulf Hansson <ulf.hansson@linaro.org>");
MODULE_DESCRIPTION("ARM SCMI perf domain driver");
MODULE_LICENSE("GPL v2");

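For context, a minimal consumer-side sketch; the foo_* driver, its name and the chosen level are hypothetical and not part of this series. With a "power-domains" phandle pointing at this provider, genpd attaches the device at probe time and scmi_pd_attach_dev() populates its OPP table from firmware, so the dev_pm_domain_set_performance_state() helper added later in this series (see the pm_domain.h changes below) can request a level directly:

/* Hypothetical consumer driver, shown only to illustrate the flow. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/*
	 * genpd has attached the device to the SCMI performance domain
	 * (single "power-domains" entry) and its firmware OPPs are in
	 * place, so a performance level can be requested directly.
	 */
	return dev_pm_domain_set_performance_state(dev, 3);
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = { .name = "foo-perf-consumer" },
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");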
diff --git a/include/linux/pm.h b/include/linux/pm.h

@@ -719,6 +719,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
  * @activate: Called before executing probe routines for bus types and drivers.
  * @sync: Called after successful driver probe.
  * @dismiss: Called after unsuccessful driver probe and after driver removal.
+ * @set_performance_state: Called to request a new performance state.
  *
  * Power domains provide callbacks that are executed during system suspend,
  * hibernation, system resume and during runtime PM transitions instead of
@@ -731,6 +732,7 @@ struct dev_pm_domain {
 	int (*activate)(struct device *dev);
 	void (*sync)(struct device *dev);
 	void (*dismiss)(struct device *dev);
+	int (*set_performance_state)(struct device *dev, unsigned int state);
 };
 
 /*

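As a rough illustration of the new ->set_performance_state() hook (everything named foo_* below is hypothetical), a bus or firmware layer that installs its own dev_pm_domain can forward performance-state requests to its backend; genpd gains an equivalent implementation as part of this series:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>

/* Hypothetical backend that actually programs the hardware. */
int foo_hw_set_perf(struct device *dev, unsigned int state);

static int foo_set_performance_state(struct device *dev, unsigned int state)
{
	return foo_hw_set_perf(dev, state);
}

static struct dev_pm_domain foo_pm_domain = {
	.set_performance_state = foo_set_performance_state,
};

/* Installed with dev_pm_domain_set(dev, &foo_pm_domain) at attach time. */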
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h

@@ -61,6 +61,10 @@
  * GENPD_FLAG_MIN_RESIDENCY:	Enable the genpd governor to consider its
  *				components' next wakeup when determining the
  *				optimal idle state.
+ *
+ * GENPD_FLAG_OPP_TABLE_FW:	The genpd provider supports performance states,
+ *				but its corresponding OPP tables are not
+ *				described in DT, but are given directly by FW.
  */
 #define GENPD_FLAG_PM_CLK	 (1U << 0)
 #define GENPD_FLAG_IRQ_SAFE	 (1U << 1)
@@ -69,6 +73,7 @@
 #define GENPD_FLAG_CPU_DOMAIN	 (1U << 4)
 #define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
 #define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
+#define GENPD_FLAG_OPP_TABLE_FW	 (1U << 7)
 
 enum gpd_status {
 	GENPD_STATE_ON = 0,	/* PM domain is on */
@@ -430,6 +435,7 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
 void dev_pm_domain_detach(struct device *dev, bool power_off);
 int dev_pm_domain_start(struct device *dev);
 void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
+int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state);
 #else
 static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
 {
@@ -452,6 +458,11 @@ static inline int dev_pm_domain_start(struct device *dev)
 }
 
 static inline void dev_pm_domain_set(struct device *dev,
 				     struct dev_pm_domain *pd) {}
 
+static inline int dev_pm_domain_set_performance_state(struct device *dev,
+						      unsigned int state)
+{
+	return 0;
+}
+
 #endif
 
 #endif /* _LINUX_PM_DOMAIN_H */

diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h

@@ -92,6 +92,18 @@ struct dev_pm_opp_config {
 	struct device ***virt_devs;
 };
 
+/**
+ * struct dev_pm_opp_data - The data to use to initialize an OPP.
+ * @level: The performance level for the OPP.
+ * @freq: The clock rate in Hz for the OPP.
+ * @u_volt: The voltage in uV for the OPP.
+ */
+struct dev_pm_opp_data {
+	unsigned int level;
+	unsigned long freq;
+	unsigned long u_volt;
+};
+
 #if defined(CONFIG_PM_OPP)
 struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
@@ -152,8 +164,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
 
 void dev_pm_opp_put(struct dev_pm_opp *opp);
 
-int dev_pm_opp_add(struct device *dev, unsigned long freq,
-		   unsigned long u_volt);
+int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp);
 
 void dev_pm_opp_remove(struct device *dev, unsigned long freq);
 
 void dev_pm_opp_remove_all_dynamic(struct device *dev);
@@ -322,8 +334,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
 
 static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
 
-static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
-				 unsigned long u_volt)
+static inline int
+dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp)
 {
 	return -EOPNOTSUPP;
 }
@@ -519,6 +531,17 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta
 
 /* OPP Configuration helpers */
 
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+				 unsigned long u_volt)
+{
+	struct dev_pm_opp_data data = {
+		.freq = freq,
+		.u_volt = u_volt,
+	};
+
+	return dev_pm_opp_add_dynamic(dev, &data);
+}
+
 /* Regulators helpers */
 static inline int dev_pm_opp_set_regulators(struct device *dev,
 					    const char * const names[])

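A short usage sketch for the reworked OPP API (the numbers below are made up): rather than passing a frequency/voltage pair to dev_pm_opp_add(), a caller now fills in a struct dev_pm_opp_data, which can also carry an abstract performance level for firmware-driven tables, and hands it to dev_pm_opp_add_dynamic():

#include <linux/pm_opp.h>

static int foo_register_fw_opp(struct device *dev)
{
	struct dev_pm_opp_data data = {
		.level = 2,		/* abstract performance level */
		.freq = 800000000,	/* 800 MHz */
		.u_volt = 900000,	/* 900000 uV, i.e. 0.9 V */
	};

	/* Dropped again via dev_pm_opp_remove_all_dynamic() at detach. */
	return dev_pm_opp_add_dynamic(dev, &data);
}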
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h

@@ -58,6 +58,8 @@ struct scmi_clock_info {
 			u64 step_size;
 		} range;
 	};
+	int num_parents;
+	u32 *parents;
 };
 
 enum scmi_power_scale {
@@ -80,6 +82,11 @@ struct scmi_protocol_handle;
  * @rate_set: set the clock rate of a clock
  * @enable: enables the specified clock
  * @disable: disables the specified clock
+ * @state_get: get the status of the specified clock
+ * @config_oem_get: get the value of an OEM specific clock config
+ * @config_oem_set: set the value of an OEM specific clock config
+ * @parent_get: get the parent id of a clk
+ * @parent_set: set the parent of a clock
  */
 struct scmi_clk_proto_ops {
 	int (*count_get)(const struct scmi_protocol_handle *ph);
@@ -90,22 +97,36 @@ struct scmi_clk_proto_ops {
 			u64 *rate);
 	int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
 			u64 rate);
-	int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id);
-	int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id);
-	int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id);
-	int (*disable_atomic)(const struct scmi_protocol_handle *ph,
-			      u32 clk_id);
+	int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+		      bool atomic);
+	int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+		       bool atomic);
+	int (*state_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+			 bool *enabled, bool atomic);
+	int (*config_oem_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+			      u8 oem_type, u32 *oem_val, u32 *attributes,
+			      bool atomic);
+	int (*config_oem_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
+			      u8 oem_type, u32 oem_val, bool atomic);
+	int (*parent_get)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 *parent_id);
+	int (*parent_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 parent_id);
 };
 
+struct scmi_perf_domain_info {
+	char name[SCMI_MAX_STR_SIZE];
+	bool set_perf;
+};
+
 /**
  * struct scmi_perf_proto_ops - represents the various operations provided
  * by SCMI Performance Protocol
  *
+ * @num_domains_get: gets the number of supported performance domains
+ * @info_get: get the information of a performance domain
  * @limits_set: sets limits on the performance level of a domain
  * @limits_get: gets limits on the performance level of a domain
  * @level_set: sets the performance level of a domain
  * @level_get: gets the performance level of a domain
- * @device_domain_id: gets the scmi domain id for a given device
  * @transition_latency_get: gets the DVFS transition latency for a given device
  * @device_opps_add: adds all the OPPs for a given device
  * @freq_set: sets the frequency for a given device using sustained frequency
@@ -120,6 +141,9 @@ struct scmi_clk_proto_ops {
  * or in some other (abstract) scale
  */
 struct scmi_perf_proto_ops {
+	int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+	const struct scmi_perf_domain_info __must_check *(*info_get)
+		(const struct scmi_protocol_handle *ph, u32 domain);
 	int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
 			  u32 max_perf, u32 min_perf);
 	int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -128,11 +152,10 @@ struct scmi_perf_proto_ops {
 			  u32 level, bool poll);
 	int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain,
 			 u32 *level, bool poll);
-	int (*device_domain_id)(struct device *dev);
 	int (*transition_latency_get)(const struct scmi_protocol_handle *ph,
-				      struct device *dev);
+				      u32 domain);
 	int (*device_opps_add)(const struct scmi_protocol_handle *ph,
-			       struct device *dev);
+			       struct device *dev, u32 domain);
 	int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain,
 			unsigned long rate, bool poll);
 	int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -140,7 +163,7 @@ struct scmi_perf_proto_ops {
 	int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain,
 			     unsigned long *rate, unsigned long *power);
 	bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph,
-				     struct device *dev);
+				     u32 domain);
 	enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph);
 };
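Finally, a hedged sketch of how a caller is expected to use the reworked clock ops (the clk_id plumbing and the chosen parent index are illustrative; the real consumer is the SCMI clk driver): atomic behaviour is now selected by a flag on enable/disable instead of the separate *_atomic variants, and the new parent ops allow reparenting:

#include <linux/scmi_protocol.h>

static int foo_clk_reparent_and_enable(const struct scmi_clk_proto_ops *ops,
				       const struct scmi_protocol_handle *ph,
				       u32 clk_id)
{
	u32 old_parent;
	int ret;

	ret = ops->parent_get(ph, clk_id, &old_parent);
	if (ret)
		return ret;

	/* Switch to parent 0; a real caller would pick a valid parent id. */
	ret = ops->parent_set(ph, clk_id, 0);
	if (ret)
		return ret;

	/* atomic == false: the transport may sleep while enabling. */
	return ops->enable(ph, clk_id, false);
}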