diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 7f38a92b444a..8e554e6a82a2 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2338,7 +2338,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); struct device *genpd_dev_pm_attach_by_id(struct device *dev, unsigned int index) { - struct device *genpd_dev; + struct device *virt_dev; int num_domains; int ret; @@ -2352,31 +2352,31 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, return NULL; /* Allocate and register device on the genpd bus. */ - genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL); - if (!genpd_dev) + virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); + if (!virt_dev) return ERR_PTR(-ENOMEM); - dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev)); - genpd_dev->bus = &genpd_bus_type; - genpd_dev->release = genpd_release_dev; + dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); + virt_dev->bus = &genpd_bus_type; + virt_dev->release = genpd_release_dev; - ret = device_register(genpd_dev); + ret = device_register(virt_dev); if (ret) { - kfree(genpd_dev); + kfree(virt_dev); return ERR_PTR(ret); } /* Try to attach the device to the PM domain at the specified index. */ - ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false); + ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false); if (ret < 1) { - device_unregister(genpd_dev); + device_unregister(virt_dev); return ret ? ERR_PTR(ret) : NULL; } - pm_runtime_enable(genpd_dev); - genpd_queue_power_off_work(dev_to_genpd(genpd_dev)); + pm_runtime_enable(virt_dev); + genpd_queue_power_off_work(dev_to_genpd(virt_dev)); - return genpd_dev; + return virt_dev; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); @@ -2521,52 +2521,36 @@ int of_genpd_parse_idle_states(struct device_node *dn, EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); /** - * of_genpd_opp_to_performance_state- Gets performance state of device's - * power domain corresponding to a DT node's "required-opps" property. + * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node. * - * @dev: Device for which the performance-state needs to be found. - * @np: DT node where the "required-opps" property is present. This can be - * the device node itself (if it doesn't have an OPP table) or a node - * within the OPP table of a device (if device has an OPP table). + * @genpd_dev: Genpd's device for which the performance-state needs to be found. + * @opp: struct dev_pm_opp of the OPP for which we need to find performance + * state. * - * Returns performance state corresponding to the "required-opps" property of - * a DT node. This calls platform specific genpd->opp_to_performance_state() - * callback to translate power domain OPP to performance state. + * Returns performance state encoded in the OPP of the genpd. This calls + * platform specific genpd->opp_to_performance_state() callback to translate + * power domain OPP to performance state. * * Returns performance state on success and 0 on failure. 
*/ -unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np) +unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp) { - struct generic_pm_domain *genpd; - struct dev_pm_opp *opp; - int state = 0; + struct generic_pm_domain *genpd = NULL; + int state; - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return 0; + genpd = container_of(genpd_dev, struct generic_pm_domain, dev); - if (unlikely(!genpd->set_performance_state)) + if (unlikely(!genpd->opp_to_performance_state)) return 0; genpd_lock(genpd); - - opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np); - if (IS_ERR(opp)) { - dev_err(dev, "Failed to find required OPP: %ld\n", - PTR_ERR(opp)); - goto unlock; - } - state = genpd->opp_to_performance_state(genpd, opp); - dev_pm_opp_put(opp); - -unlock: genpd_unlock(genpd); return state; } -EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state); +EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state); static int __init genpd_bus_init(void) { diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 2c2df4e4fc14..0eaa954b3f6c 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -548,44 +548,6 @@ _generic_set_opp_clk_only(struct device *dev, struct clk *clk, return ret; } -static inline int -_generic_set_opp_domain(struct device *dev, struct clk *clk, - unsigned long old_freq, unsigned long freq, - unsigned int old_pstate, unsigned int new_pstate) -{ - int ret; - - /* Scaling up? Scale domain performance state before frequency */ - if (freq > old_freq) { - ret = dev_pm_genpd_set_performance_state(dev, new_pstate); - if (ret) - return ret; - } - - ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); - if (ret) - goto restore_domain_state; - - /* Scaling down? Scale domain performance state after frequency */ - if (freq < old_freq) { - ret = dev_pm_genpd_set_performance_state(dev, new_pstate); - if (ret) - goto restore_freq; - } - - return 0; - -restore_freq: - if (_generic_set_opp_clk_only(dev, clk, freq, old_freq)) - dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", - __func__, old_freq); -restore_domain_state: - if (freq > old_freq) - dev_pm_genpd_set_performance_state(dev, old_pstate); - - return ret; -} - static int _generic_set_opp_regulator(const struct opp_table *opp_table, struct device *dev, unsigned long old_freq, @@ -635,6 +597,84 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, return ret; } +static int _set_opp_custom(const struct opp_table *opp_table, + struct device *dev, unsigned long old_freq, + unsigned long freq, + struct dev_pm_opp_supply *old_supply, + struct dev_pm_opp_supply *new_supply) +{ + struct dev_pm_set_opp_data *data; + int size; + + data = opp_table->set_opp_data; + data->regulators = opp_table->regulators; + data->regulator_count = opp_table->regulator_count; + data->clk = opp_table->clk; + data->dev = dev; + + data->old_opp.rate = old_freq; + size = sizeof(*old_supply) * opp_table->regulator_count; + if (IS_ERR(old_supply)) + memset(data->old_opp.supplies, 0, size); + else + memcpy(data->old_opp.supplies, old_supply, size); + + data->new_opp.rate = freq; + memcpy(data->new_opp.supplies, new_supply, size); + + return opp_table->set_opp(data); +} + +/* This is only called for PM domain for now */ +static int _set_required_opps(struct device *dev, + struct opp_table *opp_table, + struct dev_pm_opp *opp) +{ + struct opp_table **required_opp_tables = opp_table->required_opp_tables; + struct device **genpd_virt_devs = 
opp_table->genpd_virt_devs; + unsigned int pstate; + int i, ret = 0; + + if (!required_opp_tables) + return 0; + + /* Single genpd case */ + if (!genpd_virt_devs) { + pstate = opp->required_opps[0]->pstate; + ret = dev_pm_genpd_set_performance_state(dev, pstate); + if (ret) { + dev_err(dev, "Failed to set performance state of %s: %d (%d)\n", + dev_name(dev), pstate, ret); + } + return ret; + } + + /* Multiple genpd case */ + + /* + * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev + * after it is freed from another thread. + */ + mutex_lock(&opp_table->genpd_virt_dev_lock); + + for (i = 0; i < opp_table->required_opp_count; i++) { + pstate = opp->required_opps[i]->pstate; + + if (!genpd_virt_devs[i]) + continue; + + ret = dev_pm_genpd_set_performance_state(genpd_virt_devs[i], pstate); + if (ret) { + dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n", + dev_name(genpd_virt_devs[i]), pstate, ret); + break; + } + } + mutex_unlock(&opp_table->genpd_virt_dev_lock); + + return ret; +} + /** * dev_pm_opp_set_rate() - Configure new OPP based on frequency * @dev: device for which we do this operation @@ -649,7 +689,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) unsigned long freq, old_freq; struct dev_pm_opp *old_opp, *opp; struct clk *clk; - int ret, size; + int ret; if (unlikely(!target_freq)) { dev_err(dev, "%s: Invalid target frequency %lu\n", __func__, @@ -702,44 +742,34 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, old_freq, freq); - /* Only frequency scaling */ - if (!opp_table->regulators) { - /* - * We don't support devices with both regulator and - * domain performance-state for now. - */ - if (opp_table->genpd_performance_state) - ret = _generic_set_opp_domain(dev, clk, old_freq, freq, - IS_ERR(old_opp) ? 0 : old_opp->pstate, - opp->pstate); - else - ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); - } else if (!opp_table->set_opp) { + /* Scaling up? Configure required OPPs before frequency */ + if (freq > old_freq) { + ret = _set_required_opps(dev, opp_table, opp); + if (ret) + goto put_opp; + } + + if (opp_table->set_opp) { + ret = _set_opp_custom(opp_table, dev, old_freq, freq, + IS_ERR(old_opp) ? NULL : old_opp->supplies, + opp->supplies); + } else if (opp_table->regulators) { ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq, IS_ERR(old_opp) ? NULL : old_opp->supplies, opp->supplies); } else { - struct dev_pm_set_opp_data *data; - - data = opp_table->set_opp_data; - data->regulators = opp_table->regulators; - data->regulator_count = opp_table->regulator_count; - data->clk = clk; - data->dev = dev; - - data->old_opp.rate = old_freq; - size = sizeof(*opp->supplies) * opp_table->regulator_count; - if (IS_ERR(old_opp)) - memset(data->old_opp.supplies, 0, size); - else - memcpy(data->old_opp.supplies, old_opp->supplies, size); - - data->new_opp.rate = freq; - memcpy(data->new_opp.supplies, opp->supplies, size); - - ret = opp_table->set_opp(data); + /* Only frequency scaling */ + ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); } + /* Scaling down? 
Configure required OPPs after frequency */ + if (!ret && freq < old_freq) { + ret = _set_required_opps(dev, opp_table, opp); + if (ret) + dev_err(dev, "Failed to set required opps: %d\n", ret); + } + +put_opp: dev_pm_opp_put(opp); put_old_opp: if (!IS_ERR(old_opp)) @@ -810,6 +840,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) return NULL; mutex_init(&opp_table->lock); + mutex_init(&opp_table->genpd_virt_dev_lock); INIT_LIST_HEAD(&opp_table->dev_list); opp_dev = _add_opp_dev(dev, opp_table); @@ -888,6 +919,8 @@ static void _opp_table_kref_release(struct kref *kref) struct opp_table *opp_table = container_of(kref, struct opp_table, kref); struct opp_device *opp_dev, *temp; + _of_clear_opp_table(opp_table); + /* Release clk */ if (!IS_ERR(opp_table->clk)) clk_put(opp_table->clk); @@ -905,6 +938,7 @@ static void _opp_table_kref_release(struct kref *kref) _remove_opp_dev(opp_dev, opp_table); } + mutex_destroy(&opp_table->genpd_virt_dev_lock); mutex_destroy(&opp_table->lock); list_del(&opp_table->node); kfree(opp_table); @@ -961,6 +995,7 @@ static void _opp_kref_release(struct kref *kref) * frequency/voltage list. */ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp); + _of_opp_free_required_opps(opp_table, opp); opp_debug_remove_one(opp); list_del(&opp->node); kfree(opp); @@ -1586,6 +1621,92 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) } EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper); +/** + * dev_pm_opp_set_genpd_virt_dev - Set virtual genpd device for an index + * @dev: Consumer device for which the genpd device is getting set. + * @virt_dev: virtual genpd device. + * @index: index. + * + * Multiple generic power domains for a device are supported with the help of + * virtual genpd devices, which are created for each consumer device - genpd + * pair. These are the device structures which are attached to the power domain + * and are required by the OPP core to set the performance state of the genpd. + * + * This helper will normally be called by the consumer driver of the device + * "dev", as only that has details of the genpd devices. + * + * This helper needs to be called once for each of those virtual devices, but + * only if multiple domains are available for a device. Otherwise the original + * device structure will be used instead by the OPP core. + */ +struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, + struct device *virt_dev, + int index) +{ + struct opp_table *opp_table; + + opp_table = dev_pm_opp_get_opp_table(dev); + if (!opp_table) + return ERR_PTR(-ENOMEM); + + mutex_lock(&opp_table->genpd_virt_dev_lock); + + if (unlikely(!opp_table->genpd_virt_devs || + index >= opp_table->required_opp_count || + opp_table->genpd_virt_devs[index])) { + + dev_err(dev, "Invalid request to set required device\n"); + dev_pm_opp_put_opp_table(opp_table); + mutex_unlock(&opp_table->genpd_virt_dev_lock); + + return ERR_PTR(-EINVAL); + } + + opp_table->genpd_virt_devs[index] = virt_dev; + mutex_unlock(&opp_table->genpd_virt_dev_lock); + + return opp_table; +} + +/** + * dev_pm_opp_put_genpd_virt_dev() - Releases resources blocked for genpd device. + * @opp_table: OPP table returned by dev_pm_opp_set_genpd_virt_dev(). + * @virt_dev: virtual genpd device. + * + * This releases the resource previously acquired with a call to + * dev_pm_opp_set_genpd_virt_dev(). The consumer driver shall call this helper + * if it doesn't want OPP core to update performance state of a power domain + * anymore. 
+ */ +void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, + struct device *virt_dev) +{ + int i; + + /* + * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting + * used in parallel. + */ + mutex_lock(&opp_table->genpd_virt_dev_lock); + + for (i = 0; i < opp_table->required_opp_count; i++) { + if (opp_table->genpd_virt_devs[i] != virt_dev) + continue; + + opp_table->genpd_virt_devs[i] = NULL; + dev_pm_opp_put_opp_table(opp_table); + + /* Drop the vote */ + dev_pm_genpd_set_performance_state(virt_dev, 0); + break; + } + + mutex_unlock(&opp_table->genpd_virt_dev_lock); + + if (unlikely(i == opp_table->required_opp_count)) + dev_err(virt_dev, "Failed to find required device entry\n"); +} + /** * dev_pm_opp_add() - Add an OPP table from a table definitions * @dev: device for which we do this operation diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 38a08805a30c..840f85181a37 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -73,6 +73,161 @@ struct opp_table *_managed_opp(struct device *dev, int index) return managed_table; } +/* The caller must call dev_pm_opp_put() after the OPP is used */ +static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table, + struct device_node *opp_np) +{ + struct dev_pm_opp *opp; + + lockdep_assert_held(&opp_table_lock); + + mutex_lock(&opp_table->lock); + + list_for_each_entry(opp, &opp_table->opp_list, node) { + if (opp->np == opp_np) { + dev_pm_opp_get(opp); + mutex_unlock(&opp_table->lock); + return opp; + } + } + + mutex_unlock(&opp_table->lock); + + return NULL; +} + +static struct device_node *of_parse_required_opp(struct device_node *np, + int index) +{ + struct device_node *required_np; + + required_np = of_parse_phandle(np, "required-opps", index); + if (unlikely(!required_np)) { + pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n", + __func__, np, index); + } + + return required_np; +} + +/* The caller must call dev_pm_opp_put_opp_table() after the table is used */ +static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np) +{ + struct opp_table *opp_table; + struct dev_pm_opp *opp; + + lockdep_assert_held(&opp_table_lock); + + list_for_each_entry(opp_table, &opp_tables, node) { + opp = _find_opp_of_np(opp_table, opp_np); + if (opp) { + dev_pm_opp_put(opp); + _get_opp_table_kref(opp_table); + return opp_table; + } + } + + return ERR_PTR(-ENODEV); +} + +/* Free resources previously acquired by _opp_table_alloc_required_tables() */ +static void _opp_table_free_required_tables(struct opp_table *opp_table) +{ + struct opp_table **required_opp_tables = opp_table->required_opp_tables; + struct device **genpd_virt_devs = opp_table->genpd_virt_devs; + int i; + + if (!required_opp_tables) + return; + + for (i = 0; i < opp_table->required_opp_count; i++) { + if (IS_ERR_OR_NULL(required_opp_tables[i])) + break; + + dev_pm_opp_put_opp_table(required_opp_tables[i]); + } + + kfree(required_opp_tables); + kfree(genpd_virt_devs); + + opp_table->required_opp_count = 0; + opp_table->genpd_virt_devs = NULL; + opp_table->required_opp_tables = NULL; +} + +/* + * Populate all devices and opp tables which are part of "required-opps" list. + * Checking only the first OPP node should be enough. 
+ */ +static void _opp_table_alloc_required_tables(struct opp_table *opp_table, + struct device *dev, + struct device_node *opp_np) +{ + struct opp_table **required_opp_tables; + struct device **genpd_virt_devs = NULL; + struct device_node *required_np, *np; + int count, i; + + /* Traversing the first OPP node is all we need */ + np = of_get_next_available_child(opp_np, NULL); + if (!np) { + dev_err(dev, "Empty OPP table\n"); + return; + } + + count = of_count_phandle_with_args(np, "required-opps", NULL); + if (!count) + goto put_np; + + if (count > 1) { + genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs), + GFP_KERNEL); + if (!genpd_virt_devs) + goto put_np; + } + + required_opp_tables = kcalloc(count, sizeof(*required_opp_tables), + GFP_KERNEL); + if (!required_opp_tables) { + kfree(genpd_virt_devs); + goto put_np; + } + + opp_table->genpd_virt_devs = genpd_virt_devs; + opp_table->required_opp_tables = required_opp_tables; + opp_table->required_opp_count = count; + + for (i = 0; i < count; i++) { + required_np = of_parse_required_opp(np, i); + if (!required_np) + goto free_required_tables; + + required_opp_tables[i] = _find_table_of_opp_np(required_np); + of_node_put(required_np); + + if (IS_ERR(required_opp_tables[i])) + goto free_required_tables; + + /* + * We only support genpd's OPPs in the "required-opps" for now, + * as we don't know how much about other cases. Error out if the + * required OPP doesn't belong to a genpd. + */ + if (!required_opp_tables[i]->is_genpd) { + dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n", + required_np); + goto free_required_tables; + } + } + + goto put_np; + +free_required_tables: + _opp_table_free_required_tables(opp_table); +put_np: + of_node_put(np); +} + void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) { @@ -92,6 +247,9 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, of_property_read_u32(np, "voltage-tolerance", &opp_table->voltage_tolerance_v1); + if (of_find_property(np, "#power-domain-cells", NULL)) + opp_table->is_genpd = true; + /* Get OPP table node */ opp_np = _opp_of_get_opp_desc_node(np, index); of_node_put(np); @@ -106,9 +264,86 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, opp_table->np = opp_np; + _opp_table_alloc_required_tables(opp_table, dev, opp_np); of_node_put(opp_np); } +void _of_clear_opp_table(struct opp_table *opp_table) +{ + _opp_table_free_required_tables(opp_table); +} + +/* + * Release all resources previously acquired with a call to + * _of_opp_alloc_required_opps(). 
+ */ +void _of_opp_free_required_opps(struct opp_table *opp_table, + struct dev_pm_opp *opp) +{ + struct dev_pm_opp **required_opps = opp->required_opps; + int i; + + if (!required_opps) + return; + + for (i = 0; i < opp_table->required_opp_count; i++) { + if (!required_opps[i]) + break; + + /* Put the reference back */ + dev_pm_opp_put(required_opps[i]); + } + + kfree(required_opps); + opp->required_opps = NULL; +} + +/* Populate all required OPPs which are part of "required-opps" list */ +static int _of_opp_alloc_required_opps(struct opp_table *opp_table, + struct dev_pm_opp *opp) +{ + struct dev_pm_opp **required_opps; + struct opp_table *required_table; + struct device_node *np; + int i, ret, count = opp_table->required_opp_count; + + if (!count) + return 0; + + required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL); + if (!required_opps) + return -ENOMEM; + + opp->required_opps = required_opps; + + for (i = 0; i < count; i++) { + required_table = opp_table->required_opp_tables[i]; + + np = of_parse_required_opp(opp->np, i); + if (unlikely(!np)) { + ret = -ENODEV; + goto free_required_opps; + } + + required_opps[i] = _find_opp_of_np(required_table, np); + of_node_put(np); + + if (!required_opps[i]) { + pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", + __func__, opp->np, i); + ret = -ENODEV; + goto free_required_opps; + } + } + + return 0; + +free_required_opps: + _of_opp_free_required_opps(opp_table, opp); + + return ret; +} + static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, struct device_node *np) { @@ -326,8 +561,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, ret = of_property_read_u64(np, "opp-hz", &rate); if (ret < 0) { /* "opp-hz" is optional for devices like power domains. */ - if (!of_find_property(dev->of_node, "#power-domain-cells", - NULL)) { + if (!opp_table->is_genpd) { dev_err(dev, "%s: opp-hz not found\n", __func__); goto free_opp; } @@ -354,21 +588,26 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, new_opp->dynamic = false; new_opp->available = true; + ret = _of_opp_alloc_required_opps(opp_table, new_opp); + if (ret) + goto free_opp; + if (!of_property_read_u32(np, "clock-latency-ns", &val)) new_opp->clock_latency_ns = val; - new_opp->pstate = of_genpd_opp_to_performance_state(dev, np); - ret = opp_parse_supplies(new_opp, dev, opp_table); if (ret) - goto free_opp; + goto free_required_opps; + + if (opp_table->is_genpd) + new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp); ret = _opp_add(dev, new_opp, opp_table, rate_not_available); if (ret) { /* Don't return error for duplicate OPPs */ if (ret == -EBUSY) ret = 0; - goto free_opp; + goto free_required_opps; } /* OPP to select on device suspend */ @@ -398,6 +637,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); return new_opp; +free_required_opps: + _of_opp_free_required_opps(opp_table, new_opp); free_opp: _opp_free(new_opp); @@ -727,58 +968,48 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); /** - * of_dev_pm_opp_find_required_opp() - Search for required OPP. - * @dev: The device whose OPP node is referenced by the 'np' DT node. + * of_get_required_opp_performance_state() - Search for required OPP and return its performance state. * @np: Node that contains the "required-opps" property. + * @index: Index of the phandle to parse. 
* - * Returns the OPP of the device 'dev', whose phandle is present in the "np" - * node. Although the "required-opps" property supports having multiple - * phandles, this helper routine only parses the very first phandle in the list. + * Returns the performance state of the OPP pointed out by the "required-opps" + * property at @index in @np. * - * Return: Matching opp, else returns ERR_PTR in case of error and should be - * handled using IS_ERR. - * - * The callers are required to call dev_pm_opp_put() for the returned OPP after - * use. + * Return: Positive performance state on success, otherwise 0 on errors. */ -struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, - struct device_node *np) +unsigned int of_get_required_opp_performance_state(struct device_node *np, + int index) { - struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ENODEV); + struct dev_pm_opp *opp; struct device_node *required_np; struct opp_table *opp_table; + unsigned int pstate = 0; - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) - return ERR_CAST(opp_table); + required_np = of_parse_required_opp(np, index); + if (!required_np) + return 0; - required_np = of_parse_phandle(np, "required-opps", 0); - if (unlikely(!required_np)) { - dev_err(dev, "Unable to parse required-opps\n"); - goto put_opp_table; + opp_table = _find_table_of_opp_np(required_np); + if (IS_ERR(opp_table)) { + pr_err("%s: Failed to find required OPP table %pOF: %ld\n", + __func__, np, PTR_ERR(opp_table)); + goto put_required_np; } - mutex_lock(&opp_table->lock); - - list_for_each_entry(temp_opp, &opp_table->opp_list, node) { - if (temp_opp->available && temp_opp->np == required_np) { - opp = temp_opp; - - /* Increment the reference count of OPP */ - dev_pm_opp_get(opp); - break; - } + opp = _find_opp_of_np(opp_table, required_np); + if (opp) { + pstate = opp->pstate; + dev_pm_opp_put(opp); } - mutex_unlock(&opp_table->lock); - - of_node_put(required_np); -put_opp_table: dev_pm_opp_put_opp_table(opp_table); - return opp; +put_required_np: + of_node_put(required_np); + + return pstate; } -EXPORT_SYMBOL_GPL(of_dev_pm_opp_find_required_opp); +EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state); /** * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 9c6544b4f4f9..8aec38792cae 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -63,6 +63,7 @@ extern struct list_head opp_tables; * @supplies: Power supplies voltage/current values * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's * frequency from any other OPP's frequency. + * @required_opps: List of OPPs that are required by this OPP. * @opp_table: points back to the opp_table struct this opp belongs to * @np: OPP's device node. * @dentry: debugfs dentry pointer (per opp) @@ -84,6 +85,7 @@ struct dev_pm_opp { unsigned long clock_latency_ns; + struct dev_pm_opp **required_opps; struct opp_table *opp_table; struct device_node *np; @@ -133,6 +135,11 @@ enum opp_table_access { * @parsed_static_opps: True if OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. * @suspend_opp: Pointer to OPP to be used during device suspend. + * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. + * @genpd_virt_devs: List of virtual devices for multiple genpd support. + * @required_opp_tables: List of device OPP tables that are required by OPPs in + * this table. + * @required_opp_count: Number of required devices. 
* @supported_hw: Array of version number to support. * @supported_hw_count: Number of elements in supported_hw array. * @prop_name: A name to postfix to many DT properties, while parsing them. @@ -140,6 +147,7 @@ enum opp_table_access { * @regulators: Supply regulators * @regulator_count: Number of power supply regulators * @genpd_performance_state: Device's power domain support performance state. + * @is_genpd: Marks if the OPP table belongs to a genpd. * @set_opp: Platform specific set_opp callback * @set_opp_data: Data to be passed to set_opp callback * @dentry: debugfs dentry pointer of the real device directory (not links). @@ -171,6 +179,11 @@ struct opp_table { enum opp_table_access shared_opp; struct dev_pm_opp *suspend_opp; + struct mutex genpd_virt_dev_lock; + struct device **genpd_virt_devs; + struct opp_table **required_opp_tables; + unsigned int required_opp_count; + unsigned int *supported_hw; unsigned int supported_hw_count; const char *prop_name; @@ -178,6 +191,7 @@ struct opp_table { struct regulator **regulators; unsigned int regulator_count; bool genpd_performance_state; + bool is_genpd; int (*set_opp)(struct dev_pm_set_opp_data *data); struct dev_pm_set_opp_data *set_opp_data; @@ -206,10 +220,16 @@ void _put_opp_list_kref(struct opp_table *opp_table); #ifdef CONFIG_OF void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index); +void _of_clear_opp_table(struct opp_table *opp_table); struct opp_table *_managed_opp(struct device *dev, int index); +void _of_opp_free_required_opps(struct opp_table *opp_table, + struct dev_pm_opp *opp); #else static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {} +static inline void _of_clear_opp_table(struct opp_table *opp_table) {} static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; } +static inline void _of_opp_free_required_opps(struct opp_table *opp_table, + struct dev_pm_opp *opp) {} #endif #ifdef CONFIG_DEBUG_FS diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 3b5d7280e52e..642036952553 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -258,8 +258,8 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent, struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n); -unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np); +unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp); int genpd_dev_pm_attach(struct device *dev); struct device *genpd_dev_pm_attach_by_id(struct device *dev, @@ -300,8 +300,8 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn, } static inline unsigned int -of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np) +pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp) { return 0; } diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 5d399eeef172..2b2c3fd985ab 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -126,6 +126,8 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); +struct 
opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index); +void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -272,6 +274,12 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} +static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { return -ENOTSUPP; @@ -305,8 +313,8 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); -struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); +unsigned int of_get_required_opp_performance_state(struct device_node *np, int index); #else static inline int dev_pm_opp_of_add_table(struct device *dev) { @@ -341,14 +349,14 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device return NULL; } -static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np) -{ - return NULL; -} static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) { return NULL; } +static inline unsigned int of_get_required_opp_performance_state(struct device_node *np, int index) +{ + return 0; +} #endif #endif /* __LINUX_OPP_H__ */
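
Note for reviewers: a rough sketch of how a consumer driver whose device sits in two power
domains could use the helpers added above (genpd_dev_pm_attach_by_id(),
dev_pm_opp_set_genpd_virt_dev(), dev_pm_opp_put_genpd_virt_dev()). All driver-side names
(foo_*), the fixed domain count and the chosen frequency are hypothetical and only
illustrate the intended call flow; error unwinding on partial failure is omitted for
brevity.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/pm_domain.h>
	#include <linux/pm_opp.h>

	#define FOO_NUM_DOMAINS	2	/* hypothetical: two entries in "power-domains" */

	static struct device *foo_virt_devs[FOO_NUM_DOMAINS];
	static struct opp_table *foo_opp_tables[FOO_NUM_DOMAINS];

	static int foo_setup_domains(struct device *dev)
	{
		int i;

		for (i = 0; i < FOO_NUM_DOMAINS; i++) {
			/* Attach a virtual device to power domain @i */
			foo_virt_devs[i] = genpd_dev_pm_attach_by_id(dev, i);
			if (IS_ERR_OR_NULL(foo_virt_devs[i]))
				return foo_virt_devs[i] ?
					PTR_ERR(foo_virt_devs[i]) : -ENODEV;

			/* Hand it to the OPP core for the matching "required-opps" index */
			foo_opp_tables[i] = dev_pm_opp_set_genpd_virt_dev(dev,
						foo_virt_devs[i], i);
			if (IS_ERR(foo_opp_tables[i]))
				return PTR_ERR(foo_opp_tables[i]);
		}

		/*
		 * The OPP core now also sets the performance state of both
		 * domains whenever the device changes OPP (800 MHz is just an
		 * example frequency).
		 */
		return dev_pm_opp_set_rate(dev, 800000000);
	}

	static void foo_teardown_domains(struct device *dev)
	{
		int i;

		for (i = 0; i < FOO_NUM_DOMAINS; i++) {
			if (IS_ERR_OR_NULL(foo_opp_tables[i]))
				continue;
			/* Drops the performance-state vote and the OPP table reference */
			dev_pm_opp_put_genpd_virt_dev(foo_opp_tables[i],
						      foo_virt_devs[i]);
		}
	}

Once the virtual devices are registered, _set_required_opps() votes on the domains before
the frequency change when scaling up and after it when scaling down, mirroring the old
_generic_set_opp_domain() ordering.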
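
For the single-domain case where a driver only wants to translate a "required-opps"
phandle into a performance state and vote for it directly, the new
of_get_required_opp_performance_state() helper can be used as in the sketch below. The
driver function name is hypothetical; the helpers are the ones added by this patch.

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/of.h>
	#include <linux/pm_domain.h>
	#include <linux/pm_opp.h>

	static int bar_vote_initial_state(struct device *dev)
	{
		unsigned int pstate;

		/* Translate the first "required-opps" phandle of the device node */
		pstate = of_get_required_opp_performance_state(dev->of_node, 0);
		if (!pstate)
			return -EINVAL;	/* 0 means the lookup failed */

		/* Vote for that state on the (single) attached power domain */
		return dev_pm_genpd_set_performance_state(dev, pstate);
	}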
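
On the provider side, pm_genpd_opp_to_performance_state() simply forwards to the genpd's
->opp_to_performance_state() callback under the genpd lock. A provider callback could, for
instance, decode a vendor-specific value from the OPP node, roughly as sketched below.
The "vendor,level" property name is a made-up placeholder and not something this series
defines; only the callback signature and dev_pm_opp_get_of_node() are taken from the
patch.

	#include <linux/of.h>
	#include <linux/pm_domain.h>
	#include <linux/pm_opp.h>

	static unsigned int baz_opp_to_performance_state(struct generic_pm_domain *genpd,
							 struct dev_pm_opp *opp)
	{
		struct device_node *np;
		u32 level = 0;

		/* Read an illustrative, vendor-specific value from the OPP node */
		np = dev_pm_opp_get_of_node(opp);
		of_property_read_u32(np, "vendor,level", &level);
		of_node_put(np);

		return level;
	}

	/* Wired up when initialising the provider's generic_pm_domain, e.g.: */
	/*	baz_genpd.opp_to_performance_state = baz_opp_to_performance_state; */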