Merge branch 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm

Pull OPP (Operating Performance Points) updates for 5.11-rc1 from
Viresh Kumar:

"This contains the following updates:

 - Allow empty (node-less) OPP tables in DT for passing just the
   dependency-related information (Nicola Mazzucato).

 - Fix a potential lockdep warning in the OPP core, along with other OPP
   core cleanups (Viresh Kumar).

 - Stop abusing dev_pm_opp_get_opp_table() to create OPP tables, and fix
   the cpufreq-dt driver accordingly (Viresh Kumar).

 - Make dev_pm_opp_put_regulators() and the other put helpers accept a
   NULL argument, and update all their users accordingly (Viresh Kumar);
   a short sketch follows below."
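
As a sketch of what the NULL-tolerant put helpers buy their callers (the
driver structure and function below are illustrative, not taken from any
driver in this series):

  #include <linux/pm_opp.h>

  struct my_drv {
          struct opp_table *opp_table;    /* may still be NULL here */
  };

  static void my_drv_cleanup(struct my_drv *drv)
  {
          /*
           * Previously every caller needed a guard:
           *
           *      if (drv->opp_table)
           *              dev_pm_opp_put_regulators(drv->opp_table);
           *
           * The helper now returns early on NULL, so the guard goes away.
           */
          dev_pm_opp_put_regulators(drv->opp_table);
          drv->opp_table = NULL;  /* keep a later cleanup pass harmless */
  }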

* 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  opp: of: Allow empty opp-table with opp-shared
  dt-bindings: opp: Allow empty OPP tables
  media: venus: dev_pm_opp_put_*() accepts NULL argument
  drm/panfrost: dev_pm_opp_put_*() accepts NULL argument
  drm/lima: dev_pm_opp_put_*() accepts NULL argument
  PM / devfreq: exynos: dev_pm_opp_put_*() accepts NULL argument
  cpufreq: qcom-cpufreq-nvmem: dev_pm_opp_put_*() accepts NULL argument
  cpufreq: dt: dev_pm_opp_put_regulators() accepts NULL argument
  opp: Allow dev_pm_opp_put_*() APIs to accept NULL opp_table
  opp: Don't create an OPP table from dev_pm_opp_get_opp_table()
  cpufreq: dt: Don't (ab)use dev_pm_opp_get_opp_table() to create OPP table
  opp: Reduce the size of critical section in _opp_kref_release()
  opp: Don't return opp_dev from _find_opp_dev()
  opp: Allocate the OPP table outside of opp_table_lock
  opp: Always add entries in dev_list with opp_table->lock held
commit f0f6dbaf06
Author: Rafael J. Wysocki
Date:   2020-12-14 20:26:17 +01:00

12 changed files with 283 additions and 223 deletions

@@ -65,7 +65,9 @@ Required properties:
- OPP nodes: One or more OPP nodes describing voltage-current-frequency
combinations. Their name isn't significant but their phandle can be used to
reference an OPP.
reference an OPP. These are mandatory except for the case where the OPP table
is present only to indicate dependency between devices using the opp-shared
property.
Optional properties:
- opp-shared: Indicates that device nodes using this OPP Table Node's phandle
@@ -568,3 +570,53 @@ Example 6: opp-microvolt-<name>, opp-microamp-<name>:
};
};
};
Example 7: Single-cluster quad-core ARM Cortex-A53, OPP points from firmware,
distinct clock controls but two sets of clock/voltage/current lines.
/ {
cpus {
#address-cells = <2>;
#size-cells = <0>;
cpu@0 {
compatible = "arm,cortex-a53";
reg = <0x0 0x100>;
next-level-cache = <&A53_L2>;
clocks = <&dvfs_controller 0>;
operating-points-v2 = <&cpu_opp0_table>;
};
cpu@1 {
compatible = "arm,cortex-a53";
reg = <0x0 0x101>;
next-level-cache = <&A53_L2>;
clocks = <&dvfs_controller 1>;
operating-points-v2 = <&cpu_opp0_table>;
};
cpu@2 {
compatible = "arm,cortex-a53";
reg = <0x0 0x102>;
next-level-cache = <&A53_L2>;
clocks = <&dvfs_controller 2>;
operating-points-v2 = <&cpu_opp1_table>;
};
cpu@3 {
compatible = "arm,cortex-a53";
reg = <0x0 0x103>;
next-level-cache = <&A53_L2>;
clocks = <&dvfs_controller 3>;
operating-points-v2 = <&cpu_opp1_table>;
};
};
cpu_opp0_table: opp0_table {
compatible = "operating-points-v2";
opp-shared;
};
cpu_opp1_table: opp1_table {
compatible = "operating-points-v2";
opp-shared;
};
};
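
A consumer can recover the sharing relationship encoded by these empty
tables through the existing helper; a minimal sketch, assuming CPU device
lookup via get_cpu_device() (the function itself is hypothetical):

  #include <linux/cpu.h>
  #include <linux/cpumask.h>
  #include <linux/pm_opp.h>

  /*
   * Fill 'domain' with every CPU whose operating-points-v2 phandle
   * matches cpu0's table. With Example 7 above this yields
   * {cpu0, cpu1} for cpu_opp0_table.
   */
  static int find_dvfs_domain(struct cpumask *domain)
  {
          struct device *cpu_dev = get_cpu_device(0);

          if (!cpu_dev)
                  return -ENODEV;

          return dev_pm_opp_of_get_sharing_cpus(cpu_dev, domain);
  }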

@@ -2249,7 +2249,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
* Save table for faster processing while setting
* performance state.
*/
genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
WARN_ON(IS_ERR(genpd->opp_table));
}

@@ -30,7 +30,7 @@ struct private_data {
cpumask_var_t cpus;
struct device *cpu_dev;
struct opp_table *opp_table;
struct opp_table *reg_opp_table;
struct cpufreq_frequency_table *freq_table;
bool have_static_opps;
};
@@ -102,7 +102,6 @@ static const char *find_supply_name(struct device *dev)
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
struct private_data *priv;
struct device *cpu_dev;
struct clk *cpu_clk;
@@ -114,9 +113,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
pr_err("failed to find data for cpu%d\n", policy->cpu);
return -ENODEV;
}
cpu_dev = priv->cpu_dev;
cpumask_copy(policy->cpus, priv->cpus);
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
@@ -125,67 +122,32 @@ static int cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
/*
* Initialize OPP tables for all policy->cpus. They will be shared by
* all CPUs which have marked their CPUs shared with OPP bindings.
*
* For platforms not using operating-points-v2 bindings, we do this
* before updating policy->cpus. Otherwise, we will end up creating
* duplicate OPPs for policy->cpus.
*
* OPPs might be populated at runtime, don't check for error here
*/
if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
priv->have_static_opps = true;
/*
* But we need OPP table to function so if it is not there let's
* give platform code chance to provide it for us.
*/
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
dev_err(cpu_dev, "OPP table can't be empty\n");
ret = -ENODEV;
goto out_free_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_opp;
}
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
transition_latency = CPUFREQ_ETERNAL;
cpumask_copy(policy->cpus, priv->cpus);
policy->driver_data = priv;
policy->clk = cpu_clk;
policy->freq_table = freq_table;
policy->freq_table = priv->freq_table;
policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
/* Support turbo/boost mode */
if (policy_has_boost_freq(policy)) {
/* This gets disabled by core on driver unregister */
ret = cpufreq_enable_boost_support();
if (ret)
goto out_free_cpufreq_table;
goto out_clk_put;
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
transition_latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
out_clk_put:
clk_put(cpu_clk);
return ret;
@@ -208,11 +170,6 @@ static int cpufreq_offline(struct cpufreq_policy *policy)
static int cpufreq_exit(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
clk_put(policy->clk);
return 0;
}
@@ -236,6 +193,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
{
struct private_data *priv;
struct device *cpu_dev;
bool fallback = false;
const char *reg_name;
int ret;
@@ -254,68 +212,86 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpu, priv->cpus);
priv->cpu_dev = cpu_dev;
/* Try to get OPP table early to ensure resources are available */
priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev);
if (IS_ERR(priv->opp_table)) {
ret = PTR_ERR(priv->opp_table);
if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to get OPP table: %d\n", ret);
goto free_cpumask;
}
/*
* OPP layer will be taking care of regulators now, but it needs to know
* the name of the regulator first.
*/
reg_name = find_supply_name(cpu_dev);
if (reg_name) {
priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev,
&reg_name, 1);
if (IS_ERR(priv->reg_opp_table)) {
ret = PTR_ERR(priv->reg_opp_table);
priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
1);
if (IS_ERR(priv->opp_table)) {
ret = PTR_ERR(priv->opp_table);
if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to set regulators: %d\n",
ret);
goto put_table;
goto free_cpumask;
}
}
/* Find OPP sharing information so we can fill priv->cpus here */
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
if (ret) {
if (ret != -ENOENT)
goto put_reg;
goto out;
/*
* operating-points-v2 not supported, fallback to all CPUs share
* OPP for backward compatibility if the platform hasn't set
* sharing CPUs.
*/
if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {
cpumask_setall(priv->cpus);
if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
fallback = true;
}
/*
* OPP tables are initialized only for cpu, do it for
* others as well.
*/
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
}
/*
* Initialize OPP tables for all priv->cpus. They will be shared by
* all CPUs which have marked their CPUs shared with OPP bindings.
*
* For platforms not using operating-points-v2 bindings, we do this
* before updating priv->cpus. Otherwise, we will end up creating
* duplicate OPPs for the CPUs.
*
* OPPs might be populated at runtime, don't check for error here.
*/
if (!dev_pm_opp_of_cpumask_add_table(priv->cpus))
priv->have_static_opps = true;
/*
* The OPP table must be initialized, statically or dynamically, by this
* point.
*/
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
dev_err(cpu_dev, "OPP table can't be empty\n");
ret = -ENODEV;
goto out;
}
if (fallback) {
cpumask_setall(priv->cpus);
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out;
}
list_add(&priv->node, &priv_list);
return 0;
put_reg:
if (priv->reg_opp_table)
dev_pm_opp_put_regulators(priv->reg_opp_table);
put_table:
dev_pm_opp_put_opp_table(priv->opp_table);
out:
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
dev_pm_opp_put_regulators(priv->opp_table);
free_cpumask:
free_cpumask_var(priv->cpus);
return ret;
@@ -326,9 +302,10 @@ static void dt_cpufreq_release(void)
struct private_data *priv, *tmp;
list_for_each_entry_safe(priv, tmp, &priv_list, node) {
if (priv->reg_opp_table)
dev_pm_opp_put_regulators(priv->reg_opp_table);
dev_pm_opp_put_opp_table(priv->opp_table);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(priv->cpus);
dev_pm_opp_put_regulators(priv->opp_table);
free_cpumask_var(priv->cpus);
list_del(&priv->node);
}
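
Interleaved hunks aside, the net shape of the reworked cpufreq-dt init is
easier to see in outline; a simplified sketch of the new
dt_cpufreq_early_init() resource order (error unwinding omitted, not the
literal code):

  static int early_init_outline(struct device *cpu_dev,
                                struct private_data *priv)
  {
          const char *reg_name = find_supply_name(cpu_dev);

          /* 1. Regulators first; this also creates the OPP table. */
          if (reg_name)
                  priv->opp_table = dev_pm_opp_set_regulators(cpu_dev,
                                                              &reg_name, 1);

          /* 2. Discover the CPUs sharing this table (or fall back). */
          dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);

          /* 3. Add static OPPs from DT, if any. */
          if (!dev_pm_opp_of_cpumask_add_table(priv->cpus))
                  priv->have_static_opps = true;

          /* 4. Build the frequency table once, at early-init time. */
          return dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
  }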

@@ -397,19 +397,19 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
free_genpd_opp:
for_each_possible_cpu(cpu) {
if (IS_ERR_OR_NULL(drv->genpd_opp_tables[cpu]))
if (IS_ERR(drv->genpd_opp_tables[cpu]))
break;
dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
}
kfree(drv->genpd_opp_tables);
free_opp:
for_each_possible_cpu(cpu) {
if (IS_ERR_OR_NULL(drv->names_opp_tables[cpu]))
if (IS_ERR(drv->names_opp_tables[cpu]))
break;
dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
}
for_each_possible_cpu(cpu) {
if (IS_ERR_OR_NULL(drv->hw_opp_tables[cpu]))
if (IS_ERR(drv->hw_opp_tables[cpu]))
break;
dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
}
@@ -430,12 +430,9 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu) {
if (drv->names_opp_tables[cpu])
dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
if (drv->hw_opp_tables[cpu])
dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
if (drv->genpd_opp_tables[cpu])
dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
}
kfree(drv->names_opp_tables);

@@ -158,10 +158,8 @@ static void exynos_bus_exit(struct device *dev)
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
if (bus->opp_table) {
dev_pm_opp_put_regulators(bus->opp_table);
bus->opp_table = NULL;
}
dev_pm_opp_put_regulators(bus->opp_table);
bus->opp_table = NULL;
}
static void exynos_bus_passive_exit(struct device *dev)
@@ -444,10 +442,8 @@ static int exynos_bus_probe(struct platform_device *pdev)
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
err_reg:
if (!passive) {
dev_pm_opp_put_regulators(bus->opp_table);
bus->opp_table = NULL;
}
dev_pm_opp_put_regulators(bus->opp_table);
bus->opp_table = NULL;
return ret;
}

@@ -110,15 +110,10 @@ void lima_devfreq_fini(struct lima_device *ldev)
devfreq->opp_of_table_added = false;
}
if (devfreq->regulators_opp_table) {
dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
devfreq->regulators_opp_table = NULL;
}
if (devfreq->clkname_opp_table) {
dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
devfreq->clkname_opp_table = NULL;
}
dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
devfreq->regulators_opp_table = NULL;
devfreq->clkname_opp_table = NULL;
}
int lima_devfreq_init(struct lima_device *ldev)

@@ -170,10 +170,8 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
pfdevfreq->opp_of_table_added = false;
}
if (pfdevfreq->regulators_opp_table) {
dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
pfdevfreq->regulators_opp_table = NULL;
}
dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
pfdevfreq->regulators_opp_table = NULL;
}
void panfrost_devfreq_resume(struct panfrost_device *pfdev)

@@ -898,8 +898,7 @@ static void core_put_v4(struct device *dev)
if (core->has_opp_table)
dev_pm_opp_of_remove_table(dev);
if (core->opp_table)
dev_pm_opp_put_clkname(core->opp_table);
dev_pm_opp_put_clkname(core->opp_table);
}

@@ -29,32 +29,32 @@
LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;
static struct opp_device *_find_opp_dev(const struct device *dev,
struct opp_table *opp_table)
static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
struct opp_device *opp_dev;
bool found = false;
mutex_lock(&opp_table->lock);
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
if (opp_dev->dev == dev)
return opp_dev;
if (opp_dev->dev == dev) {
found = true;
break;
}
return NULL;
mutex_unlock(&opp_table->lock);
return found;
}
static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
struct opp_table *opp_table;
bool found;
list_for_each_entry(opp_table, &opp_tables, node) {
mutex_lock(&opp_table->lock);
found = !!_find_opp_dev(dev, opp_table);
mutex_unlock(&opp_table->lock);
if (found) {
if (_find_opp_dev(dev, opp_table)) {
_get_opp_table_kref(opp_table);
return opp_table;
}
}
@@ -1036,8 +1036,8 @@ static void _remove_opp_dev(struct opp_device *opp_dev,
kfree(opp_dev);
}
static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
struct opp_table *opp_table)
struct opp_device *_add_opp_dev(const struct device *dev,
struct opp_table *opp_table)
{
struct opp_device *opp_dev;
@@ -1048,7 +1048,9 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
/* Initialize opp-dev */
opp_dev->dev = dev;
mutex_lock(&opp_table->lock);
list_add(&opp_dev->node, &opp_table->dev_list);
mutex_unlock(&opp_table->lock);
/* Create debugfs entries for the opp_table */
opp_debug_register(opp_dev, opp_table);
@@ -1056,18 +1058,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
return opp_dev;
}
struct opp_device *_add_opp_dev(const struct device *dev,
struct opp_table *opp_table)
{
struct opp_device *opp_dev;
mutex_lock(&opp_table->lock);
opp_dev = _add_opp_dev_unlocked(dev, opp_table);
mutex_unlock(&opp_table->lock);
return opp_dev;
}
static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
struct opp_table *opp_table;
@@ -1121,8 +1111,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
/* Secure the device table modification */
list_add(&opp_table->node, &opp_tables);
return opp_table;
err:
@@ -1135,27 +1123,64 @@ void _get_opp_table_kref(struct opp_table *opp_table)
kref_get(&opp_table->kref);
}
static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
/*
* We need to make sure that the OPP table for a device doesn't get added twice,
* if this routine gets called in parallel with the same device pointer.
*
* The simplest way to enforce that is to perform everything (find existing
* table and if not found, create a new one) under the opp_table_lock, so only
* one creator gets access to the same. But that expands the critical section
* under the lock and may end up causing circular dependencies with frameworks
* like debugfs, interconnect or clock framework as they may be direct or
* indirect users of OPP core.
*
* And for that reason we have to go for a bit tricky implementation here, which
* uses the opp_tables_busy flag to indicate if another creator is in the middle
* of adding an OPP table and others should wait for it to finish.
*/
struct opp_table *_add_opp_table_indexed(struct device *dev, int index)
{
struct opp_table *opp_table;
/* Hold our table modification lock here */
again:
mutex_lock(&opp_table_lock);
opp_table = _find_opp_table_unlocked(dev);
if (!IS_ERR(opp_table))
goto unlock;
/*
* The opp_tables list or an OPP table's dev_list is getting updated by
* another user, wait for it to finish.
*/
if (unlikely(opp_tables_busy)) {
mutex_unlock(&opp_table_lock);
cpu_relax();
goto again;
}
opp_tables_busy = true;
opp_table = _managed_opp(dev, index);
/* Drop the lock to reduce the size of critical section */
mutex_unlock(&opp_table_lock);
if (opp_table) {
if (!_add_opp_dev_unlocked(dev, opp_table)) {
if (!_add_opp_dev(dev, opp_table)) {
dev_pm_opp_put_opp_table(opp_table);
opp_table = ERR_PTR(-ENOMEM);
}
goto unlock;
mutex_lock(&opp_table_lock);
} else {
opp_table = _allocate_opp_table(dev, index);
mutex_lock(&opp_table_lock);
if (!IS_ERR(opp_table))
list_add(&opp_table->node, &opp_tables);
}
opp_table = _allocate_opp_table(dev, index);
opp_tables_busy = false;
unlock:
mutex_unlock(&opp_table_lock);
@@ -1163,18 +1188,17 @@ static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
return opp_table;
}
struct opp_table *_add_opp_table(struct device *dev)
{
return _add_opp_table_indexed(dev, 0);
}
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
return _opp_get_opp_table(dev, 0);
return _find_opp_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
int index)
{
return _opp_get_opp_table(dev, index);
}
static void _opp_table_kref_release(struct kref *kref)
{
struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
@@ -1227,9 +1251,14 @@ void _opp_free(struct dev_pm_opp *opp)
kfree(opp);
}
static void _opp_kref_release(struct dev_pm_opp *opp,
struct opp_table *opp_table)
static void _opp_kref_release(struct kref *kref)
{
struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
struct opp_table *opp_table = opp->opp_table;
list_del(&opp->node);
mutex_unlock(&opp_table->lock);
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
@@ -1237,27 +1266,9 @@ static void _opp_kref_release(struct dev_pm_opp *opp,
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
_of_opp_free_required_opps(opp_table, opp);
opp_debug_remove_one(opp);
list_del(&opp->node);
kfree(opp);
}
static void _opp_kref_release_unlocked(struct kref *kref)
{
struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
struct opp_table *opp_table = opp->opp_table;
_opp_kref_release(opp, opp_table);
}
static void _opp_kref_release_locked(struct kref *kref)
{
struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
struct opp_table *opp_table = opp->opp_table;
_opp_kref_release(opp, opp_table);
mutex_unlock(&opp_table->lock);
}
void dev_pm_opp_get(struct dev_pm_opp *opp)
{
kref_get(&opp->kref);
@@ -1265,16 +1276,10 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
void dev_pm_opp_put(struct dev_pm_opp *opp)
{
kref_put_mutex(&opp->kref, _opp_kref_release_locked,
&opp->opp_table->lock);
kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);
static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
{
kref_put(&opp->kref, _opp_kref_release_unlocked);
}
/**
* dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
@@ -1318,30 +1323,49 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
bool dynamic)
{
struct dev_pm_opp *opp = NULL, *temp;
mutex_lock(&opp_table->lock);
list_for_each_entry(temp, &opp_table->opp_list, node) {
if (dynamic == temp->dynamic) {
opp = temp;
break;
}
}
mutex_unlock(&opp_table->lock);
return opp;
}
bool _opp_remove_all_static(struct opp_table *opp_table)
{
struct dev_pm_opp *opp, *tmp;
bool ret = true;
struct dev_pm_opp *opp;
mutex_lock(&opp_table->lock);
if (!opp_table->parsed_static_opps) {
ret = false;
goto unlock;
mutex_unlock(&opp_table->lock);
return false;
}
if (--opp_table->parsed_static_opps)
goto unlock;
list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
if (!opp->dynamic)
dev_pm_opp_put_unlocked(opp);
if (--opp_table->parsed_static_opps) {
mutex_unlock(&opp_table->lock);
return true;
}
unlock:
mutex_unlock(&opp_table->lock);
return ret;
/*
* Can't remove the OPP from under the lock, debugfs removal needs to
* happen lock less to avoid circular dependency issues.
*/
while ((opp = _opp_get_next(opp_table, false)))
dev_pm_opp_put(opp);
return true;
}
/**
@@ -1353,21 +1377,21 @@ bool _opp_remove_all_static(struct opp_table *opp_table)
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
struct opp_table *opp_table;
struct dev_pm_opp *opp, *temp;
struct dev_pm_opp *opp;
int count = 0;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
return;
mutex_lock(&opp_table->lock);
list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
if (opp->dynamic) {
dev_pm_opp_put_unlocked(opp);
count++;
}
/*
* Can't remove the OPP from under the lock, debugfs removal needs to
* happen lock less to avoid circular dependency issues.
*/
while ((opp = _opp_get_next(opp_table, true))) {
dev_pm_opp_put(opp);
count++;
}
mutex_unlock(&opp_table->lock);
/* Drop the references taken by dev_pm_opp_add() */
while (count--)
@@ -1602,7 +1626,7 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
{
struct opp_table *opp_table;
opp_table = dev_pm_opp_get_opp_table(dev);
opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
@@ -1636,6 +1660,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
*/
void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
{
if (unlikely(!opp_table))
return;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1661,7 +1688,7 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
struct opp_table *opp_table;
opp_table = dev_pm_opp_get_opp_table(dev);
opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
@@ -1692,6 +1719,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
*/
void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
{
if (unlikely(!opp_table))
return;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1754,7 +1784,7 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
struct regulator *reg;
int ret, i;
opp_table = dev_pm_opp_get_opp_table(dev);
opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
@@ -1820,6 +1850,9 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
{
int i;
if (unlikely(!opp_table))
return;
if (!opp_table->regulators)
goto put_opp_table;
@@ -1862,7 +1895,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
struct opp_table *opp_table;
int ret;
opp_table = dev_pm_opp_get_opp_table(dev);
opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
@@ -1902,6 +1935,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
*/
void dev_pm_opp_put_clkname(struct opp_table *opp_table)
{
if (unlikely(!opp_table))
return;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1930,7 +1966,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
if (!set_opp)
return ERR_PTR(-EINVAL);
opp_table = dev_pm_opp_get_opp_table(dev);
opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
@@ -1957,6 +1993,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
*/
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
{
if (unlikely(!opp_table))
return;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
@@ -2014,7 +2053,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
int index = 0, ret = -EINVAL;
const char **name = names;
opp_table = dev_pm_opp_get_opp_table(dev);
opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return opp_table;
@@ -2085,6 +2124,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
*/
void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
{
if (unlikely(!opp_table))
return;
/*
* Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
* used in parallel.
@@ -2179,7 +2221,7 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
struct opp_table *opp_table;
int ret;
opp_table = dev_pm_opp_get_opp_table(dev);
opp_table = _add_opp_table(dev);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
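
The opp_tables_busy hand-off introduced above is easier to follow stripped
to its skeleton; a sketch of the pattern, with find_table() and
allocate_table() as stand-ins for the real helpers:

  static LIST_HEAD(tables);
  static DEFINE_MUTEX(table_lock);
  static bool table_busy;

  static struct opp_table *get_or_add_table(struct device *dev)
  {
          struct opp_table *t;

  again:
          mutex_lock(&table_lock);

          t = find_table(dev);            /* fast path: table exists */
          if (!IS_ERR(t))
                  goto unlock;

          if (table_busy) {               /* another creator is mid-flight */
                  mutex_unlock(&table_lock);
                  cpu_relax();
                  goto again;
          }
          table_busy = true;

          /* Allocate outside the lock so debugfs, clk, etc. can't deadlock. */
          mutex_unlock(&table_lock);
          t = allocate_table(dev);

          mutex_lock(&table_lock);
          if (!IS_ERR(t))
                  list_add(&t->node, &tables);
          table_busy = false;
  unlock:
          mutex_unlock(&table_lock);
          return t;
  }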

@@ -112,8 +112,6 @@ static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
struct opp_table *opp_table;
struct device_node *opp_table_np;
lockdep_assert_held(&opp_table_lock);
opp_table_np = of_get_parent(opp_np);
if (!opp_table_np)
goto err;
@@ -121,12 +119,15 @@ static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
/* It is safe to put the node now as all we need now is its address */
of_node_put(opp_table_np);
mutex_lock(&opp_table_lock);
list_for_each_entry(opp_table, &opp_tables, node) {
if (opp_table_np == opp_table->np) {
_get_opp_table_kref(opp_table);
mutex_unlock(&opp_table_lock);
return opp_table;
}
}
mutex_unlock(&opp_table_lock);
err:
return ERR_PTR(-ENODEV);
@@ -169,7 +170,8 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
/* Traversing the first OPP node is all we need */
np = of_get_next_available_child(opp_np, NULL);
if (!np) {
dev_err(dev, "Empty OPP table\n");
dev_warn(dev, "Empty OPP table\n");
return;
}
@@ -377,7 +379,9 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev,
struct icc_path **paths;
ret = _bandwidth_supported(dev, opp_table);
if (ret <= 0)
if (ret == -EINVAL)
return 0; /* Empty OPP table is a valid corner-case, let's not fail */
else if (ret <= 0)
return ret;
ret = 0;
@@ -974,7 +978,7 @@ int dev_pm_opp_of_add_table(struct device *dev)
struct opp_table *opp_table;
int ret;
opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
opp_table = _add_opp_table_indexed(dev, 0);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
@@ -1029,7 +1033,7 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
index = 0;
}
opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
opp_table = _add_opp_table_indexed(dev, index);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);

@@ -224,6 +224,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *o
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table(struct device *dev);
struct opp_table *_add_opp_table_indexed(struct device *dev, int index);
void _put_opp_list_kref(struct opp_table *opp_table);
#ifdef CONFIG_OF

@@ -90,7 +90,6 @@ struct dev_pm_set_opp_data {
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);