mirror of https://gitee.com/openkylin/linux.git
Merge branch 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm into pm-opp
Pull operating performance points (OPP) framework updates for v5.1 from Viresh Kumar:

"This pull request contains following changes:

 - Introduced new OPP helper for power-estimation and used it in several
   cpufreq drivers (Quentin Perret, Matthias Kaehlcke, Dietmar Eggemann,
   and Yangtao Li).

 - OPP Debugfs cleanup (Greg KH).

 - OPP core cleanup (Viresh Kumar)."

* 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  cpufreq: OMAP: Register an Energy Model
  cpufreq: imx6q: Register an Energy Model
  opp: no need to check return value of debugfs_create functions
  cpufreq: mediatek: Register an Energy Model
  cpufreq: scmi: Register an Energy Model
  cpufreq: arm_big_little: Register an Energy Model
  cpufreq: scpi: Register an Energy Model
  cpufreq: dt: Register an Energy Model
  PM / OPP: Introduce a power estimation helper
  PM / OPP: Remove unused parameter of _generic_set_opp_clk_only()
commit 78317ed93a
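The driver-side patches in this series all follow the same pattern: the cpufreq driver's ->init() callback asks the OPP core to register an Energy Model for the policy's CPUs. A minimal sketch of that pattern, not taken from this commit (the foo_cpufreq_init() name is hypothetical):

#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

/* Hypothetical ->init() callback; mirrors the pattern added by the hunks below. */
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
        /* ... existing OPP table / clock / latency setup ... */

        /*
         * New in this series: register an Energy Model for this policy's
         * CPUs, derived from the devicetree "dynamic-power-coefficient"
         * property. The helper returns void and quietly does nothing if
         * the property is missing, so drivers may call it unconditionally.
         */
        dev_pm_opp_of_register_em(policy->cpus);

        return 0;
}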
@@ -487,6 +487,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency =
 				arm_bL_ops->get_transition_latency(cpu_dev);
 
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	if (is_bL_switching_enabled())
 		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
@@ -280,6 +280,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = transition_latency;
 	policy->dvfs_possible_from_any_cpu = true;
 
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	return 0;
 
 out_free_cpufreq_table:
@@ -210,6 +210,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 	policy->clk = clks[ARM].clk;
 	ret = cpufreq_generic_init(policy, freq_table, transition_latency);
 	policy->suspend_freq = max_freq;
+	dev_pm_opp_of_register_em(policy->cpus);
 
 	return ret;
 }
@@ -465,6 +465,8 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
 	policy->driver_data = info;
 	policy->clk = info->cpu_clk;
 
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	return 0;
 }
@@ -133,8 +133,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
 
 	/* FIXME: what's the actual transition time? */
 	result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
-	if (!result)
+	if (!result) {
+		dev_pm_opp_of_register_em(policy->cpus);
 		return 0;
+	}
 
 	freq_table_free();
 fail:
@@ -12,6 +12,7 @@
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/cpu_cooling.h>
+#include <linux/energy_model.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/pm_opp.h>
@@ -103,13 +104,42 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 	return 0;
 }
 
+static int __maybe_unused
+scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
+{
+	struct device *cpu_dev = get_cpu_device(cpu);
+	unsigned long Hz;
+	int ret, domain;
+
+	if (!cpu_dev) {
+		pr_err("failed to get cpu%d device\n", cpu);
+		return -ENODEV;
+	}
+
+	domain = handle->perf_ops->device_domain_id(cpu_dev);
+	if (domain < 0)
+		return domain;
+
+	/* Get the power cost of the performance domain. */
+	Hz = *KHz * 1000;
+	ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
+	if (ret)
+		return ret;
+
+	/* The EM framework specifies the frequency in KHz. */
+	*KHz = Hz / 1000;
+
+	return 0;
+}
+
 static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 {
-	int ret;
+	int ret, nr_opp;
 	unsigned int latency;
 	struct device *cpu_dev;
 	struct scmi_data *priv;
 	struct cpufreq_frequency_table *freq_table;
+	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
 
 	cpu_dev = get_cpu_device(policy->cpu);
 	if (!cpu_dev) {
@@ -136,8 +166,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 		return ret;
 	}
 
-	ret = dev_pm_opp_get_opp_count(cpu_dev);
-	if (ret <= 0) {
+	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+	if (nr_opp <= 0) {
 		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
 		ret = -EPROBE_DEFER;
 		goto out_free_opp;
@@ -171,6 +201,9 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = latency;
 
 	policy->fast_switch_possible = true;
+
+	em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+
 	return 0;
 
 out_free_priv:
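Unlike the devicetree-based drivers, scmi gets its power numbers from firmware through perf_ops->est_power_get() and registers the Energy Model directly with em_register_perf_domain(). A hedged sketch of the "active power" callback contract the EM framework expects (the names and the table below are made up, not part of this commit): given a target frequency in *KHz, the callback rounds it up to a real operating point and reports back both the rounded frequency and the power at that point.

#include <linux/energy_model.h>
#include <linux/kernel.h>

/* Hypothetical per-CPU frequency/power table, illustrative values only. */
static const struct { unsigned long khz, mw; } demo_opps[] = {
        {  500000,  80 },
        { 1000000, 200 },
        { 1500000, 450 },
};

static int demo_get_power(unsigned long *mW, unsigned long *KHz, int cpu)
{
        int i;

        /* Round *KHz up to the first supported operating point. */
        for (i = 0; i < ARRAY_SIZE(demo_opps); i++) {
                if (demo_opps[i].khz >= *KHz) {
                        *KHz = demo_opps[i].khz;
                        *mW = demo_opps[i].mw;
                        return 0;
                }
        }
        return -EINVAL;
}

static struct em_data_callback demo_em_cb = EM_DATA_CB(demo_get_power);
/* Registered from ->init() with: em_register_perf_domain(policy->cpus, nr_opp, &demo_em_cb); */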
@@ -170,6 +170,9 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = latency;
 
 	policy->fast_switch_possible = false;
+
+	dev_pm_opp_of_register_em(policy->cpus);
+
 	return 0;
 
 out_free_cpufreq_table:
@@ -533,9 +533,8 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
 	return ret;
 }
 
-static inline int
-_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
-			  unsigned long old_freq, unsigned long freq)
+static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+					    unsigned long freq)
 {
 	int ret;
 
@@ -572,7 +571,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
 	}
 
 	/* Change frequency */
-	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
+	ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
 	if (ret)
 		goto restore_voltage;
@@ -586,7 +585,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
 	return 0;
 
 restore_freq:
-	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
+	if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
 		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
 			__func__, old_freq);
 restore_voltage:
@@ -759,7 +758,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 					     opp->supplies);
 	} else {
 		/* Only frequency scaling */
-		ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+		ret = _generic_set_opp_clk_only(dev, clk, freq);
 	}
 
 	/* Scaling down? Configure required OPPs after frequency */
@@ -793,7 +792,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
 						struct opp_table *opp_table)
 {
 	struct opp_device *opp_dev;
-	int ret;
 
 	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
 	if (!opp_dev)
@@ -805,10 +803,7 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
 	list_add(&opp_dev->node, &opp_table->dev_list);
 
 	/* Create debugfs entries for the opp_table */
-	ret = opp_debug_register(opp_dev, opp_table);
-	if (ret)
-		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
-			__func__, ret);
+	opp_debug_register(opp_dev, opp_table);
 
 	return opp_dev;
 }
@@ -1229,10 +1224,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
 	new_opp->opp_table = opp_table;
 	kref_init(&new_opp->kref);
 
-	ret = opp_debug_create_one(new_opp, opp_table);
-	if (ret)
-		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
-			__func__, ret);
+	opp_debug_create_one(new_opp, opp_table);
 
 	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
 		new_opp->available = false;
@@ -35,7 +35,7 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
 	debugfs_remove_recursive(opp->dentry);
 }
 
-static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+static void opp_debug_create_supplies(struct dev_pm_opp *opp,
 				      struct opp_table *opp_table,
 				      struct dentry *pdentry)
 {
@@ -50,30 +50,21 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
 		/* Create per-opp directory */
 		d = debugfs_create_dir(name, pdentry);
 
-		if (!d)
-			return false;
-
-		if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
-					  &opp->supplies[i].u_volt))
-			return false;
+		debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+				     &opp->supplies[i].u_volt);
 
-		if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
-					  &opp->supplies[i].u_volt_min))
-			return false;
+		debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+				     &opp->supplies[i].u_volt_min);
 
-		if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
-					  &opp->supplies[i].u_volt_max))
-			return false;
+		debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+				     &opp->supplies[i].u_volt_max);
 
-		if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
-					  &opp->supplies[i].u_amp))
-			return false;
+		debugfs_create_ulong("u_amp", S_IRUGO, d,
+				     &opp->supplies[i].u_amp);
 	}
 
-	return true;
 }
 
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
 {
 	struct dentry *pdentry = opp_table->dentry;
 	struct dentry *d;
@@ -95,40 +86,23 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
 
 	/* Create per-opp directory */
 	d = debugfs_create_dir(name, pdentry);
-	if (!d)
-		return -ENOMEM;
-
-	if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
-		return -ENOMEM;
-
-	if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
-		return -ENOMEM;
-
-	if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
-		return -ENOMEM;
-
-	if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
-		return -ENOMEM;
-
-	if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate))
-		return -ENOMEM;
-
-	if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
-		return -ENOMEM;
-
-	if (!opp_debug_create_supplies(opp, opp_table, d))
-		return -ENOMEM;
-
-	if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
-				  &opp->clock_latency_ns))
-		return -ENOMEM;
+	debugfs_create_bool("available", S_IRUGO, d, &opp->available);
+	debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic);
+	debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
+	debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
+	debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
+	debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
+	debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+			     &opp->clock_latency_ns);
+
+	opp_debug_create_supplies(opp, opp_table, d);
 
 	opp->dentry = d;
-	return 0;
 }
 
-static int opp_list_debug_create_dir(struct opp_device *opp_dev,
-				     struct opp_table *opp_table)
+static void opp_list_debug_create_dir(struct opp_device *opp_dev,
+				      struct opp_table *opp_table)
 {
 	const struct device *dev = opp_dev->dev;
 	struct dentry *d;
@@ -137,36 +111,21 @@ static int opp_list_debug_create_dir(struct opp_device *opp_dev,
 
 	/* Create device specific directory */
 	d = debugfs_create_dir(opp_table->dentry_name, rootdir);
-	if (!d) {
-		dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
-		return -ENOMEM;
-	}
 
 	opp_dev->dentry = d;
 	opp_table->dentry = d;
-
-	return 0;
 }
 
-static int opp_list_debug_create_link(struct opp_device *opp_dev,
-				      struct opp_table *opp_table)
+static void opp_list_debug_create_link(struct opp_device *opp_dev,
+				       struct opp_table *opp_table)
 {
-	const struct device *dev = opp_dev->dev;
 	char name[NAME_MAX];
-	struct dentry *d;
 
 	opp_set_dev_name(opp_dev->dev, name);
 
 	/* Create device specific directory link */
-	d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
-	if (!d) {
-		dev_err(dev, "%s: Failed to create link\n", __func__);
-		return -ENOMEM;
-	}
-
-	opp_dev->dentry = d;
-
-	return 0;
+	opp_dev->dentry = debugfs_create_symlink(name, rootdir,
						 opp_table->dentry_name);
 }
@@ -177,20 +136,13 @@ static int opp_list_debug_create_link(struct opp_device *opp_dev,
 * Dynamically adds device specific directory in debugfs 'opp' directory. If the
 * device-opp is shared with other devices, then links will be created for all
 * devices except the first.
- *
- * Return: 0 on success, otherwise negative error.
 */
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
 {
-	if (!rootdir) {
-		pr_debug("%s: Uninitialized rootdir\n", __func__);
-		return -EINVAL;
-	}
-
 	if (opp_table->dentry)
-		return opp_list_debug_create_link(opp_dev, opp_table);
-
-	return opp_list_debug_create_dir(opp_dev, opp_table);
+		opp_list_debug_create_link(opp_dev, opp_table);
+	else
+		opp_list_debug_create_dir(opp_dev, opp_table);
 }
 
 static void opp_migrate_dentry(struct opp_device *opp_dev,
@@ -252,10 +204,6 @@ static int __init opp_debug_init(void)
 {
 	/* Create /sys/kernel/debug/opp directory */
 	rootdir = debugfs_create_dir("opp", NULL);
-	if (!rootdir) {
-		pr_err("%s: Failed to create root directory\n", __func__);
-		return -ENOMEM;
-	}
 
 	return 0;
 }
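The debugfs hunks above all apply the same rule from the "no need to check return value of debugfs_create functions" cleanup: debugfs_create_*() results are simply not checked, because a debugfs failure must never change normal kernel behaviour. A hedged, self-contained sketch of the resulting idiom (the demo_* names are made up, not part of this commit):

#include <linux/debugfs.h>

static bool demo_flag;
static unsigned long demo_rate;

static void demo_debugfs_init(void)
{
        /* No error checks: debugfs handles error/NULL parents internally. */
        struct dentry *dir = debugfs_create_dir("demo_opp", NULL);

        debugfs_create_bool("available", 0444, dir, &demo_flag);
        debugfs_create_ulong("rate_hz", 0444, dir, &demo_rate);
}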
@@ -20,6 +20,7 @@
 #include <linux/pm_domain.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/energy_model.h>
 
 #include "opp.h"
@@ -1047,3 +1048,101 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
 	return of_node_get(opp->np);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
+
+/*
+ * Callback function provided to the Energy Model framework upon registration.
+ * This computes the power estimated by @CPU at @kHz if it is the frequency
+ * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
+ * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
+ * frequency and @mW to the associated power. The power is estimated as
+ * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
+ * the voltage and frequency of the OPP.
+ *
+ * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
+ * calculation failed because of missing parameters, 0 otherwise.
+ */
+static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
+					 int cpu)
+{
+	struct device *cpu_dev;
+	struct dev_pm_opp *opp;
+	struct device_node *np;
+	unsigned long mV, Hz;
+	u32 cap;
+	u64 tmp;
+	int ret;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return -ENODEV;
+
+	np = of_node_get(cpu_dev->of_node);
+	if (!np)
+		return -EINVAL;
+
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret)
+		return -EINVAL;
+
+	Hz = *kHz * 1000;
+	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+	if (IS_ERR(opp))
+		return -EINVAL;
+
+	mV = dev_pm_opp_get_voltage(opp) / 1000;
+	dev_pm_opp_put(opp);
+	if (!mV)
+		return -EINVAL;
+
+	tmp = (u64)cap * mV * mV * (Hz / 1000000);
+	do_div(tmp, 1000000000);
+
+	*mW = (unsigned long)tmp;
+	*kHz = Hz / 1000;
+
+	return 0;
+}
+
+/**
+ * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
+ * @cpus : CPUs for which an Energy Model has to be registered
+ *
+ * This checks whether the "dynamic-power-coefficient" devicetree property has
+ * been specified, and tries to register an Energy Model with it if it has.
+ */
+void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+	struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
+	int ret, nr_opp, cpu = cpumask_first(cpus);
+	struct device *cpu_dev;
+	struct device_node *np;
+	u32 cap;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return;
+
+	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+	if (nr_opp <= 0)
+		return;
+
+	np = of_node_get(cpu_dev->of_node);
+	if (!np)
+		return;
+
+	/*
+	 * Register an EM only if the 'dynamic-power-coefficient' property is
+	 * set in devicetree. It is assumed the voltage values are known if that
+	 * property is set since it is useless otherwise. If voltages are not
+	 * known, just let the EM registration fail with an error to alert the
+	 * user about the inconsistent configuration.
+	 */
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret || !cap)
+		return;
+
+	em_register_perf_domain(cpus, nr_opp, &em_cb);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
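To make the fixed-point arithmetic in _get_cpu_power() above concrete, here is a hedged worked example with made-up values: cap = 100 (the devicetree "dynamic-power-coefficient", in uW/MHz/V^2), an 1100 mV / 1.2 GHz OPP. The kernel code uses do_div(); plain division is used here purely for illustration.

#include <linux/types.h>

/* Worked example of the P = C * V^2 * f estimate; all numbers are made up. */
static unsigned long demo_power_mw(void)
{
        u64 cap = 100;              /* uW/MHz/V^2, from "dynamic-power-coefficient" */
        u64 mV = 1100;              /* OPP voltage in millivolts */
        u64 Hz = 1200000000;        /* ceiled OPP frequency, 1.2 GHz */

        /* 100 * 1100 * 1100 * 1200 = 145200000000 */
        u64 tmp = cap * mV * mV * (Hz / 1000000);

        /* 145200000000 / 10^9 = 145, so *mW = 145 and *kHz = 1200000 */
        return tmp / 1000000000;
}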
@@ -236,18 +236,17 @@ static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
 
 #ifdef CONFIG_DEBUG_FS
 void opp_debug_remove_one(struct dev_pm_opp *opp);
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
 void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
 #else
 static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
 
-static inline int opp_debug_create_one(struct dev_pm_opp *opp,
-				       struct opp_table *opp_table)
-{ return 0; }
-
-static inline int opp_debug_register(struct opp_device *opp_dev,
-				     struct opp_table *opp_table)
-{ return 0; }
+static inline void opp_debug_create_one(struct dev_pm_opp *opp,
					struct opp_table *opp_table) { }
+
+static inline void opp_debug_register(struct opp_device *opp_dev,
				      struct opp_table *opp_table) { }
+
 static inline void opp_debug_unregister(struct opp_device *opp_dev,
					struct opp_table *opp_table)
@@ -327,6 +327,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
 struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
 struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
 int of_get_required_opp_performance_state(struct device_node *np, int index);
+void dev_pm_opp_of_register_em(struct cpumask *cpus);
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
 {
@@ -365,6 +366,11 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
 {
 	return NULL;
 }
 
+static inline void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+}
+
 static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
 {
 	return -ENOTSUPP;