Merge branches 'pnp', 'powercap', 'pm-runtime' and 'pm-opp'
* pnp:
  MAINTAINERS: Remove Bjorn Helgaas as PNP maintainer
  PNP / resources: remove positive test on unsigned values

* powercap:
  powercap / RAPL: add new CPU IDs
  powercap / RAPL: further relax energy counter checks

* pm-runtime:
  PM / runtime: Update documentation to reflect the current code flow

* pm-opp:
  PM / OPP: discard duplicate OPPs
  PM / OPP: Make OPP invisible to users in Kconfig
  PM / OPP: fix incorrect OPP count handling in of_init_opp_table
commit cd0c5bd391
Documentation/power/runtime_pm.txt
@@ -665,15 +665,17 @@ The PM core does its best to reduce the probability of race conditions between
 the runtime PM and system suspend/resume (and hibernation) callbacks by carrying
 out the following operations:
 
-  * During system suspend it calls pm_runtime_get_noresume() and
-    pm_runtime_barrier() for every device right before executing the
-    subsystem-level .suspend() callback for it.  In addition to that it calls
-    __pm_runtime_disable() with 'false' as the second argument for every device
-    right before executing the subsystem-level .suspend_late() callback for it.
+  * During system suspend pm_runtime_get_noresume() is called for every device
+    right before executing the subsystem-level .prepare() callback for it and
+    pm_runtime_barrier() is called for every device right before executing the
+    subsystem-level .suspend() callback for it.  In addition to that the PM core
+    calls __pm_runtime_disable() with 'false' as the second argument for every
+    device right before executing the subsystem-level .suspend_late() callback
+    for it.
 
-  * During system resume it calls pm_runtime_enable() and pm_runtime_put()
-    for every device right after executing the subsystem-level .resume_early()
-    callback and right after executing the subsystem-level .resume() callback
+  * During system resume pm_runtime_enable() and pm_runtime_put() are called for
+    every device right after executing the subsystem-level .resume_early()
+    callback and right after executing the subsystem-level .complete() callback
     for it, respectively.
 
 7. Generic subsystem callbacks
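As a rough illustration (not part of this commit), the per-device sequence the updated documentation describes can be sketched as follows. The pm_runtime_* calls are the real <linux/pm_runtime.h> API; example_suspend_flow()/example_resume_flow() are made-up stand-ins for the PM core's actual dispatch code in drivers/base/power/main.c.

#include <linux/pm_runtime.h>

/* Illustrative sketch only, not the PM core's real code. */
static void example_suspend_flow(struct device *dev)
{
	pm_runtime_get_noresume(dev);		/* right before .prepare() */
	/* subsystem-level .prepare() runs here */
	pm_runtime_barrier(dev);		/* right before .suspend() */
	/* subsystem-level .suspend() runs here */
	__pm_runtime_disable(dev, false);	/* right before .suspend_late() */
	/* subsystem-level .suspend_late() runs here */
}

static void example_resume_flow(struct device *dev)
{
	/* subsystem-level .resume_early() runs here */
	pm_runtime_enable(dev);			/* right after .resume_early() */
	/* subsystem-level .complete() runs here */
	pm_runtime_put(dev);			/* right after .complete() */
}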
MAINTAINERS
@@ -6938,7 +6938,6 @@ F:	drivers/power/
 
 PNP SUPPORT
 M:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-M:	Bjorn Helgaas <bhelgaas@google.com>
 S:	Maintained
 F:	drivers/pnp/
 
drivers/base/power/opp.c
@@ -394,6 +394,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
+ *
+ * Return:
+ * 0:		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST:	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM:	Memory allocation failure
  */
 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
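A hypothetical caller exercising the newly documented return values might look like this; the device, frequencies, and voltages are invented for the example.

#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_register_opps(struct device *dev)
{
	int ret;

	/* 1 GHz at 1.1 V; 0 on success, -ENOMEM on allocation failure */
	ret = dev_pm_opp_add(dev, 1000000000, 1100000);
	if (ret)
		return ret;

	/* same frequency, different voltage: rejected with -EEXIST */
	ret = dev_pm_opp_add(dev, 1000000000, 1200000);
	if (ret == -EEXIST)
		dev_warn(dev, "conflicting OPP rejected\n");

	/* exact duplicate of an available OPP: returns 0 (with a warning) */
	return dev_pm_opp_add(dev, 1000000000, 1100000);
}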
@@ -443,15 +450,31 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 	new_opp->u_volt = u_volt;
 	new_opp->available = true;
 
-	/* Insert new OPP in order of increasing frequency */
+	/*
+	 * Insert new OPP in order of increasing frequency
+	 * and discard if already present
+	 */
 	head = &dev_opp->opp_list;
 	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
-		if (new_opp->rate < opp->rate)
+		if (new_opp->rate <= opp->rate)
 			break;
 		else
 			head = &opp->node;
 	}
 
+	/* Duplicate OPPs ? */
+	if (new_opp->rate == opp->rate) {
+		int ret = opp->available && new_opp->u_volt == opp->u_volt ?
+			0 : -EEXIST;
+
+		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
+			 __func__, opp->rate, opp->u_volt, opp->available,
+			 new_opp->rate, new_opp->u_volt, new_opp->available);
+		mutex_unlock(&dev_opp_list_lock);
+		kfree(new_opp);
+		return ret;
+	}
+
 	list_add_rcu(&new_opp->node, head);
 	mutex_unlock(&dev_opp_list_lock);
 
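The switch from '<' to '<=' makes the walk stop at the first entry whose rate is greater than or equal to the new one, so the cursor left behind is exactly the candidate duplicate. A standalone toy version of the pattern (plain C with a sorted array instead of an RCU list):

#include <stdio.h>

int main(void)
{
	unsigned long rates[] = { 200, 400, 800 };	/* sorted */
	unsigned long new_rate = 400;
	size_t i, n = sizeof(rates) / sizeof(rates[0]);

	/* stop at the first entry >= new_rate */
	for (i = 0; i < n; i++)
		if (new_rate <= rates[i])
			break;

	if (i < n && rates[i] == new_rate)
		printf("duplicate of entry %zu, reject or ignore\n", i);
	else
		printf("insert before index %zu\n", i);
	return 0;
}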
@@ -734,11 +757,9 @@ int of_init_opp_table(struct device *dev)
 		unsigned long freq = be32_to_cpup(val++) * 1000;
 		unsigned long volt = be32_to_cpup(val++);
 
-		if (dev_pm_opp_add(dev, freq, volt)) {
+		if (dev_pm_opp_add(dev, freq, volt))
 			dev_warn(dev, "%s: Failed to add OPP %ld\n",
 				 __func__, freq);
-			continue;
-		}
 		nr -= 2;
 	}
 
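The dropped 'continue' matters because the enclosing while (nr) loop counts down by two per frequency/voltage pair: on a failed add, the data pointer still advanced but nr was not decremented, so the loop could run past the end of the property. A toy model (plain C, not kernel code) of the fixed flow:

#include <stdio.h>

int main(void)
{
	int nr = 4;	/* two freq/volt pairs, as in an OF operating-points table */

	while (nr) {
		int add_failed = 1;	/* pretend dev_pm_opp_add() failed */

		if (add_failed)
			fprintf(stderr, "add failed, pair consumed anyway\n");
		nr -= 2;	/* the fix: decrement unconditionally */
	}
	printf("loop terminated\n");
	return 0;
}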
drivers/pnp/resource.c
@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
 		return 1;
 
 	/* check if the resource is valid */
-	if (*irq < 0 || *irq > 15)
+	if (*irq > 15)
 		return 0;
 
 	/* check if the resource is reserved */
@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
 		return 1;
 
 	/* check if the resource is valid */
-	if (*dma < 0 || *dma == 4 || *dma > 7)
+	if (*dma == 4 || *dma > 7)
 		return 0;
 
 	/* check if the resource is reserved */
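Both PNP changes remove the same kind of dead code: *irq and *dma point into unsigned struct resource fields, and an unsigned value can never compare less than zero, so the '< 0' arm could never fire (and tends to provoke compiler warnings). A minimal standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long dma = (unsigned long)-1;	/* wraps to ULONG_MAX */

	if (dma < 0)	/* always false for an unsigned type */
		printf("never reached\n");
	else
		printf("dma = %lu, so only the upper-bound check is useful\n", dma);
	return 0;
}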
drivers/powercap/intel_rapl.c
@@ -951,7 +951,9 @@ static const struct x86_cpu_id rapl_ids[] = {
 	{ X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */
 	{ X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
 	{ X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */
-	{ X86_VENDOR_INTEL, 6, 0x45},/* Haswell */
+	{ X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */
+	{ X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */
+	{ X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */
 	/* TODO: Add more CPU IDs after testing */
 	{}
 };
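For context, an x86_cpu_id table like this is normally consumed via x86_match_cpu() at init time. The sketch below shows the usual pattern; the function name example_rapl_init is invented, and rapl_ids is assumed to be in scope.

#include <asm/cpu_device_id.h>
#include <linux/errno.h>
#include <linux/init.h>

static int __init example_rapl_init(void)
{
	/* returns the matching entry, or NULL if this CPU is unsupported */
	if (!x86_match_cpu(rapl_ids))
		return -ENODEV;

	return 0;	/* continue with probing RAPL domains */
}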
@@ -1124,8 +1126,7 @@ static int rapl_register_powercap(void)
 static int rapl_check_domain(int cpu, int domain)
 {
 	unsigned msr;
-	u64 val1, val2 = 0;
-	int retry = 0;
+	u64 val = 0;
 
 	switch (domain) {
 	case RAPL_DOMAIN_PACKAGE:
@@ -1144,26 +1145,13 @@ static int rapl_check_domain(int cpu, int domain)
 		pr_err("invalid domain id %d\n", domain);
 		return -EINVAL;
 	}
-	if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
+	/* make sure domain counters are available and contains non-zero
+	 * values, otherwise skip it.
+	 */
+	if (rdmsrl_safe_on_cpu(cpu, msr, &val) || !val)
 		return -ENODEV;
 
-	/* PP1/uncore/graphics domain may not be active at the time of
-	 * driver loading. So skip further checks.
-	 */
-	if (domain == RAPL_DOMAIN_PP1)
-		return 0;
-	/* energy counters roll slowly on some domains */
-	while (++retry < 10) {
-		usleep_range(10000, 15000);
-		rdmsrl_safe_on_cpu(cpu, msr, &val2);
-		if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK))
-			return 0;
-	}
-	/* if energy counter does not change, report as bad domain */
-	pr_info("domain %s energy ctr %llu:%llu not working, skip\n",
-		rapl_domain_names[domain], val1, val2);
-
-	return -ENODEV;
+	return 0;
 }
 
 /* Detect active and valid domains for the given CPU, caller must
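The relaxation replaces the old two-sample "did the energy counter move?" probe with a single read: a domain now counts as present if its status MSR can be read without faulting and holds a non-zero value. A sketch of a helper following that rule; the name example_domain_present is invented, while rdmsrl_safe_on_cpu() is the real MSR accessor used above.

#include <linux/errno.h>
#include <asm/msr.h>

static int example_domain_present(unsigned int cpu, u32 msr)
{
	u64 val = 0;

	/* MSR absent (faulting read) or counter stuck at zero: skip it */
	if (rdmsrl_safe_on_cpu(cpu, msr, &val) || !val)
		return -ENODEV;

	return 0;
}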
@@ -1180,6 +1168,9 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
 		/* use physical package id to read counters */
 		if (!rapl_check_domain(cpu, i))
 			rp->domain_map |= 1 << i;
+		else
+			pr_warn("RAPL domain %s detection failed\n",
+				rapl_domain_names[i]);
 	}
 	rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
 	if (!rp->nr_domains) {
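Here rp->domain_map is (presumably) a plain unsigned long used as a bitmap, one bit per detected domain, and bitmap_weight() counts the set bits. A toy illustration with invented domain indices:

#include <linux/bitmap.h>
#include <linux/bitops.h>

static unsigned int example_count_domains(void)
{
	unsigned long domain_map = 0;

	domain_map |= 1UL << 0;	/* say, the package domain was detected */
	domain_map |= 1UL << 2;	/* say, the pp1/graphics domain too */

	/* counts set bits -> 2 here */
	return bitmap_weight(&domain_map, BITS_PER_LONG);
}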
kernel/power/Kconfig
@@ -257,8 +257,7 @@ config ARCH_HAS_OPP
 	bool
 
 config PM_OPP
-	bool "Operating Performance Point (OPP) Layer library"
-	depends on ARCH_HAS_OPP
+	bool
 	---help---
 	  SOCs have a standard set of tuples consisting of frequency and
 	  voltage pairs that the device will support per voltage domain. This