Merge branches 'pm-cpufreq', 'pm-cpuidle' and 'pm-core'

* pm-cpufreq:
  cpufreq: schedutil: Improve prints messages with pr_fmt
  cpufreq: simplified goto out in cpufreq_register_driver()
  cpufreq: governor: CPUFREQ_GOV_STOP never fails
  cpufreq: governor: CPUFREQ_GOV_POLICY_EXIT never fails
  intel_pstate: Simplify conditional in intel_pstate_set_policy()

* pm-cpuidle:
  cpuidle: Fix cpuidle_state_is_coupled() argument in cpuidle_enter()

* pm-core:
  PM / sleep: Handle failures in device_suspend_late() consistently
Rafael J. Wysocki 2016-05-25 21:54:45 +02:00
commit 4c2628cd75
5 changed files with 39 additions and 67 deletions

drivers/base/power/main.c

@@ -1267,14 +1267,15 @@ int dpm_suspend_late(pm_message_t state)
     error = device_suspend_late(dev);
     mutex_lock(&dpm_list_mtx);
+    if (!list_empty(&dev->power.entry))
+        list_move(&dev->power.entry, &dpm_late_early_list);
     if (error) {
         pm_dev_err(dev, state, " late", error);
         dpm_save_failed_dev(dev_name(dev));
         put_device(dev);
         break;
     }
-    if (!list_empty(&dev->power.entry))
-        list_move(&dev->power.entry, &dpm_late_early_list);
     put_device(dev);
     if (async_error)

drivers/cpufreq/cpufreq.c

@@ -78,9 +78,14 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
-static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
+static inline void cpufreq_exit_governor(struct cpufreq_policy *policy)
 {
-    return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+    (void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 }
+static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+    (void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+}
 /**
@@ -1026,13 +1031,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
         return 0;
     down_write(&policy->rwsem);
-    if (has_target()) {
-        ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-        if (ret) {
-            pr_err("%s: Failed to stop governor\n", __func__);
-            goto unlock;
-        }
-    }
+    if (has_target())
+        cpufreq_stop_governor(policy);
     cpumask_set_cpu(cpu, policy->cpus);
@@ -1041,8 +1041,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
         if (ret)
             pr_err("%s: Failed to start governor\n", __func__);
     }
-unlock:
     up_write(&policy->rwsem);
     return ret;
 }
@@ -1354,11 +1352,8 @@ static void cpufreq_offline(unsigned int cpu)
     }
     down_write(&policy->rwsem);
-    if (has_target()) {
-        ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-        if (ret)
-            pr_err("%s: Failed to stop governor\n", __func__);
-    }
+    if (has_target())
+        cpufreq_stop_governor(policy);
     cpumask_clear_cpu(cpu, policy->cpus);
@@ -1387,12 +1382,8 @@ static void cpufreq_offline(unsigned int cpu)
     if (cpufreq_driver->stop_cpu)
         cpufreq_driver->stop_cpu(policy);
     /* If cpu is last user of policy, free policy */
-    if (has_target()) {
-        ret = cpufreq_exit_governor(policy);
-        if (ret)
-            pr_err("%s: Failed to exit governor\n", __func__);
-    }
+    if (has_target())
+        cpufreq_exit_governor(policy);
     /*
      * Perform the ->exit() even during light-weight tear-down,
@@ -1626,7 +1617,6 @@ EXPORT_SYMBOL(cpufreq_generic_suspend);
 void cpufreq_suspend(void)
 {
     struct cpufreq_policy *policy;
-    int ret;
     if (!cpufreq_driver)
         return;
@@ -1639,14 +1629,8 @@ void cpufreq_suspend(void)
     for_each_active_policy(policy) {
         if (has_target()) {
             down_write(&policy->rwsem);
-            ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+            cpufreq_stop_governor(policy);
             up_write(&policy->rwsem);
-            if (ret) {
-                pr_err("%s: Failed to stop governor for policy: %p\n",
-                       __func__, policy);
-                continue;
-            }
         }
         if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -2049,16 +2033,15 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
     ret = policy->governor->governor(policy, event);
-    if (!ret) {
-        if (event == CPUFREQ_GOV_POLICY_INIT)
+    if (event == CPUFREQ_GOV_POLICY_INIT) {
+        if (ret)
+            module_put(policy->governor->owner);
+        else
             policy->governor->initialized++;
-        else if (event == CPUFREQ_GOV_POLICY_EXIT)
-            policy->governor->initialized--;
-    }
-    if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
-        ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
+    } else if (event == CPUFREQ_GOV_POLICY_EXIT) {
+        policy->governor->initialized--;
         module_put(policy->governor->owner);
+    }
     return ret;
 }
@@ -2221,20 +2204,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
     old_gov = policy->governor;
     /* end old governor */
     if (old_gov) {
-        ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-        if (ret) {
-            /* This can happen due to race with other operations */
-            pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
-                     __func__, old_gov->name, ret);
-            return ret;
-        }
-        ret = cpufreq_exit_governor(policy);
-        if (ret) {
-            pr_err("%s: Failed to Exit Governor: %s (%d)\n",
-                   __func__, old_gov->name, ret);
-            return ret;
-        }
+        cpufreq_stop_governor(policy);
+        cpufreq_exit_governor(policy);
     }
     /* start new governor */
@@ -2495,10 +2466,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
     register_hotcpu_notifier(&cpufreq_cpu_notifier);
     pr_debug("driver %s up and running\n", driver_data->name);
-out:
-    put_online_cpus();
-    return ret;
+    goto out;
 err_if_unreg:
     subsys_interface_unregister(&cpufreq_interface);
@@ -2508,7 +2476,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
     write_lock_irqsave(&cpufreq_driver_lock, flags);
     cpufreq_driver = NULL;
     write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-    goto out;
+out:
+    put_online_cpus();
+    return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
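
Aside, not part of the commit: the wrapper pattern above (cpufreq_stop_governor() / cpufreq_exit_governor()) can be shown with a minimal standalone C sketch. When an operation is known to never fail, a void wrapper that explicitly discards the return value lets call sites drop their dead error-handling branches. The names governor_event, GOV_STOP and stop_governor below are invented purely for illustration.

#include <stdio.h>

enum gov_event { GOV_START, GOV_STOP };

/* stand-in callback; assume the GOV_STOP event can never fail */
static int governor_event(enum gov_event event)
{
    printf("governor event %d\n", event);
    return 0;
}

/* return value intentionally discarded, mirroring cpufreq_stop_governor() */
static inline void stop_governor(void)
{
    (void)governor_event(GOV_STOP);
}

int main(void)
{
    stop_governor();    /* no "Failed to stop governor" branch needed here */
    return 0;
}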

drivers/cpufreq/intel_pstate.c

@@ -1461,12 +1461,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
     intel_pstate_clear_update_util_hook(policy->cpu);
     cpu = all_cpu_data[0];
-    if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) {
-        if (policy->max < policy->cpuinfo.max_freq &&
-            policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
-            pr_debug("policy->max > max non turbo frequency\n");
-            policy->max = policy->cpuinfo.max_freq;
-        }
+    if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+        policy->max < policy->cpuinfo.max_freq &&
+        policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+        pr_debug("policy->max > max non turbo frequency\n");
+        policy->max = policy->cpuinfo.max_freq;
     }
     if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {

drivers/cpuidle/cpuidle.c

@@ -214,7 +214,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
         tick_broadcast_exit();
     }
-    if (!cpuidle_state_is_coupled(drv, entered_state))
+    if (!cpuidle_state_is_coupled(drv, index))
         local_irq_enable();
     /*

kernel/sched/cpufreq_schedutil.c

@@ -9,6 +9,8 @@
  * published by the Free Software Foundation.
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -388,7 +390,7 @@ static int sugov_init(struct cpufreq_policy *policy)
     mutex_unlock(&global_tunables_lock);
     sugov_policy_free(sg_policy);
-    pr_err("cpufreq: schedutil governor initialization failed (error %d)\n", ret);
+    pr_err("initialization failed (error %d)\n", ret);
     return ret;
 }
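
Aside, not part of the commit: a minimal userspace sketch of the pr_fmt convention used above. In the kernel, pr_err() expands its format string through pr_fmt(), so defining pr_fmt before the pr_* call sites prefixes every message with the module name. Here KBUILD_MODNAME and pr_err are re-defined locally only to make the sketch self-contained.

#include <stdio.h>

#define KBUILD_MODNAME "cpufreq_schedutil"      /* normally provided by Kbuild */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must precede any pr_err() user */
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
    /* prints: cpufreq_schedutil: initialization failed (error -12) */
    pr_err("initialization failed (error %d)\n", -12);
    return 0;
}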