Merge branches 'pm-cpuidle', 'pm-sleep' and 'pm-domains'
* pm-cpuidle:
  cpuidle: menu: help gcc generate slightly better code
  cpuidle: menu: avoid expensive square root computation

* pm-sleep:
  PM / suspend: replacing printk
  PM/freezer: y2038, use boottime to compare tstamps
  PM / sleep: declare __tracedata symbols as char[] rather than char

* pm-domains:
  PM / Domains: Fix potential NULL pointer dereference
  PM / Domains: Fix removal of a subdomain
  PM / Domains: Propagate start and restore errors during runtime resume
  PM / Domains: Join state name and index in debugfs output
  PM / Domains: Restore alignment of slaves in debugfs output
  PM / Domains: remove old power on/off latencies
  ARM: imx6: pm: declare pm domain latency on power_state struct
  PM / Domains: Support for multiple states
commit 93dffd03b3
@@ -374,9 +374,14 @@ static struct pu_domain imx6q_pu_domain = {
                 .name = "PU",
                 .power_off = imx6q_pm_pu_power_off,
                 .power_on = imx6q_pm_pu_power_on,
-                .power_off_latency_ns = 25000,
-                .power_on_latency_ns = 2000000,
+                .states = {
+                        [0] = {
+                                .power_off_latency_ns = 25000,
+                                .power_on_latency_ns = 2000000,
+                        },
+                },
+                .state_count = 1,
         },
 };
 
 static struct generic_pm_domain imx6sl_display_domain = {
@@ -104,6 +104,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 
 static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 {
+        unsigned int state_idx = genpd->state_idx;
         ktime_t time_start;
         s64 elapsed_ns;
         int ret;
@@ -120,10 +121,10 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
                 return ret;
 
         elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
-        if (elapsed_ns <= genpd->power_on_latency_ns)
+        if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
                 return ret;
 
-        genpd->power_on_latency_ns = elapsed_ns;
+        genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
         genpd->max_off_time_changed = true;
         pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
                  genpd->name, "on", elapsed_ns);
@@ -133,6 +134,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 
 static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 {
+        unsigned int state_idx = genpd->state_idx;
         ktime_t time_start;
         s64 elapsed_ns;
         int ret;
@@ -149,10 +151,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
                 return ret;
 
         elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
-        if (elapsed_ns <= genpd->power_off_latency_ns)
+        if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
                 return ret;
 
-        genpd->power_off_latency_ns = elapsed_ns;
+        genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
         genpd->max_off_time_changed = true;
         pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
                  genpd->name, "off", elapsed_ns);
@@ -485,8 +487,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
         if (timed && runtime_pm)
                 time_start = ktime_get();
 
-        genpd_start_dev(genpd, dev);
-        genpd_restore_dev(genpd, dev);
+        ret = genpd_start_dev(genpd, dev);
+        if (ret)
+                goto err_poweroff;
+
+        ret = genpd_restore_dev(genpd, dev);
+        if (ret)
+                goto err_stop;
 
         /* Update resume latency value if the measured time exceeds it. */
         if (timed && runtime_pm) {
@@ -501,6 +508,17 @@ static int pm_genpd_runtime_resume(struct device *dev)
         }
 
         return 0;
+
+err_stop:
+        genpd_stop_dev(genpd, dev);
+err_poweroff:
+        if (!dev->power.irq_safe) {
+                mutex_lock(&genpd->lock);
+                genpd_poweroff(genpd, 0);
+                mutex_unlock(&genpd->lock);
+        }
+
+        return ret;
 }
 
 static bool pd_ignore_unused;
@@ -585,6 +603,8 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
             || atomic_read(&genpd->sd_count) > 0)
                 return;
 
+        /* Choose the deepest state when suspending */
+        genpd->state_idx = genpd->state_count - 1;
         genpd_power_off(genpd, timed);
 
         genpd->status = GPD_STATE_POWER_OFF;
@@ -1378,7 +1398,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
         mutex_lock(&subdomain->lock);
         mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
-        if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+        if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
                 pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
                         subdomain->name);
                 ret = -EBUSY;
@@ -1508,6 +1528,20 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
                 genpd->dev_ops.start = pm_clk_resume;
         }
 
+        if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
+                pr_warn("Initial state index out of bounds.\n");
+                genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
+        }
+
+        if (genpd->state_count > GENPD_MAX_NUM_STATES) {
+                pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
+                genpd->state_count = GENPD_MAX_NUM_STATES;
+        }
+
+        /* Use only one "off" state if there were no states declared */
+        if (genpd->state_count == 0)
+                genpd->state_count = 1;
+
         mutex_lock(&gpd_list_lock);
         list_add(&genpd->gpd_list_node, &gpd_list);
         mutex_unlock(&gpd_list_lock);
@@ -1668,6 +1702,9 @@ struct generic_pm_domain *of_genpd_get_from_provider(
         struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
         struct of_genpd_provider *provider;
 
+        if (!genpdspec)
+                return ERR_PTR(-EINVAL);
+
         mutex_lock(&of_genpd_mutex);
 
         /* Check if we have such a provider in our array */
@@ -1864,6 +1901,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
         struct pm_domain_data *pm_data;
         const char *kobj_path;
         struct gpd_link *link;
+        char state[16];
         int ret;
 
         ret = mutex_lock_interruptible(&genpd->lock);
@@ -1872,7 +1910,13 @@ static int pm_genpd_summary_one(struct seq_file *s,
 
         if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
                 goto exit;
-        seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
+        if (genpd->status == GPD_STATE_POWER_OFF)
+                snprintf(state, sizeof(state), "%s-%u",
+                         status_lookup[genpd->status], genpd->state_idx);
+        else
+                snprintf(state, sizeof(state), "%s",
+                         status_lookup[genpd->status]);
+        seq_printf(s, "%-30s %-15s ", genpd->name, state);
 
         /*
          * Modifications on the list require holding locks on both
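Note: with the state index joined to the status name, a powered-off multi-state domain now reports which of its states it is in. A hypothetical excerpt of the pm_genpd debugfs summary after this change (domain names, column spacing and indices are purely illustrative, not taken from the patch):

    gpu                            on
    vpu                            off-0
    display                        off-1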
@@ -98,7 +98,8 @@ static bool default_stop_ok(struct device *dev)
  *
  * This routine must be executed under the PM domain's lock.
  */
-static bool default_power_down_ok(struct dev_pm_domain *pd)
+static bool __default_power_down_ok(struct dev_pm_domain *pd,
+                                    unsigned int state)
 {
         struct generic_pm_domain *genpd = pd_to_genpd(pd);
         struct gpd_link *link;
@@ -106,27 +107,9 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
         s64 min_off_time_ns;
         s64 off_on_time_ns;
 
-        if (genpd->max_off_time_changed) {
-                struct gpd_link *link;
-
-                /*
-                 * We have to invalidate the cached results for the masters, so
-                 * use the observation that default_power_down_ok() is not
-                 * going to be called for any master until this instance
-                 * returns.
-                 */
-                list_for_each_entry(link, &genpd->slave_links, slave_node)
-                        link->master->max_off_time_changed = true;
-
-                genpd->max_off_time_changed = false;
-                genpd->cached_power_down_ok = false;
-                genpd->max_off_time_ns = -1;
-        } else {
-                return genpd->cached_power_down_ok;
-        }
-
-        off_on_time_ns = genpd->power_off_latency_ns +
-                        genpd->power_on_latency_ns;
+        off_on_time_ns = genpd->states[state].power_off_latency_ns +
+                        genpd->states[state].power_on_latency_ns;
 
         min_off_time_ns = -1;
         /*
@@ -186,8 +169,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
                         min_off_time_ns = constraint_ns;
         }
 
-        genpd->cached_power_down_ok = true;
-
         /*
          * If the computed minimum device off time is negative, there are no
          * latency constraints, so the domain can spend arbitrary time in the
@@ -201,10 +182,45 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
          * time and the time needed to turn the domain on is the maximum
          * theoretical time this domain can spend in the "off" state.
          */
-        genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
+        genpd->max_off_time_ns = min_off_time_ns -
+                genpd->states[state].power_on_latency_ns;
         return true;
 }
 
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+        struct generic_pm_domain *genpd = pd_to_genpd(pd);
+        struct gpd_link *link;
+
+        if (!genpd->max_off_time_changed)
+                return genpd->cached_power_down_ok;
+
+        /*
+         * We have to invalidate the cached results for the masters, so
+         * use the observation that default_power_down_ok() is not
+         * going to be called for any master until this instance
+         * returns.
+         */
+        list_for_each_entry(link, &genpd->slave_links, slave_node)
+                link->master->max_off_time_changed = true;
+
+        genpd->max_off_time_ns = -1;
+        genpd->max_off_time_changed = false;
+        genpd->cached_power_down_ok = true;
+        genpd->state_idx = genpd->state_count - 1;
+
+        /* Find a state to power down to, starting from the deepest. */
+        while (!__default_power_down_ok(pd, genpd->state_idx)) {
+                if (genpd->state_idx == 0) {
+                        genpd->cached_power_down_ok = false;
+                        break;
+                }
+                genpd->state_idx--;
+        }
+
+        return genpd->cached_power_down_ok;
+}
+
 static bool always_on_power_down_ok(struct dev_pm_domain *domain)
 {
         return false;
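Note: the split into __default_power_down_ok() plus a wrapper makes the governor walk states[] from the deepest index downwards and settle on the first state whose combined power-off plus power-on latency still fits the allowed off time. A minimal self-contained sketch of just that selection loop (plain C; pick_state() and the numbers are invented for illustration, and the real governor additionally checks per-device QoS constraints and caches the result):

#include <stdint.h>
#include <stdio.h>

struct state { int64_t off_ns, on_ns; };   /* stand-in for genpd_power_state */

/* Deepest state whose off+on latency still fits the budget, or -1 if none. */
static int pick_state(const struct state *states, unsigned int count,
                      int64_t min_off_time_ns)
{
        for (int i = (int)count - 1; i >= 0; i--) {
                if (states[i].off_ns + states[i].on_ns <= min_off_time_ns)
                        return i;
        }
        return -1;
}

int main(void)
{
        const struct state s[] = {
                { 25000, 2000000 },     /* [0] shallow: cheap to enter and exit */
                { 500000, 10000000 },   /* [1] deep: long power-on latency */
        };

        printf("budget 20 ms -> state %d\n", pick_state(s, 2, 20000000));
        printf("budget  5 ms -> state %d\n", pick_state(s, 2, 5000000));
        return 0;
}

With these made-up latencies a 20 ms budget selects the deep state (index 1), while a 5 ms budget falls back to the shallow one (index 0).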
@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
 }
 EXPORT_SYMBOL(generate_pm_trace);
 
-extern char __tracedata_start, __tracedata_end;
+extern char __tracedata_start[], __tracedata_end[];
 static int show_file_hash(unsigned int value)
 {
         int match;
         char *tracedata;
 
         match = 0;
-        for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
+        for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
              tracedata += 2 + sizeof(unsigned long)) {
                 unsigned short lineno = *(unsigned short *)tracedata;
                 const char *file = *(const char **)(tracedata + 2);
@@ -199,8 +199,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
 static void get_typical_interval(struct menu_device *data)
 {
         int i, divisor;
-        unsigned int max, thresh;
-        uint64_t avg, stddev;
+        unsigned int max, thresh, avg;
+        uint64_t sum, variance;
 
         thresh = UINT_MAX; /* Discard outliers above this value */
 
@@ -208,52 +208,51 @@ static void get_typical_interval(struct menu_device *data)
 
         /* First calculate the average of past intervals */
         max = 0;
-        avg = 0;
+        sum = 0;
         divisor = 0;
         for (i = 0; i < INTERVALS; i++) {
                 unsigned int value = data->intervals[i];
                 if (value <= thresh) {
-                        avg += value;
+                        sum += value;
                         divisor++;
                         if (value > max)
                                 max = value;
                 }
         }
         if (divisor == INTERVALS)
-                avg >>= INTERVAL_SHIFT;
+                avg = sum >> INTERVAL_SHIFT;
         else
-                do_div(avg, divisor);
+                avg = div_u64(sum, divisor);
 
-        /* Then try to determine standard deviation */
-        stddev = 0;
+        /* Then try to determine variance */
+        variance = 0;
         for (i = 0; i < INTERVALS; i++) {
                 unsigned int value = data->intervals[i];
                 if (value <= thresh) {
-                        int64_t diff = value - avg;
-                        stddev += diff * diff;
+                        int64_t diff = (int64_t)value - avg;
+                        variance += diff * diff;
                 }
         }
         if (divisor == INTERVALS)
-                stddev >>= INTERVAL_SHIFT;
+                variance >>= INTERVAL_SHIFT;
         else
-                do_div(stddev, divisor);
+                do_div(variance, divisor);
 
         /*
-         * The typical interval is obtained when standard deviation is small
-         * or standard deviation is small compared to the average interval.
-         *
-         * int_sqrt() formal parameter type is unsigned long. When the
-         * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
-         * the resulting squared standard deviation exceeds the input domain
-         * of int_sqrt on platforms where unsigned long is 32 bits in size.
-         * In such case reject the candidate average.
+         * The typical interval is obtained when standard deviation is
+         * small (stddev <= 20 us, variance <= 400 us^2) or standard
+         * deviation is small compared to the average interval (avg >
+         * 6*stddev, avg^2 > 36*variance). The average is smaller than
+         * UINT_MAX aka U32_MAX, so computing its square does not
+         * overflow a u64. We simply reject this candidate average if
+         * the standard deviation is greater than 715 s (which is
+         * rather unlikely).
          *
          * Use this result only if there is no timer to wake us up sooner.
          */
-        if (likely(stddev <= ULONG_MAX)) {
-                stddev = int_sqrt(stddev);
-                if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
-                                                        || stddev <= 20) {
+        if (likely(variance <= U64_MAX/36)) {
+                if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
+                                                        || variance <= 400) {
                         if (data->next_timer_us > avg)
                                 data->predicted_us = avg;
                         return;
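Note: the equivalence the new comment relies on can be checked numerically. For non-negative values, avg > 6 * stddev is the same test as avg^2 > 36 * variance, and variance <= U64_MAX / 36 guarantees the multiplication by 36 cannot overflow; that bound corresponds to a standard deviation of roughly 715 seconds expressed in microseconds. A small self-contained check (plain C, made-up sample values only, not part of the patch):

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* A few made-up (avg, variance) pairs in us and us^2. */
        const struct { uint32_t avg; uint64_t variance; } s[] = {
                { 150, 400 },           /* stddev 20 us: "small stddev" limit */
                { 150, 625 },           /* stddev 25 us, avg exactly 6*stddev */
                { 10000, 1000000 },     /* stddev 1000 us, avg far above 6x */
        };

        for (unsigned int i = 0; i < sizeof(s) / sizeof(s[0]); i++) {
                uint64_t stddev = (uint64_t)sqrt((double)s[i].variance);
                int with_sqrt    = s[i].avg > 6 * stddev;
                int without_sqrt = (uint64_t)s[i].avg * s[i].avg > 36 * s[i].variance;

                printf("avg=%" PRIu32 " variance=%" PRIu64 ": sqrt form %d, squared form %d\n",
                       s[i].avg, s[i].variance, with_sqrt, without_sqrt);
        }

        /* Largest stddev that still passes the overflow guard, in seconds. */
        printf("overflow guard stddev limit ~%.1f s\n",
               sqrt((double)(UINT64_MAX / 36)) / 1e6);
        return 0;
}

Both forms agree for each pair, and the guard limit prints as roughly 715.8 s, matching the "715 s" figure in the new comment.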
@@ -19,6 +19,8 @@
 /* Defines used for the flags field in the struct generic_pm_domain */
 #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
 
+#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */
+
 enum gpd_status {
         GPD_STATE_ACTIVE = 0, /* PM domain is active */
         GPD_STATE_POWER_OFF, /* PM domain is off */
@@ -37,6 +39,11 @@ struct gpd_dev_ops {
         bool (*active_wakeup)(struct device *dev);
 };
 
+struct genpd_power_state {
+        s64 power_off_latency_ns;
+        s64 power_on_latency_ns;
+};
+
 struct generic_pm_domain {
         struct dev_pm_domain domain; /* PM domain operations */
         struct list_head gpd_list_node; /* Node in the global PM domains list */
@@ -54,9 +61,7 @@ struct generic_pm_domain {
         unsigned int prepared_count; /* Suspend counter of prepared devices */
         bool suspend_power_off; /* Power status before system suspend */
         int (*power_off)(struct generic_pm_domain *domain);
-        s64 power_off_latency_ns;
         int (*power_on)(struct generic_pm_domain *domain);
-        s64 power_on_latency_ns;
         struct gpd_dev_ops dev_ops;
         s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
         bool max_off_time_changed;
@@ -66,6 +71,10 @@ struct generic_pm_domain {
         void (*detach_dev)(struct generic_pm_domain *domain,
                            struct device *dev);
         unsigned int flags; /* Bit field of configs for genpd */
+        struct genpd_power_state states[GENPD_MAX_NUM_STATES];
+        unsigned int state_count; /* number of states */
+        unsigned int state_idx; /* state that genpd will go to when off */
+
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
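Note: with the new genpd_power_state array a platform can describe more than one power-off state per domain and let the governor choose among them; the imx6 hunk above converts a single-state domain. A hypothetical multi-state domain could be filled in the same way (the domain name, latency numbers and state meanings below are invented for illustration, and the power_off/power_on callbacks are omitted):

#include <linux/pm_domain.h>

/* Hypothetical domain with a light "retention" state and a deeper
 * "power gated" state; index 0 is the shallowest. */
static struct generic_pm_domain foo_pd = {
        .name = "foo",
        .states = {
                [0] = {                         /* retention */
                        .power_off_latency_ns = 20000,
                        .power_on_latency_ns = 50000,
                },
                [1] = {                         /* fully power gated */
                        .power_off_latency_ns = 300000,
                        .power_on_latency_ns = 2000000,
                },
        },
        .state_count = 2,
};

As the pm_genpd_init() hunk above shows, state_count and state_idx are clamped to GENPD_MAX_NUM_STATES at init time, and a domain that declares no states is given a single default "off" state.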
@@ -30,13 +30,12 @@ static int try_to_freeze_tasks(bool user_only)
         unsigned long end_time;
         unsigned int todo;
         bool wq_busy = false;
-        struct timeval start, end;
-        u64 elapsed_msecs64;
+        ktime_t start, end, elapsed;
         unsigned int elapsed_msecs;
         bool wakeup = false;
         int sleep_usecs = USEC_PER_MSEC;
 
-        do_gettimeofday(&start);
+        start = ktime_get_boottime();
 
         end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
 
@@ -78,10 +77,9 @@ static int try_to_freeze_tasks(bool user_only)
                 sleep_usecs *= 2;
         }
 
-        do_gettimeofday(&end);
-        elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
-        do_div(elapsed_msecs64, NSEC_PER_MSEC);
-        elapsed_msecs = elapsed_msecs64;
+        end = ktime_get_boottime();
+        elapsed = ktime_sub(end, start);
+        elapsed_msecs = ktime_to_ms(elapsed);
 
         if (todo) {
                 pr_cont("\n");
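Note: the freezer change swaps struct timeval and do_gettimeofday(), which are not y2038-safe, for the boottime clock, which is monotonic and keeps counting across suspend. The same measurement pattern in isolation, as a minimal kernel-style sketch (the helper name and the timed callback are hypothetical, not from the patch):

#include <linux/ktime.h>

/* Hypothetical helper: time some_work() in milliseconds using the
 * boottime clock, which is y2038-safe and includes time spent suspended. */
static unsigned int time_some_work_ms(void (*some_work)(void))
{
        ktime_t start, end;

        start = ktime_get_boottime();
        some_work();
        end = ktime_get_boottime();

        return ktime_to_ms(ktime_sub(end, start));
}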
@@ -248,7 +248,7 @@ static int suspend_test(int level)
 {
 #ifdef CONFIG_PM_DEBUG
         if (pm_test_level == level) {
-                printk(KERN_INFO "suspend debug: Waiting for %d second(s).\n",
+                pr_info("suspend debug: Waiting for %d second(s).\n",
                                 pm_test_delay);
                 mdelay(pm_test_delay * 1000);
                 return 1;
@@ -320,7 +320,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 
         error = dpm_suspend_late(PMSG_SUSPEND);
         if (error) {
-                printk(KERN_ERR "PM: late suspend of devices failed\n");
+                pr_err("PM: late suspend of devices failed\n");
                 goto Platform_finish;
         }
         error = platform_suspend_prepare_late(state);
@@ -329,7 +329,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 
         error = dpm_suspend_noirq(PMSG_SUSPEND);
         if (error) {
-                printk(KERN_ERR "PM: noirq suspend of devices failed\n");
+                pr_err("PM: noirq suspend of devices failed\n");
                 goto Platform_early_resume;
         }
         error = platform_suspend_prepare_noirq(state);