mirror of https://gitee.com/openkylin/linux.git
More ACPI and power management updates for 3.17-rc1
- Fix for an ACPI-based device hotplug regression introduced in 3.14 that causes a kernel panic to trigger when memory hot-remove is attempted with CONFIG_ACPI_HOTPLUG_MEMORY unset from Tang Chen. - Fix for a cpufreq regression introduced in 3.16 that triggers a "sleeping function called from invalid context" bug in dev_pm_opp_init_cpufreq_table() from Stephen Boyd. - ACPI battery driver fix for a warning message added in 3.16 that prints silly stuff sometimes from Mariusz Ceier. - Hibernation fix for safer handling of mismatches in the e820 memory map between the configurations during image creation and during the subsequent restore from Chun-Yi Lee. - ACPI processor driver fix to handle CPU hotplug notifications correctly during system suspend/resume from Lan Tianyu. - Series of four cpuidle menu governor cleanups that also should speed it up a bit from Mel Gorman. - Fixes for the speedstep-smi, integrator, cpu0 and arm_big_little cpufreq drivers from Hans Wennborg, Himangi Saraogi, Markus Pargmann and Uwe Kleine-König. - Version 3.0 of the analyze_suspend.py suspend profiling tool from Todd E Brandt. 
/ -----BEGIN PGP SIGNATURE----- Version: GnuPG v2.0.22 (GNU/Linux) iQIcBAABCAAGBQJT7UnNAAoJEILEb/54YlRxcxIP/ROFeak3+5tt3hkvZCevxpUh AMPccgUoqsF2dognO3pcR4AgGP+meM6Qw0zBjPDNx6oo87hw7P1HlngfaRPHnWPh iAkY2p1QhGAZW29vqxqBIdLVP9M+Nje0tvOX8/6QEsQgo2y6YCbJU0zITmvb8Tsk 183cXiz6xXDezt4sPeIVg2QVfngVFtOeNVgHDIhldQSF6zUQJP/3+BVutvaj3olt 2O3qpNfwJjFh9p6LWQ+CAalq3hJyNZ6ettLNCvudeq4kqRo49WAdjHaRW+qju/NR dWybO29MfviczABVQ1ReqSnz0MJOqhZNxkEi5KqnYBb3fx8e2XffsBFzFzTp6BJi bp4ALcFIu9r5ctWVxQhmgEC6uhYMIXZ681sH99HyIdzk2cNRgMxRj6u2aVe/Cczu Bb489CRHmOrZyXrkmENg+LkOYBNoXcT+RepH9Ex8R+TNBlKLEBKMMgPrfbFeVKWB Vm621tHNATJG8nJcs3zJulM2FQ0q8c2irw6WwhUxzbSOxmqSvO5zN3OgYt+c+gWk MmA8IhUpQBLkqBx1FMi0lOOdIW3qKZJFrU39VQEjoP4P1nXgf373NPlfgzMvEvqM qQ8srMKFUjYxH3g0ftWk5a2MwEjyHQpvZe0djsMCN7ZkFLwUe1ri/R9Ja2LLQcIZ SyVkFbbO+moXTRMA1yA9 =kpiw -----END PGP SIGNATURE----- Merge tag 'pm+acpi-3.17-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm Pull more ACPI and power management updates from Rafael Wysocki: "These are a couple of regression fixes, cpuidle menu governor optimizations, fixes for ACPI processor and battery drivers, hibernation fix to avoid problems related to the e820 memory map, fixes for a few cpufreq drivers and a new version of the suspend profiling tool analyze_suspend.py. 
Specifics: - Fix for an ACPI-based device hotplug regression introduced in 3.14 that causes a kernel panic to trigger when memory hot-remove is attempted with CONFIG_ACPI_HOTPLUG_MEMORY unset from Tang Chen - Fix for a cpufreq regression introduced in 3.16 that triggers a "sleeping function called from invalid context" bug in dev_pm_opp_init_cpufreq_table() from Stephen Boyd - ACPI battery driver fix for a warning message added in 3.16 that prints silly stuff sometimes from Mariusz Ceier - Hibernation fix for safer handling of mismatches in the e820 memory map between the configurations during image creation and during the subsequent restore from Chun-Yi Lee - ACPI processor driver fix to handle CPU hotplug notifications correctly during system suspend/resume from Lan Tianyu - Series of four cpuidle menu governor cleanups that also should speed it up a bit from Mel Gorman - Fixes for the speedstep-smi, integrator, cpu0 and arm_big_little cpufreq drivers from Hans Wennborg, Himangi Saraogi, Markus Pargmann and Uwe Kleine-König - Version 3.0 of the analyze_suspend.py suspend profiling tool from Todd E Brandt" * tag 'pm+acpi-3.17-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: ACPI / battery: Fix warning message in acpi_battery_get_state() PM / tools: analyze_suspend.py: update to v3.0 cpufreq: arm_big_little: fix module license spec cpufreq: speedstep-smi: fix decimal printf specifiers ACPI / hotplug: Check scan handlers in acpi_scan_hot_remove() cpufreq: OPP: Avoid sleeping while atomic cpufreq: cpu0: Do not print error message when deferring cpufreq: integrator: Use set_cpus_allowed_ptr PM / hibernate: avoid unsafe pages in e820 reserved regions ACPI / processor: Make acpi_cpu_soft_notify() process CPU FROZEN events cpuidle: menu: Lookup CPU runqueues less cpuidle: menu: Call nr_iowait_cpu less times cpuidle: menu: Use ktime_to_us instead of reinventing the wheel cpuidle: menu: Use shifts when calculating averages where possible
This commit is contained in:
commit
c9d26423e5
|
@ -540,12 +540,12 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
|
|||
*/
|
||||
if (battery->capacity_now > battery->full_charge_capacity
|
||||
&& battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
|
||||
battery->capacity_now = battery->full_charge_capacity;
|
||||
if (battery->capacity_now != battery->design_capacity)
|
||||
printk_once(KERN_WARNING FW_BUG
|
||||
"battery: reported current charge level (%d) "
|
||||
"is higher than reported maximum charge level (%d).\n",
|
||||
battery->capacity_now, battery->full_charge_capacity);
|
||||
battery->capacity_now = battery->full_charge_capacity;
|
||||
}
|
||||
|
||||
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
|
||||
|
|
|
@ -120,6 +120,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
|
|||
unsigned int cpu = (unsigned long)hcpu;
|
||||
struct acpi_processor *pr = per_cpu(processors, cpu);
|
||||
struct acpi_device *device;
|
||||
action &= ~CPU_TASKS_FROZEN;
|
||||
|
||||
/*
|
||||
* CPU_STARTING and CPU_DYING must not sleep. Return here since
|
||||
|
|
|
@ -353,7 +353,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
|
|||
unsigned long long sta;
|
||||
acpi_status status;
|
||||
|
||||
if (device->handler->hotplug.demand_offline && !acpi_force_hot_remove) {
|
||||
if (device->handler && device->handler->hotplug.demand_offline
|
||||
&& !acpi_force_hot_remove) {
|
||||
if (!acpi_scan_is_offline(device, true))
|
||||
return -EBUSY;
|
||||
} else {
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/pm_opp.h>
|
||||
|
@ -593,3 +594,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
|
|||
arm_bL_ops = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
|
||||
|
||||
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
|
||||
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -114,4 +114,4 @@ module_platform_driver(generic_bL_platdrv);
|
|||
|
||||
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
|
||||
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@ -137,7 +137,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
|
|||
* not yet registered, we should try defering probe.
|
||||
*/
|
||||
if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
|
||||
dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
|
||||
dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
|
||||
ret = -EPROBE_DEFER;
|
||||
goto out_put_node;
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
|
|||
goto out;
|
||||
}
|
||||
|
||||
freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL);
|
||||
freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC);
|
||||
if (!freq_table) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
|
|
|
@ -92,7 +92,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
|
|||
* Bind to the specified CPU. When this call returns,
|
||||
* we should be running on the right CPU.
|
||||
*/
|
||||
set_cpus_allowed(current, cpumask_of_cpu(cpu));
|
||||
set_cpus_allowed_ptr(current, cpumask_of(cpu));
|
||||
BUG_ON(cpu != smp_processor_id());
|
||||
|
||||
/* get current setting */
|
||||
|
@ -118,7 +118,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
|
|||
freqs.new = icst_hz(&cclk_params, vco) / 1000;
|
||||
|
||||
if (freqs.old == freqs.new) {
|
||||
set_cpus_allowed(current, cpus_allowed);
|
||||
set_cpus_allowed_ptr(current, &cpus_allowed);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -141,7 +141,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
|
|||
/*
|
||||
* Restore the CPUs allowed mask.
|
||||
*/
|
||||
set_cpus_allowed(current, cpus_allowed);
|
||||
set_cpus_allowed_ptr(current, &cpus_allowed);
|
||||
|
||||
cpufreq_freq_transition_end(policy, &freqs, 0);
|
||||
|
||||
|
@ -157,7 +157,7 @@ static unsigned int integrator_get(unsigned int cpu)
|
|||
|
||||
cpus_allowed = current->cpus_allowed;
|
||||
|
||||
set_cpus_allowed(current, cpumask_of_cpu(cpu));
|
||||
set_cpus_allowed_ptr(current, cpumask_of(cpu));
|
||||
BUG_ON(cpu != smp_processor_id());
|
||||
|
||||
/* detect memory etc. */
|
||||
|
@ -173,7 +173,7 @@ static unsigned int integrator_get(unsigned int cpu)
|
|||
|
||||
current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
|
||||
|
||||
set_cpus_allowed(current, cpus_allowed);
|
||||
set_cpus_allowed_ptr(current, &cpus_allowed);
|
||||
|
||||
return current_freq;
|
||||
}
|
||||
|
|
|
@ -324,8 +324,8 @@ static int __init speedstep_init(void)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
pr_debug("signature:0x%.8ulx, command:0x%.8ulx, "
|
||||
"event:0x%.8ulx, perf_level:0x%.8ulx.\n",
|
||||
pr_debug("signature:0x%.8x, command:0x%.8x, "
|
||||
"event:0x%.8x, perf_level:0x%.8x.\n",
|
||||
ist_info.signature, ist_info.command,
|
||||
ist_info.event, ist_info.perf_level);
|
||||
|
||||
|
|
|
@ -31,7 +31,8 @@
|
|||
* The default values do not overflow.
|
||||
*/
|
||||
#define BUCKETS 12
|
||||
#define INTERVALS 8
|
||||
#define INTERVAL_SHIFT 3
|
||||
#define INTERVALS (1UL << INTERVAL_SHIFT)
|
||||
#define RESOLUTION 1024
|
||||
#define DECAY 8
|
||||
#define MAX_INTERESTING 50000
|
||||
|
@ -133,15 +134,12 @@ struct menu_device {
|
|||
#define LOAD_INT(x) ((x) >> FSHIFT)
|
||||
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
|
||||
|
||||
static int get_loadavg(void)
|
||||
static inline int get_loadavg(unsigned long load)
|
||||
{
|
||||
unsigned long this = this_cpu_load();
|
||||
|
||||
|
||||
return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
|
||||
return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
|
||||
}
|
||||
|
||||
static inline int which_bucket(unsigned int duration)
|
||||
static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
|
||||
{
|
||||
int bucket = 0;
|
||||
|
||||
|
@ -151,7 +149,7 @@ static inline int which_bucket(unsigned int duration)
|
|||
* This allows us to calculate
|
||||
* E(duration)|iowait
|
||||
*/
|
||||
if (nr_iowait_cpu(smp_processor_id()))
|
||||
if (nr_iowaiters)
|
||||
bucket = BUCKETS/2;
|
||||
|
||||
if (duration < 10)
|
||||
|
@ -174,16 +172,16 @@ static inline int which_bucket(unsigned int duration)
|
|||
* to be, the higher this multiplier, and thus the higher
|
||||
* the barrier to go to an expensive C state.
|
||||
*/
|
||||
static inline int performance_multiplier(void)
|
||||
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
|
||||
{
|
||||
int mult = 1;
|
||||
|
||||
/* for higher loadavg, we are more reluctant */
|
||||
|
||||
mult += 2 * get_loadavg();
|
||||
mult += 2 * get_loadavg(load);
|
||||
|
||||
/* for IO wait tasks (per cpu!) we add 5x each */
|
||||
mult += 10 * nr_iowait_cpu(smp_processor_id());
|
||||
mult += 10 * nr_iowaiters;
|
||||
|
||||
return mult;
|
||||
}
|
||||
|
@ -227,7 +225,10 @@ static void get_typical_interval(struct menu_device *data)
|
|||
max = value;
|
||||
}
|
||||
}
|
||||
do_div(avg, divisor);
|
||||
if (divisor == INTERVALS)
|
||||
avg >>= INTERVAL_SHIFT;
|
||||
else
|
||||
do_div(avg, divisor);
|
||||
|
||||
/* Then try to determine standard deviation */
|
||||
stddev = 0;
|
||||
|
@ -238,7 +239,11 @@ static void get_typical_interval(struct menu_device *data)
|
|||
stddev += diff * diff;
|
||||
}
|
||||
}
|
||||
do_div(stddev, divisor);
|
||||
if (divisor == INTERVALS)
|
||||
stddev >>= INTERVAL_SHIFT;
|
||||
else
|
||||
do_div(stddev, divisor);
|
||||
|
||||
/*
|
||||
* The typical interval is obtained when standard deviation is small
|
||||
* or standard deviation is small compared to the average interval.
|
||||
|
@ -288,7 +293,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
|
|||
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
|
||||
int i;
|
||||
unsigned int interactivity_req;
|
||||
struct timespec t;
|
||||
unsigned long nr_iowaiters, cpu_load;
|
||||
|
||||
if (data->needs_update) {
|
||||
menu_update(drv, dev);
|
||||
|
@ -302,12 +307,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
|
|||
return 0;
|
||||
|
||||
/* determine the expected residency time, round up */
|
||||
t = ktime_to_timespec(tick_nohz_get_sleep_length());
|
||||
data->next_timer_us =
|
||||
t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
|
||||
data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());
|
||||
|
||||
|
||||
data->bucket = which_bucket(data->next_timer_us);
|
||||
get_iowait_load(&nr_iowaiters, &cpu_load);
|
||||
data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
|
||||
|
||||
/*
|
||||
* Force the result of multiplication to be 64 bits even if both
|
||||
|
@ -325,7 +328,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
|
|||
* duration / latency ratio. Adjust the latency limit if
|
||||
* necessary.
|
||||
*/
|
||||
interactivity_req = data->predicted_us / performance_multiplier();
|
||||
interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
|
||||
if (latency_req > interactivity_req)
|
||||
latency_req = interactivity_req;
|
||||
|
||||
|
|
|
@ -169,8 +169,7 @@ extern int nr_processes(void);
|
|||
extern unsigned long nr_running(void);
|
||||
extern unsigned long nr_iowait(void);
|
||||
extern unsigned long nr_iowait_cpu(int cpu);
|
||||
extern unsigned long this_cpu_load(void);
|
||||
|
||||
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
|
||||
|
||||
extern void calc_global_load(unsigned long ticks);
|
||||
extern void update_cpu_load_nohz(void);
|
||||
|
|
|
@ -954,6 +954,25 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
|
|||
}
|
||||
}
|
||||
|
||||
static bool is_nosave_page(unsigned long pfn)
|
||||
{
|
||||
struct nosave_region *region;
|
||||
|
||||
list_for_each_entry(region, &nosave_regions, list) {
|
||||
if (pfn >= region->start_pfn && pfn < region->end_pfn) {
|
||||
pr_err("PM: %#010llx in e820 nosave region: "
|
||||
"[mem %#010llx-%#010llx]\n",
|
||||
(unsigned long long) pfn << PAGE_SHIFT,
|
||||
(unsigned long long) region->start_pfn << PAGE_SHIFT,
|
||||
((unsigned long long) region->end_pfn << PAGE_SHIFT)
|
||||
- 1);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* create_basic_memory_bitmaps - create bitmaps needed for marking page
|
||||
* frames that should not be saved and free page frames. The pointers
|
||||
|
@ -2015,7 +2034,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
|
|||
do {
|
||||
pfn = memory_bm_next_pfn(bm);
|
||||
if (likely(pfn != BM_END_OF_MAP)) {
|
||||
if (likely(pfn_valid(pfn)))
|
||||
if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
|
||||
swsusp_set_page_free(pfn_to_page(pfn));
|
||||
else
|
||||
return -EFAULT;
|
||||
|
|
|
@ -2393,6 +2393,13 @@ unsigned long nr_iowait_cpu(int cpu)
|
|||
return atomic_read(&this->nr_iowait);
|
||||
}
|
||||
|
||||
void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
|
||||
{
|
||||
struct rq *this = this_rq();
|
||||
*nr_waiters = atomic_read(&this->nr_iowait);
|
||||
*load = this->cpu_load[0];
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
/*
|
||||
|
|
|
@ -8,13 +8,6 @@
|
|||
|
||||
#include "sched.h"
|
||||
|
||||
unsigned long this_cpu_load(void)
|
||||
{
|
||||
struct rq *this = this_rq();
|
||||
return this->cpu_load[0];
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Global load-average calculations
|
||||
*
|
||||
|
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue