Merge branches 'pm-cpufreq' and 'pm-sleep'

* pm-cpufreq:
  cpufreq: loongson2_cpufreq: adjust cpufreq uses of LOONGSON_CHIPCFG
  cpufreq: brcmstb-avs: fix imbalance of cpufreq policy refcount
  cpufreq: intel_pstate: fix spelling mistake: "Whethet" -> "Whether"
  cpufreq: s3c: fix unbalances of cpufreq policy refcount
  cpufreq: imx-cpufreq-dt: Add i.MX8MP support
  cpufreq: Use imx-cpufreq-dt for i.MX8MP's speed grading
  cpufreq: tegra186: convert to devm_platform_ioremap_resource
  cpufreq: kirkwood: convert to devm_platform_ioremap_resource
  cpufreq: CPPC: put ACPI table after using it
  cpufreq: CPPC: Break out if HiSilicon CPPC workaround is matched

* pm-sleep:
  PM: suspend: Add sysfs attribute to control the "sync on suspend" behavior
  PM: hibernate: fix spelling mistake "shapshot" -> "snapshot"
  PM: hibernate: Add more logging on hibernation failure
  PM: hibernate: improve arithmetic division in preallocate_highmem_fraction()
  PM: wakeup: Show statistics for deleted wakeup sources again
  PM: sleep: Switch to rtc_time64_to_tm()/rtc_tm_to_time64()
Rafael J. Wysocki 2020-01-27 11:29:09 +01:00
commit 245224d1cb
19 changed files with 129 additions and 44 deletions

View File

@ -407,3 +407,16 @@ Contact: Kalesh Singh <kaleshsingh96@gmail.com>
Description:
The /sys/power/suspend_stats/last_failed_step file contains
the last failed step in the suspend/resume path.
What: /sys/power/sync_on_suspend
Date: October 2019
Contact: Jonas Meurer <jonas@freesources.org>
Description:
This file controls whether or not the kernel will sync()
filesystems during system suspend (after freezing user space
and before suspending devices).
Writing a "1" to this file enables the sync() and writing a "0"
disables it. Reads from the file return the current value.
The default is "1" if the build-time "SUSPEND_SKIP_SYNC" config
flag is unset, or "0" otherwise.
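
The attribute is a plain 0/1 sysfs file, so toggling it from user space needs nothing more than a read and a write. A minimal userspace sketch (not part of this commit; it assumes a kernel carrying this patch and permission to write /sys/power/sync_on_suspend):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/power/sync_on_suspend";
	FILE *f;
	int cur;

	/* Read the current setting: '1' means the kernel sync()s on suspend. */
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	cur = fgetc(f);
	fclose(f);
	printf("sync_on_suspend is currently %c\n", cur);

	/* Disable the kernel-side sync(); user space becomes responsible for it. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("0\n", f);
	fclose(f);
	return 0;
}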

View File

@ -1125,6 +1125,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m,
break;
}
if (!next_ws)
print_wakeup_source_stats(m, &deleted_ws);
return next_ws;
}

View File

@ -455,6 +455,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct private_data *priv = policy->driver_data;
cpufreq_cpu_put(policy);
return brcm_avs_get_frequency(priv->base);
}
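
For reference, the bug fixed here is a reference-count leak: cpufreq_cpu_get() takes a reference on the CPU's policy, and brcm_avs_cpufreq_get() never dropped it, so the policy could never be freed. A hedged sketch of the balanced pattern (illustrative only; example_get_khz() is not a function from this driver):

#include <linux/cpufreq.h>

static unsigned int example_get_khz(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int khz;

	if (!policy)			/* no policy registered for this CPU */
		return 0;

	khz = policy->cur;		/* use the policy while the reference is held */
	cpufreq_cpu_put(policy);	/* drop the reference taken by cpufreq_cpu_get() */

	return khz;
}

The s3c2416 and s5pv210 hunks further down switch to the same get/check/use/put pattern.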

View File

@ -39,7 +39,7 @@
static struct cppc_cpudata **all_cpu_data;
struct cppc_workaround_oem_info {
char oem_id[ACPI_OEM_ID_SIZE +1];
char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
@ -93,9 +93,13 @@ static void cppc_check_hisi_workaround(void)
for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
wa_info[i].oem_revision == tbl->oem_revision)
wa_info[i].oem_revision == tbl->oem_revision) {
apply_hisi_workaround = true;
break;
}
}
acpi_put_table(tbl);
}
/* Callback function used to retrieve the max frequency from DMI */
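
The added acpi_put_table() releases the table mapping taken by acpi_get_table() once the OEM fields have been compared. A simplified sketch of that pairing (illustrative; example_check_table() is not the driver function):

#include <linux/acpi.h>

static void example_check_table(void)
{
	struct acpi_table_header *tbl;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);	/* maps and references the table */
	if (ACPI_FAILURE(status) || !tbl)
		return;

	/* ... inspect tbl->oem_id, tbl->oem_table_id, tbl->oem_revision ... */

	acpi_put_table(tbl);	/* release the table once it is no longer needed */
}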

View File

@ -109,6 +109,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "fsl,imx8mq", },
{ .compatible = "fsl,imx8mm", },
{ .compatible = "fsl,imx8mn", },
{ .compatible = "fsl,imx8mp", },
{ .compatible = "marvell,armadaxp", },

View File

@ -35,7 +35,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (ret)
return ret;
if (of_machine_is_compatible("fsl,imx8mn"))
if (of_machine_is_compatible("fsl,imx8mn") ||
of_machine_is_compatible("fsl,imx8mp"))
speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
else
@ -54,7 +55,8 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
if (of_machine_is_compatible("fsl,imx8mm") ||
of_machine_is_compatible("fsl,imx8mq"))
speed_grade = 1;
if (of_machine_is_compatible("fsl,imx8mn"))
if (of_machine_is_compatible("fsl,imx8mn") ||
of_machine_is_compatible("fsl,imx8mp"))
speed_grade = 0xb;
}

View File

@ -172,7 +172,7 @@ struct vid_data {
/**
* struct global_params - Global parameters, mostly tunable via sysfs.
* @no_turbo: Whether or not to use turbo P-states.
* @turbo_disabled: Whethet or not turbo P-states are available at all,
* @turbo_disabled: Whether or not turbo P-states are available at all,
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.

View File

@ -102,13 +102,11 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
struct resource *res;
int err;
priv.dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv.base = devm_ioremap_resource(&pdev->dev, res);
priv.base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
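
This conversion (and the tegra186 one below) folds the platform_get_resource() plus devm_ioremap_resource() pair into a single devm_platform_ioremap_resource() call, which is why the local struct resource pointer disappears. A hedged sketch of the general shape (example_probe() is illustrative, not the driver's probe):

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/*
	 * Old style:
	 *   struct resource *res;
	 *   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base is a device-managed mapping of the device's first MEM resource */
	return 0;
}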

View File

@ -144,9 +144,11 @@ static void loongson2_cpu_wait(void)
u32 cpu_freq;
spin_lock_irqsave(&loongson2_wait_lock, flags);
cpu_freq = LOONGSON_CHIPCFG(0);
LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */
LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */
cpu_freq = readl(LOONGSON_CHIPCFG);
/* Put CPU into wait mode */
writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
/* Restore CPU state */
writel(cpu_freq, LOONGSON_CHIPCFG);
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
local_irq_enable();
}
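
The LOONGSON_CHIPCFG adjustment replaces direct dereferences of the register macro with readl()/writel() MMIO accessors. A minimal sketch of the same save/modify/restore sequence, assuming a generic __iomem register pointer rather than the Loongson macro:

#include <linux/io.h>

static void example_enter_wait(void __iomem *chipcfg)
{
	u32 saved = readl(chipcfg);	/* save the current register value */

	writel(saved & ~0x7, chipcfg);	/* clear the low bits to enter wait mode */
	/* ... CPU waits here ... */
	writel(saved, chipcfg);		/* restore the saved state */
}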

View File

@ -304,6 +304,7 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
int ret;
struct cpufreq_policy *policy;
mutex_lock(&cpufreq_lock);
@ -318,7 +319,16 @@ static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
*/
if (s3c_freq->is_dvs) {
pr_debug("cpufreq: leave dvs on reboot\n");
ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0);
policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
}

View File

@ -555,8 +555,17 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
int ret;
struct cpufreq_policy *policy;
policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
cpufreq_cpu_put(policy);
ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;

View File

@ -187,7 +187,6 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
struct resource *res;
unsigned int i = 0, err;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
@ -205,8 +204,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->regs = devm_ioremap_resource(&pdev->dev, res);
data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
goto put_bpmp;

View File

@ -329,6 +329,7 @@ extern void arch_suspend_disable_irqs(void);
extern void arch_suspend_enable_irqs(void);
extern int pm_suspend(suspend_state_t state);
extern bool sync_on_suspend_enabled;
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem NULL
@ -342,6 +343,7 @@ static inline bool pm_suspend_default_s2idle(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool sync_on_suspend_enabled(void) { return true; }
static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}

View File

@ -27,7 +27,10 @@ config SUSPEND_SKIP_SYNC
Skip the kernel sys_sync() before freezing user processes.
Some systems prefer not to pay this cost on every invocation
of suspend, or they are content with invoking sync() from
user-space before invoking suspend. Say Y if that's your case.
user-space before invoking suspend. There's a run-time switch
at '/sys/power/sync_on_suspend' to configure this behaviour.
This setting changes the default for the run-time switch. Say Y
to change the default to disable the kernel sys_sync().
config HIBERNATE_CALLBACKS
bool

View File

@ -9,7 +9,7 @@
* Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
*/
#define pr_fmt(fmt) "PM: " fmt
#define pr_fmt(fmt) "PM: hibernation: " fmt
#include <linux/export.h>
#include <linux/suspend.h>
@ -106,7 +106,7 @@ EXPORT_SYMBOL(system_entering_hibernation);
#ifdef CONFIG_PM_DEBUG
static void hibernation_debug_sleep(void)
{
pr_info("hibernation debug: Waiting for 5 seconds.\n");
pr_info("debug: Waiting for 5 seconds.\n");
mdelay(5000);
}
@ -277,7 +277,7 @@ static int create_image(int platform_mode)
error = dpm_suspend_end(PMSG_FREEZE);
if (error) {
pr_err("Some devices failed to power down, aborting hibernation\n");
pr_err("Some devices failed to power down, aborting\n");
return error;
}
@ -295,7 +295,7 @@ static int create_image(int platform_mode)
error = syscore_suspend();
if (error) {
pr_err("Some system devices failed to power down, aborting hibernation\n");
pr_err("Some system devices failed to power down, aborting\n");
goto Enable_irqs;
}
@ -310,7 +310,7 @@ static int create_image(int platform_mode)
restore_processor_state();
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
if (error)
pr_err("Error %d creating hibernation image\n", error);
pr_err("Error %d creating image\n", error);
if (!in_suspend) {
events_check_enabled = false;
@ -680,7 +680,7 @@ static int load_image_and_restore(void)
if (!error)
hibernation_restore(flags & SF_PLATFORM_MODE);
pr_err("Failed to load hibernation image, recovering.\n");
pr_err("Failed to load image, recovering.\n");
swsusp_free();
free_basic_memory_bitmaps();
Unlock:
@ -743,7 +743,7 @@ int hibernate(void)
else
flags |= SF_CRC32_MODE;
pm_pr_dbg("Writing image.\n");
pm_pr_dbg("Writing hibernation image.\n");
error = swsusp_write(flags);
swsusp_free();
if (!error) {
@ -755,7 +755,7 @@ int hibernate(void)
in_suspend = 0;
pm_restore_gfp_mask();
} else {
pm_pr_dbg("Image restored successfully.\n");
pm_pr_dbg("Hibernation image restored successfully.\n");
}
Free_bitmaps:
@ -894,7 +894,7 @@ static int software_resume(void)
goto Close_Finish;
}
pm_pr_dbg("Preparing processes for restore.\n");
pm_pr_dbg("Preparing processes for hibernation restore.\n");
error = freeze_processes();
if (error)
goto Close_Finish;
@ -903,7 +903,7 @@ static int software_resume(void)
Finish:
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
pm_restore_console();
pr_info("resume from hibernation failed (%d)\n", error);
pr_info("resume failed (%d)\n", error);
atomic_inc(&snapshot_device_available);
/* For success case, the suspend path will release the lock */
Unlock:
@ -1068,7 +1068,8 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
lock_system_sleep();
swsusp_resume_device = res;
unlock_system_sleep();
pm_pr_dbg("Configured resume from disk to %u\n", swsusp_resume_device);
pm_pr_dbg("Configured hibernation resume from disk to %u\n",
swsusp_resume_device);
noresume = 0;
software_resume();
return n;

View File

@ -190,6 +190,38 @@ static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr
}
power_attr(mem_sleep);
/*
* sync_on_suspend: invoke ksys_sync_helper() before suspend.
*
* show() returns whether ksys_sync_helper() is invoked before suspend.
* store() accepts 0 or 1. 0 disables ksys_sync_helper() and 1 enables it.
*/
bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);
static ssize_t sync_on_suspend_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", sync_on_suspend_enabled);
}
static ssize_t sync_on_suspend_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
sync_on_suspend_enabled = !!val;
return n;
}
power_attr(sync_on_suspend);
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_PM_SLEEP_DEBUG
@ -855,6 +887,7 @@ static struct attribute * g[] = {
&wakeup_count_attr.attr,
#ifdef CONFIG_SUSPEND
&mem_sleep_attr.attr,
&sync_on_suspend_attr.attr,
#endif
#ifdef CONFIG_PM_AUTOSLEEP
&autosleep_attr.attr,

View File

@ -8,7 +8,7 @@
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*/
#define pr_fmt(fmt) "PM: " fmt
#define pr_fmt(fmt) "PM: hibernation: " fmt
#include <linux/version.h>
#include <linux/module.h>
@ -1566,9 +1566,7 @@ static unsigned long preallocate_image_highmem(unsigned long nr_pages)
*/
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
x *= multiplier;
do_div(x, base);
return (unsigned long)x;
return div64_u64(x * multiplier, base);
}
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
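
The arithmetic change above drops do_div(), whose divisor is treated as 32-bit, in favour of div64_u64(), which performs a full 64-by-64 division. A hedged equivalent in isolation (example_fraction() is illustrative, mirroring the new __fraction()):

#include <linux/math64.h>

static unsigned long example_fraction(u64 x, u64 multiplier, u64 base)
{
	/* (x * multiplier) / base with a genuinely 64-bit divisor */
	return (unsigned long)div64_u64(x * multiplier, base);
}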
@ -1705,16 +1703,20 @@ int hibernate_preallocate_memory(void)
ktime_t start, stop;
int error;
pr_info("Preallocating image memory... ");
pr_info("Preallocating image memory\n");
start = ktime_get();
error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
if (error)
if (error) {
pr_err("Cannot allocate original bitmap\n");
goto err_out;
}
error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
if (error)
if (error) {
pr_err("Cannot allocate copy bitmap\n");
goto err_out;
}
alloc_normal = 0;
alloc_highmem = 0;
@ -1804,8 +1806,11 @@ int hibernate_preallocate_memory(void)
alloc -= pages;
pages += pages_highmem;
pages_highmem = preallocate_image_highmem(alloc);
if (pages_highmem < alloc)
if (pages_highmem < alloc) {
pr_err("Image allocation is %lu pages short\n",
alloc - pages_highmem);
goto err_out;
}
pages += pages_highmem;
/*
* size is the desired number of saveable pages to leave in
@ -1836,13 +1841,12 @@ int hibernate_preallocate_memory(void)
out:
stop = ktime_get();
pr_cont("done (allocated %lu pages)\n", pages);
pr_info("Allocated %lu pages for snapshot\n", pages);
swsusp_show_speed(start, stop, pages, "Allocated");
return 0;
err_out:
pr_cont("\n");
swsusp_free();
return -ENOMEM;
}
@ -1976,7 +1980,7 @@ asmlinkage __visible int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
pr_info("Creating hibernation image:\n");
pr_info("Creating image:\n");
drain_local_pages(NULL);
nr_pages = count_data_pages();
@ -2010,7 +2014,7 @@ asmlinkage __visible int swsusp_save(void)
nr_copy_pages = nr_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
pr_info("Image created (%d pages copied)\n", nr_pages);
return 0;
}

View File

@ -564,7 +564,7 @@ static int enter_state(suspend_state_t state)
if (state == PM_SUSPEND_TO_IDLE)
s2idle_begin();
if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
if (sync_on_suspend_enabled) {
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
ksys_sync_helper();
trace_suspend_resume(TPS("sync_filesystems"), 0, false);

View File

@ -70,7 +70,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
static char info_test[] __initdata =
KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
unsigned long now;
time64_t now;
struct rtc_wkalrm alm;
int status;
@ -81,10 +81,10 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
printk(err_readtime, dev_name(&rtc->dev), status);
return;
}
rtc_tm_to_time(&alm.time, &now);
now = rtc_tm_to_time64(&alm.time);
memset(&alm, 0, sizeof alm);
rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
rtc_time64_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
alm.enabled = true;
status = rtc_set_alarm(rtc, &alm);
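
The suspend_test conversion above swaps the old unsigned long based rtc_tm_to_time()/rtc_time_to_tm() helpers for their time64_t counterparts, which do not wrap on 32-bit systems. A small sketch of the same conversion step in isolation (example_bump_alarm() is illustrative, not part of the patch):

#include <linux/rtc.h>

static void example_bump_alarm(struct rtc_wkalrm *alm, unsigned int secs)
{
	time64_t now = rtc_tm_to_time64(&alm->time);	/* struct rtc_time -> seconds */

	rtc_time64_to_tm(now + secs, &alm->time);	/* seconds -> struct rtc_time */
	alm->enabled = true;
}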