Power management updates for 3.7-rc1
Merge tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management updates from Rafael J. Wysocki:

 - Improved system suspend/resume and runtime PM handling for the SH
   TMU, CMT and MTU2 clock event devices (also used by ARM/shmobile).

 - Generic PM domains framework extensions related to cpuidle support
   and domain objects lookup using names.

 - ARM/shmobile power management updates including improved support for
   the SH7372's A4S power domain containing the CPU core.

 - cpufreq changes related to AMD CPUs support from Matthew Garrett,
   Andre Przywara and Borislav Petkov.

 - cpu0 cpufreq driver from Shawn Guo.

 - cpufreq governor fixes related to the relaxing of limits from Michal
   Pecio.

 - OMAP cpufreq updates from Axel Lin and Richard Zhao.

 - cpuidle ladder governor fixes related to the disabling of states from
   Carsten Emde and me.

 - Runtime PM core updates related to the interactions with the system
   suspend core from Alan Stern and Kevin Hilman.

 - Wakeup sources modification allowing more helper functions to be
   called from interrupt context from John Stultz and additional
   diagnostic code from Todd Poynor.

 - System suspend error code path fix from Feng Hong.

Fixed up conflicts in cpufreq/powernow-k8 that stemmed from the
workqueue fixes conflicting fairly badly with the removal of support
for hardware P-state chips.  The changes were independent but somewhat
intertwined.
* tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
  PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2
  cpuidle: rename function name "__cpuidle_register_driver", v2
  cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name
  cpuidle: remove some empty lines
  PM: Prevent runtime suspend during system resume
  PM QoS: Use spinlock in the per-device PM QoS constraints code
  PM / Sleep: use resume event when call dpm_resume_early
  cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure
  ACPI / processor: remove pointless variable initialization
  ACPI / processor: remove unused function parameter
  cpufreq: OMAP: remove loops_per_jiffy recalculate for smp
  sections: fix section conflicts in drivers/cpufreq
  cpufreq: conservative: update frequency when limits are relaxed
  cpufreq / ondemand: update frequency when limits are relaxed properly
  __init-annotate pm_sysrq_init()
  cpufreq: Add a generic cpufreq-cpu0 driver
  PM / OPP: Initialize OPP table from device tree
  ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp
  cpufreq: Remove support for hardware P-state chips from powernow-k8
  ...
commit 16642a2e7b
@@ -176,3 +176,14 @@ Description:	Disable L3 cache indices
		All AMD processors with L3 caches provide this functionality.
		For details, see BKDGs at
		http://developer.amd.com/documentation/guides/Pages/default.aspx
+
+
+What:		/sys/devices/system/cpu/cpufreq/boost
+Date:		August 2012
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Processor frequency boosting control
+
+		This switch controls the boost setting for the whole system.
+		Boosting allows the CPU and the firmware to run at a frequency
+		beyond its nominal limit.
+		More details can be found in Documentation/cpu-freq/boost.txt

@@ -0,0 +1,93 @@
+Processor boosting control
+
+	- information for users -
+
+Quick guide for the impatient:
+--------------------
+/sys/devices/system/cpu/cpufreq/boost
+controls the boost setting for the whole system. You can read and write
+that file with either "0" (boosting disabled) or "1" (boosting allowed).
+Reading or writing 1 does not mean that the system is boosting at this
+very moment, but only that the CPU _may_ raise the frequency at its
+discretion.
+--------------------
+
+Introduction
+-------------
+Some CPUs support the ability to raise the operating frequency of
+some cores in a multi-core package if certain conditions apply, mostly
+if the whole chip is not fully utilized and below its intended thermal
+budget. This is done without operating system control by a combination
+of hardware and firmware.
+On Intel CPUs this is called "Turbo Boost", AMD calls it "Turbo-Core",
+in technical documentation "Core performance boost". In Linux we use
+the term "boost" for convenience.
+
+Rationale for disable switch
+----------------------------
+
+Though the idea is to just give better performance without any user
+intervention, sometimes the need arises to disable this functionality.
+Most systems offer a switch in the (BIOS) firmware to disable the
+functionality entirely, but a more fine-grained and dynamic control would
+be desirable:
+1. While running benchmarks, reproducible results are important. Since
+the boosting functionality depends on the load of the whole package,
+single-thread performance can vary. By explicitly disabling the boost
+functionality at least for the benchmark's run-time the system will run
+at a fixed frequency and results are reproducible again.
+2. To examine the impact of the boosting functionality it is helpful
+to do tests with and without boosting.
+3. Boosting means overclocking the processor, though under controlled
+conditions. By raising the frequency and the voltage the processor
+will consume more power than without the boosting, which may be
+undesirable for instance for mobile users. Disabling boosting may
+save power here, though this depends on the workload.
+
+
+User controlled switch
+----------------------
+
+To allow the user to toggle the boosting functionality, the acpi-cpufreq
+driver exports a sysfs knob to disable it. There is a file:
+/sys/devices/system/cpu/cpufreq/boost
+which can either read "0" (boosting disabled) or "1" (boosting enabled).
+Reading the file is always supported, even if the processor does not
+support boosting. In this case the file will be read-only and always
+reads as "0". Explicitly changing the permissions and writing to that
+file anyway will return EINVAL.
+
+On supported CPUs one can write either a "0" or a "1" into this file.
+This will either disable the boost functionality on all cores in the
+whole system (0) or will allow the hardware to boost at will (1).
+
+Writing a "1" does not explicitly boost the system, but just allows the
+CPU (and the firmware) to boost at their discretion. Some implementations
+take external factors like the chip's temperature into account, so
+boosting once does not necessarily mean that it will occur every time
+even using the exact same software setup.
+
+
+AMD legacy cpb switch
+---------------------
+The AMD powernow-k8 driver used to support a very similar switch to
+disable or enable the "Core Performance Boost" feature of some AMD CPUs.
+This switch was instantiated in each CPU's cpufreq directory
+(/sys/devices/system/cpu[0-9]*/cpufreq) and was called "cpb".
+Though the per-CPU existence hints at a more fine-grained control, the
+actual implementation only supported system-global switch semantics,
+which was simply reflected into each CPU's file. Writing a 0 or 1 into it
+would pull the other CPUs to the same state.
+For compatibility reasons this file and its behavior is still supported
+on AMD CPUs, though it is now protected by a config switch
+(X86_ACPI_CPUFREQ_CPB). On Intel CPUs this file will never be created,
+even with the config option set.
+This functionality is considered legacy and will be removed in some future
+kernel version.
+
+
+More fine-grained boosting control
+----------------------------------
+
+Technically it is possible to switch the boosting functionality at least
+on a per-package basis, for some CPUs even per core. Currently the driver
+does not support it, but this may be implemented in the future.

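A quick illustration (not part of this commit): the knob above is an ordinary
sysfs file, so it can be driven from user space. A minimal C sketch, assuming
the path documented above, root privileges, and no error handling beyond the
open check:

	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/devices/system/cpu/cpufreq/boost";
		FILE *f = fopen(path, "r+");
		int enabled;

		if (!f) {
			perror(path);
			return 1;
		}
		/* current setting: 1 = boosting allowed, 0 = disabled */
		if (fscanf(f, "%d", &enabled) == 1)
			printf("boost currently %s\n",
			       enabled ? "allowed" : "disabled");

		rewind(f);
		fprintf(f, "0\n");	/* e.g. pin frequencies for a benchmark run */
		fclose(f);
		return 0;
	}

Remember that writing "1" only re-allows boosting; whether the hardware
actually boosts remains at the CPU's and firmware's discretion.
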
@@ -76,9 +76,17 @@ total 0

 * desc : Small description about the idle state (string)
-* disable : Option to disable this idle state (bool)
+* disable : Option to disable this idle state (bool) -> see note below
 * latency : Latency to exit out of this idle state (in microseconds)
 * name : Name of the idle state (string)
 * power : Power consumed while in this idle state (in milliwatts)
 * time : Total time spent in this idle state (in microseconds)
 * usage : Number of times this state was entered (count)
+
+Note:
+The behavior and the effect of the disable variable depend on the
+implementation of a particular governor. In the ladder governor, for
+example, it is not coherent, i.e. if one disables a light state,
+then all deeper states are disabled as well, but the disable variable
+does not reflect it. Likewise, if one enables a deep state but a lighter
+state still is disabled, then this has no effect.

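A quick illustration (not part of this commit): flipping the per-state disable
attribute from user space. The sysfs layout is assumed to be
/sys/devices/system/cpu/cpuN/cpuidle/stateM/disable, matching the attributes
listed above; a hedged C sketch:

	#include <stdio.h>

	/* Write 0 or 1 into one state's "disable" file for one CPU. */
	static int set_idle_state_disabled(int cpu, int state, int disabled)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/disable",
			 cpu, state);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%d\n", disabled);
		return fclose(f);
	}

As the note warns, with the ladder governor the effective result may also
cover deeper states even though their own disable files still read "0".
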
@@ -0,0 +1,55 @@
+Generic CPU0 cpufreq driver
+
+It is a generic cpufreq driver for CPU0 frequency management.  It
+supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+systems which share clock and voltage across all CPUs.
+
+Both required and optional properties listed below must be defined
+under node /cpus/cpu@0.
+
+Required properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
+  for details
+
+Optional properties:
+- clock-latency: Specify the possible maximum transition latency for clock,
+  in units of nanoseconds.
+- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
+
+Examples:
+
+cpus {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	cpu@0 {
+		compatible = "arm,cortex-a9";
+		reg = <0>;
+		next-level-cache = <&L2>;
+		operating-points = <
+			/* kHz    uV */
+			792000  1100000
+			396000  950000
+			198000  850000
+		>;
+		transition-latency = <61036>; /* two CLK32 periods */
+	};
+
+	cpu@1 {
+		compatible = "arm,cortex-a9";
+		reg = <1>;
+		next-level-cache = <&L2>;
+	};
+
+	cpu@2 {
+		compatible = "arm,cortex-a9";
+		reg = <2>;
+		next-level-cache = <&L2>;
+	};
+
+	cpu@3 {
+		compatible = "arm,cortex-a9";
+		reg = <3>;
+		next-level-cache = <&L2>;
+	};
+};

@@ -0,0 +1,25 @@
+* Generic OPP Interface
+
+SoCs have a standard set of tuples consisting of frequency and
+voltage pairs that the device will support per voltage domain. These
+are called Operating Performance Points or OPPs.
+
+Properties:
+- operating-points: An array of 2-tuple items, where each item consists
+  of frequency and voltage like <freq-kHz vol-uV>.
+  freq: clock frequency in kHz
+  vol: voltage in microvolts
+
+Examples:
+
+cpu@0 {
+	compatible = "arm,cortex-a9";
+	reg = <0>;
+	next-level-cache = <&L2>;
+	operating-points = <
+		/* kHz    uV */
+		792000  1100000
+		396000  950000
+		198000  850000
+	>;
+};

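A quick illustration (not part of this commit): a consumer can walk the
property as a flat array of <kHz uV> cells. A sketch using generic OF
helpers (simplified, minimal error handling):

	#include <linux/kernel.h>
	#include <linux/of.h>

	static void dump_opps(struct device_node *np)
	{
		struct property *prop;
		const __be32 *val;
		int nr;

		prop = of_find_property(np, "operating-points", NULL);
		if (!prop || !prop->value)
			return;

		/* each OPP is two u32 cells: frequency (kHz), voltage (uV) */
		nr = prop->length / sizeof(u32);
		val = prop->value;
		while (nr >= 2) {
			unsigned long freq_khz = be32_to_cpup(val++);
			unsigned long volt_uv = be32_to_cpup(val++);

			pr_info("OPP: %lu kHz at %lu uV\n", freq_khz, volt_uv);
			nr -= 2;
		}
	}
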
@@ -24,6 +24,7 @@
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>

 #include <linux/atomic.h>
 #include <asm/smp.h>

@@ -650,3 +651,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
	return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+					unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	int cpu = freq->cpu;
+
+	if (freq->flags & CPUFREQ_CONST_LOOPS)
+		return NOTIFY_OK;
+
+	if (!per_cpu(l_p_j_ref, cpu)) {
+		per_cpu(l_p_j_ref, cpu) =
+			per_cpu(cpu_data, cpu).loops_per_jiffy;
+		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		if (!global_l_p_j_ref) {
+			global_l_p_j_ref = loops_per_jiffy;
+			global_l_p_j_ref_freq = freq->old;
+		}
+	}
+
+	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+						global_l_p_j_ref_freq,
+						freq->new);
+		per_cpu(cpu_data, cpu).loops_per_jiffy =
+			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+					per_cpu(l_p_j_ref_freq, cpu),
+					freq->new);
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+	.notifier_call  = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	return cpufreq_register_notifier(&cpufreq_notifier,
+						CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif

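For intuition (not part of this commit): cpufreq_scale() used above rescales a
calibrated loops_per_jiffy linearly with clock frequency, i.e. lpj_new =
lpj_ref * freq_new / freq_ref. A standalone sketch of that arithmetic
(simplified; the kernel helper additionally guards against 32-bit overflow):

	static unsigned long scale_loops_per_jiffy(unsigned long lpj_ref,
						   unsigned int ref_khz,
						   unsigned int new_khz)
	{
		/* widen to 64 bits so the product cannot wrap */
		return (unsigned long)(((unsigned long long)lpj_ref * new_khz)
				       / ref_khz);
	}
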
@@ -3,7 +3,7 @@
 #

 # Common objects
-obj-y				:= timer.o console.o clock.o common.o
+obj-y				:= timer.o console.o clock.o

 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)	+= setup-sh7367.o clock-sh7367.o intc-sh7367.o

@@ -1231,6 +1231,15 @@ static struct i2c_board_info i2c1_devices[] = {
 #define USCCR1 IOMEM(0xE6058144)
 static void __init ap4evb_init(void)
 {
+	struct pm_domain_device domain_devices[] = {
+		{ "A4LC", &lcdc1_device, },
+		{ "A4LC", &lcdc_device, },
+		{ "A4MP", &fsi_device, },
+		{ "A3SP", &sh_mmcif_device, },
+		{ "A3SP", &sdhi0_device, },
+		{ "A3SP", &sdhi1_device, },
+		{ "A4R", &ceu_device, },
+	};
	u32 srcr4;
	struct clk *clk;

@@ -1463,14 +1472,8 @@ static void __init ap4evb_init(void)

	platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));

-	rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+	rmobile_add_devices_to_domains(domain_devices,
+				       ARRAY_SIZE(domain_devices));

	hdmi_init_pm_clock();
	fsi_init_pm_clock();

@@ -1485,6 +1488,6 @@ MACHINE_START(AP4EVB, "ap4evb")
	.init_irq	= sh7372_init_irq,
	.handle_irq	= shmobile_handle_irq_intc,
	.init_machine	= ap4evb_init,
-	.init_late	= shmobile_init_late,
+	.init_late	= sh7372_pm_init_late,
	.timer		= &shmobile_timer,
 MACHINE_END

@@ -1209,10 +1209,10 @@ static void __init eva_init(void)

	eva_clock_init();

-	rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &lcdc0_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &hdmi_lcdc_device);
+	rmobile_add_device_to_domain("A4LC", &lcdc0_device);
+	rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device);
	if (usb)
-		rmobile_add_device_to_domain(&r8a7740_pd_a3sp, usb);
+		rmobile_add_device_to_domain("A3SP", usb);
 }

 static void __init eva_earlytimer_init(void)

@@ -1412,6 +1412,22 @@ static struct i2c_board_info i2c1_devices[] = {
 #define USCCR1 IOMEM(0xE6058144)
 static void __init mackerel_init(void)
 {
+	struct pm_domain_device domain_devices[] = {
+		{ "A4LC", &lcdc_device, },
+		{ "A4LC", &hdmi_lcdc_device, },
+		{ "A4LC", &meram_device, },
+		{ "A4MP", &fsi_device, },
+		{ "A3SP", &usbhs0_device, },
+		{ "A3SP", &usbhs1_device, },
+		{ "A3SP", &nand_flash_device, },
+		{ "A3SP", &sh_mmcif_device, },
+		{ "A3SP", &sdhi0_device, },
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
+		{ "A3SP", &sdhi1_device, },
+#endif
+		{ "A3SP", &sdhi2_device, },
+		{ "A4R", &ceu_device, },
+	};
	u32 srcr4;
	struct clk *clk;

@@ -1626,20 +1642,8 @@ static void __init mackerel_init(void)

	platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));

-	rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4lc, &hdmi_lcdc_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4lc, &meram_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs0_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &nand_flash_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-#endif
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi2_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+	rmobile_add_devices_to_domains(domain_devices,
+				       ARRAY_SIZE(domain_devices));

	hdmi_init_pm_clock();
	sh7372_pm_init();

@@ -1653,6 +1657,6 @@ MACHINE_START(MACKEREL, "mackerel")
	.init_irq	= sh7372_init_irq,
	.handle_irq	= shmobile_handle_irq_intc,
	.init_machine	= mackerel_init,
-	.init_late	= shmobile_init_late,
+	.init_late	= sh7372_pm_init_late,
	.timer		= &shmobile_timer,
 MACHINE_END

@@ -1,24 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <mach/common.h>
-
-void __init shmobile_init_late(void)
-{
-	shmobile_suspend_init();
-	shmobile_cpuidle_init();
-}

@@ -16,51 +16,38 @@
 #include <asm/cpuidle.h>
 #include <asm/io.h>

-static void shmobile_enter_wfi(void)
+int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+		       int index)
 {
	cpu_do_idle();
-}
-
-void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = {
-	shmobile_enter_wfi, /* regular sleep mode */
-};
-
-static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
-				  struct cpuidle_driver *drv,
-				  int index)
-{
-	shmobile_cpuidle_modes[index]();
-
-	return index;
+	return 0;
 }

 static struct cpuidle_device shmobile_cpuidle_dev;
-static struct cpuidle_driver shmobile_cpuidle_driver = {
+static struct cpuidle_driver shmobile_cpuidle_default_driver = {
	.name			= "shmobile_cpuidle",
	.owner			= THIS_MODULE,
	.en_core_tk_irqen	= 1,
	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.states[0].enter	= shmobile_enter_wfi,
	.safe_state_index	= 0, /* C1 */
	.state_count		= 1,
 };

-void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver;
+
+void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
+{
+	cpuidle_drv = drv;
+}

 int shmobile_cpuidle_init(void)
 {
	struct cpuidle_device *dev = &shmobile_cpuidle_dev;
-	struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
-	int i;
-
-	for (i = 0; i < CPUIDLE_STATE_MAX; i++)
-		drv->states[i].enter = shmobile_cpuidle_enter;

-	if (shmobile_cpuidle_setup)
-		shmobile_cpuidle_setup(drv);
-
-	cpuidle_register_driver(drv);
+	cpuidle_register_driver(cpuidle_drv);

-	dev->state_count = drv->state_count;
+	dev->state_count = cpuidle_drv->state_count;
	cpuidle_register_device(dev);

	return 0;

@@ -13,8 +13,10 @@ extern int shmobile_clk_init(void);
 extern void shmobile_handle_irq_intc(struct pt_regs *);
 extern struct platform_suspend_ops shmobile_suspend_ops;
 struct cpuidle_driver;
-extern void (*shmobile_cpuidle_modes[])(void);
-extern void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+struct cpuidle_device;
+extern int shmobile_enter_wfi(struct cpuidle_device *dev,
+			      struct cpuidle_driver *drv, int index);
+extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);

 extern void sh7367_init_irq(void);
 extern void sh7367_map_io(void);

@@ -75,8 +77,6 @@ extern void r8a7740_meram_workaround(void);

 extern void r8a7779_register_twd(void);

-extern void shmobile_init_late(void);
-
 #ifdef CONFIG_SUSPEND
 int shmobile_suspend_init(void);
 #else

@@ -100,4 +100,10 @@ static inline int shmobile_cpu_is_dead(unsigned int cpu) { return 1; }

 extern void shmobile_smp_init_cpus(unsigned int ncores);

+static inline void shmobile_init_late(void)
+{
+	shmobile_suspend_init();
+	shmobile_cpuidle_init();
+}
+
 #endif /* __ARCH_MACH_COMMON_H */

@@ -12,6 +12,8 @@

 #include <linux/pm_domain.h>

+#define DEFAULT_DEV_LATENCY_NS	250000
+
 struct platform_device;

 struct rmobile_pm_domain {

@@ -29,16 +31,33 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
	return container_of(d, struct rmobile_pm_domain, genpd);
 }

+struct pm_domain_device {
+	const char *domain_name;
+	struct platform_device *pdev;
+};
+
 #ifdef CONFIG_PM
-extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd);
-extern void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-					 struct platform_device *pdev);
-extern void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-				     struct rmobile_pm_domain *rmobile_sd);
+extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num);
+extern void rmobile_add_device_to_domain_td(const char *domain_name,
+					    struct platform_device *pdev,
+					    struct gpd_timing_data *td);
+
+static inline void rmobile_add_device_to_domain(const char *domain_name,
+						struct platform_device *pdev)
+{
+	rmobile_add_device_to_domain_td(domain_name, pdev, NULL);
+}
+
+extern void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+					   int size);
 #else
-#define rmobile_init_pm_domain(pd) do { } while (0)
-#define rmobile_add_device_to_domain(pd, pdev) do { } while (0)
-#define rmobile_pm_add_subdomain(pd, sd) do { } while (0)
+
+#define rmobile_init_domains(domains, num) do { } while (0)
+#define rmobile_add_device_to_domain_td(name, pdev, td) do { } while (0)
+#define rmobile_add_device_to_domain(name, pdev) do { } while (0)
+
+static inline void rmobile_add_devices_to_domains(struct pm_domain_device d[],
+						  int size) {}
 #endif /* CONFIG_PM */

 #endif /* PM_RMOBILE_H */

@@ -607,9 +607,9 @@ enum {
 };

 #ifdef CONFIG_PM
-extern struct rmobile_pm_domain r8a7740_pd_a4s;
-extern struct rmobile_pm_domain r8a7740_pd_a3sp;
-extern struct rmobile_pm_domain r8a7740_pd_a4lc;
+extern void __init r8a7740_init_pm_domains(void);
+#else
+static inline void r8a7740_init_pm_domains(void) {}
 #endif /* CONFIG_PM */

 #endif /* __ASM_R8A7740_H__ */

@@ -347,17 +347,9 @@ extern int r8a7779_sysc_power_down(struct r8a7779_pm_ch *r8a7779_ch);
 extern int r8a7779_sysc_power_up(struct r8a7779_pm_ch *r8a7779_ch);

 #ifdef CONFIG_PM
-extern struct r8a7779_pm_domain r8a7779_sh4a;
-extern struct r8a7779_pm_domain r8a7779_sgx;
-extern struct r8a7779_pm_domain r8a7779_vdp1;
-extern struct r8a7779_pm_domain r8a7779_impx3;
-
-extern void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd);
-extern void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-					 struct platform_device *pdev);
+extern void __init r8a7779_init_pm_domains(void);
 #else
-#define r8a7779_init_pm_domain(pd) do { } while (0)
-#define r8a7779_add_device_to_domain(pd, pdev) do { } while (0)
+static inline void r8a7779_init_pm_domains(void) {}
 #endif /* CONFIG_PM */

 extern struct smp_operations r8a7779_smp_ops;

@@ -478,21 +478,17 @@ extern struct clk sh7372_fsibck_clk;
 extern struct clk sh7372_fsidiva_clk;
 extern struct clk sh7372_fsidivb_clk;

-#ifdef CONFIG_PM
-extern struct rmobile_pm_domain sh7372_pd_a4lc;
-extern struct rmobile_pm_domain sh7372_pd_a4mp;
-extern struct rmobile_pm_domain sh7372_pd_d4;
-extern struct rmobile_pm_domain sh7372_pd_a4r;
-extern struct rmobile_pm_domain sh7372_pd_a3rv;
-extern struct rmobile_pm_domain sh7372_pd_a3ri;
-extern struct rmobile_pm_domain sh7372_pd_a4s;
-extern struct rmobile_pm_domain sh7372_pd_a3sp;
-extern struct rmobile_pm_domain sh7372_pd_a3sg;
-#endif /* CONFIG_PM */
-
 extern void sh7372_intcs_suspend(void);
 extern void sh7372_intcs_resume(void);
 extern void sh7372_intca_suspend(void);
 extern void sh7372_intca_resume(void);

+#ifdef CONFIG_PM
+extern void __init sh7372_init_pm_domains(void);
+#else
+static inline void sh7372_init_pm_domains(void) {}
+#endif
+
+extern void __init sh7372_pm_init_late(void);
+
 #endif /* __ASM_SH7372_H__ */

@@ -21,14 +21,6 @@ static int r8a7740_pd_a4s_suspend(void)
	return -EBUSY;
 }

-struct rmobile_pm_domain r8a7740_pd_a4s = {
-	.genpd.name = "A4S",
-	.bit_shift = 10,
-	.gov = &pm_domain_always_on_gov,
-	.no_debug = true,
-	.suspend = r8a7740_pd_a4s_suspend,
-};
-
 static int r8a7740_pd_a3sp_suspend(void)
 {
	/*

@@ -38,17 +30,31 @@ static int r8a7740_pd_a3sp_suspend(void)
	return console_suspend_enabled ? 0 : -EBUSY;
 }

-struct rmobile_pm_domain r8a7740_pd_a3sp = {
-	.genpd.name = "A3SP",
-	.bit_shift = 11,
-	.gov = &pm_domain_always_on_gov,
-	.no_debug = true,
-	.suspend = r8a7740_pd_a3sp_suspend,
+static struct rmobile_pm_domain r8a7740_pm_domains[] = {
+	{
+		.genpd.name	= "A4S",
+		.bit_shift	= 10,
+		.gov		= &pm_domain_always_on_gov,
+		.no_debug	= true,
+		.suspend	= r8a7740_pd_a4s_suspend,
+	},
+	{
+		.genpd.name	= "A3SP",
+		.bit_shift	= 11,
+		.gov		= &pm_domain_always_on_gov,
+		.no_debug	= true,
+		.suspend	= r8a7740_pd_a3sp_suspend,
+	},
+	{
+		.genpd.name	= "A4LC",
+		.bit_shift	= 1,
+	},
 };

-struct rmobile_pm_domain r8a7740_pd_a4lc = {
-	.genpd.name = "A4LC",
-	.bit_shift = 1,
-};
+void __init r8a7740_init_pm_domains(void)
+{
+	rmobile_init_domains(r8a7740_pm_domains, ARRAY_SIZE(r8a7740_pm_domains));
+	pm_genpd_add_subdomain_names("A4S", "A3SP");
+}

 #endif /* CONFIG_PM */

@@ -183,7 +183,7 @@ static bool pd_active_wakeup(struct device *dev)
	return true;
 }

-void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
+static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
 {
	struct generic_pm_domain *genpd = &r8a7779_pd->genpd;

@@ -199,44 +199,45 @@ void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
	pd_power_up(&r8a7779_pd->genpd);
 }

-void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-				  struct platform_device *pdev)
+static struct r8a7779_pm_domain r8a7779_pm_domains[] = {
+	{
+		.genpd.name = "SH4A",
+		.ch = {
+			.chan_offs = 0x80, /* PWRSR1 .. PWRER1 */
+			.isr_bit = 16, /* SH4A */
+		},
+	},
+	{
+		.genpd.name = "SGX",
+		.ch = {
+			.chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */
+			.isr_bit = 20, /* SGX */
+		},
+	},
+	{
+		.genpd.name = "VDP1",
+		.ch = {
+			.chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
+			.isr_bit = 21, /* VDP */
+		},
+	},
+	{
+		.genpd.name = "IMPX3",
+		.ch = {
+			.chan_offs = 0x140, /* PWRSR4 .. PWRER4 */
+			.isr_bit = 24, /* IMP */
+		},
+	},
+};
+
+void __init r8a7779_init_pm_domains(void)
 {
-	struct device *dev = &pdev->dev;
+	int j;

-	pm_genpd_add_device(&r8a7779_pd->genpd, dev);
-	if (pm_clk_no_clocks(dev))
-		pm_clk_add(dev, NULL);
+	for (j = 0; j < ARRAY_SIZE(r8a7779_pm_domains); j++)
+		r8a7779_init_pm_domain(&r8a7779_pm_domains[j]);
 }

-struct r8a7779_pm_domain r8a7779_sh4a = {
-	.ch = {
-		.chan_offs = 0x80, /* PWRSR1 .. PWRER1 */
-		.isr_bit = 16, /* SH4A */
-	}
-};
-
-struct r8a7779_pm_domain r8a7779_sgx = {
-	.ch = {
-		.chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */
-		.isr_bit = 20, /* SGX */
-	}
-};
-
-struct r8a7779_pm_domain r8a7779_vdp1 = {
-	.ch = {
-		.chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
-		.isr_bit = 21, /* VDP */
-	}
-};
-
-struct r8a7779_pm_domain r8a7779_impx3 = {
-	.ch = {
-		.chan_offs = 0x140, /* PWRSR4 .. PWRER4 */
-		.isr_bit = 24, /* IMP */
-	}
-};
-
 #endif /* CONFIG_PM */

 void __init r8a7779_pm_init(void)

@@ -134,7 +134,7 @@ static int rmobile_pd_start_dev(struct device *dev)
	return ret;
 }

-void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
+static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
 {
	struct generic_pm_domain *genpd = &rmobile_pd->genpd;
	struct dev_power_governor *gov = rmobile_pd->gov;

@@ -149,19 +149,38 @@ void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
	__rmobile_pd_power_up(rmobile_pd, false);
 }

-void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-				  struct platform_device *pdev)
+void rmobile_init_domains(struct rmobile_pm_domain domains[], int num)
+{
+	int j;
+
+	for (j = 0; j < num; j++)
+		rmobile_init_pm_domain(&domains[j]);
+}
+
+void rmobile_add_device_to_domain_td(const char *domain_name,
+				     struct platform_device *pdev,
+				     struct gpd_timing_data *td)
 {
	struct device *dev = &pdev->dev;

-	pm_genpd_add_device(&rmobile_pd->genpd, dev);
+	__pm_genpd_name_add_device(domain_name, dev, td);
	if (pm_clk_no_clocks(dev))
		pm_clk_add(dev, NULL);
 }

-void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-			      struct rmobile_pm_domain *rmobile_sd)
+void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+				    int size)
 {
-	pm_genpd_add_subdomain(&rmobile_pd->genpd, &rmobile_sd->genpd);
+	struct gpd_timing_data latencies = {
+		.stop_latency_ns = DEFAULT_DEV_LATENCY_NS,
+		.start_latency_ns = DEFAULT_DEV_LATENCY_NS,
+		.save_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+		.restore_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+	};
+	int j;
+
+	for (j = 0; j < size; j++)
+		rmobile_add_device_to_domain_td(data[j].domain_name,
+						data[j].pdev, &latencies);
 }
 #endif /* CONFIG_PM */

@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/bitrev.h>
 #include <linux/console.h>
+#include <asm/cpuidle.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/suspend.h>

@@ -72,20 +73,7 @@

 #ifdef CONFIG_PM

-struct rmobile_pm_domain sh7372_pd_a4lc = {
-	.genpd.name = "A4LC",
-	.bit_shift = 1,
-};
-
-struct rmobile_pm_domain sh7372_pd_a4mp = {
-	.genpd.name = "A4MP",
-	.bit_shift = 2,
-};
-
-struct rmobile_pm_domain sh7372_pd_d4 = {
-	.genpd.name = "D4",
-	.bit_shift = 3,
-};
+#define PM_DOMAIN_ON_OFF_LATENCY_NS	250000

 static int sh7372_a4r_pd_suspend(void)
 {

@@ -94,39 +82,25 @@ static int sh7372_a4r_pd_suspend(void)
	return 0;
 }

-struct rmobile_pm_domain sh7372_pd_a4r = {
-	.genpd.name = "A4R",
-	.bit_shift = 5,
-	.suspend = sh7372_a4r_pd_suspend,
-	.resume = sh7372_intcs_resume,
-};
+static bool a4s_suspend_ready;

-struct rmobile_pm_domain sh7372_pd_a3rv = {
-	.genpd.name = "A3RV",
-	.bit_shift = 6,
-};
-
-struct rmobile_pm_domain sh7372_pd_a3ri = {
-	.genpd.name = "A3RI",
-	.bit_shift = 8,
-};
-
-static int sh7372_pd_a4s_suspend(void)
+static int sh7372_a4s_pd_suspend(void)
 {
	/*
	 * The A4S domain contains the CPU core and therefore it should
-	 * only be turned off if the CPU is in use.
+	 * only be turned off if the CPU is not in use.  This may happen
+	 * during system suspend, when SYSC is going to be used for generating
+	 * resume signals and a4s_suspend_ready is set to let
+	 * sh7372_enter_suspend() know that it can turn A4S off.
	 */
+	a4s_suspend_ready = true;
	return -EBUSY;
 }

-struct rmobile_pm_domain sh7372_pd_a4s = {
-	.genpd.name = "A4S",
-	.bit_shift = 10,
-	.gov = &pm_domain_always_on_gov,
-	.no_debug = true,
-	.suspend = sh7372_pd_a4s_suspend,
-};
+static void sh7372_a4s_pd_resume(void)
+{
+	a4s_suspend_ready = false;
+}

 static int sh7372_a3sp_pd_suspend(void)
 {

@@ -137,18 +111,80 @@ static int sh7372_a3sp_pd_suspend(void)
	return console_suspend_enabled ? 0 : -EBUSY;
 }

-struct rmobile_pm_domain sh7372_pd_a3sp = {
-	.genpd.name = "A3SP",
-	.bit_shift = 11,
-	.gov = &pm_domain_always_on_gov,
-	.no_debug = true,
-	.suspend = sh7372_a3sp_pd_suspend,
+static struct rmobile_pm_domain sh7372_pm_domains[] = {
+	{
+		.genpd.name = "A4LC",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 1,
+	},
+	{
+		.genpd.name = "A4MP",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 2,
+	},
+	{
+		.genpd.name = "D4",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 3,
+	},
+	{
+		.genpd.name = "A4R",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 5,
+		.suspend = sh7372_a4r_pd_suspend,
+		.resume = sh7372_intcs_resume,
+	},
+	{
+		.genpd.name = "A3RV",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 6,
+	},
+	{
+		.genpd.name = "A3RI",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 8,
+	},
+	{
+		.genpd.name = "A4S",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 10,
+		.gov = &pm_domain_always_on_gov,
+		.no_debug = true,
+		.suspend = sh7372_a4s_pd_suspend,
+		.resume = sh7372_a4s_pd_resume,
+	},
+	{
+		.genpd.name = "A3SP",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 11,
+		.gov = &pm_domain_always_on_gov,
+		.no_debug = true,
+		.suspend = sh7372_a3sp_pd_suspend,
+	},
+	{
+		.genpd.name = "A3SG",
+		.genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+		.bit_shift = 13,
+	},
 };

-struct rmobile_pm_domain sh7372_pd_a3sg = {
-	.genpd.name = "A3SG",
-	.bit_shift = 13,
-};
+void __init sh7372_init_pm_domains(void)
+{
+	rmobile_init_domains(sh7372_pm_domains, ARRAY_SIZE(sh7372_pm_domains));
+	pm_genpd_add_subdomain_names("A4LC", "A3RV");
+	pm_genpd_add_subdomain_names("A4R", "A4LC");
+	pm_genpd_add_subdomain_names("A4S", "A3SG");
+	pm_genpd_add_subdomain_names("A4S", "A3SP");
+}

 #endif /* CONFIG_PM */

@@ -304,6 +340,21 @@ static void sh7372_enter_a3sm_common(int pllc0_on)
	sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
	sh7372_enter_sysc(pllc0_on, 1 << 12);
 }
+
+static void sh7372_enter_a4s_common(int pllc0_on)
+{
+	sh7372_intca_suspend();
+	sh7372_set_reset_vector(SMFRAM);
+	sh7372_enter_sysc(pllc0_on, 1 << 10);
+	sh7372_intca_resume();
+}
+
+static void sh7372_pm_setup_smfram(void)
+{
+	memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+}
+#else
+static inline void sh7372_pm_setup_smfram(void) {}
+#endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */

 #ifdef CONFIG_CPU_IDLE

@@ -313,7 +364,8 @@ static int sh7372_do_idle_core_standby(unsigned long unused)
	return 0;
 }

-static void sh7372_enter_core_standby(void)
+static int sh7372_enter_core_standby(struct cpuidle_device *dev,
+				     struct cpuidle_driver *drv, int index)
 {
	sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));

@@ -324,83 +376,102 @@

	/* disable reset vector translation */
	__raw_writel(0, SBAR);
+
+	return 1;
 }

-static void sh7372_enter_a3sm_pll_on(void)
+static int sh7372_enter_a3sm_pll_on(struct cpuidle_device *dev,
+				    struct cpuidle_driver *drv, int index)
 {
	sh7372_enter_a3sm_common(1);
+	return 2;
 }

-static void sh7372_enter_a3sm_pll_off(void)
+static int sh7372_enter_a3sm_pll_off(struct cpuidle_device *dev,
+				     struct cpuidle_driver *drv, int index)
 {
	sh7372_enter_a3sm_common(0);
+	return 3;
 }

-static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
+static int sh7372_enter_a4s(struct cpuidle_device *dev,
+			    struct cpuidle_driver *drv, int index)
 {
-	struct cpuidle_state *state = &drv->states[drv->state_count];
+	unsigned long msk, msk2;

-	snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
-	strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
-	state->exit_latency = 10;
-	state->target_residency = 20 + 10;
-	state->flags = CPUIDLE_FLAG_TIME_VALID;
-	shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
-	drv->state_count++;
+	if (!sh7372_sysc_valid(&msk, &msk2))
+		return sh7372_enter_a3sm_pll_off(dev, drv, index);

-	state = &drv->states[drv->state_count];
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
-	strncpy(state->desc, "A3SM PLL ON", CPUIDLE_DESC_LEN);
-	state->exit_latency = 20;
-	state->target_residency = 30 + 20;
-	state->flags = CPUIDLE_FLAG_TIME_VALID;
-	shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_on;
-	drv->state_count++;
-
-	state = &drv->states[drv->state_count];
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C4");
-	strncpy(state->desc, "A3SM PLL OFF", CPUIDLE_DESC_LEN);
-	state->exit_latency = 120;
-	state->target_residency = 30 + 120;
-	state->flags = CPUIDLE_FLAG_TIME_VALID;
-	shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_off;
-	drv->state_count++;
+	sh7372_setup_sysc(msk, msk2);
+	sh7372_enter_a4s_common(0);
+	return 4;
 }

+static struct cpuidle_driver sh7372_cpuidle_driver = {
+	.name			= "sh7372_cpuidle",
+	.owner			= THIS_MODULE,
+	.en_core_tk_irqen	= 1,
+	.state_count		= 5,
+	.safe_state_index	= 0, /* C1 */
+	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.states[0].enter	= shmobile_enter_wfi,
+	.states[1] = {
+		.name = "C2",
+		.desc = "Core Standby Mode",
+		.exit_latency = 10,
+		.target_residency = 20 + 10,
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.enter = sh7372_enter_core_standby,
+	},
+	.states[2] = {
+		.name = "C3",
+		.desc = "A3SM PLL ON",
+		.exit_latency = 20,
+		.target_residency = 30 + 20,
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.enter = sh7372_enter_a3sm_pll_on,
+	},
+	.states[3] = {
+		.name = "C4",
+		.desc = "A3SM PLL OFF",
+		.exit_latency = 120,
+		.target_residency = 30 + 120,
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.enter = sh7372_enter_a3sm_pll_off,
+	},
+	.states[4] = {
+		.name = "C5",
+		.desc = "A4S PLL OFF",
+		.exit_latency = 240,
+		.target_residency = 30 + 240,
+		.flags = CPUIDLE_FLAG_TIME_VALID,
+		.enter = sh7372_enter_a4s,
+		.disabled = true,
+	},
+};
+
 static void sh7372_cpuidle_init(void)
 {
-	shmobile_cpuidle_setup = sh7372_cpuidle_setup;
+	shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
 }
 #else
 static void sh7372_cpuidle_init(void) {}
 #endif

 #ifdef CONFIG_SUSPEND
-static void sh7372_enter_a4s_common(int pllc0_on)
-{
-	sh7372_intca_suspend();
-	memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
-	sh7372_set_reset_vector(SMFRAM);
-	sh7372_enter_sysc(pllc0_on, 1 << 10);
-	sh7372_intca_resume();
-}
-
 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
	unsigned long msk, msk2;

	/* check active clocks to determine potential wakeup sources */
-	if (sh7372_sysc_valid(&msk, &msk2)) {
-		if (!console_suspend_enabled &&
-		    sh7372_pd_a4s.genpd.status == GPD_STATE_POWER_OFF) {
-			/* convert INTC mask/sense to SYSC mask/sense */
-			sh7372_setup_sysc(msk, msk2);
+	if (sh7372_sysc_valid(&msk, &msk2) && a4s_suspend_ready) {
+		/* convert INTC mask/sense to SYSC mask/sense */
+		sh7372_setup_sysc(msk, msk2);

-			/* enter A4S sleep with PLLC0 off */
-			pr_debug("entering A4S\n");
-			sh7372_enter_a4s_common(0);
-			return 0;
-		}
+		/* enter A4S sleep with PLLC0 off */
+		pr_debug("entering A4S\n");
+		sh7372_enter_a4s_common(0);
+		return 0;
	}

	/* default to enter A3SM sleep with PLLC0 off */

@@ -426,7 +497,7 @@ static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
	 * executed during system suspend and resume, respectively, so
	 * that those functions don't crash while accessing the INTCS.
	 */
-		pm_genpd_poweron(&sh7372_pd_a4r.genpd);
+		pm_genpd_name_poweron("A4R");
		break;
	case PM_POST_SUSPEND:
		pm_genpd_poweroff_unused();

@@ -455,6 +526,14 @@ void __init sh7372_pm_init(void)
	/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
	__raw_writel(0, PDNSEL);

+	sh7372_pm_setup_smfram();
+
	sh7372_suspend_init();
	sh7372_cpuidle_init();
 }
+
+void __init sh7372_pm_init_late(void)
+{
+	shmobile_init_late();
+	pm_genpd_name_attach_cpuidle("A4S", 4);
+}

@@ -673,12 +673,7 @@ void __init r8a7740_add_standard_devices(void)
	r8a7740_i2c_workaround(&i2c0_device);
	r8a7740_i2c_workaround(&i2c1_device);

	/* PM domain */
-	rmobile_init_pm_domain(&r8a7740_pd_a4s);
-	rmobile_init_pm_domain(&r8a7740_pd_a3sp);
-	rmobile_init_pm_domain(&r8a7740_pd_a4lc);
-
-	rmobile_pm_add_subdomain(&r8a7740_pd_a4s, &r8a7740_pd_a3sp);
+	r8a7740_init_pm_domains();

	/* add devices */
	platform_add_devices(r8a7740_early_devices,

@@ -688,16 +683,16 @@ void __init r8a7740_add_standard_devices(void)

	/* add devices to PM domain */

-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scif0_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scif1_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scif2_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scif3_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scif4_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scif5_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scif6_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scif7_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&scifb_device);
-	rmobile_add_device_to_domain(&r8a7740_pd_a3sp,	&i2c1_device);
+	rmobile_add_device_to_domain("A3SP",	&scif0_device);
+	rmobile_add_device_to_domain("A3SP",	&scif1_device);
+	rmobile_add_device_to_domain("A3SP",	&scif2_device);
+	rmobile_add_device_to_domain("A3SP",	&scif3_device);
+	rmobile_add_device_to_domain("A3SP",	&scif4_device);
+	rmobile_add_device_to_domain("A3SP",	&scif5_device);
+	rmobile_add_device_to_domain("A3SP",	&scif6_device);
+	rmobile_add_device_to_domain("A3SP",	&scif7_device);
+	rmobile_add_device_to_domain("A3SP",	&scifb_device);
+	rmobile_add_device_to_domain("A3SP",	&i2c1_device);
 }

 static void __init r8a7740_earlytimer_init(void)

@@ -251,10 +251,7 @@ void __init r8a7779_add_standard_devices(void)
 #endif
	r8a7779_pm_init();

-	r8a7779_init_pm_domain(&r8a7779_sh4a);
-	r8a7779_init_pm_domain(&r8a7779_sgx);
-	r8a7779_init_pm_domain(&r8a7779_vdp1);
-	r8a7779_init_pm_domain(&r8a7779_impx3);
+	r8a7779_init_pm_domains();

	platform_add_devices(r8a7779_early_devices,
			    ARRAY_SIZE(r8a7779_early_devices));

@@ -1001,21 +1001,34 @@ static struct platform_device *sh7372_late_devices[] __initdata = {

 void __init sh7372_add_standard_devices(void)
 {
-	rmobile_init_pm_domain(&sh7372_pd_a4lc);
-	rmobile_init_pm_domain(&sh7372_pd_a4mp);
-	rmobile_init_pm_domain(&sh7372_pd_d4);
-	rmobile_init_pm_domain(&sh7372_pd_a4r);
-	rmobile_init_pm_domain(&sh7372_pd_a3rv);
-	rmobile_init_pm_domain(&sh7372_pd_a3ri);
-	rmobile_init_pm_domain(&sh7372_pd_a4s);
-	rmobile_init_pm_domain(&sh7372_pd_a3sp);
-	rmobile_init_pm_domain(&sh7372_pd_a3sg);
+	struct pm_domain_device domain_devices[] = {
+		{ "A3RV", &vpu_device, },
+		{ "A4MP", &spu0_device, },
+		{ "A4MP", &spu1_device, },
+		{ "A3SP", &scif0_device, },
+		{ "A3SP", &scif1_device, },
+		{ "A3SP", &scif2_device, },
+		{ "A3SP", &scif3_device, },
+		{ "A3SP", &scif4_device, },
+		{ "A3SP", &scif5_device, },
+		{ "A3SP", &scif6_device, },
+		{ "A3SP", &iic1_device, },
+		{ "A3SP", &dma0_device, },
+		{ "A3SP", &dma1_device, },
+		{ "A3SP", &dma2_device, },
+		{ "A3SP", &usb_dma0_device, },
+		{ "A3SP", &usb_dma1_device, },
+		{ "A4R", &iic0_device, },
+		{ "A4R", &veu0_device, },
+		{ "A4R", &veu1_device, },
+		{ "A4R", &veu2_device, },
+		{ "A4R", &veu3_device, },
+		{ "A4R", &jpu_device, },
+		{ "A4R", &tmu00_device, },
+		{ "A4R", &tmu01_device, },
+	};

-	rmobile_pm_add_subdomain(&sh7372_pd_a4lc, &sh7372_pd_a3rv);
-	rmobile_pm_add_subdomain(&sh7372_pd_a4r, &sh7372_pd_a4lc);
-
-	rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sg);
-	rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sp);
+	sh7372_init_pm_domains();

	platform_add_devices(sh7372_early_devices,
			    ARRAY_SIZE(sh7372_early_devices));

@@ -1023,30 +1036,8 @@ void __init sh7372_add_standard_devices(void)
	platform_add_devices(sh7372_late_devices,
			     ARRAY_SIZE(sh7372_late_devices));

-	rmobile_add_device_to_domain(&sh7372_pd_a3rv, &vpu_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu0_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif0_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif2_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif3_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif4_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif5_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif6_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &iic1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma0_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma2_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma0_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &iic0_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu0_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu1_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu2_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu3_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &jpu_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu00_device);
-	rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu01_device);
+	rmobile_add_devices_to_domains(domain_devices,
+				       ARRAY_SIZE(domain_devices));
 }

 static void __init sh7372_earlytimer_init(void)

@@ -248,6 +248,9 @@

 #define MSR_IA32_PERF_STATUS		0x00000198
 #define MSR_IA32_PERF_CTL		0x00000199
+#define MSR_AMD_PSTATE_DEF_BASE		0xc0010064
+#define MSR_AMD_PERF_STATUS		0xc0010063
+#define MSR_AMD_PERF_CTL		0xc0010062

 #define MSR_IA32_MPERF			0x000000e7
 #define MSR_IA32_APERF			0x000000e8

@@ -475,7 +475,7 @@ static __ref int acpi_processor_start(struct acpi_processor *pr)
	acpi_processor_get_limit_info(pr);

	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
-		acpi_processor_power_init(pr, device);
+		acpi_processor_power_init(pr);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						   &processor_cooling_ops);

@@ -509,7 +509,7 @@ static __ref int acpi_processor_start(struct acpi_processor *pr)
 err_thermal_unregister:
	thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
-	acpi_processor_power_exit(pr, device);
+	acpi_processor_power_exit(pr);

	return result;
 }

@@ -620,7 +620,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
		return -EINVAL;
	}

-	acpi_processor_power_exit(pr, device);
+	acpi_processor_power_exit(pr);

	sysfs_remove_link(&device->dev.kobj, "sysdev");

@@ -905,8 +905,6 @@ static int __init acpi_processor_init(void)
	if (acpi_disabled)
		return 0;

-	memset(&errata, 0, sizeof(errata));
-
	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		return result;

@@ -79,6 +79,8 @@ module_param(bm_check_disable, uint, 0000);
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);

+static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
+
 static int disabled_by_idle_boot_param(void)
 {
	return boot_option_idle_override == IDLE_POLL ||

@@ -483,8 +485,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

-		cx.power = obj->integer.value;
-
		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

@@ -1000,7 +1000,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state_usage *state_usage;
-	struct cpuidle_device *dev = &pr->power.dev;
+	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (!pr->flags.power_setup_done)
		return -EINVAL;

@ -1132,6 +1132,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
|
|||
int acpi_processor_hotplug(struct acpi_processor *pr)
|
||||
{
|
||||
int ret = 0;
|
||||
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
|
||||
|
||||
if (disabled_by_idle_boot_param())
|
||||
return 0;
|
||||
|
@ -1147,11 +1148,11 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
|
|||
return -ENODEV;
|
||||
|
||||
cpuidle_pause_and_lock();
|
||||
cpuidle_disable_device(&pr->power.dev);
|
||||
cpuidle_disable_device(dev);
|
||||
acpi_processor_get_power_info(pr);
|
||||
if (pr->flags.power) {
|
||||
acpi_processor_setup_cpuidle_cx(pr);
|
||||
ret = cpuidle_enable_device(&pr->power.dev);
|
||||
ret = cpuidle_enable_device(dev);
|
||||
}
|
||||
cpuidle_resume_and_unlock();
|
||||
|
||||
|
@ -1162,6 +1163,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
|||
{
|
||||
int cpu;
|
||||
struct acpi_processor *_pr;
|
||||
struct cpuidle_device *dev;
|
||||
|
||||
if (disabled_by_idle_boot_param())
|
||||
return 0;
|
||||
|
@ -1192,7 +1194,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
|||
_pr = per_cpu(processors, cpu);
|
||||
if (!_pr || !_pr->flags.power_setup_done)
|
||||
continue;
|
||||
cpuidle_disable_device(&_pr->power.dev);
|
||||
dev = per_cpu(acpi_cpuidle_device, cpu);
|
||||
cpuidle_disable_device(dev);
|
||||
}
|
||||
|
||||
/* Populate Updated C-state information */
|
||||
|
@ -1206,7 +1209,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
|||
acpi_processor_get_power_info(_pr);
|
||||
if (_pr->flags.power) {
|
||||
acpi_processor_setup_cpuidle_cx(_pr);
|
||||
cpuidle_enable_device(&_pr->power.dev);
|
||||
dev = per_cpu(acpi_cpuidle_device, cpu);
|
||||
cpuidle_enable_device(dev);
|
||||
}
|
||||
}
|
||||
put_online_cpus();
|
||||
|
@ -1218,11 +1222,11 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
|||
|
||||
static int acpi_processor_registered;
|
||||
|
||||
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
||||
struct acpi_device *device)
|
||||
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
|
||||
{
|
||||
acpi_status status = 0;
|
||||
int retval;
|
||||
struct cpuidle_device *dev;
|
||||
static int first_run;
|
||||
|
||||
if (disabled_by_idle_boot_param())
|
||||
|
@ -1268,11 +1272,18 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
|||
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
|
||||
acpi_idle_driver.name);
|
||||
}
|
||||
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
if (!dev)
|
||||
return -ENOMEM;
|
||||
per_cpu(acpi_cpuidle_device, pr->id) = dev;
|
||||
|
||||
acpi_processor_setup_cpuidle_cx(pr);
|
||||
|
||||
/* Register per-cpu cpuidle_device. Cpuidle driver
|
||||
* must already be registered before registering device
|
||||
*/
|
||||
acpi_processor_setup_cpuidle_cx(pr);
|
||||
retval = cpuidle_register_device(&pr->power.dev);
|
||||
retval = cpuidle_register_device(dev);
|
||||
if (retval) {
|
||||
if (acpi_processor_registered == 0)
|
||||
cpuidle_unregister_driver(&acpi_idle_driver);
|
||||
|
@ -1283,14 +1294,15 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int acpi_processor_power_exit(struct acpi_processor *pr,
|
||||
struct acpi_device *device)
|
||||
int acpi_processor_power_exit(struct acpi_processor *pr)
|
||||
{
|
||||
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
|
||||
|
||||
if (disabled_by_idle_boot_param())
|
||||
return 0;
|
||||
|
||||
if (pr->flags.power) {
|
||||
cpuidle_unregister_device(&pr->power.dev);
|
||||
cpuidle_unregister_device(dev);
|
||||
acpi_processor_registered--;
|
||||
if (acpi_processor_registered == 0)
|
||||
cpuidle_unregister_driver(&acpi_idle_driver);
|
||||
|
|
|
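The hunks above stop embedding the cpuidle device in struct acpi_processor_power and instead allocate one per CPU behind a per-CPU pointer. A minimal sketch of that ownership pattern, with hypothetical my_* names (not the kernel's code, just its shape):

	/* Sketch of the per-CPU device pattern above; my_* names are made up. */
	static DEFINE_PER_CPU(struct cpuidle_device *, my_cpuidle_device);

	static int my_power_init(int cpu_id)
	{
		struct cpuidle_device *dev;

		/* Allocate separately instead of embedding the device in a
		 * larger per-processor structure, decoupling the lifetimes. */
		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;

		per_cpu(my_cpuidle_device, cpu_id) = dev;
		return cpuidle_register_device(dev);
	}

	static void my_power_exit(int cpu_id)
	{
		struct cpuidle_device *dev = per_cpu(my_cpuidle_device, cpu_id);

		cpuidle_unregister_device(dev);
		per_cpu(my_cpuidle_device, cpu_id) = NULL;
		kfree(dev);
	}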
@@ -324,6 +324,34 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
 	return result;
 }
 
+#ifdef CONFIG_X86
+/*
+ * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
+ * in their ACPI data. Calculate the real values and fix up the _PSS data.
+ */
+static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
+{
+	u32 hi, lo, fid, did;
+	int index = px->control & 0x00000007;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return;
+
+	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+	    || boot_cpu_data.x86 == 0x11) {
+		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+		fid = lo & 0x3f;
+		did = (lo >> 6) & 7;
+		if (boot_cpu_data.x86 == 0x10)
+			px->core_frequency = (100 * (fid + 0x10)) >> did;
+		else
+			px->core_frequency = (100 * (fid + 8)) >> did;
+	}
+}
+#else
+static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
+#endif
+
 static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 {
 	int result = 0;
@@ -379,6 +407,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 			goto end;
 		}
 
+		amd_fixup_frequency(px, i);
+
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
 				  i,
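amd_fixup_frequency() recomputes each P-state's frequency from the hardware FID/DID fields because _PSS only reports 100 MHz granularity. Worked through the family-0x10 branch with sample values (the fid/did numbers are mine, for illustration):

	/* Standalone demonstration of the family-0x10 formula used above. */
	#include <stdio.h>

	static unsigned int fam10h_mhz(unsigned int fid, unsigned int did)
	{
		/* core frequency = 100 MHz * (fid + 0x10), halved did times */
		return (100 * (fid + 0x10)) >> did;
	}

	int main(void)
	{
		printf("%u MHz\n", fam10h_mhz(0x0f, 0));	/* 3100 MHz */
		printf("%u MHz\n", fam10h_mhz(0x0f, 1));	/* 1550 MHz */
		return 0;
	}

The second result, 1550 MHz, is exactly the kind of 50 MHz multiple the rounded ACPI data cannot express.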
@@ -23,6 +23,7 @@
 #include <linux/idr.h>
 
 #include "base.h"
+#include "power/power.h"
 
 /* For automatically allocated device IDs */
 static DEFINE_IDA(platform_devid_ida);
@@ -983,6 +984,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
 		dev = &devs[i]->dev;
 
 		if (!dev->devres_head.next) {
+			pm_runtime_early_init(dev);
 			INIT_LIST_HEAD(&dev->devres_head);
 			list_add_tail(&dev->devres_head,
 				      &early_platform_device_list);
@@ -53,6 +53,24 @@
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
+static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
+{
+	struct generic_pm_domain *genpd = NULL, *gpd;
+
+	if (IS_ERR_OR_NULL(domain_name))
+		return NULL;
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+		if (!strcmp(gpd->name, domain_name)) {
+			genpd = gpd;
+			break;
+		}
+	}
+	mutex_unlock(&gpd_list_lock);
+	return genpd;
+}
+
 #ifdef CONFIG_PM
 
 struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
 	return ret;
 }
 
+/**
+ * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
+ * @domain_name: Name of the PM domain to power up.
+ */
+int pm_genpd_name_poweron(const char *domain_name)
+{
+	struct generic_pm_domain *genpd;
+
+	genpd = pm_genpd_lookup_name(domain_name);
+	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
+				     struct device *dev)
+{
+	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
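With the lookup helper in place, platform code can power a domain up by name without holding a pointer to it. A hedged usage sketch; the domain name is an example, not taken from this diff:

	/* Hypothetical board code: power up a PM domain by name. */
	static int board_poweron_core_domain(void)
	{
		int ret;

		/* "A4S" is illustrative; any name registered in genpd->name
		 * works, and -EINVAL comes back if nothing matches. */
		ret = pm_genpd_name_poweron("A4S");
		if (ret)
			pr_err("PM domain poweron failed: %d\n", ret);

		return ret;
	}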
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	not_suspended = 0;
 	list_for_each_entry(pdd, &genpd->dev_list, list_node)
 		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
-		    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
+		    || pdd->dev->power.irq_safe))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
 	might_sleep_if(!genpd->dev_irq_safe);
 
-	if (dev_gpd_data(dev)->always_on)
-		return -EBUSY;
-
 	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
 	if (stop_ok && !stop_ok(dev))
 		return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 
 	/* If power.irq_safe, the PM domain is never powered off. */
 	if (dev->power.irq_safe)
-		return genpd_start_dev(genpd, dev);
+		return genpd_start_dev_no_timing(genpd, dev);
 
 	mutex_lock(&genpd->lock);
 	ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #ifdef CONFIG_PM_SLEEP
 
+/**
+ * pm_genpd_present - Check if the given PM domain has been initialized.
+ * @genpd: PM domain to check.
+ */
+static bool pm_genpd_present(struct generic_pm_domain *genpd)
+{
+	struct generic_pm_domain *gpd;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return false;
+
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+		if (gpd == genpd)
+			return true;
+
+	return false;
+}
+
 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 				    struct device *dev)
 {
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
- * This function is only called in "noirq" stages of system power transitions,
- * so it need not acquire locks (all of the "noirq" callbacks are executed
- * sequentially, so it is guaranteed that it will never run twice in parallel).
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
 */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
@@ -776,6 +828,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 	}
 }
 
+/**
+ * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * @genpd: PM domain to power on.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
+ */
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+{
+	struct gpd_link *link;
+
+	if (genpd->status != GPD_STATE_POWER_OFF)
+		return;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		pm_genpd_sync_poweron(link->master);
+		genpd_sd_counter_inc(link->master);
+	}
+
+	if (genpd->power_on)
+		genpd->power_on(genpd);
+
+	genpd->status = GPD_STATE_ACTIVE;
+}
+
 /**
  * resume_needed - Check whether to resume a device before system suspend.
  * @dev: Device to check.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+	if (genpd->suspend_power_off
 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
 		return 0;
 
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+	if (genpd->suspend_power_off
 	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
 		return 0;
 
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	 * guaranteed that this function will never run twice in parallel for
 	 * the same PM domain, so it is not necessary to use locking here.
 	 */
-	pm_genpd_poweron(genpd);
+	pm_genpd_sync_poweron(genpd);
 	genpd->suspended_count--;
 
 	return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-		0 : genpd_stop_dev(genpd, dev);
+	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
 }
 
 /**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-		0 : genpd_start_dev(genpd, dev);
+	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	if (genpd->suspended_count++ == 0) {
 		/*
 		 * The boot kernel might put the domain into arbitrary state,
-		 * so make it appear as powered off to pm_genpd_poweron(), so
-		 * that it tries to power it on in case it was really off.
+		 * so make it appear as powered off to pm_genpd_sync_poweron(),
+		 * so that it tries to power it on in case it was really off.
 		 */
 		genpd->status = GPD_STATE_POWER_OFF;
 		if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	if (genpd->suspend_power_off)
 		return 0;
 
-	pm_genpd_poweron(genpd);
+	pm_genpd_sync_poweron(genpd);
 
-	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+	return genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
 	}
 }
 
+/**
+ * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
+ * @dev: Device that normally is marked as "always on" to switch power for.
+ *
+ * This routine may only be called during the system core (syscore) suspend or
+ * resume phase for devices whose "always on" flags are set.
+ */
+void pm_genpd_syscore_switch(struct device *dev, bool suspend)
+{
+	struct generic_pm_domain *genpd;
+
+	genpd = dev_to_genpd(dev);
+	if (!pm_genpd_present(genpd))
+		return;
+
+	if (suspend) {
+		genpd->suspended_count++;
+		pm_genpd_sync_poweroff(genpd);
+	} else {
+		pm_genpd_sync_poweron(genpd);
+		genpd->suspended_count--;
+	}
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
+
 #else
 
 #define pm_genpd_prepare		NULL
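pm_genpd_syscore_switch() is exported for always-on devices that must still drop domain power around the syscore phase; the SH timer drivers later in this section call pm_genpd_syscore_poweroff() and pm_genpd_syscore_poweron(). Those helpers are presumably thin inline wrappers in pm_domain.h along these lines (an assumption, not quoted from this diff):

	/* Assumed shape of the helpers used by sh_cmt/sh_mtu2/sh_tmu below. */
	static inline void pm_genpd_syscore_poweroff(struct device *dev)
	{
		pm_genpd_syscore_switch(dev, true);
	}

	static inline void pm_genpd_syscore_poweron(struct device *dev)
	{
		pm_genpd_syscore_switch(dev, false);
	}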
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
 	return __pm_genpd_add_device(genpd, dev, td);
 }
 
+/**
+ * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
+ * @domain_name: Name of the PM domain to add the device to.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
+			       struct gpd_timing_data *td)
+{
+	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
+}
+
 /**
  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
  * @genpd: PM domain to remove the device from.
@@ -1454,26 +1569,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 	return ret;
 }
 
-/**
- * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "always on" flag.
- */
-void pm_genpd_dev_always_on(struct device *dev, bool val)
-{
-	struct pm_subsys_data *psd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-
-	psd = dev_to_psd(dev);
-	if (psd && psd->domain_data)
-		to_gpd_data(psd->domain_data)->always_on = val;
-
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
-
 /**
  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
  * @dev: Device to set/unset the flag for.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	struct gpd_link *link;
 	int ret = 0;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+	    || genpd == subdomain)
 		return -EINVAL;
 
  start:
@@ -1551,6 +1647,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	return ret;
 }
 
+/**
+ * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
+ * @master_name: Name of the master PM domain to add the subdomain to.
+ * @subdomain_name: Name of the subdomain to be added.
+ */
+int pm_genpd_add_subdomain_names(const char *master_name,
+				 const char *subdomain_name)
+{
+	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
+
+	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
+		return -EINVAL;
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+		if (!master && !strcmp(gpd->name, master_name))
+			master = gpd;
+
+		if (!subdomain && !strcmp(gpd->name, subdomain_name))
+			subdomain = gpd;
+
+		if (master && subdomain)
+			break;
+	}
+	mutex_unlock(&gpd_list_lock);
+
+	return pm_genpd_add_subdomain(master, subdomain);
+}
+
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
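The name-based variants let a platform wire up its domain topology with strings alone, before any generic_pm_domain pointers are exported. A hedged sketch with made-up domain names:

	/* Hypothetical platform init using the name-based genpd calls. */
	static void __init board_init_pm_domains(struct platform_device *lcdc)
	{
		/* Make "A3RV" a subdomain of "A4LC"; both names are examples. */
		if (pm_genpd_add_subdomain_names("A4LC", "A3RV"))
			pr_warn("could not link PM domains\n");

		/* Add a device to a domain by name, with no timing data. */
		if (__pm_genpd_name_add_device("A4LC", &lcdc->dev, NULL))
			pr_warn("could not add device to PM domain\n");
	}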
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
-int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+/**
+ * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
+ * @genpd: PM domain to be connected with cpuidle.
+ * @state: cpuidle state this domain can disable/enable.
+ *
+ * Make a PM domain behave as though it contained a CPU core, that is, instead
+ * of calling its power down routine it will enable the given cpuidle state so
+ * that the cpuidle subsystem can power it down (if possible and desirable).
+ */
+int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 {
 	struct cpuidle_driver *cpuidle_drv;
 	struct gpd_cpu_data *cpu_data;
@@ -1753,7 +1887,24 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 	goto out;
 }
 
-int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+/**
+ * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
+ * @name: Name of the domain to connect to cpuidle.
+ * @state: cpuidle state this domain can manipulate.
+ */
+int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
+}
+
+/**
+ * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
+ * @genpd: PM domain to remove the cpuidle connection from.
+ *
+ * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
+ * given PM domain.
+ */
+int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 {
 	struct gpd_cpu_data *cpu_data;
 	struct cpuidle_state *idle_state;
@@ -1784,6 +1935,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 	return ret;
 }
 
+/**
+ * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
+ * @name: Name of the domain to disconnect cpuidle from.
+ */
+int pm_genpd_name_detach_cpuidle(const char *name)
+{
+	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
 static int async_error;
 
 /**
- * device_pm_init - Initialize the PM-related part of a device object.
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
  * @dev: Device object being initialized.
  */
-void device_pm_init(struct device *dev)
+void device_pm_sleep_init(struct device *dev)
 {
 	dev->power.is_prepared = false;
 	dev->power.is_suspended = false;
 	init_completion(&dev->power.completion);
 	complete_all(&dev->power.completion);
 	dev->power.wakeup = NULL;
-	spin_lock_init(&dev->power.lock);
-	pm_runtime_init(dev);
 	INIT_LIST_HEAD(&dev->power.entry);
-	dev->power.power_state = PMSG_INVALID;
 }
 
 /**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
+	if (dev->power.syscore)
+		goto Out;
+
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
 	error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
 	TRACE_RESUME(error);
 	return error;
 }
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
+	if (dev->power.syscore)
+		goto Out;
+
 	if (dev->pm_domain) {
 		info = "early power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 
 	error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
 	TRACE_RESUME(error);
 	return error;
 }
@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 	int error = 0;
-	bool put = false;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
+	if (dev->power.syscore)
+		goto Complete;
+
 	dpm_wait(dev->parent, async);
 	device_lock(dev);
 
@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 		goto Unlock;
 
 	pm_runtime_enable(dev);
-	put = true;
 
 	if (dev->pm_domain) {
 		info = "power domain ";
@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
 	device_unlock(dev);
 
  Complete:
 	complete_all(&dev->power.completion);
 
 	TRACE_RESUME(error);
 
-	if (put)
-		pm_runtime_put_sync(dev);
-
 	return error;
 }
 
@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
 	void (*callback)(struct device *) = NULL;
 	char *info = NULL;
 
+	if (dev->power.syscore)
+		return;
+
 	device_lock(dev);
 
 	if (dev->pm_domain) {
@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
 	}
 
 	device_unlock(dev);
+
+	pm_runtime_put_sync(dev);
 }
 
 /**
@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 
+	if (dev->power.syscore)
+		return 0;
+
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 
+	if (dev->power.syscore)
+		return 0;
+
 	if (dev->pm_domain) {
 		info = "late power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)
 
 	error = dpm_suspend_noirq(state);
 	if (error) {
-		dpm_resume_early(state);
+		dpm_resume_early(resume_event(state));
 		return error;
 	}
 
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (async_error)
 		goto Complete;
 
-	pm_runtime_get_noresume(dev);
+	/*
+	 * If a device configured to wake up the system from sleep states
+	 * has been suspended at run time and there's a resume request pending
+	 * for it, this is equivalent to the device signaling wakeup, so the
+	 * system suspend operation should be aborted.
+	 */
 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
 		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
-		pm_runtime_put_sync(dev);
 		async_error = -EBUSY;
 		goto Complete;
 	}
 
+	if (dev->power.syscore)
+		goto Complete;
+
 	device_lock(dev);
 
 	if (dev->pm_domain) {
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
  Complete:
 	complete_all(&dev->power.completion);
 
-	if (error) {
-		pm_runtime_put_sync(dev);
+	if (error)
 		async_error = error;
-	} else if (dev->power.is_suspended) {
+	else if (dev->power.is_suspended)
 		__pm_runtime_disable(dev, false);
-	}
 
 	return error;
 }
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	char *info = NULL;
 	int error = 0;
 
+	if (dev->power.syscore)
+		return 0;
+
+	/*
+	 * If a device's parent goes into runtime suspend at the wrong time,
+	 * it won't be possible to resume the device.  To prevent this we
+	 * block runtime suspend here, during the prepare phase, and allow
+	 * it again during the complete phase.
+	 */
+	pm_runtime_get_noresume(dev);
+
 	device_lock(dev);
 
 	dev->power.wakeup_path = device_may_wakeup(dev);
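Two themes run through the main.c hunks: device_prepare() now takes a runtime PM reference that device_complete() drops, so a parent cannot runtime-suspend between those phases, and every dpm_* stage skips devices with power.syscore set. A driver opts into the latter with dev_pm_syscore_device(); a minimal sketch under assumed driver names:

	/* Hypothetical probe: flag the device as syscore-managed so the
	 * suspend/resume stages in drivers/base/power/main.c skip it. */
	static int my_timer_probe(struct platform_device *pdev)
	{
		/* Power for this device is then handled in the syscore
		 * phase instead, e.g. via pm_genpd_syscore_poweroff() and
		 * pm_genpd_syscore_poweron() as the SH timers do below. */
		dev_pm_syscore_device(&pdev->dev, true);
		return 0;
	}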
@@ -22,6 +22,7 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <linux/opp.h>
+#include <linux/of.h>
 
 /*
  * Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
 
 	return &dev_opp->head;
 }
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev:	device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+	const struct property *prop;
+	const __be32 *val;
+	int nr;
+
+	prop = of_find_property(dev->of_node, "operating-points", NULL);
+	if (!prop)
+		return -ENODEV;
+	if (!prop->value)
+		return -ENODATA;
+
+	/*
+	 * Each OPP is a set of tuples consisting of frequency and
+	 * voltage like <freq-kHz vol-uV>.
+	 */
+	nr = prop->length / sizeof(u32);
+	if (nr % 2) {
+		dev_err(dev, "%s: Invalid OPP list\n", __func__);
+		return -EINVAL;
+	}
+
+	val = prop->value;
+	while (nr) {
+		unsigned long freq = be32_to_cpup(val++) * 1000;
+		unsigned long volt = be32_to_cpup(val++);
+
+		if (opp_add(dev, freq, volt)) {
+			dev_warn(dev, "%s: Failed to add OPP %ld\n",
+				 __func__, freq);
+			continue;
+		}
+		nr -= 2;
+	}
+
+	return 0;
+}
+#endif
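of_init_opp_table() walks an "operating-points" property of <kHz uV> pairs and feeds each one to opp_add(). A hedged consumer sketch; the property values and function placement are illustrative, while the opp_* calls are the existing OPP library API:

	/*
	 * Example property this parser accepts (values are made up):
	 *
	 *	operating-points = <792000 1100000
	 *			    396000  950000>;
	 */
	static int my_cpufreq_init(struct device *cpu_dev)
	{
		unsigned long freq = ULONG_MAX;
		struct opp *opp;
		int ret;

		ret = of_init_opp_table(cpu_dev);
		if (ret)
			return ret;

		/* Highest registered frequency; OPP lookups run under RCU. */
		rcu_read_lock();
		opp = opp_find_freq_floor(cpu_dev, &freq);
		rcu_read_unlock();

		return IS_ERR(opp) ? PTR_ERR(opp) : 0;
	}

Note that, as merged, the parse loop only decrements nr when opp_add() succeeds, so a persistently failing opp_add() would keep the loop running past the property.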
@@ -1,12 +1,32 @@
 #include <linux/pm_qos.h>
 
+static inline void device_pm_init_common(struct device *dev)
+{
+	if (!dev->power.early_init) {
+		spin_lock_init(&dev->power.lock);
+		dev->power.power_state = PMSG_INVALID;
+		dev->power.early_init = true;
+	}
+}
+
 #ifdef CONFIG_PM_RUNTIME
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+	dev->power.disable_depth = 1;
+	device_pm_init_common(dev);
+}
+
 extern void pm_runtime_init(struct device *dev);
 extern void pm_runtime_remove(struct device *dev);
 
 #else /* !CONFIG_PM_RUNTIME */
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+	device_pm_init_common(dev);
+}
+
 static inline void pm_runtime_init(struct device *dev) {}
 static inline void pm_runtime_remove(struct device *dev) {}
 
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
 	return container_of(entry, struct device, power.entry);
 }
 
-extern void device_pm_init(struct device *dev);
+extern void device_pm_sleep_init(struct device *dev);
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
 extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
 
 #else /* !CONFIG_PM_SLEEP */
 
-static inline void device_pm_init(struct device *dev)
-{
-	spin_lock_init(&dev->power.lock);
-	dev->power.power_state = PMSG_INVALID;
-	pm_runtime_init(dev);
-}
+static inline void device_pm_sleep_init(struct device *dev) {}
 
 static inline void device_pm_add(struct device *dev)
 {
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
 
 #endif /* !CONFIG_PM_SLEEP */
 
+static inline void device_pm_init(struct device *dev)
+{
+	device_pm_init_common(dev);
+	device_pm_sleep_init(dev);
+	pm_runtime_init(dev);
+}
+
 #ifdef CONFIG_PM
 
 /*
@@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
  repeat:
 	if (dev->power.runtime_error)
 		retval = -EINVAL;
+	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+	    && dev->power.runtime_status == RPM_ACTIVE)
+		retval = 1;
 	else if (dev->power.disable_depth > 0)
 		retval = -EACCES;
 	if (retval)
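The new branch in rpm_resume() identifies a device that never left RPM_ACTIVE but currently has runtime PM disabled by the PM core during a system sleep transition (disable_depth == 1 together with is_suspended), and reports success instead of -EACCES. A hedged sketch of the call it unblocks:

	/* Hypothetical system-resume callback: resuming a still-active
	 * device no longer fails while runtime PM is disabled. */
	static int my_resume(struct device *dev)
	{
		int ret;

		/* Returns 1 (already active) in the window described
		 * above instead of -EACCES. */
		ret = pm_runtime_resume(dev);
		return ret < 0 ? ret : 0;
	}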
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
 */
 void wakeup_source_add(struct wakeup_source *ws)
 {
+	unsigned long flags;
+
 	if (WARN_ON(!ws))
 		return;
 
@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
 	ws->active = false;
 	ws->last_time = ktime_get();
 
-	spin_lock_irq(&events_lock);
+	spin_lock_irqsave(&events_lock, flags);
 	list_add_rcu(&ws->entry, &wakeup_sources);
-	spin_unlock_irq(&events_lock);
+	spin_unlock_irqrestore(&events_lock, flags);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_add);
 
@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
 */
 void wakeup_source_remove(struct wakeup_source *ws)
 {
+	unsigned long flags;
+
 	if (WARN_ON(!ws))
 		return;
 
-	spin_lock_irq(&events_lock);
+	spin_lock_irqsave(&events_lock, flags);
 	list_del_rcu(&ws->entry);
-	spin_unlock_irq(&events_lock);
+	spin_unlock_irqrestore(&events_lock, flags);
 	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -649,6 +653,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
+static void print_active_wakeup_sources(void)
+{
+	struct wakeup_source *ws;
+	int active = 0;
+	struct wakeup_source *last_activity_ws = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (ws->active) {
+			pr_info("active wakeup source: %s\n", ws->name);
+			active = 1;
+		} else if (!active &&
+			   (!last_activity_ws ||
+			    ktime_to_ns(ws->last_time) >
+			    ktime_to_ns(last_activity_ws->last_time))) {
+			last_activity_ws = ws;
+		}
+	}
+
+	if (!active && last_activity_ws)
+		pr_info("last active wakeup source: %s\n",
+			last_activity_ws->name);
+	rcu_read_unlock();
+}
+
 /**
  * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
@@ -671,6 +700,10 @@ bool pm_wakeup_pending(void)
 		events_check_enabled = !ret;
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
+
+	if (ret)
+		print_active_wakeup_sources();
+
 	return ret;
 }
 
@@ -723,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
 bool pm_save_wakeup_count(unsigned int count)
 {
 	unsigned int cnt, inpr;
+	unsigned long flags;
 
 	events_check_enabled = false;
-	spin_lock_irq(&events_lock);
+	spin_lock_irqsave(&events_lock, flags);
 	split_counters(&cnt, &inpr);
 	if (cnt == count && inpr == 0) {
 		saved_count = count;
 		events_check_enabled = true;
 	}
-	spin_unlock_irq(&events_lock);
+	spin_unlock_irqrestore(&events_lock, flags);
 	return events_check_enabled;
 }
 
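With events_lock taken via the irqsave variants, the wakeup-source list can be manipulated from interrupt context too, and pm_wakeup_pending() now names the culprit when it aborts a transition. A typical reporting path, sketched with assumed driver names:

	/* Hypothetical wakeup interrupt handler. */
	static irqreturn_t my_wake_irq(int irq, void *data)
	{
		struct device *dev = data;

		/* Count a wakeup event (0: no timed grace period); an
		 * in-progress suspend will abort and, with the new
		 * diagnostics above, log which source was active. */
		pm_wakeup_event(dev, 0);
		return IRQ_HANDLED;
	}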
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_cmt_priv {
 	void __iomem *mapbase;
@@ -52,6 +53,7 @@ struct sh_cmt_priv {
 	struct clock_event_device ced;
 	struct clocksource cs;
 	unsigned long total_cycles;
+	bool cs_enabled;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
@@ -155,6 +157,9 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
 	int k, ret;
 
+	pm_runtime_get_sync(&p->pdev->dev);
+	dev_pm_syscore_device(&p->pdev->dev, true);
+
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
@@ -221,6 +226,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
 
 	/* stop clock */
 	clk_disable(p->clk);
+
+	dev_pm_syscore_device(&p->pdev->dev, false);
+	pm_runtime_put(&p->pdev->dev);
 }
 
 /* private flags */
@@ -451,22 +459,42 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
 	int ret;
 	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 
+	WARN_ON(p->cs_enabled);
+
 	p->total_cycles = 0;
 
 	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
-	if (!ret)
+	if (!ret) {
 		__clocksource_updatefreq_hz(cs, p->rate);
+		p->cs_enabled = true;
+	}
 	return ret;
 }
 
 static void sh_cmt_clocksource_disable(struct clocksource *cs)
 {
-	sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+	WARN_ON(!p->cs_enabled);
+
+	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
+	p->cs_enabled = false;
+}
+
+static void sh_cmt_clocksource_suspend(struct clocksource *cs)
+{
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
+	pm_genpd_syscore_poweroff(&p->pdev->dev);
 }
 
 static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
-	sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+	pm_genpd_syscore_poweron(&p->pdev->dev);
+	sh_cmt_start(p, FLAG_CLOCKSOURCE);
 }
 
 static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
@@ -479,7 +507,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
 	cs->read = sh_cmt_clocksource_read;
 	cs->enable = sh_cmt_clocksource_enable;
 	cs->disable = sh_cmt_clocksource_disable;
-	cs->suspend = sh_cmt_clocksource_disable;
+	cs->suspend = sh_cmt_clocksource_suspend;
 	cs->resume = sh_cmt_clocksource_resume;
 	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
@@ -562,6 +590,16 @@ static int sh_cmt_clock_event_next(unsigned long delta,
 	return 0;
 }
 
+static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
+}
+
+static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
+}
+
 static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 				       char *name, unsigned long rating)
 {
@@ -576,6 +614,8 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 	ced->cpumask = cpumask_of(0);
 	ced->set_next_event = sh_cmt_clock_event_next;
 	ced->set_mode = sh_cmt_clock_event_mode;
+	ced->suspend = sh_cmt_clock_event_suspend;
+	ced->resume = sh_cmt_clock_event_resume;
 
 	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
@@ -670,6 +710,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 		dev_err(&p->pdev->dev, "registration failed\n");
 		goto err1;
 	}
+	p->cs_enabled = false;
 
 	ret = setup_irq(irq, &p->irqaction);
 	if (ret) {
@@ -688,14 +729,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 static int __devinit sh_cmt_probe(struct platform_device *pdev)
 {
 	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
-	if (!is_early_platform_device(pdev))
-		pm_genpd_dev_always_on(&pdev->dev, true);
+	if (!is_early_platform_device(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		return 0;
+		goto out;
 	}
 
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -708,8 +752,19 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
 	if (ret) {
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
+		pm_runtime_idle(&pdev->dev);
+		return ret;
 	}
-	return ret;
+	if (is_early_platform_device(pdev))
+		return 0;
+
+ out:
+	if (cfg->clockevent_rating || cfg->clocksource_rating)
+		pm_runtime_irq_safe(&pdev->dev);
+	else
+		pm_runtime_idle(&pdev->dev);
+
+	return 0;
 }
 
 static int __devexit sh_cmt_remove(struct platform_device *pdev)
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_mtu2_priv {
 	void __iomem *mapbase;
@@ -123,6 +124,9 @@ static int sh_mtu2_enable(struct sh_mtu2_priv *p)
 {
 	int ret;
 
+	pm_runtime_get_sync(&p->pdev->dev);
+	dev_pm_syscore_device(&p->pdev->dev, true);
+
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
@@ -157,6 +161,9 @@ static void sh_mtu2_disable(struct sh_mtu2_priv *p)
 
 	/* stop clock */
 	clk_disable(p->clk);
+
+	dev_pm_syscore_device(&p->pdev->dev, false);
+	pm_runtime_put(&p->pdev->dev);
 }
 
 static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
@@ -208,6 +215,16 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
 	}
 }
 
+static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev);
+}
+
+static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev);
+}
+
 static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
 					char *name, unsigned long rating)
 {
@@ -221,6 +238,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
 	ced->rating = rating;
 	ced->cpumask = cpumask_of(0);
 	ced->set_mode = sh_mtu2_clock_event_mode;
+	ced->suspend = sh_mtu2_clock_event_suspend;
+	ced->resume = sh_mtu2_clock_event_resume;
 
 	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
@@ -305,14 +324,17 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 {
 	struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
-	if (!is_early_platform_device(pdev))
-		pm_genpd_dev_always_on(&pdev->dev, true);
+	if (!is_early_platform_device(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		return 0;
+		goto out;
 	}
 
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -325,8 +347,19 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 	if (ret) {
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
+		pm_runtime_idle(&pdev->dev);
+		return ret;
 	}
-	return ret;
+	if (is_early_platform_device(pdev))
+		return 0;
+
+ out:
+	if (cfg->clockevent_rating)
+		pm_runtime_irq_safe(&pdev->dev);
+	else
+		pm_runtime_idle(&pdev->dev);
+
+	return 0;
 }
 
 static int __devexit sh_mtu2_remove(struct platform_device *pdev)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_tmu_priv {
 	void __iomem *mapbase;
@@ -43,6 +44,8 @@ struct sh_tmu_priv {
 	unsigned long periodic;
 	struct clock_event_device ced;
 	struct clocksource cs;
+	bool cs_enabled;
+	unsigned int enable_count;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
@@ -107,7 +110,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
 	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
 }
 
-static int sh_tmu_enable(struct sh_tmu_priv *p)
+static int __sh_tmu_enable(struct sh_tmu_priv *p)
 {
 	int ret;
 
@@ -135,7 +138,18 @@ static int __sh_tmu_enable(struct sh_tmu_priv *p)
 	return 0;
 }
 
-static void sh_tmu_disable(struct sh_tmu_priv *p)
+static int sh_tmu_enable(struct sh_tmu_priv *p)
+{
+	if (p->enable_count++ > 0)
+		return 0;
+
+	pm_runtime_get_sync(&p->pdev->dev);
+	dev_pm_syscore_device(&p->pdev->dev, true);
+
+	return __sh_tmu_enable(p);
+}
+
+static void __sh_tmu_disable(struct sh_tmu_priv *p)
 {
 	/* disable channel */
 	sh_tmu_start_stop_ch(p, 0);
@@ -147,6 +161,20 @@ static void __sh_tmu_disable(struct sh_tmu_priv *p)
 	clk_disable(p->clk);
 }
 
+static void sh_tmu_disable(struct sh_tmu_priv *p)
+{
+	if (WARN_ON(p->enable_count == 0))
+		return;
+
+	if (--p->enable_count > 0)
+		return;
+
+	__sh_tmu_disable(p);
+
+	dev_pm_syscore_device(&p->pdev->dev, false);
+	pm_runtime_put(&p->pdev->dev);
+}
+
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
 			    int periodic)
 {
@@ -203,15 +231,53 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
 	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
 	int ret;
 
+	if (WARN_ON(p->cs_enabled))
+		return 0;
+
 	ret = sh_tmu_enable(p);
-	if (!ret)
+	if (!ret) {
 		__clocksource_updatefreq_hz(cs, p->rate);
+		p->cs_enabled = true;
+	}
 
 	return ret;
 }
 
 static void sh_tmu_clocksource_disable(struct clocksource *cs)
 {
-	sh_tmu_disable(cs_to_sh_tmu(cs));
+	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+	if (WARN_ON(!p->cs_enabled))
+		return;
+
+	sh_tmu_disable(p);
+	p->cs_enabled = false;
+}
+
+static void sh_tmu_clocksource_suspend(struct clocksource *cs)
+{
+	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+	if (!p->cs_enabled)
+		return;
+
+	if (--p->enable_count == 0) {
+		__sh_tmu_disable(p);
+		pm_genpd_syscore_poweroff(&p->pdev->dev);
+	}
+}
+
+static void sh_tmu_clocksource_resume(struct clocksource *cs)
+{
+	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+	if (!p->cs_enabled)
+		return;
+
+	if (p->enable_count++ == 0) {
+		pm_genpd_syscore_poweron(&p->pdev->dev);
+		__sh_tmu_enable(p);
+	}
 }
 
 static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
@@ -224,6 +290,8 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
 	cs->read = sh_tmu_clocksource_read;
 	cs->enable = sh_tmu_clocksource_enable;
 	cs->disable = sh_tmu_clocksource_disable;
+	cs->suspend = sh_tmu_clocksource_suspend;
+	cs->resume = sh_tmu_clocksource_resume;
 	cs->mask = CLOCKSOURCE_MASK(32);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
@@ -301,6 +369,16 @@ static int sh_tmu_clock_event_next(unsigned long delta,
 	return 0;
 }
 
+static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
+}
+
+static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
+{
+	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
+}
+
 static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
 				       char *name, unsigned long rating)
 {
@@ -316,6 +394,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
 	ced->cpumask = cpumask_of(0);
 	ced->set_next_event = sh_tmu_clock_event_next;
 	ced->set_mode = sh_tmu_clock_event_mode;
+	ced->suspend = sh_tmu_clock_event_suspend;
+	ced->resume = sh_tmu_clock_event_resume;
 
 	dev_info(&p->pdev->dev, "used for clock events\n");
 
@@ -392,6 +472,8 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 		ret = PTR_ERR(p->clk);
 		goto err1;
 	}
+	p->cs_enabled = false;
+	p->enable_count = 0;
 
 	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
 			       cfg->clockevent_rating,
@@ -405,14 +487,17 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 static int __devinit sh_tmu_probe(struct platform_device *pdev)
 {
 	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
-	if (!is_early_platform_device(pdev))
-		pm_genpd_dev_always_on(&pdev->dev, true);
+	if (!is_early_platform_device(pdev)) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
 
 	if (p) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
-		return 0;
+		goto out;
 	}
 
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -425,8 +510,19 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
 	if (ret) {
 		kfree(p);
 		platform_set_drvdata(pdev, NULL);
+		pm_runtime_idle(&pdev->dev);
+		return ret;
 	}
-	return ret;
+	if (is_early_platform_device(pdev))
+		return 0;
+
+ out:
+	if (cfg->clockevent_rating || cfg->clocksource_rating)
+		pm_runtime_irq_safe(&pdev->dev);
+	else
+		pm_runtime_idle(&pdev->dev);
+
+	return 0;
 }
 
 static int __devexit sh_tmu_remove(struct platform_device *pdev)
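sh_tmu now refcounts channel users (enable_count) so the clocksource suspend callback can power the hardware and its PM domain down only when the last user is gone, and resume can bring them back in the right order. The pairing, reduced to its bare shape (a sketch, not driver code):

	/* The get/put discipline behind sh_tmu_enable()/sh_tmu_disable(). */
	struct my_timer {
		unsigned int enable_count;
	};

	static int timer_get(struct my_timer *t)
	{
		if (t->enable_count++ > 0)
			return 0;	/* already powered */
		/* first user: runtime-resume, mark syscore, start hardware */
		return 0;
	}

	static void timer_put(struct my_timer *t)
	{
		if (WARN_ON(t->enable_count == 0))
			return;		/* unbalanced put */
		if (--t->enable_count > 0)
			return;		/* still in use */
		/* last user: stop hardware, unmark syscore, runtime-suspend */
	}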
@@ -179,6 +179,17 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
 	  If in doubt, say N.
 
+config GENERIC_CPUFREQ_CPU0
+	bool "Generic CPU0 cpufreq driver"
+	depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+	select CPU_FREQ_TABLE
+	help
+	  This adds a generic cpufreq driver for CPU0 frequency management.
+	  It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+	  systems which share clock and voltage across all CPUs.
+
+	  If in doubt, say N.
+
 menu "x86 CPU frequency scaling drivers"
 depends on X86
 source "drivers/cpufreq/Kconfig.x86"
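cpufreq-cpu0 binds to a platform device instead of probing hardware on its own, so each machine opts in explicitly. A hedged sketch of the registration a board file might do (the device name matches the driver added here; the call site is an assumption):

	/* Hypothetical machine init: create the device cpufreq-cpu0 binds to. */
	static void __init board_init_cpufreq(void)
	{
		platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0);
	}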
@@ -23,7 +23,8 @@ config X86_ACPI_CPUFREQ
 	help
 	  This driver adds a CPUFreq driver which utilizes the ACPI
 	  Processor Performance States.
-	  This driver also supports Intel Enhanced Speedstep.
+	  This driver also supports Intel Enhanced Speedstep and newer
+	  AMD CPUs.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called acpi-cpufreq.
@@ -32,6 +33,18 @@ config X86_ACPI_CPUFREQ
 
 	  If in doubt, say N.
 
+config X86_ACPI_CPUFREQ_CPB
+	default y
+	bool "Legacy cpb sysfs knob support for AMD CPUs"
+	depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
+	help
+	  The powernow-k8 driver used to provide a sysfs knob called "cpb"
+	  to disable the Core Performance Boosting feature of AMD CPUs. This
+	  file has now been superseded by the more generic "boost" entry.
+
+	  By enabling this option the acpi_cpufreq driver provides the old
+	  entry in addition to the new boost ones, for compatibility reasons.
+
 config ELAN_CPUFREQ
 	tristate "AMD Elan SC400 and SC410"
 	select CPU_FREQ_TABLE
@@ -95,7 +108,8 @@ config X86_POWERNOW_K8
 	select CPU_FREQ_TABLE
 	depends on ACPI && ACPI_PROCESSOR
 	help
-	  This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
+	  This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+	  Support for K10 and newer processors is now in acpi-cpufreq.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called powernow-k8.
@@ -13,13 +13,15 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)		+= freq_table.o
 
+obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)	+= cpufreq-cpu0.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
+#define PFX "acpi-cpufreq: "
+
 enum {
 	UNDEFINED_CAPABLE = 0,
 	SYSTEM_INTEL_MSR_CAPABLE,
+	SYSTEM_AMD_MSR_CAPABLE,
 	SYSTEM_IO_CAPABLE,
 };
 
 #define INTEL_MSR_RANGE		(0xffff)
+#define AMD_MSR_RANGE		(0x7)
+
+#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
+static bool boost_enabled, boost_supported;
+static struct msr __percpu *msrs;
+
+static bool boost_state(unsigned int cpu)
+{
+	u32 lo, hi;
+	u64 msr;
+
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+		msr = lo | ((u64)hi << 32);
+		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_AMD:
+		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+		msr = lo | ((u64)hi << 32);
+		return !(msr & MSR_K7_HWCR_CPB_DIS);
+	}
+	return false;
+}
+
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+	u32 cpu;
+	u32 msr_addr;
+	u64 msr_mask;
+
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		msr_addr = MSR_IA32_MISC_ENABLE;
+		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+		break;
+	case X86_VENDOR_AMD:
+		msr_addr = MSR_K7_HWCR;
+		msr_mask = MSR_K7_HWCR_CPB_DIS;
+		break;
+	default:
+		return;
+	}
+
+	rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+	for_each_cpu(cpu, cpumask) {
+		struct msr *reg = per_cpu_ptr(msrs, cpu);
+		if (enable)
+			reg->q &= ~msr_mask;
+		else
+			reg->q |= msr_mask;
+	}
+
+	wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+static ssize_t _store_boost(const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val = 0;
+
+	if (!boost_supported)
+		return -EINVAL;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret || (val > 1))
+		return -EINVAL;
+
+	if ((val && boost_enabled) || (!val && !boost_enabled))
+		return count;
+
+	get_online_cpus();
+
+	boost_set_msrs(val, cpu_online_mask);
+
+	put_online_cpus();
+
+	boost_enabled = val;
+	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+	return count;
+}
+
+static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
+				  const char *buf, size_t count)
+{
+	return _store_boost(buf, count);
+}
+
+static ssize_t show_global_boost(struct kobject *kobj,
+				 struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct global_attr global_boost = __ATTR(boost, 0644,
+						show_global_boost,
+						store_global_boost);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+			 size_t count)
+{
+	return _store_boost(buf, count);
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+	return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+#endif
+
 static int check_est_cpu(unsigned int cpuid)
 {
@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
|
|||
return cpu_has(cpu, X86_FEATURE_EST);
|
||||
}
|
||||
|
||||
static int check_amd_hwpstate_cpu(unsigned int cpuid)
|
||||
{
|
||||
struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
|
||||
|
||||
return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
|
||||
}
|
||||
|
||||
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
|
||||
{
|
||||
struct acpi_processor_performance *perf;
|
||||
|
@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
|
|||
int i;
|
||||
struct acpi_processor_performance *perf;
|
||||
|
||||
msr &= INTEL_MSR_RANGE;
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
|
||||
msr &= AMD_MSR_RANGE;
|
||||
else
|
||||
msr &= INTEL_MSR_RANGE;
|
||||
|
||||
perf = data->acpi_data;
|
||||
|
||||
for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
|
||||
|
@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
|
|||
{
|
||||
switch (data->cpu_feature) {
|
||||
case SYSTEM_INTEL_MSR_CAPABLE:
|
||||
case SYSTEM_AMD_MSR_CAPABLE:
|
||||
return extract_msr(val, data);
|
||||
case SYSTEM_IO_CAPABLE:
|
||||
return extract_io(val, data);
|
||||
|
@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
|
|||
|
||||
switch (cmd->type) {
|
||||
case SYSTEM_INTEL_MSR_CAPABLE:
|
||||
case SYSTEM_AMD_MSR_CAPABLE:
|
||||
rdmsr(cmd->addr.msr.reg, cmd->val, h);
|
||||
break;
|
||||
case SYSTEM_IO_CAPABLE:
|
||||
|
@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
|
|||
lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
|
||||
wrmsr(cmd->addr.msr.reg, lo, hi);
|
||||
break;
|
||||
case SYSTEM_AMD_MSR_CAPABLE:
|
||||
wrmsr(cmd->addr.msr.reg, cmd->val, 0);
|
||||
break;
|
||||
case SYSTEM_IO_CAPABLE:
|
||||
acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
|
||||
cmd->val,
|
||||
|
@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
|
|||
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
|
||||
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
|
||||
break;
|
||||
case SYSTEM_AMD_MSR_CAPABLE:
|
||||
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
|
||||
cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
|
||||
break;
|
||||
case SYSTEM_IO_CAPABLE:
|
||||
cmd.type = SYSTEM_IO_CAPABLE;
|
||||
perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
|
||||
|
@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
|
|||
cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
|
||||
cmd.val = (u32) perf->states[next_perf_state].control;
|
||||
break;
|
||||
case SYSTEM_AMD_MSR_CAPABLE:
|
||||
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
|
||||
cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
|
||||
cmd.val = (u32) perf->states[next_perf_state].control;
|
||||
break;
|
||||
case SYSTEM_IO_CAPABLE:
|
||||
cmd.type = SYSTEM_IO_CAPABLE;
|
||||
cmd.addr.io.port = perf->control_register.address;
|
||||
|
@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
|
|||
free_percpu(acpi_perf_data);
|
||||
}
|
||||
|
||||
static int boost_notify(struct notifier_block *nb, unsigned long action,
|
||||
void *hcpu)
|
||||
{
|
||||
unsigned cpu = (long)hcpu;
|
||||
const struct cpumask *cpumask;
|
||||
|
||||
cpumask = get_cpu_mask(cpu);
|
||||
|
||||
/*
|
||||
* Clear the boost-disable bit on the CPU_DOWN path so that
|
||||
* this cpu cannot block the remaining ones from boosting. On
|
||||
* the CPU_UP path we simply keep the boost-disable flag in
|
||||
* sync with the current global state.
|
||||
*/
|
||||
|
||||
switch (action) {
|
||||
case CPU_UP_PREPARE:
|
||||
case CPU_UP_PREPARE_FROZEN:
|
||||
boost_set_msrs(boost_enabled, cpumask);
|
||||
break;
|
||||
|
||||
case CPU_DOWN_PREPARE:
|
||||
case CPU_DOWN_PREPARE_FROZEN:
|
||||
boost_set_msrs(1, cpumask);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
|
||||
static struct notifier_block boost_nb = {
|
||||
.notifier_call = boost_notify,
|
||||
};
|
||||
|
||||
/*
|
||||
* acpi_cpufreq_early_init - initialize ACPI P-States library
|
||||
*
|
||||
|
@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
|
||||
cpumask_copy(policy->cpus, cpu_core_mask(cpu));
|
||||
}
|
||||
|
||||
if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
|
||||
cpumask_clear(policy->cpus);
|
||||
cpumask_set_cpu(cpu, policy->cpus);
|
||||
cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
|
||||
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
|
||||
pr_info_once(PFX "overriding BIOS provided _PSD data\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
/* capability check */
|
||||
|
@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
break;
|
||||
case ACPI_ADR_SPACE_FIXED_HARDWARE:
|
||||
pr_debug("HARDWARE addr space\n");
|
||||
if (!check_est_cpu(cpu)) {
|
||||
result = -ENODEV;
|
||||
goto err_unreg;
|
||||
if (check_est_cpu(cpu)) {
|
||||
data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
|
||||
break;
|
||||
}
|
||||
data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
|
||||
break;
|
||||
if (check_amd_hwpstate_cpu(cpu)) {
|
||||
data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
|
||||
break;
|
||||
}
|
||||
result = -ENODEV;
|
||||
goto err_unreg;
|
||||
default:
|
||||
pr_debug("Unknown addr space %d\n",
|
||||
(u32) (perf->control_register.space_id));
|
||||
|
@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
|
|||
|
||||
static struct freq_attr *acpi_cpufreq_attr[] = {
|
||||
&cpufreq_freq_attr_scaling_available_freqs,
|
||||
NULL, /* this is a placeholder for cpb, do not remove */
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
|
|||
.attr = acpi_cpufreq_attr,
|
||||
};
|
||||
|
||||
static void __init acpi_cpufreq_boost_init(void)
|
||||
{
|
||||
if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
|
||||
msrs = msrs_alloc();
|
||||
|
||||
if (!msrs)
|
||||
return;
|
||||
|
||||
boost_supported = true;
|
||||
boost_enabled = boost_state(0);
|
||||
|
||||
get_online_cpus();
|
||||
|
||||
/* Force all MSRs to the same value */
|
||||
boost_set_msrs(boost_enabled, cpu_online_mask);
|
||||
|
||||
register_cpu_notifier(&boost_nb);
|
||||
|
||||
put_online_cpus();
|
||||
} else
|
||||
global_boost.attr.mode = 0444;
|
||||
|
||||
/* We create the boost file in any case, though for systems without
|
||||
* hardware support it will be read-only and hardwired to return 0.
|
||||
*/
|
||||
if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
|
||||
pr_warn(PFX "could not register global boost sysfs file\n");
|
||||
else
|
||||
pr_debug("registered global boost sysfs file\n");
|
||||
}
|
||||
|
||||
static void __exit acpi_cpufreq_boost_exit(void)
|
||||
{
|
||||
sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
|
||||
|
||||
if (msrs) {
|
||||
unregister_cpu_notifier(&boost_nb);
|
||||
|
||||
msrs_free(msrs);
|
||||
msrs = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int __init acpi_cpufreq_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
|
||||
/* this is a sysfs file with a strange name and an even stranger
|
||||
* semantic - per CPU instantiation, but system global effect.
|
||||
* Lets enable it only on AMD CPUs for compatibility reasons and
|
||||
* only if configured. This is considered legacy code, which
|
||||
* will probably be removed at some point in the future.
|
||||
*/
|
||||
if (check_amd_hwpstate_cpu(0)) {
|
||||
struct freq_attr **iter;
|
||||
|
||||
pr_debug("adding sysfs entry for cpb\n");
|
||||
|
||||
for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
|
||||
;
|
||||
|
||||
/* make sure there is a terminator behind it */
|
||||
if (iter[1] == NULL)
|
||||
*iter = &cpb;
|
||||
}
|
||||
#endif
|
||||
|
||||
ret = cpufreq_register_driver(&acpi_cpufreq_driver);
|
||||
if (ret)
|
||||
free_acpi_perf_data();
|
||||
else
|
||||
acpi_cpufreq_boost_init();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
|
|||
{
|
||||
pr_debug("acpi_cpufreq_exit\n");
|
||||
|
||||
acpi_cpufreq_boost_exit();
|
||||
|
||||
cpufreq_unregister_driver(&acpi_cpufreq_driver);
|
||||
|
||||
free_acpi_perf_data();
|
||||
|
|
|
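For readers following the new boost code above: on both vendors the MSR bit is a disable flag, so "enable boost" means clearing the mask, exactly as boost_set_msrs() does per CPU. A minimal stand-alone sketch of that bit manipulation (apply_boost() and the main() harness are illustrative, not part of the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors MSR_K7_HWCR_CPB_DIS from the hunk above (bit 25 of MSR_K7_HWCR). */
#define CPB_DIS_MASK (1ULL << 25)

/* Boost is enabled when the disable bit is clear: clear the mask to
 * enable boost, set it to disable boost. */
static uint64_t apply_boost(uint64_t msr, uint64_t mask, bool enable)
{
	return enable ? (msr & ~mask) : (msr | mask);
}

int main(void)
{
	uint64_t hwcr = CPB_DIS_MASK;	/* boost currently disabled */

	hwcr = apply_boost(hwcr, CPB_DIS_MASK, true);
	printf("boost %s\n", (hwcr & CPB_DIS_MASK) ? "disabled" : "enabled");
	return 0;
}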
@@ -0,0 +1,269 @@
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * The OPP code in function cpu0_set_target() is reused from
 * drivers/cpufreq/omap-cpufreq.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

static unsigned int transition_latency;
static unsigned int voltage_tolerance; /* in percentage */

static struct device *cpu_dev;
static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;

static int cpu0_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, freq_table);
}

static unsigned int cpu0_get_speed(unsigned int cpu)
{
	return clk_get_rate(cpu_clk) / 1000;
}

static int cpu0_set_target(struct cpufreq_policy *policy,
			   unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;
	struct opp *opp;
	unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
	unsigned int index, cpu;
	int ret;

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
					     relation, &index);
	if (ret) {
		pr_err("failed to match target frequency %d: %d\n",
		       target_freq, ret);
		return ret;
	}

	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
	if (freq_Hz < 0)
		freq_Hz = freq_table[index].frequency * 1000;
	freqs.new = freq_Hz / 1000;
	freqs.old = clk_get_rate(cpu_clk) / 1000;

	if (freqs.old == freqs.new)
		return 0;

	for_each_online_cpu(cpu) {
		freqs.cpu = cpu;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	if (cpu_reg) {
		opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
		if (IS_ERR(opp)) {
			pr_err("failed to find OPP for %ld\n", freq_Hz);
			return PTR_ERR(opp);
		}
		volt = opp_get_voltage(opp);
		tol = volt * voltage_tolerance / 100;
		volt_old = regulator_get_voltage(cpu_reg);
	}

	pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
		 freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
		 freqs.new / 1000, volt ? volt / 1000 : -1);

	/* scaling up? scale voltage before frequency */
	if (cpu_reg && freqs.new > freqs.old) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			pr_err("failed to scale voltage up: %d\n", ret);
			freqs.new = freqs.old;
			return ret;
		}
	}

	ret = clk_set_rate(cpu_clk, freqs.new * 1000);
	if (ret) {
		pr_err("failed to set clock rate: %d\n", ret);
		if (cpu_reg)
			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
		return ret;
	}

	/* scaling down? scale voltage after frequency */
	if (cpu_reg && freqs.new < freqs.old) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			pr_err("failed to scale voltage down: %d\n", ret);
			clk_set_rate(cpu_clk, freqs.old * 1000);
			freqs.new = freqs.old;
			return ret;
		}
	}

	for_each_online_cpu(cpu) {
		freqs.cpu = cpu;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	return 0;
}

static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret;

	if (policy->cpu != 0)
		return -EINVAL;

	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
	if (ret) {
		pr_err("invalid frequency table: %d\n", ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;
	policy->cur = clk_get_rate(cpu_clk) / 1000;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the same clock and voltage. Use cpufreq affected_cpus
	 * interface to have all CPUs scaled together.
	 */
	policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
	cpumask_setall(policy->cpus);

	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	return 0;
}

static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);

	return 0;
}

static struct freq_attr *cpu0_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver cpu0_cpufreq_driver = {
	.flags = CPUFREQ_STICKY,
	.verify = cpu0_verify_speed,
	.target = cpu0_set_target,
	.get = cpu0_get_speed,
	.init = cpu0_cpufreq_init,
	.exit = cpu0_cpufreq_exit,
	.name = "generic_cpu0",
	.attr = cpu0_cpufreq_attr,
};

static int __devinit cpu0_cpufreq_driver_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_node_by_path("/cpus/cpu@0");
	if (!np) {
		pr_err("failed to find cpu0 node\n");
		return -ENOENT;
	}

	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		pr_err("failed to get cpu0 device\n");
		ret = -ENODEV;
		goto out_put_node;
	}

	cpu_dev->of_node = np;

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		ret = PTR_ERR(cpu_clk);
		pr_err("failed to get cpu0 clock: %d\n", ret);
		goto out_put_node;
	}

	cpu_reg = regulator_get(cpu_dev, "cpu0");
	if (IS_ERR(cpu_reg)) {
		pr_warn("failed to get cpu0 regulator\n");
		cpu_reg = NULL;
	}

	ret = of_init_opp_table(cpu_dev);
	if (ret) {
		pr_err("failed to init OPP table: %d\n", ret);
		goto out_put_node;
	}

	ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table: %d\n", ret);
		goto out_put_node;
	}

	of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);

	if (of_property_read_u32(np, "clock-latency", &transition_latency))
		transition_latency = CPUFREQ_ETERNAL;

	if (cpu_reg) {
		struct opp *opp;
		unsigned long min_uV, max_uV;
		int i;

		/*
		 * OPP is maintained in order of increasing frequency, and
		 * freq_table initialised from OPP is therefore sorted in the
		 * same order.
		 */
		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
			;
		opp = opp_find_freq_exact(cpu_dev,
					  freq_table[0].frequency * 1000, true);
		min_uV = opp_get_voltage(opp);
		opp = opp_find_freq_exact(cpu_dev,
					  freq_table[i-1].frequency * 1000, true);
		max_uV = opp_get_voltage(opp);
		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
		if (ret > 0)
			transition_latency += ret * 1000;
	}

	ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
	if (ret) {
		pr_err("failed register driver: %d\n", ret);
		goto out_free_table;
	}

	of_node_put(np);
	return 0;

out_free_table:
	opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
	of_node_put(np);
	return ret;
}
late_initcall(cpu0_cpufreq_driver_init);

MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
MODULE_LICENSE("GPL");
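The ordering rule cpu0_set_target() follows is worth calling out: raise the voltage before raising the frequency, and lower it only after the frequency has dropped, so the core never runs faster than its supply allows. A compilable sketch of just that rule, with set_voltage()/set_rate() as hypothetical stand-ins for the regulator and clk calls:

#include <stdio.h>

/* Hypothetical stand-ins for regulator_set_voltage_tol()/clk_set_rate(). */
static int set_voltage(unsigned long uv)  { printf("volt -> %lu uV\n", uv); return 0; }
static int set_rate(unsigned long khz)    { printf("rate -> %lu kHz\n", khz); return 0; }

/* Voltage first when scaling up, voltage last when scaling down. */
static int scale(unsigned long old_khz, unsigned long new_khz, unsigned long new_uv)
{
	int ret;

	if (new_khz > old_khz && (ret = set_voltage(new_uv)))
		return ret;

	if ((ret = set_rate(new_khz)))
		return ret;

	if (new_khz < old_khz && (ret = set_voltage(new_uv)))
		return ret;

	return 0;
}

int main(void)
{
	scale(396000, 792000, 1250000);	/* up: voltage, then rate */
	scale(792000, 396000, 1100000);	/* down: rate, then voltage */
	return 0;
}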
@@ -504,6 +504,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			j_dbs_info->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

@@ -583,6 +584,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		dbs_check_cpu(this_dbs_info);
		mutex_unlock(&this_dbs_info->timer_mutex);

		break;
@@ -761,6 +761,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		dbs_check_cpu(this_dbs_info);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
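Both governor fixes add the same call: after the LIMITS event clamps the current frequency into [min, max], dbs_check_cpu() re-evaluates load so the frequency can move back up once a limit is relaxed, not only when it is tightened. The clamp itself, in miniature:

#include <stdio.h>

/* LIMITS handling in miniature: clamp, then re-evaluate (the fix above). */
static unsigned int clamp_freq(unsigned int cur, unsigned int min, unsigned int max)
{
	if (cur > max)
		return max;
	if (cur < min)
		return min;
	return cur;	/* unchanged: only a re-evaluation can raise it */
}

int main(void)
{
	printf("%u\n", clamp_freq(800000, 1000000, 2000000));
	return 0;
}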
@@ -56,7 +56,7 @@ union msr_longhaul {
/*
 * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
 */
static const int __cpuinitdata samuel1_mults[16] = {
static const int __cpuinitconst samuel1_mults[16] = {
	-1, /* 0000 -> RESERVED */
	30, /* 0001 -> 3.0x */
	40, /* 0010 -> 4.0x */

@@ -75,7 +75,7 @@ static const int __cpuinitdata samuel1_mults[16] = {
	-1, /* 1111 -> RESERVED */
};

static const int __cpuinitdata samuel1_eblcr[16] = {
static const int __cpuinitconst samuel1_eblcr[16] = {
	50, /* 0000 -> RESERVED */
	30, /* 0001 -> 3.0x */
	40, /* 0010 -> 4.0x */

@@ -97,7 +97,7 @@ static const int __cpuinitdata samuel1_eblcr[16] = {
/*
 * VIA C3 Samuel2 Stepping 1->15
 */
static const int __cpuinitdata samuel2_eblcr[16] = {
static const int __cpuinitconst samuel2_eblcr[16] = {
	50, /* 0000 -> 5.0x */
	30, /* 0001 -> 3.0x */
	40, /* 0010 -> 4.0x */

@@ -119,7 +119,7 @@ static const int __cpuinitdata samuel2_eblcr[16] = {
/*
 * VIA C3 Ezra
 */
static const int __cpuinitdata ezra_mults[16] = {
static const int __cpuinitconst ezra_mults[16] = {
	100, /* 0000 -> 10.0x */
	30, /* 0001 -> 3.0x */
	40, /* 0010 -> 4.0x */

@@ -138,7 +138,7 @@ static const int __cpuinitdata ezra_mults[16] = {
	120, /* 1111 -> 12.0x */
};

static const int __cpuinitdata ezra_eblcr[16] = {
static const int __cpuinitconst ezra_eblcr[16] = {
	50, /* 0000 -> 5.0x */
	30, /* 0001 -> 3.0x */
	40, /* 0010 -> 4.0x */

@@ -160,7 +160,7 @@ static const int __cpuinitdata ezra_eblcr[16] = {
/*
 * VIA C3 (Ezra-T) [C5M].
 */
static const int __cpuinitdata ezrat_mults[32] = {
static const int __cpuinitconst ezrat_mults[32] = {
	100, /* 0000 -> 10.0x */
	30, /* 0001 -> 3.0x */
	40, /* 0010 -> 4.0x */

@@ -196,7 +196,7 @@ static const int __cpuinitdata ezrat_mults[32] = {
	-1, /* 1111 -> RESERVED (12.0x) */
};

static const int __cpuinitdata ezrat_eblcr[32] = {
static const int __cpuinitconst ezrat_eblcr[32] = {
	50, /* 0000 -> 5.0x */
	30, /* 0001 -> 3.0x */
	40, /* 0010 -> 4.0x */

@@ -235,7 +235,7 @@ static const int __cpuinitdata ezrat_eblcr[32] = {
/*
 * VIA C3 Nehemiah */

static const int __cpuinitdata nehemiah_mults[32] = {
static const int __cpuinitconst nehemiah_mults[32] = {
	100, /* 0000 -> 10.0x */
	-1, /* 0001 -> 16.0x */
	40, /* 0010 -> 4.0x */

@@ -270,7 +270,7 @@ static const int __cpuinitdata nehemiah_mults[32] = {
	-1, /* 1111 -> 12.0x */
};

static const int __cpuinitdata nehemiah_eblcr[32] = {
static const int __cpuinitconst nehemiah_eblcr[32] = {
	50, /* 0000 -> 5.0x */
	160, /* 0001 -> 16.0x */
	40, /* 0010 -> 4.0x */

@@ -315,7 +315,7 @@ struct mV_pos {
	unsigned short pos;
};

static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
static const struct mV_pos __cpuinitconst vrm85_mV[32] = {
	{1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
	{1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
	{1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},

@@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
	{1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
};

static const unsigned char __cpuinitdata mV_vrm85[32] = {
static const unsigned char __cpuinitconst mV_vrm85[32] = {
	0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
	0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
	0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
	0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
};

static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = {
	{1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
	{1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
	{1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},

@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
	{675, 3}, {650, 2}, {625, 1}, {600, 0}
};

static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
static const unsigned char __cpuinitconst mV_mobilevrm[32] = {
	0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
	0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
	0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
@@ -40,16 +40,6 @@
/* OPP tolerance in percentage */
#define OPP_TOLERANCE 4

#ifdef CONFIG_SMP
struct lpj_info {
	unsigned long ref;
	unsigned int freq;
};

static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
static struct lpj_info global_lpj_ref;
#endif

static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);
static struct clk *mpu_clk;

@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
	}

	freqs.new = omap_getspeed(policy->cpu);
#ifdef CONFIG_SMP
	/*
	 * Note that loops_per_jiffy is not updated on SMP systems in
	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
	 * on frequency transition. We need to update all dependent CPUs.
	 */
	for_each_cpu(i, policy->cpus) {
		struct lpj_info *lpj = &per_cpu(lpj_ref, i);
		if (!lpj->freq) {
			lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
			lpj->freq = freqs.old;
		}

		per_cpu(cpu_data, i).loops_per_jiffy =
			cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
	}

	/* And don't forget to adjust the global one */
	if (!global_lpj_ref.freq) {
		global_lpj_ref.ref = loops_per_jiffy;
		global_lpj_ref.freq = freqs.old;
	}
	loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
					freqs.new);
#endif

done:
	/* notifiers */

@@ -301,9 +266,9 @@ static int __init omap_cpufreq_init(void)
	}

	mpu_dev = omap_device_get_by_hwmod_name("mpu");
	if (!mpu_dev) {
	if (IS_ERR(mpu_dev)) {
		pr_warning("%s: unable to get the mpu device\n", __func__);
		return -EINVAL;
		return PTR_ERR(mpu_dev);
	}

	mpu_reg = regulator_get(mpu_dev, "vcc");
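The block removed above relied on cpufreq_scale(), which rescales loops_per_jiffy linearly with frequency; per the merge summary, that work moves to a generic ARM transition notifier. A sketch of the arithmetic, assuming the kernel helper's ref * new / old behaviour:

#include <stdint.h>
#include <stdio.h>

/* Linear rescale: lpj is proportional to CPU frequency. */
static unsigned long scale_lpj(unsigned long ref_lpj,
			       unsigned int ref_khz, unsigned int new_khz)
{
	return (unsigned long)(((uint64_t)ref_lpj * new_khz) / ref_khz);
}

int main(void)
{
	/* 600 MHz reference lpj rescaled for a 300 MHz transition. */
	printf("%lu\n", scale_lpj(2994176, 600000, 300000));
	return 0;
}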
@@ -48,22 +48,12 @@
#define PFX "powernow-k8: "
#define VERSION "version 2.20.00"
#include "powernow-k8.h"
#include "mperf.h"

/* serialize freq changes */
static DEFINE_MUTEX(fidvid_mutex);

static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);

static int cpu_family = CPU_OPTERON;

/* array to map SW pstate number to acpi state */
static u32 ps_to_as[8];

/* core performance boost */
static bool cpb_capable, cpb_enabled;
static struct msr __percpu *msrs;

static struct cpufreq_driver cpufreq_amd64_driver;

#ifndef CONFIG_SMP

@@ -85,12 +75,6 @@ static u32 find_khz_freq_from_fid(u32 fid)
	return 1000 * find_freq_from_fid(fid);
}

static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
				     u32 pstate)
{
	return data[ps_to_as[pstate]].frequency;
}

/* Return the vco fid for an input fid
 *
 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids

@@ -113,9 +97,6 @@ static int pending_bit_stuck(void)
{
	u32 lo, hi;

	if (cpu_family == CPU_HW_PSTATE)
		return 0;

	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
}

@@ -129,20 +110,6 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
	u32 lo, hi;
	u32 i = 0;

	if (cpu_family == CPU_HW_PSTATE) {
		rdmsr(MSR_PSTATE_STATUS, lo, hi);
		i = lo & HW_PSTATE_MASK;
		data->currpstate = i;

		/*
		 * a workaround for family 11h erratum 311 might cause
		 * an "out-of-range" Pstate if the core is in Pstate-0
		 */
		if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
			data->currpstate = HW_PSTATE_0;

		return 0;
	}
	do {
		if (i++ > 10000) {
			pr_debug("detected change pending stuck\n");

@@ -299,14 +266,6 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data,
	return 0;
}

/* Change hardware pstate by single MSR write */
static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
{
	wrmsr(MSR_PSTATE_CTRL, pstate, 0);
	data->currpstate = pstate;
	return 0;
}

/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
static int transition_fid_vid(struct powernow_k8_data *data,
			      u32 reqfid, u32 reqvid)

@@ -523,8 +482,6 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
static const struct x86_cpu_id powernow_k8_ids[] = {
	/* IO based frequency switching */
	{ X86_VENDOR_AMD, 0xf },
	/* MSR based frequency switching supported */
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);

@@ -560,15 +517,8 @@ static void check_supported_cpu(void *_rc)
				"Power state transitions not supported\n");
			return;
		}
	} else { /* must be a HW Pstate capable processor */
		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
		if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
			cpu_family = CPU_HW_PSTATE;
		else
			return;
		*rc = 0;
	}

	*rc = 0;
}

static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,

@@ -632,18 +582,11 @@ static void print_basics(struct powernow_k8_data *data)
	for (j = 0; j < data->numps; j++) {
		if (data->powernow_table[j].frequency !=
				CPUFREQ_ENTRY_INVALID) {
			if (cpu_family == CPU_HW_PSTATE) {
				printk(KERN_INFO PFX
					" %d : pstate %d (%d MHz)\n", j,
					data->powernow_table[j].index,
					data->powernow_table[j].frequency/1000);
			} else {
			printk(KERN_INFO PFX
					"fid 0x%x (%d MHz), vid 0x%x\n",
					data->powernow_table[j].index & 0xff,
					data->powernow_table[j].frequency/1000,
					data->powernow_table[j].index >> 8);
			}
		}
	}
	if (data->batps)

@@ -651,20 +594,6 @@ static void print_basics(struct powernow_k8_data *data)
			data->batps);
}

static u32 freq_from_fid_did(u32 fid, u32 did)
{
	u32 mhz = 0;

	if (boot_cpu_data.x86 == 0x10)
		mhz = (100 * (fid + 0x10)) >> did;
	else if (boot_cpu_data.x86 == 0x11)
		mhz = (100 * (fid + 8)) >> did;
	else
		BUG();

	return mhz * 1000;
}

static int fill_powernow_table(struct powernow_k8_data *data,
		struct pst_s *pst, u8 maxvid)
{

@@ -824,7 +753,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
{
	u64 control;

	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
	if (!data->acpi_data.state_count)
		return;

	control = data->acpi_data.states[index].control;

@@ -875,10 +804,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
	data->numps = data->acpi_data.state_count;
	powernow_k8_acpi_pst_values(data, 0);

	if (cpu_family == CPU_HW_PSTATE)
		ret_val = fill_powernow_table_pstate(data, powernow_table);
	else
		ret_val = fill_powernow_table_fidvid(data, powernow_table);
	ret_val = fill_powernow_table_fidvid(data, powernow_table);
	if (ret_val)
		goto err_out_mem;

@@ -915,51 +841,6 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
	return ret_val;
}

static int fill_powernow_table_pstate(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table)
{
	int i;
	u32 hi = 0, lo = 0;
	rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
	data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;

	for (i = 0; i < data->acpi_data.state_count; i++) {
		u32 index;

		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
		if (index > data->max_hw_pstate) {
			printk(KERN_ERR PFX "invalid pstate %d - "
					"bad value %d.\n", i, index);
			printk(KERN_ERR PFX "Please report to BIOS "
					"manufacturer\n");
			invalidate_entry(powernow_table, i);
			continue;
		}

		ps_to_as[index] = i;

		/* Frequency may be rounded for these */
		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
				|| boot_cpu_data.x86 == 0x11) {

			rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
			if (!(hi & HW_PSTATE_VALID_MASK)) {
				pr_debug("invalid pstate %d, ignoring\n", index);
				invalidate_entry(powernow_table, i);
				continue;
			}

			powernow_table[i].frequency =
				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
		} else
			powernow_table[i].frequency =
				data->acpi_data.states[i].core_frequency * 1000;

		powernow_table[i].index = index;
	}
	return 0;
}

static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table)
{

@@ -1036,15 +917,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
			max_latency = cur_latency;
	}
	if (max_latency == 0) {
		/*
		 * Fam 11h and later may return 0 as transition latency. This
		 * is intended and means "very fast". While cpufreq core and
		 * governors currently can handle that gracefully, better set it
		 * to 1 to avoid problems in the future.
		 */
		if (boot_cpu_data.x86 < 0x11)
			printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
				"latency\n");
		pr_err(FW_WARN PFX "Invalid zero transition latency\n");
		max_latency = 1;
	}
	/* value in usecs, needs to be in nanoseconds */

@@ -1104,40 +977,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
	return res;
}

/* Take a frequency, and issue the hardware pstate transition command */
static int transition_frequency_pstate(struct powernow_k8_data *data,
		unsigned int index)
{
	u32 pstate = 0;
	int res, i;
	struct cpufreq_freqs freqs;

	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);

	/* get MSR index for hardware pstate transition */
	pstate = index & HW_PSTATE_MASK;
	if (pstate > data->max_hw_pstate)
		return -EINVAL;

	freqs.old = find_khz_freq_from_pstate(data->powernow_table,
			data->currpstate);
	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

	for_each_cpu(i, data->available_cores) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	res = transition_pstate(data, pstate);
	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

	for_each_cpu(i, data->available_cores) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	return res;
}

struct powernowk8_target_arg {
	struct cpufreq_policy *pol;
	unsigned targfreq;

@@ -1173,18 +1012,15 @@ static long powernowk8_target_fn(void *arg)
	if (query_current_values_with_pending_wait(data))
		return -EIO;

	if (cpu_family != CPU_HW_PSTATE) {
		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
		data->currfid, data->currvid);
	pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
		data->currfid, data->currvid);

		if ((checkvid != data->currvid) ||
		    (checkfid != data->currfid)) {
			printk(KERN_INFO PFX
				"error - out of sync, fix 0x%x 0x%x, "
				"vid 0x%x 0x%x\n",
				checkfid, data->currfid,
				checkvid, data->currvid);
		}
	if ((checkvid != data->currvid) ||
	    (checkfid != data->currfid)) {
		pr_info(PFX
		       "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
		       checkfid, data->currfid,
		       checkvid, data->currvid);
	}

	if (cpufreq_frequency_table_target(pol, data->powernow_table,

@@ -1195,11 +1031,8 @@ static long powernowk8_target_fn(void *arg)

	powernow_k8_acpi_pst_values(data, newstate);

	if (cpu_family == CPU_HW_PSTATE)
		ret = transition_frequency_pstate(data,
			data->powernow_table[newstate].index);
	else
		ret = transition_frequency_fidvid(data, newstate);
	ret = transition_frequency_fidvid(data, newstate);

	if (ret) {
		printk(KERN_ERR PFX "transition frequency failed\n");
		mutex_unlock(&fidvid_mutex);

@@ -1207,11 +1040,7 @@ static long powernowk8_target_fn(void *arg)
	}
	mutex_unlock(&fidvid_mutex);

	if (cpu_family == CPU_HW_PSTATE)
		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
				data->powernow_table[newstate].index);
	else
		pol->cur = find_khz_freq_from_fid(data->currfid);
	pol->cur = find_khz_freq_from_fid(data->currfid);

	return 0;
}

@@ -1264,22 +1093,23 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
		return;
	}

	if (cpu_family == CPU_OPTERON)
		fidvid_msr_init();
	fidvid_msr_init();

	init_on_cpu->rc = 0;
}

static const char missing_pss_msg[] =
	KERN_ERR
	FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
	FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
	FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";

/* per CPU init entry point to the driver */
static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
{
	static const char ACPI_PSS_BIOS_BUG_MSG[] =
		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
		FW_BUG PFX "Try again with latest BIOS.\n";
	struct powernow_k8_data *data;
	struct init_on_cpu init_on_cpu;
	int rc;
	struct cpuinfo_x86 *c = &cpu_data(pol->cpu);

	if (!cpu_online(pol->cpu))
		return -ENODEV;

@@ -1295,7 +1125,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
	}

	data->cpu = pol->cpu;
	data->currpstate = HW_PSTATE_INVALID;

	if (powernow_k8_cpu_init_acpi(data)) {
		/*

@@ -1303,7 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
		 * an UP version, and is deprecated by AMD.
		 */
		if (num_online_cpus() != 1) {
			printk_once(ACPI_PSS_BIOS_BUG_MSG);
			printk_once(missing_pss_msg);
			goto err_out;
		}
		if (pol->cpu != 0) {

@@ -1332,17 +1161,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
	if (rc != 0)
		goto err_out_exit_acpi;

	if (cpu_family == CPU_HW_PSTATE)
		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
	else
		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
	cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
	data->available_cores = pol->cpus;

	if (cpu_family == CPU_HW_PSTATE)
		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
				data->currpstate);
	else
		pol->cur = find_khz_freq_from_fid(data->currfid);
	pol->cur = find_khz_freq_from_fid(data->currfid);
	pr_debug("policy current frequency %d kHz\n", pol->cur);

	/* min/max the cpu is capable of */

@@ -1354,18 +1176,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
		return -EINVAL;
	}

	/* Check for APERF/MPERF support in hardware */
	if (cpu_has(c, X86_FEATURE_APERFMPERF))
		cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;

	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);

	if (cpu_family == CPU_HW_PSTATE)
		pr_debug("cpu_init done, current pstate 0x%x\n",
				data->currpstate);
	else
		pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
				data->currfid, data->currvid);
	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
			data->currfid, data->currvid);

	per_cpu(powernow_data, pol->cpu) = data;

@@ -1418,88 +1232,15 @@ static unsigned int powernowk8_get(unsigned int cpu)
	if (err)
		goto out;

	if (cpu_family == CPU_HW_PSTATE)
		khz = find_khz_freq_from_pstate(data->powernow_table,
						data->currpstate);
	else
		khz = find_khz_freq_from_fid(data->currfid);
	khz = find_khz_freq_from_fid(data->currfid);


out:
	return khz;
}

static void _cpb_toggle_msrs(bool t)
{
	int cpu;

	get_online_cpus();

	rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

	for_each_cpu(cpu, cpu_online_mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (t)
			reg->l &= ~BIT(25);
		else
			reg->l |= BIT(25);
	}
	wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

	put_online_cpus();
}

/*
 * Switch on/off core performance boosting.
 *
 * 0=disable
 * 1=enable.
 */
static void cpb_toggle(bool t)
{
	if (!cpb_capable)
		return;

	if (t && !cpb_enabled) {
		cpb_enabled = true;
		_cpb_toggle_msrs(t);
		printk(KERN_INFO PFX "Core Boosting enabled.\n");
	} else if (!t && cpb_enabled) {
		cpb_enabled = false;
		_cpb_toggle_msrs(t);
		printk(KERN_INFO PFX "Core Boosting disabled.\n");
	}
}

static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
		size_t count)
{
	int ret = -EINVAL;
	unsigned long val = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (!ret && (val == 0 || val == 1) && cpb_capable)
		cpb_toggle(val);
	else
		return -EINVAL;

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", cpb_enabled);
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(cpb);

static struct freq_attr *powernow_k8_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpb,
	NULL,
};

@@ -1515,53 +1256,18 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
	.attr = powernow_k8_attr,
};

/*
 * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
 * cannot block the remaining ones from boosting. On the CPU_UP path we
 * simply keep the boost-disable flag in sync with the current global
 * state.
 */
static int cpb_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	u32 lo, hi;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:

		if (!cpb_enabled) {
			rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
			lo |= BIT(25);
			wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
		}
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		lo &= ~BIT(25);
		wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cpb_nb = {
	.notifier_call = cpb_notify,
};

/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
	unsigned int i, supported_cpus = 0, cpu;
	unsigned int i, supported_cpus = 0;
	int rv;

	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
		pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
		request_module("acpi-cpufreq");
		return -ENODEV;
	}

	if (!x86_match_cpu(powernow_k8_ids))
		return -ENODEV;

@@ -1575,38 +1281,13 @@ static int __cpuinit powernowk8_init(void)
	if (supported_cpus != num_online_cpus())
		return -ENODEV;

	printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);

	if (boot_cpu_has(X86_FEATURE_CPB)) {

		cpb_capable = true;

		msrs = msrs_alloc();
		if (!msrs) {
			printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
			return -ENOMEM;
		}

		register_cpu_notifier(&cpb_nb);

		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

		for_each_cpu(cpu, cpu_online_mask) {
			struct msr *reg = per_cpu_ptr(msrs, cpu);
			cpb_enabled |= !(!!(reg->l & BIT(25)));
		}

		printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
			(cpb_enabled ? "on" : "off"));
	}

	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
		unregister_cpu_notifier(&cpb_nb);
		msrs_free(msrs);
		msrs = NULL;
	}

	if (!rv)
		pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
			num_online_nodes(), boot_cpu_data.x86_model_id,
			supported_cpus);

	return rv;
}

@@ -1615,13 +1296,6 @@ static void __exit powernowk8_exit(void)
{
	pr_debug("exit\n");

	if (boot_cpu_has(X86_FEATURE_CPB)) {
		msrs_free(msrs);
		msrs = NULL;

		unregister_cpu_notifier(&cpb_nb);
	}

	cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
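The removed freq_from_fid_did() is the only non-obvious arithmetic in the dropped hardware-P-state path: family 0x10 computes core MHz as 100 * (fid + 0x10) >> did, family 0x11 as 100 * (fid + 8) >> did. A stand-alone rendering of the same formula for reference (the demo function and values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Frequency encoding the deleted code used; returns kHz like the original. */
static uint32_t freq_from_fid_did(unsigned int family, uint32_t fid, uint32_t did)
{
	uint32_t mhz;

	if (family == 0x10)
		mhz = (100 * (fid + 0x10)) >> did;
	else	/* family 0x11 */
		mhz = (100 * (fid + 8)) >> did;

	return mhz * 1000;
}

int main(void)
{
	/* fid 0x0c, did 0 on family 0x10: (12 + 16) * 100 = 2800 MHz */
	printf("%u kHz\n", freq_from_fid_did(0x10, 0x0c, 0));
	return 0;
}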
@@ -5,24 +5,11 @@
 *  http://www.gnu.org/licenses/gpl.html
 */

enum pstate {
	HW_PSTATE_INVALID = 0xff,
	HW_PSTATE_0 = 0,
	HW_PSTATE_1 = 1,
	HW_PSTATE_2 = 2,
	HW_PSTATE_3 = 3,
	HW_PSTATE_4 = 4,
	HW_PSTATE_5 = 5,
	HW_PSTATE_6 = 6,
	HW_PSTATE_7 = 7,
};

struct powernow_k8_data {
	unsigned int cpu;

	u32 numps; /* number of p-states */
	u32 batps; /* number of p-states supported on battery */
	u32 max_hw_pstate; /* maximum legal hardware pstate */

	/* these values are constant when the PSB is used to determine
	 * vid/fid pairings, but are modified during the ->target() call

@@ -37,7 +24,6 @@ struct powernow_k8_data {
	/* keep track of the current fid / vid or pstate */
	u32 currvid;
	u32 currfid;
	enum pstate currpstate;

	/* the powernow_table includes all frequency and vid/fid pairings:
	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.

@@ -97,23 +83,6 @@ struct powernow_k8_data {
#define MSR_S_HI_CURRENT_VID 0x0000003f
#define MSR_C_HI_STP_GNT_BENIGN 0x00000001


/* Hardware Pstate _PSS and MSR definitions */
#define USE_HW_PSTATE 0x00000080
#define HW_PSTATE_MASK 0x00000007
#define HW_PSTATE_VALID_MASK 0x80000000
#define HW_PSTATE_MAX_MASK 0x000000f0
#define HW_PSTATE_MAX_SHIFT 4
#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */
#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */
#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */
#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */

/* define the two driver architectures */
#define CPU_OPTERON 0
#define CPU_HW_PSTATE 1


/*
 * There are restrictions frequencies have to follow:
 * - only 1 entry in the low fid table ( <=1.4GHz )

@@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);

static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);

static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
@@ -18,9 +18,10 @@ static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
int cpuidle_driver_refcount;

static void __cpuidle_register_driver(struct cpuidle_driver *drv)
static void set_power_states(struct cpuidle_driver *drv)
{
	int i;

	/*
	 * cpuidle driver should set the drv->power_specified bit
	 * before registering if the driver provides

@@ -35,13 +36,10 @@ static void __cpuidle_register_driver(struct cpuidle_driver *drv)
	 * a power value of -1. So we use -2, -3, etc, for other
	 * c-states.
	 */
	if (!drv->power_specified) {
		for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
			drv->states[i].power_usage = -1 - i;
	}
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
		drv->states[i].power_usage = -1 - i;
}


/**
 * cpuidle_register_driver - registers a driver
 * @drv: the driver

@@ -59,13 +57,16 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
		spin_unlock(&cpuidle_driver_lock);
		return -EBUSY;
	}
	__cpuidle_register_driver(drv);

	if (!drv->power_specified)
		set_power_states(drv);

	cpuidle_curr_driver = drv;

	spin_unlock(&cpuidle_driver_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_driver);

/**

@@ -96,7 +97,6 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)

	spin_unlock(&cpuidle_driver_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);

struct cpuidle_driver *cpuidle_driver_ref(void)
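The refactored set_power_states() keeps the old fallback: when a driver supplies no power numbers, deeper C-states get increasingly negative power_usage so a governor picking the lowest-power eligible state still prefers them. A small demonstration of the resulting ordering (the array size and values are made up for the example):

#include <stdio.h>

#define STATE_START 1	/* mirrors CPUIDLE_DRIVER_STATE_START on x86 */

int main(void)
{
	int power_usage[4], i;

	/* Same loop as set_power_states(): -2, -3, ... for deeper states. */
	for (i = STATE_START; i < 4; i++)
		power_usage[i] = -1 - i;

	for (i = STATE_START; i < 4; i++)
		printf("state %d: power_usage %d\n", i, power_usage[i]);
	return 0;
}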
@@ -88,6 +88,8 @@ static int ladder_select_state(struct cpuidle_driver *drv,

	/* consider promotion */
	if (last_idx < drv->state_count - 1 &&
	    !drv->states[last_idx + 1].disabled &&
	    !dev->states_usage[last_idx + 1].disable &&
	    last_residency > last_state->threshold.promotion_time &&
	    drv->states[last_idx + 1].exit_latency <= latency_req) {
		last_state->stats.promotion_count++;

@@ -100,7 +102,9 @@ static int ladder_select_state(struct cpuidle_driver *drv,

	/* consider demotion */
	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
	    drv->states[last_idx].exit_latency > latency_req) {
	    (drv->states[last_idx].disabled ||
	    dev->states_usage[last_idx].disable ||
	    drv->states[last_idx].exit_latency > latency_req)) {
		int i;

		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
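The ladder change reads naturally as a pair of guards: never promote into a state that is disabled (by the driver or per device), and force a demotion out of the current state if it has become disabled. A boolean sketch of the two conditions, with all inputs reduced to plain flags:

#include <stdbool.h>
#include <stdio.h>

/* Promotion is allowed only into an enabled next state within latency. */
static bool may_promote(bool next_disabled, bool next_usage_disabled,
			int next_exit_latency, int latency_req)
{
	return !next_disabled && !next_usage_disabled &&
	       next_exit_latency <= latency_req;
}

/* Demotion is forced when the current state is disabled or too slow. */
static bool must_demote(bool cur_disabled, bool cur_usage_disabled,
			int cur_exit_latency, int latency_req)
{
	return cur_disabled || cur_usage_disabled ||
	       cur_exit_latency > latency_req;
}

int main(void)
{
	printf("%d %d\n",
	       may_promote(false, false, 10, 100),
	       must_demote(true, false, 10, 100));
	return 0;
}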
@@ -606,21 +606,6 @@ static int pci_pm_prepare(struct device *dev)
	struct device_driver *drv = dev->driver;
	int error = 0;

	/*
	 * If a PCI device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	/*
	 * PCI devices suspended at run time need to be resumed at this
	 * point, because in general it is necessary to reconfigure them for

@@ -644,8 +629,6 @@ static void pci_pm_complete(struct device *dev)

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);

	pm_runtime_put_sync(dev);
}

#else /* !CONFIG_PM_SLEEP */
@@ -98,7 +98,6 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)

		dst_cx->type = cx->type;
		dst_cx->latency = cx->latency;
		dst_cx->power = cx->power;

		dst_cx->dpcnt = 0;
		set_xen_guest_handle(dst_cx->dp, NULL);
@@ -3,7 +3,6 @@

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/thermal.h>
#include <asm/acpi.h>

@@ -59,13 +58,11 @@ struct acpi_processor_cx {
	u8 entry_method;
	u8 index;
	u32 latency;
	u32 power;
	u8 bm_sts_skip;
	char desc[ACPI_CX_DESC_LEN];
};

struct acpi_processor_power {
	struct cpuidle_device dev;
	struct acpi_processor_cx *state;
	unsigned long bm_check_timestamp;
	u32 default_state;

@@ -325,12 +322,10 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
extern const struct file_operations acpi_processor_throttling_fops;
extern void acpi_processor_throttling_init(void);
/* in processor_idle.c */
int acpi_processor_power_init(struct acpi_processor *pr,
		struct acpi_device *device);
int acpi_processor_power_init(struct acpi_processor *pr);
int acpi_processor_power_exit(struct acpi_processor *pr);
int acpi_processor_cst_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
int acpi_processor_power_exit(struct acpi_processor *pr,
		struct acpi_device *device);
int acpi_processor_suspend(struct device *dev);
int acpi_processor_resume(struct device *dev);
extern struct cpuidle_driver acpi_idle_driver;
@@ -97,6 +97,8 @@ struct clock_event_device {
	void (*broadcast)(const struct cpumask *mask);
	void (*set_mode)(enum clock_event_mode mode,
			 struct clock_event_device *);
	void (*suspend)(struct clock_event_device *);
	void (*resume)(struct clock_event_device *);
	unsigned long min_delta_ticks;
	unsigned long max_delta_ticks;

@@ -156,6 +158,9 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
				      freq, minsec);
}

extern void clockevents_suspend(void);
extern void clockevents_resume(void);

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void clockevents_notify(unsigned long reason, void *arg);
#else

@@ -164,6 +169,9 @@ extern void clockevents_notify(unsigned long reason, void *arg);

#else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */

static inline void clockevents_suspend(void) {}
static inline void clockevents_resume(void) {}

#define clockevents_notify(reason, arg) do { } while (0)

#endif
@@ -776,6 +776,13 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
	dev->power.ignore_children = enable;
}

static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
	dev->power.syscore = val;
#endif
}

static inline void device_lock(struct device *dev)
{
	mutex_lock(&dev->mutex);
|
@ -48,6 +48,14 @@ int opp_disable(struct device *dev, unsigned long freq);
|
|||
|
||||
struct srcu_notifier_head *opp_get_notifier(struct device *dev);
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
int of_init_opp_table(struct device *dev);
|
||||
#else
|
||||
static inline int of_init_opp_table(struct device *dev)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif /* CONFIG_OF */
|
||||
#else
|
||||
static inline unsigned long opp_get_voltage(struct opp *opp)
|
||||
{
|
||||
|
|
|
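The #ifdef CONFIG_OF stub above follows the usual kernel pattern: callers such as the new cpufreq-cpu0 driver can invoke of_init_opp_table() unconditionally and simply get -EINVAL on kernels built without device tree support. The same pattern in a compilable miniature (all names here are demo stand-ins, not the real header):

#include <stdio.h>

#define EINVAL 22

/* Real implementation behind the config switch, inline stub otherwise. */
#ifdef CONFIG_OF
int of_init_opp_table_demo(void) { return 0; }
#else
static inline int of_init_opp_table_demo(void) { return -EINVAL; }
#endif

int main(void)
{
	int ret = of_init_opp_table_demo();

	/* Caller needs no #ifdef of its own; it just handles the error. */
	if (ret)
		printf("no DT support, ret=%d\n", ret);
	return 0;
}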
@@ -510,12 +510,14 @@ struct dev_pm_info {
	bool is_prepared:1; /* Owned by the PM core */
	bool is_suspended:1; /* Ditto */
	bool ignore_children:1;
	bool early_init:1; /* Owned by the PM core */
	spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
	struct list_head entry;
	struct completion completion;
	struct wakeup_source *wakeup;
	bool wakeup_path:1;
	bool syscore:1;
#else
	unsigned int should_wakeup:1;
#endif
include/linux/pm_domain.h

@@ -114,7 +114,6 @@ struct generic_pm_domain_data {
 	struct mutex lock;
 	unsigned int refcount;
 	bool need_restore;
-	bool always_on;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -139,36 +138,32 @@ extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
 				    struct device *dev,
 				    struct gpd_timing_data *td);
 
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
-				      struct device *dev)
-{
-	return __pm_genpd_add_device(genpd, dev, NULL);
-}
-
-static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
-					 struct device *dev)
-{
-	return __pm_genpd_of_add_device(genpd_node, dev, NULL);
-}
+extern int __pm_genpd_name_add_device(const char *domain_name,
+				      struct device *dev,
+				      struct gpd_timing_data *td);
 
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 				  struct device *dev);
-extern void pm_genpd_dev_always_on(struct device *dev, bool val);
 extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 				  struct generic_pm_domain *new_subdomain);
+extern int pm_genpd_add_subdomain_names(const char *master_name,
+					const char *subdomain_name);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 				     struct generic_pm_domain *target);
 extern int pm_genpd_add_callbacks(struct device *dev,
 				  struct gpd_dev_ops *ops,
 				  struct gpd_timing_data *td);
 extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
-extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
-extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
+extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
+extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_detach_cpuidle(const char *name);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
 			  struct dev_power_governor *gov, bool is_off);
 
 extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_poweron(const char *domain_name);
 
 extern bool default_stop_ok(struct device *dev);
@@ -189,8 +184,15 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
 {
 	return -ENOSYS;
 }
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
-				      struct device *dev)
-{
-	return -ENOSYS;
-}
+static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
+					   struct device *dev,
+					   struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
+static inline int __pm_genpd_name_add_device(const char *domain_name,
+					     struct device *dev,
+					     struct gpd_timing_data *td)
+{
+	return -ENOSYS;
+}
@@ -199,13 +201,17 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 {
 	return -ENOSYS;
 }
-static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {}
 static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
 static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 					 struct generic_pm_domain *new_sd)
 {
 	return -ENOSYS;
 }
+static inline int pm_genpd_add_subdomain_names(const char *master_name,
+					       const char *subdomain_name)
+{
+	return -ENOSYS;
+}
 static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 					    struct generic_pm_domain *target)
 {
@@ -221,11 +227,19 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 {
 	return -ENOSYS;
 }
-static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
+static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
 {
 	return -ENOSYS;
 }
-static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+static inline int pm_genpd_name_attach_cpuidle(const char *name, int state)
 {
 	return -ENOSYS;
 }
+static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	return -ENOSYS;
+}
+static inline int pm_genpd_name_detach_cpuidle(const char *name)
+{
+	return -ENOSYS;
+}
@@ -237,6 +251,10 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
 	return -ENOSYS;
 }
+static inline int pm_genpd_name_poweron(const char *domain_name)
+{
+	return -ENOSYS;
+}
 static inline bool default_stop_ok(struct device *dev)
 {
 	return false;
@@ -245,6 +263,24 @@ static inline bool default_stop_ok(struct device *dev)
 #define pm_domain_always_on_gov NULL
 #endif
 
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+				      struct device *dev)
+{
+	return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
+static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
+					 struct device *dev)
+{
+	return __pm_genpd_of_add_device(genpd_node, dev, NULL);
+}
+
+static inline int pm_genpd_name_add_device(const char *domain_name,
+					   struct device *dev)
+{
+	return __pm_genpd_name_add_device(domain_name, dev, NULL);
+}
+
 static inline int pm_genpd_remove_callbacks(struct device *dev)
 {
 	return __pm_genpd_remove_callbacks(dev, true);
@@ -258,4 +294,20 @@ static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
 static inline void pm_genpd_poweroff_unused(void) {}
 #endif
 
+#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
+extern void pm_genpd_syscore_switch(struct device *dev, bool suspend);
+#else
+static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {}
+#endif
+
+static inline void pm_genpd_syscore_poweroff(struct device *dev)
+{
+	pm_genpd_syscore_switch(dev, true);
+}
+
+static inline void pm_genpd_syscore_poweron(struct device *dev)
+{
+	pm_genpd_syscore_switch(dev, false);
+}
+
 #endif /* _LINUX_PM_DOMAIN_H */

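The name-based variants let platform code wire up domains before it holds a generic_pm_domain pointer. A minimal sketch, assuming a hypothetical domain registered as "pd0":

#include <linux/pm_domain.h>

static int example_setup_domain(struct device *dev)
{
	int ret;

	ret = pm_genpd_name_add_device("pd0", dev);	/* look the domain up by name */
	if (ret)
		return ret;

	ret = pm_genpd_name_attach_cpuidle("pd0", 1);	/* couple cpuidle state 1 to it */
	if (ret)
		return ret;

	return pm_genpd_name_poweron("pd0");
}

The pm_genpd_syscore_poweroff()/pm_genpd_syscore_poweron() wrappers at the end of the header serve syscore devices such as timers, which must power their domain off and on outside the ordinary suspend/resume flow.
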
kernel/power/Kconfig

@@ -263,6 +263,10 @@ config PM_GENERIC_DOMAINS
 	bool
 	depends on PM
 
+config PM_GENERIC_DOMAINS_SLEEP
+	def_bool y
+	depends on PM_SLEEP && PM_GENERIC_DOMAINS
+
 config PM_GENERIC_DOMAINS_RUNTIME
 	def_bool y
 	depends on PM_RUNTIME && PM_GENERIC_DOMAINS

kernel/power/poweroff.c

@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
 	.enable_mask	= SYSRQ_ENABLE_BOOT,
 };
 
-static int pm_sysrq_init(void)
+static int __init pm_sysrq_init(void)
 {
 	register_sysrq_key('o', &sysrq_poweroff_op);
 	return 0;

kernel/power/process.c

@@ -79,7 +79,7 @@ static int try_to_freeze_tasks(bool user_only)
 
 		/*
 		 * We need to retry, but first give the freezing tasks some
-		 * time to enter the regrigerator.
+		 * time to enter the refrigerator.
 		 */
 		msleep(10);
 	}

kernel/power/qos.c

@@ -139,6 +139,7 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 	default:
 		/* runtime check for not using enum */
 		BUG();
+		return PM_QOS_DEFAULT_VALUE;
 	}
 }
 

kernel/time/clockevents.c

@@ -397,6 +397,30 @@ void clockevents_exchange_device(struct clock_event_device *old,
 	local_irq_restore(flags);
 }
 
+/**
+ * clockevents_suspend - suspend clock devices
+ */
+void clockevents_suspend(void)
+{
+	struct clock_event_device *dev;
+
+	list_for_each_entry_reverse(dev, &clockevent_devices, list)
+		if (dev->suspend)
+			dev->suspend(dev);
+}
+
+/**
+ * clockevents_resume - resume clock devices
+ */
+void clockevents_resume(void)
+{
+	struct clock_event_device *dev;
+
+	list_for_each_entry(dev, &clockevent_devices, list)
+		if (dev->resume)
+			dev->resume(dev);
+}
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 /**
  * clockevents_notify - notification about relevant events

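The core invokes the new hooks only when a device supplies them (suspend in reverse registration order, resume in registration order), so existing drivers are unaffected. A sketch of a driver opting in, with all names hypothetical:

#include <linux/clockchips.h>

static void example_ce_suspend(struct clock_event_device *ce)
{
	/* stop the hardware channel and save volatile timer state */
}

static void example_ce_resume(struct clock_event_device *ce)
{
	/* restore the state saved by example_ce_suspend() */
}

static struct clock_event_device example_ce = {
	.name		= "example",
	.suspend	= example_ce_suspend,
	.resume		= example_ce_resume,
};
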
kernel/time/timekeeping.c

@@ -776,6 +776,7 @@ static void timekeeping_resume(void)
 
 	read_persistent_clock(&ts);
 
+	clockevents_resume();
 	clocksource_resume();
 
 	write_seqlock_irqsave(&tk->lock, flags);
@@ -835,6 +836,7 @@ static int timekeeping_suspend(void)
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
 	clocksource_suspend();
+	clockevents_suspend();
 
 	return 0;
 }
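Note the ordering: resume brings the clock event devices back before the clocksources, while suspend shuts them down only after the clocksources, so each path is the mirror of the other.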