ARM: vexpress: migrate TC2 to the new MCPM backend abstraction
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Olof Johansson <olof@lixom.net>
parent d3a875444a
commit 41f26e2d94
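The shape of the new backend interface can be inferred from the callbacks this patch installs. The following is a sketch reconstructed from the function signatures in the diff below; the field order, and any members TC2 does not exercise, are assumptions rather than a copy of arch/arm/include/asm/mcpm.h:

        /*
         * Sketch of the per-CPU vs. per-cluster split in the new
         * mcpm_platform_ops, inferred from the TC2 callbacks below.
         */
        struct mcpm_platform_ops_sketch {
                int  (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
                int  (*cluster_powerup)(unsigned int cluster);
                void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
                void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
                void (*cluster_powerdown_prepare)(unsigned int cluster);
                void (*cpu_cache_disable)(void);
                void (*cluster_cache_disable)(void);
                int  (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
                void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
                void (*cluster_is_up)(unsigned int cluster);
        };

The pattern: per-CPU and per-cluster responsibilities become separate callbacks, each performing one hardware operation, while the serialization and reference counting that used to live in this backend move into the MCPM core.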
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -18,7 +18,6 @@
 #include <linux/kernel.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/irqchip/arm-gic.h>
@@ -44,101 +43,36 @@
 
 static void __iomem *scc;
 
-/*
- * We can't use regular spinlocks. In the switcher case, it is possible
- * for an outbound CPU to call power_down() after its inbound counterpart
- * is already live using the same logical CPU number which trips lockdep
- * debugging.
- */
-static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
 #define TC2_CLUSTERS            2
 #define TC2_MAX_CPUS_PER_CLUSTER        3
 
 static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
 
-/* Keep per-cpu usage count to cope with unordered up/down requests */
-static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];
-
-#define tc2_cluster_unused(cluster) \
-        (!tc2_pm_use_count[0][cluster] && \
-         !tc2_pm_use_count[1][cluster] && \
-         !tc2_pm_use_count[2][cluster])
-
-static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
+static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
 {
         pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
         if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
                 return -EINVAL;
-
-        /*
-         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
-         * variant exists, we need to disable IRQs manually here.
-         */
-        local_irq_disable();
-        arch_spin_lock(&tc2_pm_lock);
-
-        if (tc2_cluster_unused(cluster))
-                ve_spc_powerdown(cluster, false);
-
-        tc2_pm_use_count[cpu][cluster]++;
-        if (tc2_pm_use_count[cpu][cluster] == 1) {
-                ve_spc_set_resume_addr(cluster, cpu,
-                                       virt_to_phys(mcpm_entry_point));
-                ve_spc_cpu_wakeup_irq(cluster, cpu, true);
-        } else if (tc2_pm_use_count[cpu][cluster] != 2) {
-                /*
-                 * The only possible values are:
-                 * 0 = CPU down
-                 * 1 = CPU (still) up
-                 * 2 = CPU requested to be up before it had a chance
-                 *     to actually make itself down.
-                 * Any other value is a bug.
-                 */
-                BUG();
-        }
-
-        arch_spin_unlock(&tc2_pm_lock);
-        local_irq_enable();
-
+        ve_spc_set_resume_addr(cluster, cpu,
+                               virt_to_phys(mcpm_entry_point));
+        ve_spc_cpu_wakeup_irq(cluster, cpu, true);
         return 0;
 }
 
-static void tc2_pm_down(u64 residency)
+static int tc2_pm_cluster_powerup(unsigned int cluster)
 {
-        unsigned int mpidr, cpu, cluster;
-        bool last_man = false, skip_wfi = false;
-
-        mpidr = read_cpuid_mpidr();
-        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+        pr_debug("%s: cluster %u\n", __func__, cluster);
+        if (cluster >= TC2_CLUSTERS)
+                return -EINVAL;
+        ve_spc_powerdown(cluster, false);
+        return 0;
+}
 
+static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
+{
         pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
         BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
-
-        __mcpm_cpu_going_down(cpu, cluster);
-
-        arch_spin_lock(&tc2_pm_lock);
-        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
-        tc2_pm_use_count[cpu][cluster]--;
-        if (tc2_pm_use_count[cpu][cluster] == 0) {
-                ve_spc_cpu_wakeup_irq(cluster, cpu, true);
-                if (tc2_cluster_unused(cluster)) {
-                        ve_spc_powerdown(cluster, true);
-                        ve_spc_global_wakeup_irq(true);
-                        last_man = true;
-                }
-        } else if (tc2_pm_use_count[cpu][cluster] == 1) {
-                /*
-                 * A power_up request went ahead of us.
-                 * Even if we do not want to shut this CPU down,
-                 * the caller expects a certain state as if the WFI
-                 * was aborted.  So let's continue with cache cleaning.
-                 */
-                skip_wfi = true;
-        } else
-                BUG();
-
+        ve_spc_cpu_wakeup_irq(cluster, cpu, true);
         /*
          * If the CPU is committed to power down, make sure
         * the power controller will be in charge of waking it
@@ -146,12 +80,24 @@ static void tc2_pm_down(u64 residency)
         * to the CPU by disabling the GIC CPU IF to prevent wfi
         * from completing execution behind power controller back
         */
-        if (!skip_wfi)
-                gic_cpu_if_down();
+        gic_cpu_if_down();
+}
 
-        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-                arch_spin_unlock(&tc2_pm_lock);
+static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
+{
+        pr_debug("%s: cluster %u\n", __func__, cluster);
+        BUG_ON(cluster >= TC2_CLUSTERS);
+        ve_spc_powerdown(cluster, true);
+        ve_spc_global_wakeup_irq(true);
+}
 
+static void tc2_pm_cpu_cache_disable(void)
+{
+        v7_exit_coherency_flush(louis);
+}
 
+static void tc2_pm_cluster_cache_disable(void)
+{
         if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
                 /*
                  * On the Cortex-A15 we need to disable
@@ -165,36 +111,7 @@ static void tc2_pm_down(u64 residency)
         }
 
         v7_exit_coherency_flush(all);
-        cci_disable_port_by_cpu(mpidr);
-
-        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
-        } else {
-                /*
-                 * If last man then undo any setup done previously.
-                 */
-                if (last_man) {
-                        ve_spc_powerdown(cluster, false);
-                        ve_spc_global_wakeup_irq(false);
-                }
-
-                arch_spin_unlock(&tc2_pm_lock);
-
-                v7_exit_coherency_flush(louis);
-        }
-
-        __mcpm_cpu_down(cpu, cluster);
-
-        /* Now we are prepared for power-down, do it: */
-        if (!skip_wfi)
-                wfi();
-
-        /* Not dead at this point?  Let our caller cope. */
-}
-
-static void tc2_pm_power_down(void)
-{
-        tc2_pm_down(0);
+        cci_disable_port_by_cpu(read_cpuid_mpidr());
 }
 
 static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
@@ -217,11 +134,6 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
         BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
 
         for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
-                /*
-                 * Only examine the hardware state if the target CPU has
-                 * caught up at least as far as tc2_pm_down():
-                 */
-                if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
                 pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
                          __func__, cpu, cluster,
                          readl_relaxed(scc + RESET_CTRL));
@@ -237,7 +149,6 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
                 if (tc2_core_in_reset(cpu, cluster) ||
                     ve_spc_cpu_in_wfi(cpu, cluster))
                         return 0; /* success: the CPU is halted */
-                }
 
                 /* Otherwise, wait and retry: */
                 msleep(POLL_MSEC);
@@ -246,72 +157,40 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
         return -ETIMEDOUT; /* timeout */
 }
 
-static void tc2_pm_suspend(u64 residency)
+static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
 {
-        unsigned int mpidr, cpu, cluster;
-
-        mpidr = read_cpuid_mpidr();
-        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
         ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
-        tc2_pm_down(residency);
 }
 
-static void tc2_pm_powered_up(void)
+static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
 {
-        unsigned int mpidr, cpu, cluster;
-        unsigned long flags;
-
-        mpidr = read_cpuid_mpidr();
-        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
         pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
         BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
-
-        local_irq_save(flags);
-        arch_spin_lock(&tc2_pm_lock);
-
-        if (tc2_cluster_unused(cluster)) {
-                ve_spc_powerdown(cluster, false);
-                ve_spc_global_wakeup_irq(false);
-        }
-
-        if (!tc2_pm_use_count[cpu][cluster])
-                tc2_pm_use_count[cpu][cluster] = 1;
-
         ve_spc_cpu_wakeup_irq(cluster, cpu, false);
         ve_spc_set_resume_addr(cluster, cpu, 0);
+}
 
-        arch_spin_unlock(&tc2_pm_lock);
-        local_irq_restore(flags);
+static void tc2_pm_cluster_is_up(unsigned int cluster)
+{
+        pr_debug("%s: cluster %u\n", __func__, cluster);
+        BUG_ON(cluster >= TC2_CLUSTERS);
+        ve_spc_powerdown(cluster, false);
+        ve_spc_global_wakeup_irq(false);
 }
 
 static const struct mcpm_platform_ops tc2_pm_power_ops = {
-        .power_up               = tc2_pm_power_up,
-        .power_down             = tc2_pm_power_down,
+        .cpu_powerup            = tc2_pm_cpu_powerup,
+        .cluster_powerup        = tc2_pm_cluster_powerup,
+        .cpu_suspend_prepare    = tc2_pm_cpu_suspend_prepare,
+        .cpu_powerdown_prepare  = tc2_pm_cpu_powerdown_prepare,
+        .cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
+        .cpu_cache_disable      = tc2_pm_cpu_cache_disable,
+        .cluster_cache_disable  = tc2_pm_cluster_cache_disable,
         .wait_for_powerdown     = tc2_pm_wait_for_powerdown,
-        .suspend                = tc2_pm_suspend,
-        .powered_up             = tc2_pm_powered_up,
+        .cpu_is_up              = tc2_pm_cpu_is_up,
+        .cluster_is_up          = tc2_pm_cluster_is_up,
 };
 
-static bool __init tc2_pm_usage_count_init(void)
-{
-        unsigned int mpidr, cpu, cluster;
-
-        mpidr = read_cpuid_mpidr();
-        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-        if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
-                pr_err("%s: boot CPU is out of bound!\n", __func__);
-                return false;
-        }
-        tc2_pm_use_count[cpu][cluster] = 1;
-        return true;
-}
-
 /*
  * Enable cluster-level coherency, in preparation for turning on the MMU.
  */
@@ -323,23 +202,9 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
 "       b       cci_enable_port_for_self ");
 }
 
-static void __init tc2_cache_off(void)
-{
-        pr_info("TC2: disabling cache during MCPM loopback test\n");
-        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
-                /* disable L2 prefetching on the Cortex-A15 */
-                asm volatile(
-                "mcr    p15, 1, %0, c15, c0, 3 \n\t"
-                "isb    \n\t"
-                "dsb    "
-                : : "r" (0x400) );
-        }
-        v7_exit_coherency_flush(all);
-        cci_disable_port_by_cpu(read_cpuid_mpidr());
-}
-
 static int __init tc2_pm_init(void)
 {
+        unsigned int mpidr, cpu, cluster;
         int ret, irq;
         u32 a15_cluster_id, a7_cluster_id, sys_info;
         struct device_node *np;
@@ -379,14 +244,20 @@ static int __init tc2_pm_init(void)
         if (!cci_probed())
                 return -ENODEV;
 
-        if (!tc2_pm_usage_count_init())
+        mpidr = read_cpuid_mpidr();
+        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+        if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
+                pr_err("%s: boot CPU is out of bound!\n", __func__);
                 return -EINVAL;
+        }
 
         ret = mcpm_platform_register(&tc2_pm_power_ops);
         if (!ret) {
                 mcpm_sync_init(tc2_pm_power_up_setup);
                 /* test if we can (re)enable the CCI on our own */
-                BUG_ON(mcpm_loopback(tc2_cache_off) != 0);
+                BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
                 pr_info("TC2 power management initialized\n");
         }
         return ret;
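With this migration, the spinlock, per-CPU use counts, and last-man arbitration deleted above become the MCPM core's job, and the backend is reduced to stateless hardware operations. The following is a hedged sketch of the power-down sequence the core is expected to drive; the authoritative flow lives in the MCPM core, not in this file, and mcpm_core_is_last_man() is a hypothetical stand-in for the core's own bookkeeping:

        /* Approximate call order on the CPU power-down path (sketch only). */
        static void mcpm_power_down_sketch(unsigned int cpu, unsigned int cluster,
                                           const struct mcpm_platform_ops *ops)
        {
                /* The core holds its own lock and usage counts around this. */
                bool last_man = mcpm_core_is_last_man(cpu, cluster);    /* hypothetical */

                ops->cpu_powerdown_prepare(cpu, cluster);  /* tc2_pm_cpu_powerdown_prepare */
                if (last_man) {
                        ops->cluster_powerdown_prepare(cluster);
                        ops->cluster_cache_disable();   /* full flush + CCI port off */
                } else {
                        ops->cpu_cache_disable();       /* CPU-level (louis) flush only */
                }

                wfi();  /* the core, not the backend, executes the final WFI */
        }

This matches what the diff shows on the TC2 side: tc2_pm_cluster_cache_disable() ends with cci_disable_port_by_cpu(read_cpuid_mpidr()), while tc2_pm_cpu_cache_disable() performs only v7_exit_coherency_flush(louis).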
|