Merge branch 'mvebu/soc-cpufreq' into mvebu/soc

commit ba3ec5780b
@@ -3,14 +3,15 @@ Device Tree Clock bindings for cpu clock of Marvell EBU platforms
Required properties:
- compatible : shall be one of the following:
        "marvell,armada-xp-cpu-clock" - cpu clocks for Armada XP
- reg : Address and length of the clock complex register set
- reg : Address and length of the clock complex register set, followed
  by address and length of the PMU DFS registers
- #clock-cells : should be set to 1.
- clocks : shall be the input parent clock phandle for the clock.

cpuclk: clock-complex@d0018700 {
        #clock-cells = <1>;
        compatible = "marvell,armada-xp-cpu-clock";
        reg = <0xd0018700 0xA0>;
        reg = <0xd0018700 0xA0>, <0x1c054 0x10>;
        clocks = <&coreclk 1>;
}

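For orientation, the second reg range introduced by this binding change is the one the clock driver probes for. Below is a minimal, purely illustrative sketch of the consumer side; the helper name example_map_cpu_clock_regs is hypothetical, and the real logic lives in of_cpu_clk_setup() further down in this series.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Illustrative only: mirrors how of_cpu_clk_setup() consumes the binding.
 * reg index 0 is the clock-complex block, the optional index 1 is the
 * PMU DFS register window added here. */
static void __init example_map_cpu_clock_regs(struct device_node *np)
{
        void __iomem *clock_complex_base = of_iomap(np, 0);
        void __iomem *pmu_dfs_base = of_iomap(np, 1);   /* NULL on old DTs */

        if (clock_complex_base == NULL)
                return;
        if (pmu_dfs_base == NULL)
                pr_warn("pmu-dfs registers not described, DFS unavailable\n");

        /* ... register one clock per CPU, pointing at these bases ... */
}
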
@@ -67,6 +67,7 @@ static void __init set_secondary_cpus_clock(void)
                if (!cpu_clk)
                        return;
                clk_set_rate(cpu_clk, rate);
                clk_prepare_enable(cpu_clk);
        }
}

@@ -18,20 +18,26 @@

#define pr_fmt(fmt) "mvebu-pmsu: " fmt

#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/smp.h>
#include <linux/resource.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include "common.h"
#include "armada-370-xp.h"

static void __iomem *pmsu_mp_base;

@@ -57,6 +63,10 @@ static void __iomem *pmsu_mp_base;
#define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24)
#define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25)

#define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1)
#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17)

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)

/* PMSU fabric registers */

@@ -291,3 +301,155 @@ static int __init armada_370_xp_cpu_pm_init(void)

arch_initcall(armada_370_xp_cpu_pm_init);
early_initcall(armada_370_xp_pmsu_init);

static void mvebu_pmsu_dfs_request_local(void *data)
{
        u32 reg;
        u32 cpu = smp_processor_id();
        unsigned long flags;

        local_irq_save(flags);

        /* Prepare to enter idle */
        reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
        reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
               PMSU_STATUS_AND_MASK_IRQ_MASK |
               PMSU_STATUS_AND_MASK_FIQ_MASK;
        writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

        /* Request the DFS transition */
        reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
        reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
        writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));

        /* The fact of entering idle will trigger the DFS transition */
        wfi();

        /*
         * We're back from idle, the DFS transition has completed,
         * clear the idle wait indication.
         */
        reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
        reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
        writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));

        local_irq_restore(flags);
}

int mvebu_pmsu_dfs_request(int cpu)
{
        unsigned long timeout;
        int hwcpu = cpu_logical_map(cpu);
        u32 reg;

        /* Clear any previous DFS DONE event */
        reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
        reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
        writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

        /* Mask the DFS done interrupt, since we are going to poll */
        reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
        reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
        writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

        /* Trigger the DFS on the appropriate CPU */
        smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
                                 NULL, false);

        /* Poll until the DFS done event is generated */
        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
                if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
                        break;
                udelay(10);
        }

        if (time_after(jiffies, timeout))
                return -ETIME;

        /* Restore the DFS mask to its original state */
        reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
        reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
        writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));

        return 0;
}

static int __init armada_xp_pmsu_cpufreq_init(void)
{
        struct device_node *np;
        struct resource res;
        int ret, cpu;

        if (!of_machine_is_compatible("marvell,armadaxp"))
                return 0;

        /*
         * In order to have proper cpufreq handling, we need to ensure
         * that the Device Tree description of the CPU clock includes
         * the definition of the PMU DFS registers. If not, we do not
         * register the clock notifier and the cpufreq driver. This
         * piece of code is only for compatibility with old Device
         * Trees.
         */
        np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
        if (!np)
                return 0;

        ret = of_address_to_resource(np, 1, &res);
        if (ret) {
                pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
                of_node_put(np);
                return 0;
        }

        of_node_put(np);

        /*
         * For each CPU, this loop registers the operating points
         * supported (which are the nominal CPU frequency and half of
         * it), and registers the clock notifier that will take care
         * of doing the PMSU part of a frequency transition.
         */
        for_each_possible_cpu(cpu) {
                struct device *cpu_dev;
                struct clk *clk;
                int ret;

                cpu_dev = get_cpu_device(cpu);
                if (!cpu_dev) {
                        pr_err("Cannot get CPU %d\n", cpu);
                        continue;
                }

                clk = clk_get(cpu_dev, 0);
                if (!clk) {
                        pr_err("Cannot get clock for CPU %d\n", cpu);
                        return -ENODEV;
                }

                /*
                 * In case of a failure of dev_pm_opp_add(), we don't
                 * bother with cleaning up the registered OPP (there's
                 * no function to do so), and simply cancel the
                 * registration of the cpufreq device.
                 */
                ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
                if (ret) {
                        clk_put(clk);
                        return ret;
                }

                ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
                if (ret) {
                        clk_put(clk);
                        return ret;
                }
        }

        platform_device_register_simple("cpufreq-generic", -1, NULL, 0);
        return 0;
}

device_initcall(armada_xp_pmsu_cpufreq_init);

@@ -16,10 +16,19 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>

#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff
#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F

#define PMU_DFS_RATIO_SHIFT 16
#define PMU_DFS_RATIO_MASK 0x3F

#define MAX_CPU 4
struct cpu_clk {

@@ -28,6 +37,7 @@ struct cpu_clk {
        const char *clk_name;
        const char *parent_name;
        void __iomem *reg_base;
        void __iomem *pmu_dfs;
};

static struct clk **clks;

@@ -62,8 +72,9 @@ static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
        return *parent_rate / div;
}

static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
                            unsigned long parent_rate)
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
                                unsigned long parent_rate)
{
        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
        u32 reg, div;

@@ -95,6 +106,58 @@ static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
        return 0;
}

static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
                               unsigned long parent_rate)
{
        u32 reg;
        unsigned long fabric_div, target_div, cur_rate;
        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

        /*
         * The PMU DFS registers are not mapped: the Device Tree does
         * not describe them. We cannot change the frequency dynamically.
         */
        if (!cpuclk->pmu_dfs)
                return -ENODEV;

        cur_rate = __clk_get_rate(hwclk->clk);

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
        fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
                     SYS_CTRL_CLK_DIVIDER_MASK;

        /* Frequency is going up */
        if (rate == 2 * cur_rate)
                target_div = fabric_div / 2;
        /* Frequency is going down */
        else
                target_div = fabric_div;

        if (target_div == 0)
                target_div = 1;

        reg = readl(cpuclk->pmu_dfs);
        reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
        reg |= (target_div << PMU_DFS_RATIO_SHIFT);
        writel(reg, cpuclk->pmu_dfs);

        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
        reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
                SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
        writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

        return mvebu_pmsu_dfs_request(cpuclk->cpu);
}

static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
                            unsigned long parent_rate)
{
        if (__clk_is_enabled(hwclk->clk))
                return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
        else
                return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
}

static const struct clk_ops cpu_ops = {
        .recalc_rate = clk_cpu_recalc_rate,
        .round_rate = clk_cpu_round_rate,

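To make the call chain concrete: the "cpufreq-generic" device registered in the PMSU code above drives these clock ops through the common clk API. The sketch below is a hedged illustration of what a frequency switch roughly amounts to; example_switch_cpu_freq is hypothetical and not part of this series.

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/err.h>

/* Illustrative only: roughly what the generic cpufreq driver ends up doing
 * when moving a CPU between the two OPPs registered in the PMSU code. */
static int example_switch_cpu_freq(int cpu, unsigned long target_hz)
{
        struct device *cpu_dev = get_cpu_device(cpu);
        struct clk *cpu_clk;
        long rounded;
        int ret;

        if (!cpu_dev)
                return -ENODEV;

        cpu_clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk))
                return PTR_ERR(cpu_clk);

        /* clk_cpu_round_rate() snaps the request to parent_rate / div */
        rounded = clk_round_rate(cpu_clk, target_hz);

        /* On a running CPU this lands in clk_cpu_on_set_rate(), which
         * programs the PMU DFS ratio and calls mvebu_pmsu_dfs_request(). */
        ret = clk_set_rate(cpu_clk, rounded);

        clk_put(cpu_clk);
        return ret;
}
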
@@ -105,6 +168,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
{
        struct cpu_clk *cpuclk;
        void __iomem *clock_complex_base = of_iomap(node, 0);
        void __iomem *pmu_dfs_base = of_iomap(node, 1);
        int ncpus = 0;
        struct device_node *dn;

@@ -114,6 +178,10 @@ static void __init of_cpu_clk_setup(struct device_node *node)
                return;
        }

        if (pmu_dfs_base == NULL)
                pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
                        __func__);

        for_each_node_by_type(dn, "cpu")
                ncpus++;

@@ -146,6 +214,8 @@ static void __init of_cpu_clk_setup(struct device_node *node)
                cpuclk[cpu].clk_name = clk_name;
                cpuclk[cpu].cpu = cpu;
                cpuclk[cpu].reg_base = clock_complex_base;
                if (pmu_dfs_base)
                        cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
                cpuclk[cpu].hw.init = &init;

                init.name = cpuclk[cpu].clk_name;

@@ -0,0 +1,20 @@
/*
 * Copyright (C) 2012 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#ifndef __MVEBU_PMSU_H__
#define __MVEBU_PMSU_H__

#ifdef CONFIG_MACH_MVEBU_V7
int mvebu_pmsu_dfs_request(int cpu);
#else
static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; }
#endif

#endif /* __MVEBU_PMSU_H__ */
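
Because of the #else stub above, callers can invoke mvebu_pmsu_dfs_request() unconditionally. A minimal, purely illustrative caller follows; example_request_dfs is hypothetical and not part of the patch.

#include <linux/kernel.h>
#include <linux/mvebu-pmsu.h>

/* Illustrative only: with CONFIG_MACH_MVEBU_V7 disabled, the inline stub
 * returns -ENODEV, so no #ifdef is needed at the call site. */
static int example_request_dfs(int cpu)
{
        int ret = mvebu_pmsu_dfs_request(cpu);

        if (ret)
                pr_warn("DFS transition for CPU%d failed: %d\n", cpu, ret);
        return ret;
}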