mirror of https://gitee.com/openkylin/linux.git
Merge branch 'pm-cpufreq'
* pm-cpufreq: (60 commits)
  cpufreq: pmac32-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: pmac64-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: maple-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: arm_big_little: remove device tree parsing for cpu nodes
  cpufreq: kirkwood-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: spear-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: highbank-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: cpufreq-cpu0: remove device tree parsing for cpu nodes
  cpufreq: imx6q-cpufreq: remove device tree parsing for cpu nodes
  drivers/bus: arm-cci: avoid parsing DT for cpu device nodes
  ARM: mvebu: remove device tree parsing for cpu nodes
  ARM: topology: remove hwid/MPIDR dependency from cpu_capacity
  of/device: add helper to get cpu device node from logical cpu index
  driver/core: cpu: initialize of_node in cpu's device struture
  ARM: DT/kernel: define ARM specific arch_match_cpu_phys_id
  of: move of_get_cpu_node implementation to DT core library
  powerpc: refactor of_get_cpu_node to support other architectures
  openrisc: remove undefined of_get_cpu_node declaration
  microblaze: remove undefined of_get_cpu_node declaration
  cpufreq: fix bad unlock balance on !CONFIG_SMP
  ...
commit 7a330a5416
@@ -50,8 +50,6 @@ What shall this struct cpufreq_driver contain?

cpufreq_driver.name - The name of this driver.

cpufreq_driver.owner - THIS_MODULE;

cpufreq_driver.init - A pointer to the per-CPU initialization function.
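As a point of reference for the driver diffs below, a minimal registration built from the fields the documentation hunk above describes could look like the sketch that follows. The foo_* names and the frequency values are hypothetical, not taken from this commit, and the surrounding diffs suggest this very series is what drops the explicit .owner assignment from the in-tree drivers.

#include <linux/cpufreq.h>
#include <linux/module.h>

/* Per-CPU initialization: fill in the limits and the current frequency. */
static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = 200000;	/* kHz, made-up values */
	policy->cpuinfo.max_freq = 800000;
	policy->cpuinfo.transition_latency = 100000;	/* ns */
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = policy->cpuinfo.max_freq;
	return 0;
}

static int foo_cpufreq_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static int foo_cpufreq_target(struct cpufreq_policy *policy,
			      unsigned int target_freq, unsigned int relation)
{
	/* program the hardware to the requested frequency here */
	return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name	= "foo-cpufreq",
	.owner	= THIS_MODULE,	/* the assignment this series appears to remove */
	.init	= foo_cpufreq_init,
	.verify	= foo_cpufreq_verify,
	.target	= foo_cpufreq_target,
};

static int __init foo_cpufreq_register(void)
{
	return cpufreq_register_driver(&foo_cpufreq_driver);
}
module_init(foo_cpufreq_register);
MODULE_LICENSE("GPL");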
@@ -169,6 +169,11 @@ void __init arm_dt_init_cpu_maps(void)
	}
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
}

/**
 * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
 * @dt_phys: physical address of dt blob
@@ -74,12 +74,8 @@ struct cpu_efficiency table_efficiency[] = {
{NULL, },
};
struct cpu_capacity {
unsigned long hwid;
unsigned long capacity;
};
struct cpu_capacity *cpu_capacity;
unsigned long *__cpu_capacity;
#define cpu_capacity(cpu) __cpu_capacity[cpu]
unsigned long middle_capacity = 1;
@@ -100,15 +96,19 @@ static void __init parse_dt_topology(void)
unsigned long capacity = 0;
int alloc_size, cpu = 0;
alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity);
__cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
while ((cn = of_find_node_by_type(cn, "cpu"))) {
const u32 *rate, *reg;
for_each_possible_cpu(cpu) {
const u32 *rate;
int len;
if (cpu >= num_possible_cpus())
break;
/* too early to use cpu->of_node */
cn = of_get_cpu_node(cpu, NULL);
if (!cn) {
pr_err("missing device node for CPU %d\n", cpu);
continue;
}
for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
if (of_device_is_compatible(cn, cpu_eff->compatible))
@@ -124,12 +124,6 @@ static void __init parse_dt_topology(void)
continue;
}
reg = of_get_property(cn, "reg", &len);
if (!reg || len != 4) {
pr_err("%s missing reg property\n", cn->full_name);
continue;
}
capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
/* Save min capacity of the system */
@@ -140,13 +134,9 @@ static void __init parse_dt_topology(void)
if (capacity > max_capacity)
max_capacity = capacity;
cpu_capacity[cpu].capacity = capacity;
cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
cpu_capacity(cpu) = capacity;
}
if (cpu < num_possible_cpus())
cpu_capacity[cpu].hwid = (unsigned long)(-1);
/* If min and max capacities are equals, we bypass the update of the
* cpu_scale because all CPUs have the same capacity. Otherwise, we
* compute a middle_capacity factor that will ensure that the capacity
@@ -154,9 +144,7 @@ static void __init parse_dt_topology(void)
* SCHED_POWER_SCALE, which is the default value, but with the
* constraint explained near table_efficiency[].
*/
if (min_capacity == max_capacity)
cpu_capacity[0].hwid = (unsigned long)(-1);
else if (4*max_capacity < (3*(max_capacity + min_capacity)))
if (4*max_capacity < (3*(max_capacity + min_capacity)))
middle_capacity = (min_capacity + max_capacity)
>> (SCHED_POWER_SHIFT+1);
else
@@ -170,23 +158,12 @@ static void __init parse_dt_topology(void)
* boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
* function returns directly for SMP system.
*/
void update_cpu_power(unsigned int cpu, unsigned long hwid)
void update_cpu_power(unsigned int cpu)
{
unsigned int idx = 0;
/* look for the cpu's hwid in the cpu capacity table */
for (idx = 0; idx < num_possible_cpus(); idx++) {
if (cpu_capacity[idx].hwid == hwid)
break;
if (cpu_capacity[idx].hwid == -1)
return;
}
if (idx == num_possible_cpus())
if (!cpu_capacity(cpu))
return;
set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
cpu, arch_scale_freq_power(NULL, cpu));
@@ -194,7 +171,7 @@ void update_cpu_power(unsigned int cpu, unsigned long hwid)
#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
static inline void update_cpu_power(unsigned int cpuid) {}
#endif
/*
@@ -281,7 +258,7 @@ void store_cpu_topology(unsigned int cpuid)
update_siblings_masks(cpuid);
update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
update_cpu_power(cpuid);
printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
@@ -254,13 +254,12 @@ static void __init imx6q_opp_init(struct device *cpu_dev)
{
struct device_node *np;
np = of_find_node_by_path("/cpus/cpu@0");
np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_warn("failed to find cpu0 node\n");
return;
}
cpu_dev->of_node = np;
if (of_init_opp_table(cpu_dev)) {
pr_warn("failed to init OPP table\n");
goto put_node;
@@ -29,45 +29,40 @@
#include "pmsu.h"
#include "coherency.h"
static struct clk *__init get_cpu_clk(int cpu)
{
struct clk *cpu_clk;
struct device_node *np = of_get_cpu_node(cpu, NULL);
if (WARN(!np, "missing cpu node\n"))
return NULL;
cpu_clk = of_clk_get(np, 0);
if (WARN_ON(IS_ERR(cpu_clk)))
return NULL;
return cpu_clk;
}
void __init set_secondary_cpus_clock(void)
{
int thiscpu;
int thiscpu, cpu;
unsigned long rate;
struct clk *cpu_clk = NULL;
struct device_node *np = NULL;
struct clk *cpu_clk;
thiscpu = smp_processor_id();
for_each_node_by_type(np, "cpu") {
int err;
int cpu;
err = of_property_read_u32(np, "reg", &cpu);
if (WARN_ON(err))
return;
if (cpu == thiscpu) {
cpu_clk = of_clk_get(np, 0);
break;
}
}
if (WARN_ON(IS_ERR(cpu_clk)))
cpu_clk = get_cpu_clk(thiscpu);
if (!cpu_clk)
return;
clk_prepare_enable(cpu_clk);
rate = clk_get_rate(cpu_clk);
/* set all the other CPU clk to the same rate than the boot CPU */
for_each_node_by_type(np, "cpu") {
int err;
int cpu;
err = of_property_read_u32(np, "reg", &cpu);
if (WARN_ON(err))
for_each_possible_cpu(cpu) {
if (cpu == thiscpu)
continue;
cpu_clk = get_cpu_clk(cpu);
if (!cpu_clk)
return;
if (cpu != thiscpu) {
cpu_clk = of_clk_get(np, 0);
clk_set_rate(cpu_clk, rate);
}
clk_set_rate(cpu_clk, rate);
}
}
@@ -50,9 +50,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
extern void kdump_move_device_tree(void);
/* CPU OF node matching */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
extern void kdump_move_device_tree(void);
/* CPU OF node matching */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
/* Get the MAC address */
extern const void *of_get_mac_address(struct device_node *np);
@@ -43,9 +43,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
extern void kdump_move_device_tree(void);
/* CPU OF node matching */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
/* cache lookup */
struct device_node *of_find_next_cache_node(struct device_node *np);
@@ -865,49 +865,10 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
/* Find the device node for a given logical cpu number, also returns the cpu
* local thread number (index in ibm,interrupt-server#s) if relevant and
* asked for (non NULL)
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
int hardid;
struct device_node *np;
hardid = get_hard_smp_processor_id(cpu);
for_each_node_by_type(np, "cpu") {
const u32 *intserv;
unsigned int plen, t;
/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
* fallback to "reg" property and assume no threads
*/
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
&plen);
if (intserv == NULL) {
const u32 *reg = of_get_property(np, "reg", NULL);
if (reg == NULL)
continue;
if (*reg == hardid) {
if (thread)
*thread = 0;
return np;
}
} else {
plen /= sizeof(u32);
for (t = 0; t < plen; t++) {
if (hardid == intserv[t]) {
if (thread)
*thread = t;
return np;
}
}
}
}
return NULL;
return (int)phys_id == get_hard_smp_processor_id(cpu);
}
EXPORT_SYMBOL(of_get_cpu_node);
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;
@ -942,35 +942,6 @@ extern int set_tsc_mode(unsigned int val);
|
|||
|
||||
extern u16 amd_get_nb_id(int cpu);
|
||||
|
||||
struct aperfmperf {
|
||||
u64 aperf, mperf;
|
||||
};
|
||||
|
||||
static inline void get_aperfmperf(struct aperfmperf *am)
|
||||
{
|
||||
WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
|
||||
|
||||
rdmsrl(MSR_IA32_APERF, am->aperf);
|
||||
rdmsrl(MSR_IA32_MPERF, am->mperf);
|
||||
}
|
||||
|
||||
#define APERFMPERF_SHIFT 10
|
||||
|
||||
static inline
|
||||
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
|
||||
struct aperfmperf *new)
|
||||
{
|
||||
u64 aperf = new->aperf - old->aperf;
|
||||
u64 mperf = new->mperf - old->mperf;
|
||||
unsigned long ratio = aperf;
|
||||
|
||||
mperf >>= APERFMPERF_SHIFT;
|
||||
if (mperf)
|
||||
ratio = div64_u64(aperf, mperf);
|
||||
|
||||
return ratio;
|
||||
}
|
||||
|
||||
extern unsigned long arch_align_stack(unsigned long sp);
|
||||
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include "base.h"
|
||||
|
||||
|
@ -289,6 +290,7 @@ int register_cpu(struct cpu *cpu, int num)
|
|||
cpu->dev.release = cpu_device_release;
|
||||
cpu->dev.offline_disabled = !cpu->hotpluggable;
|
||||
cpu->dev.offline = !cpu_online(num);
|
||||
cpu->dev.of_node = of_get_cpu_node(num, NULL);
|
||||
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
|
||||
cpu->dev.bus->uevent = arch_cpu_uevent;
|
||||
#endif
|
||||
|
|
|
@ -122,17 +122,8 @@ EXPORT_SYMBOL_GPL(cci_ace_get_port);
|
|||
|
||||
static void __init cci_ace_init_ports(void)
|
||||
{
|
||||
int port, ac, cpu;
|
||||
u64 hwid;
|
||||
const u32 *cell;
|
||||
struct device_node *cpun, *cpus;
|
||||
|
||||
cpus = of_find_node_by_path("/cpus");
|
||||
if (WARN(!cpus, "Missing cpus node, bailing out\n"))
|
||||
return;
|
||||
|
||||
if (WARN_ON(of_property_read_u32(cpus, "#address-cells", &ac)))
|
||||
ac = of_n_addr_cells(cpus);
|
||||
int port, cpu;
|
||||
struct device_node *cpun;
|
||||
|
||||
/*
|
||||
* Port index look-up speeds up the function disabling ports by CPU,
|
||||
|
@ -141,18 +132,13 @@ static void __init cci_ace_init_ports(void)
|
|||
* The stashed index array is initialized for all possible CPUs
|
||||
* at probe time.
|
||||
*/
|
||||
for_each_child_of_node(cpus, cpun) {
|
||||
if (of_node_cmp(cpun->type, "cpu"))
|
||||
continue;
|
||||
cell = of_get_property(cpun, "reg", NULL);
|
||||
if (WARN(!cell, "%s: missing reg property\n", cpun->full_name))
|
||||
for_each_possible_cpu(cpu) {
|
||||
/* too early to use cpu->of_node */
|
||||
cpun = of_get_cpu_node(cpu, NULL);
|
||||
|
||||
if (WARN(!cpun, "Missing cpu device node\n"))
|
||||
continue;
|
||||
|
||||
hwid = of_read_number(cell, ac);
|
||||
cpu = get_logical_index(hwid & MPIDR_HWID_BITMASK);
|
||||
|
||||
if (cpu < 0 || !cpu_possible(cpu))
|
||||
continue;
|
||||
port = __cci_ace_get_port(cpun, ACE_PORT);
|
||||
if (port < 0)
|
||||
continue;
|
||||
|
|
|
@ -17,37 +17,47 @@ config ARM_DT_BL_CPUFREQ
|
|||
big.LITTLE platform. This gets frequency tables from DT.
|
||||
|
||||
config ARM_EXYNOS_CPUFREQ
|
||||
bool "SAMSUNG EXYNOS SoCs"
|
||||
depends on ARCH_EXYNOS
|
||||
bool
|
||||
select CPU_FREQ_TABLE
|
||||
default y
|
||||
help
|
||||
This adds the CPUFreq driver common part for Samsung
|
||||
EXYNOS SoCs.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config ARM_EXYNOS4210_CPUFREQ
|
||||
def_bool CPU_EXYNOS4210
|
||||
bool "SAMSUNG EXYNOS4210"
|
||||
depends on CPU_EXYNOS4210
|
||||
default y
|
||||
select ARM_EXYNOS_CPUFREQ
|
||||
help
|
||||
This adds the CPUFreq driver for Samsung EXYNOS4210
|
||||
SoC (S5PV310 or S5PC210).
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config ARM_EXYNOS4X12_CPUFREQ
|
||||
def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
|
||||
bool "SAMSUNG EXYNOS4x12"
|
||||
depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
|
||||
default y
|
||||
select ARM_EXYNOS_CPUFREQ
|
||||
help
|
||||
This adds the CPUFreq driver for Samsung EXYNOS4X12
|
||||
SoC (EXYNOS4212 or EXYNOS4412).
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config ARM_EXYNOS5250_CPUFREQ
|
||||
def_bool SOC_EXYNOS5250
|
||||
bool "SAMSUNG EXYNOS5250"
|
||||
depends on SOC_EXYNOS5250
|
||||
default y
|
||||
select ARM_EXYNOS_CPUFREQ
|
||||
help
|
||||
This adds the CPUFreq driver for Samsung EXYNOS5250
|
||||
SoC.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config ARM_EXYNOS5440_CPUFREQ
|
||||
def_bool SOC_EXYNOS5440
|
||||
bool "SAMSUNG EXYNOS5440"
|
||||
depends on SOC_EXYNOS5440
|
||||
depends on HAVE_CLK && PM_OPP && OF
|
||||
default y
|
||||
select CPU_FREQ_TABLE
|
||||
help
|
||||
This adds the CPUFreq driver for Samsung EXYNOS5440
|
||||
|
@ -55,6 +65,8 @@ config ARM_EXYNOS5440_CPUFREQ
|
|||
different than previous exynos controllers so not using
|
||||
the common exynos framework.
|
||||
|
||||
If in doubt, say N.
|
||||
|
||||
config ARM_HIGHBANK_CPUFREQ
|
||||
tristate "Calxeda Highbank-based"
|
||||
depends on ARCH_HIGHBANK
|
||||
|
|
|
@ -23,7 +23,7 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
|
|||
# powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers.
|
||||
# speedstep-* is preferred over p4-clockmod.
|
||||
|
||||
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
|
||||
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
|
||||
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
|
||||
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
|
||||
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
|
||||
|
|
|
@ -45,7 +45,6 @@
|
|||
#include <asm/msr.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include "mperf.h"
|
||||
|
||||
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
|
||||
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
|
||||
|
@ -198,7 +197,7 @@ static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
|
|||
return sprintf(buf, "%u\n", boost_enabled);
|
||||
}
|
||||
|
||||
static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
|
||||
cpufreq_freq_attr_rw(cpb);
|
||||
#endif
|
||||
|
||||
static int check_est_cpu(unsigned int cpuid)
|
||||
|
@ -710,7 +709,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
return blacklisted;
|
||||
#endif
|
||||
|
||||
data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -800,7 +799,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
goto err_unreg;
|
||||
}
|
||||
|
||||
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
|
||||
data->freq_table = kmalloc(sizeof(*data->freq_table) *
|
||||
(perf->state_count+1), GFP_KERNEL);
|
||||
if (!data->freq_table) {
|
||||
result = -ENOMEM;
|
||||
|
@ -861,10 +860,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
/* notify BIOS that we exist */
|
||||
acpi_processor_notify_smm(THIS_MODULE);
|
||||
|
||||
/* Check for APERF/MPERF support in hardware */
|
||||
if (boot_cpu_has(X86_FEATURE_APERFMPERF))
|
||||
acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
|
||||
|
||||
pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
|
||||
for (i = 0; i < perf->state_count; i++)
|
||||
pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
|
||||
|
@ -941,7 +936,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
|
|||
.exit = acpi_cpufreq_cpu_exit,
|
||||
.resume = acpi_cpufreq_resume,
|
||||
.name = "acpi-cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = acpi_cpufreq_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -19,12 +19,11 @@
|
|||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/opp.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -34,27 +33,13 @@
|
|||
/* get cpu node with valid operating-points */
|
||||
static struct device_node *get_cpu_node_with_valid_op(int cpu)
|
||||
{
|
||||
struct device_node *np = NULL, *parent;
|
||||
int count = 0;
|
||||
struct device_node *np = of_cpu_device_node_get(cpu);
|
||||
|
||||
parent = of_find_node_by_path("/cpus");
|
||||
if (!parent) {
|
||||
pr_err("failed to find OF /cpus\n");
|
||||
return NULL;
|
||||
if (!of_get_property(np, "operating-points", NULL)) {
|
||||
of_node_put(np);
|
||||
np = NULL;
|
||||
}
|
||||
|
||||
for_each_child_of_node(parent, np) {
|
||||
if (count++ != cpu)
|
||||
continue;
|
||||
if (!of_get_property(np, "operating-points", NULL)) {
|
||||
of_node_put(np);
|
||||
np = NULL;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
of_node_put(parent);
|
||||
return np;
|
||||
}
|
||||
|
||||
|
@ -63,11 +48,12 @@ static int dt_init_opp_table(struct device *cpu_dev)
|
|||
struct device_node *np;
|
||||
int ret;
|
||||
|
||||
np = get_cpu_node_with_valid_op(cpu_dev->id);
|
||||
if (!np)
|
||||
return -ENODATA;
|
||||
np = of_node_get(cpu_dev->of_node);
|
||||
if (!np) {
|
||||
pr_err("failed to find cpu%d node\n", cpu_dev->id);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
cpu_dev->of_node = np;
|
||||
ret = of_init_opp_table(cpu_dev);
|
||||
of_node_put(np);
|
||||
|
||||
|
@ -79,9 +65,11 @@ static int dt_get_transition_latency(struct device *cpu_dev)
|
|||
struct device_node *np;
|
||||
u32 transition_latency = CPUFREQ_ETERNAL;
|
||||
|
||||
np = get_cpu_node_with_valid_op(cpu_dev->id);
|
||||
if (!np)
|
||||
np = of_node_get(cpu_dev->of_node);
|
||||
if (!np) {
|
||||
pr_info("Failed to find cpu node. Use CPUFREQ_ETERNAL transition latency\n");
|
||||
return CPUFREQ_ETERNAL;
|
||||
}
|
||||
|
||||
of_property_read_u32(np, "clock-latency", &transition_latency);
|
||||
of_node_put(np);
|
||||
|
|
|
@ -108,7 +108,6 @@ static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
|
|||
|
||||
static struct cpufreq_driver at32_driver = {
|
||||
.name = "at32ap",
|
||||
.owner = THIS_MODULE,
|
||||
.init = at32_cpufreq_driver_init,
|
||||
.verify = at32_verify_speed,
|
||||
.target = at32_set_target,
|
||||
|
|
|
@ -225,7 +225,6 @@ static struct cpufreq_driver bfin_driver = {
|
|||
.get = bfin_getfreq_khz,
|
||||
.init = __bfin_cpu_init,
|
||||
.name = "bfin cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = bfin_freq_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
|
|||
|
||||
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
|
||||
|
||||
if (cpu_reg) {
|
||||
if (!IS_ERR(cpu_reg)) {
|
||||
rcu_read_lock();
|
||||
opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
|
||||
if (IS_ERR(opp)) {
|
||||
|
@ -90,7 +90,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
|
|||
freqs.new / 1000, volt ? volt / 1000 : -1);
|
||||
|
||||
/* scaling up? scale voltage before frequency */
|
||||
if (cpu_reg && freqs.new > freqs.old) {
|
||||
if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
|
||||
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
|
||||
if (ret) {
|
||||
pr_err("failed to scale voltage up: %d\n", ret);
|
||||
|
@ -102,14 +102,14 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
|
|||
ret = clk_set_rate(cpu_clk, freq_exact);
|
||||
if (ret) {
|
||||
pr_err("failed to set clock rate: %d\n", ret);
|
||||
if (cpu_reg)
|
||||
if (!IS_ERR(cpu_reg))
|
||||
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
|
||||
freqs.new = freqs.old;
|
||||
goto post_notify;
|
||||
}
|
||||
|
||||
/* scaling down? scale voltage after frequency */
|
||||
if (cpu_reg && freqs.new < freqs.old) {
|
||||
if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
|
||||
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
|
||||
if (ret) {
|
||||
pr_err("failed to scale voltage down: %d\n", ret);
|
||||
|
@ -174,29 +174,17 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
|
|||
|
||||
static int cpu0_cpufreq_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *np, *parent;
|
||||
struct device_node *np;
|
||||
int ret;
|
||||
|
||||
parent = of_find_node_by_path("/cpus");
|
||||
if (!parent) {
|
||||
pr_err("failed to find OF /cpus\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
for_each_child_of_node(parent, np) {
|
||||
if (of_get_property(np, "operating-points", NULL))
|
||||
break;
|
||||
}
|
||||
cpu_dev = &pdev->dev;
|
||||
|
||||
np = of_node_get(cpu_dev->of_node);
|
||||
if (!np) {
|
||||
pr_err("failed to find cpu0 node\n");
|
||||
ret = -ENOENT;
|
||||
goto out_put_parent;
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
cpu_dev = &pdev->dev;
|
||||
cpu_dev->of_node = np;
|
||||
|
||||
cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
|
||||
if (IS_ERR(cpu_reg)) {
|
||||
/*
|
||||
|
@ -210,7 +198,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
|
|||
}
|
||||
pr_warn("failed to get cpu0 regulator: %ld\n",
|
||||
PTR_ERR(cpu_reg));
|
||||
cpu_reg = NULL;
|
||||
}
|
||||
|
||||
cpu_clk = devm_clk_get(cpu_dev, NULL);
|
||||
|
@ -269,15 +256,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
of_node_put(np);
|
||||
of_node_put(parent);
|
||||
return 0;
|
||||
|
||||
out_free_table:
|
||||
opp_free_cpufreq_table(cpu_dev, &freq_table);
|
||||
out_put_node:
|
||||
of_node_put(np);
|
||||
out_put_parent:
|
||||
of_node_put(parent);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -379,7 +379,6 @@ static struct cpufreq_driver nforce2_driver = {
|
|||
.get = nforce2_get,
|
||||
.init = nforce2_cpu_init,
|
||||
.exit = nforce2_cpu_exit,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
#ifdef MODULE
|
||||
|
|
(File diff suppressed because it is too large.)
@ -11,19 +11,7 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/percpu-defs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include "cpufreq_governor.h"
|
||||
|
||||
/* Conservative governor macros */
|
||||
|
@ -329,7 +317,7 @@ static int cs_init(struct dbs_data *dbs_data)
|
|||
{
|
||||
struct cs_dbs_tuners *tuners;
|
||||
|
||||
tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
|
||||
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
|
||||
if (!tuners) {
|
||||
pr_err("%s: kzalloc failed\n", __func__);
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -16,15 +16,9 @@
|
|||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <asm/cputime.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "cpufreq_governor.h"
|
||||
|
||||
|
@ -53,7 +47,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
|
|||
|
||||
policy = cdbs->cur_policy;
|
||||
|
||||
/* Get Absolute Load (in terms of freq for ondemand gov) */
|
||||
/* Get Absolute Load */
|
||||
for_each_cpu(j, policy->cpus) {
|
||||
struct cpu_dbs_common_info *j_cdbs;
|
||||
u64 cur_wall_time, cur_idle_time;
|
||||
|
@ -104,14 +98,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
|
|||
|
||||
load = 100 * (wall_time - idle_time) / wall_time;
|
||||
|
||||
if (dbs_data->cdata->governor == GOV_ONDEMAND) {
|
||||
int freq_avg = __cpufreq_driver_getavg(policy, j);
|
||||
if (freq_avg <= 0)
|
||||
freq_avg = policy->cur;
|
||||
|
||||
load *= freq_avg;
|
||||
}
|
||||
|
||||
if (load > max_load)
|
||||
max_load = load;
|
||||
}
|
||||
|
|
|
@ -18,10 +18,9 @@
|
|||
#define _CPUFREQ_GOVERNOR_H
|
||||
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/sysfs.h>
|
||||
|
||||
/*
|
||||
* The polling frequency depends on the capability of the processor. Default
|
||||
|
@ -169,7 +168,6 @@ struct od_dbs_tuners {
|
|||
unsigned int sampling_rate;
|
||||
unsigned int sampling_down_factor;
|
||||
unsigned int up_threshold;
|
||||
unsigned int adj_up_threshold;
|
||||
unsigned int powersave_bias;
|
||||
unsigned int io_is_busy;
|
||||
};
|
||||
|
@ -223,7 +221,7 @@ struct od_ops {
|
|||
void (*powersave_bias_init_cpu)(int cpu);
|
||||
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
|
||||
unsigned int freq_next, unsigned int relation);
|
||||
void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
|
||||
void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
|
||||
};
|
||||
|
||||
struct cs_ops {
|
||||
|
|
|
@ -12,28 +12,16 @@
|
|||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/percpu-defs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/cpu.h>
|
||||
|
||||
#include "cpufreq_governor.h"
|
||||
|
||||
/* On-demand governor macros */
|
||||
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
|
||||
#define DEF_FREQUENCY_UP_THRESHOLD (80)
|
||||
#define DEF_SAMPLING_DOWN_FACTOR (1)
|
||||
#define MAX_SAMPLING_DOWN_FACTOR (100000)
|
||||
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
|
||||
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
|
||||
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
|
||||
#define MIN_FREQUENCY_UP_THRESHOLD (11)
|
||||
|
@ -144,31 +132,27 @@ static void ondemand_powersave_bias_init(void)
|
|||
}
|
||||
}
|
||||
|
||||
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
|
||||
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
|
||||
{
|
||||
struct dbs_data *dbs_data = p->governor_data;
|
||||
struct dbs_data *dbs_data = policy->governor_data;
|
||||
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
|
||||
|
||||
if (od_tuners->powersave_bias)
|
||||
freq = od_ops.powersave_bias_target(p, freq,
|
||||
freq = od_ops.powersave_bias_target(policy, freq,
|
||||
CPUFREQ_RELATION_H);
|
||||
else if (p->cur == p->max)
|
||||
else if (policy->cur == policy->max)
|
||||
return;
|
||||
|
||||
__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
|
||||
__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
|
||||
CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
|
||||
}
|
||||
|
||||
/*
|
||||
* Every sampling_rate, we check, if current idle time is less than 20%
|
||||
* (default), then we try to increase frequency. Every sampling_rate, we look
|
||||
* for the lowest frequency which can sustain the load while keeping idle time
|
||||
* over 30%. If such a frequency exist, we try to decrease to this frequency.
|
||||
*
|
||||
* Any frequency increase takes it to the maximum frequency. Frequency reduction
|
||||
* happens at minimum steps of 5% (default) of current frequency
|
||||
* (default), then we try to increase frequency. Else, we adjust the frequency
|
||||
* proportional to load.
|
||||
*/
|
||||
static void od_check_cpu(int cpu, unsigned int load_freq)
|
||||
static void od_check_cpu(int cpu, unsigned int load)
|
||||
{
|
||||
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
|
||||
struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
|
||||
|
@ -178,29 +162,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
|
|||
dbs_info->freq_lo = 0;
|
||||
|
||||
/* Check for frequency increase */
|
||||
if (load_freq > od_tuners->up_threshold * policy->cur) {
|
||||
if (load > od_tuners->up_threshold) {
|
||||
/* If switching to max speed, apply sampling_down_factor */
|
||||
if (policy->cur < policy->max)
|
||||
dbs_info->rate_mult =
|
||||
od_tuners->sampling_down_factor;
|
||||
dbs_freq_increase(policy, policy->max);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check for frequency decrease */
|
||||
/* if we cannot reduce the frequency anymore, break out early */
|
||||
if (policy->cur == policy->min)
|
||||
return;
|
||||
|
||||
/*
|
||||
* The optimal frequency is the frequency that is the lowest that can
|
||||
* support the current CPU usage without triggering the up policy. To be
|
||||
* safe, we focus 10 points under the threshold.
|
||||
*/
|
||||
if (load_freq < od_tuners->adj_up_threshold
|
||||
* policy->cur) {
|
||||
} else {
|
||||
/* Calculate the next frequency proportional to load */
|
||||
unsigned int freq_next;
|
||||
freq_next = load_freq / od_tuners->adj_up_threshold;
|
||||
freq_next = load * policy->cpuinfo.max_freq / 100;
|
||||
|
||||
/* No longer fully busy, reset rate_mult */
|
||||
dbs_info->rate_mult = 1;
|
||||
|
@ -374,9 +346,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
|
|||
input < MIN_FREQUENCY_UP_THRESHOLD) {
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Calculate the new adj_up_threshold */
|
||||
od_tuners->adj_up_threshold += input;
|
||||
od_tuners->adj_up_threshold -= od_tuners->up_threshold;
|
||||
|
||||
od_tuners->up_threshold = input;
|
||||
return count;
|
||||
|
@ -513,7 +482,7 @@ static int od_init(struct dbs_data *dbs_data)
|
|||
u64 idle_time;
|
||||
int cpu;
|
||||
|
||||
tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
|
||||
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
|
||||
if (!tuners) {
|
||||
pr_err("%s: kzalloc failed\n", __func__);
|
||||
return -ENOMEM;
|
||||
|
@ -525,8 +494,6 @@ static int od_init(struct dbs_data *dbs_data)
|
|||
if (idle_time != -1ULL) {
|
||||
/* Idle micro accounting is supported. Use finer thresholds */
|
||||
tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
|
||||
tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
|
||||
MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
|
||||
/*
|
||||
* In nohz/micro accounting case we set the minimum frequency
|
||||
* not depending on HZ, but fixed (very low). The deferred
|
||||
|
@ -535,8 +502,6 @@ static int od_init(struct dbs_data *dbs_data)
|
|||
dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
|
||||
} else {
|
||||
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
|
||||
tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
|
||||
DEF_FREQUENCY_DOWN_DIFFERENTIAL;
|
||||
|
||||
/* For correct statistics, we need 10 ticks for each measure */
|
||||
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
|
||||
|
|
|
@ -12,10 +12,9 @@
|
|||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
|
||||
unsigned int event)
|
||||
|
|
|
@ -12,10 +12,9 @@
|
|||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
|
||||
unsigned int event)
|
||||
|
|
|
@ -9,17 +9,10 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/cputime.h>
|
||||
|
||||
static spinlock_t cpufreq_stats_lock;
|
||||
|
@ -200,22 +193,22 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
|
|||
{
|
||||
unsigned int i, j, count = 0, ret = 0;
|
||||
struct cpufreq_stats *stat;
|
||||
struct cpufreq_policy *data;
|
||||
struct cpufreq_policy *current_policy;
|
||||
unsigned int alloc_size;
|
||||
unsigned int cpu = policy->cpu;
|
||||
if (per_cpu(cpufreq_stats_table, cpu))
|
||||
return -EBUSY;
|
||||
stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
|
||||
stat = kzalloc(sizeof(*stat), GFP_KERNEL);
|
||||
if ((stat) == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
data = cpufreq_cpu_get(cpu);
|
||||
if (data == NULL) {
|
||||
current_policy = cpufreq_cpu_get(cpu);
|
||||
if (current_policy == NULL) {
|
||||
ret = -EINVAL;
|
||||
goto error_get_fail;
|
||||
}
|
||||
|
||||
ret = sysfs_create_group(&data->kobj, &stats_attr_group);
|
||||
ret = sysfs_create_group(¤t_policy->kobj, &stats_attr_group);
|
||||
if (ret)
|
||||
goto error_out;
|
||||
|
||||
|
@ -258,10 +251,10 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
|
|||
stat->last_time = get_jiffies_64();
|
||||
stat->last_index = freq_table_get_index(stat, policy->cur);
|
||||
spin_unlock(&cpufreq_stats_lock);
|
||||
cpufreq_cpu_put(data);
|
||||
cpufreq_cpu_put(current_policy);
|
||||
return 0;
|
||||
error_out:
|
||||
cpufreq_cpu_put(data);
|
||||
cpufreq_cpu_put(current_policy);
|
||||
error_get_fail:
|
||||
kfree(stat);
|
||||
per_cpu(cpufreq_stats_table, cpu) = NULL;
|
||||
|
@ -348,16 +341,10 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
|
|||
unsigned int cpu = (unsigned long)hcpu;
|
||||
|
||||
switch (action) {
|
||||
case CPU_ONLINE:
|
||||
case CPU_ONLINE_FROZEN:
|
||||
cpufreq_update_policy(cpu);
|
||||
break;
|
||||
case CPU_DOWN_PREPARE:
|
||||
case CPU_DOWN_PREPARE_FROZEN:
|
||||
cpufreq_stats_free_sysfs(cpu);
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
case CPU_DEAD_FROZEN:
|
||||
cpufreq_stats_free_table(cpu);
|
||||
break;
|
||||
}
|
||||
|
@ -390,8 +377,6 @@ static int __init cpufreq_stats_init(void)
|
|||
return ret;
|
||||
|
||||
register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
|
||||
for_each_online_cpu(cpu)
|
||||
cpufreq_update_policy(cpu);
|
||||
|
||||
ret = cpufreq_register_notifier(¬ifier_trans_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
|
|
|
@ -111,7 +111,6 @@ static struct cpufreq_driver cris_freq_driver = {
|
|||
.init = cris_freq_cpu_init,
|
||||
.exit = cris_freq_cpu_exit,
|
||||
.name = "cris_freq",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = cris_freq_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -108,7 +108,6 @@ static struct cpufreq_driver cris_freq_driver = {
|
|||
.init = cris_freq_cpu_init,
|
||||
.exit = cris_freq_cpu_exit,
|
||||
.name = "cris_freq",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = cris_freq_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ static struct acpi_processor_performance *eps_acpi_cpu_perf;
|
|||
/* Minimum necessary to get acpi_processor_get_bios_limit() working */
|
||||
static int eps_acpi_init(void)
|
||||
{
|
||||
eps_acpi_cpu_perf = kzalloc(sizeof(struct acpi_processor_performance),
|
||||
eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf),
|
||||
GFP_KERNEL);
|
||||
if (!eps_acpi_cpu_perf)
|
||||
return -ENOMEM;
|
||||
|
@ -366,7 +366,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
|
|||
states = 2;
|
||||
|
||||
/* Allocate private data and frequency table for current cpu */
|
||||
centaur = kzalloc(sizeof(struct eps_cpu_data)
|
||||
centaur = kzalloc(sizeof(*centaur)
|
||||
+ (states + 1) * sizeof(struct cpufreq_frequency_table),
|
||||
GFP_KERNEL);
|
||||
if (!centaur)
|
||||
|
@ -436,7 +436,6 @@ static struct cpufreq_driver eps_driver = {
|
|||
.exit = eps_cpu_exit,
|
||||
.get = eps_get,
|
||||
.name = "e_powersaver",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = eps_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -274,7 +274,6 @@ static struct cpufreq_driver elanfreq_driver = {
|
|||
.init = elanfreq_cpu_init,
|
||||
.exit = elanfreq_cpu_exit,
|
||||
.name = "elanfreq",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = elanfreq_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -289,7 +289,7 @@ static int __init exynos_cpufreq_init(void)
|
|||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
|
||||
exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
|
||||
if (!exynos_info)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -332,7 +332,6 @@ static int __init exynos_cpufreq_init(void)
|
|||
regulator_put(arm_regulator);
|
||||
err_vdd_arm:
|
||||
kfree(exynos_info);
|
||||
pr_debug("%s: failed initialization\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
late_initcall(exynos_cpufreq_init);
|
||||
|
|
|
@ -43,6 +43,27 @@ struct exynos_dvfs_info {
|
|||
bool (*need_apll_change)(unsigned int, unsigned int);
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
|
||||
extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
|
||||
#else
|
||||
static inline int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
|
||||
extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
|
||||
#else
|
||||
static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
|
||||
extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
|
||||
#else
|
||||
static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -238,6 +238,9 @@ static int exynos_target(struct cpufreq_policy *policy,
|
|||
freqs.old = dvfs_info->cur_frequency;
|
||||
freqs.new = freq_table[index].frequency;
|
||||
|
||||
if (freqs.old == freqs.new)
|
||||
goto out;
|
||||
|
||||
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
|
||||
|
||||
/* Set the target frequency in all C0_3_PSTATE register */
|
||||
|
|
|
@ -11,10 +11,8 @@
|
|||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
/*********************************************************************
|
||||
* FREQUENCY TABLE HELPERS *
|
||||
|
|
|
@ -183,7 +183,7 @@ static void gx_write_byte(int reg, int value)
|
|||
* gx_detect_chipset:
|
||||
*
|
||||
**/
|
||||
static __init struct pci_dev *gx_detect_chipset(void)
|
||||
static struct pci_dev * __init gx_detect_chipset(void)
|
||||
{
|
||||
struct pci_dev *gx_pci = NULL;
|
||||
|
||||
|
@ -446,7 +446,6 @@ static struct cpufreq_driver gx_suspmod_driver = {
|
|||
.target = cpufreq_gx_target,
|
||||
.init = cpufreq_gx_cpu_init,
|
||||
.name = "gx-suspmod",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init cpufreq_gx_init(void)
|
||||
|
@ -466,7 +465,7 @@ static int __init cpufreq_gx_init(void)
|
|||
|
||||
pr_debug("geode suspend modulation available.\n");
|
||||
|
||||
params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
|
||||
params = kzalloc(sizeof(*params), GFP_KERNEL);
|
||||
if (params == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -69,24 +69,18 @@ static int hb_cpufreq_driver_init(void)
|
|||
if (!of_machine_is_compatible("calxeda,highbank"))
|
||||
return -ENODEV;
|
||||
|
||||
for_each_child_of_node(of_find_node_by_path("/cpus"), np)
|
||||
if (of_get_property(np, "operating-points", NULL))
|
||||
break;
|
||||
cpu_dev = get_cpu_device(0);
|
||||
if (!cpu_dev) {
|
||||
pr_err("failed to get highbank cpufreq device\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
np = of_node_get(cpu_dev->of_node);
|
||||
if (!np) {
|
||||
pr_err("failed to find highbank cpufreq node\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
cpu_dev = get_cpu_device(0);
|
||||
if (!cpu_dev) {
|
||||
pr_err("failed to get highbank cpufreq device\n");
|
||||
ret = -ENODEV;
|
||||
goto out_put_node;
|
||||
}
|
||||
|
||||
cpu_dev->of_node = np;
|
||||
|
||||
cpu_clk = clk_get(cpu_dev, NULL);
|
||||
if (IS_ERR(cpu_clk)) {
|
||||
ret = PTR_ERR(cpu_clk);
|
||||
|
|
|
@ -274,7 +274,7 @@ acpi_cpufreq_cpu_init (
|
|||
|
||||
pr_debug("acpi_cpufreq_cpu_init\n");
|
||||
|
||||
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return (-ENOMEM);
|
||||
|
||||
|
@ -304,7 +304,7 @@ acpi_cpufreq_cpu_init (
|
|||
}
|
||||
|
||||
/* alloc freq_table */
|
||||
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
|
||||
data->freq_table = kmalloc(sizeof(*data->freq_table) *
|
||||
(data->acpi_data.state_count + 1),
|
||||
GFP_KERNEL);
|
||||
if (!data->freq_table) {
|
||||
|
@ -409,7 +409,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
|
|||
.init = acpi_cpufreq_cpu_init,
|
||||
.exit = acpi_cpufreq_cpu_exit,
|
||||
.name = "acpi-cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = acpi_cpufreq_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -221,14 +221,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
|
|||
|
||||
cpu_dev = &pdev->dev;
|
||||
|
||||
np = of_find_node_by_path("/cpus/cpu@0");
|
||||
np = of_node_get(cpu_dev->of_node);
|
||||
if (!np) {
|
||||
dev_err(cpu_dev, "failed to find cpu0 node\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
cpu_dev->of_node = np;
|
||||
|
||||
arm_clk = devm_clk_get(cpu_dev, "arm");
|
||||
pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
|
||||
pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
|
||||
|
|
|
@ -665,7 +665,6 @@ static struct cpufreq_driver intel_pstate_driver = {
|
|||
.init = intel_pstate_cpu_init,
|
||||
.exit = intel_pstate_cpu_exit,
|
||||
.name = "intel_pstate",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __initdata no_load;
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/proc-fns.h>
|
||||
|
@ -158,7 +158,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
|
|||
.init = kirkwood_cpufreq_cpu_init,
|
||||
.exit = kirkwood_cpufreq_cpu_exit,
|
||||
.name = "kirkwood-cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = kirkwood_cpufreq_attr,
|
||||
};
|
||||
|
||||
|
@ -175,9 +174,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
|
|||
if (IS_ERR(priv.base))
|
||||
return PTR_ERR(priv.base);
|
||||
|
||||
np = of_find_node_by_path("/cpus/cpu@0");
|
||||
if (!np)
|
||||
np = of_cpu_device_node_get(0);
|
||||
if (!np) {
|
||||
dev_err(&pdev->dev, "failed to get cpu device node\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
|
||||
if (IS_ERR(priv.cpu_clk)) {
|
||||
|
|
|
@ -948,7 +948,6 @@ static struct cpufreq_driver longhaul_driver = {
|
|||
.init = longhaul_cpu_init,
|
||||
.exit = longhaul_cpu_exit,
|
||||
.name = "longhaul",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = longhaul_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -286,7 +286,6 @@ static struct cpufreq_driver longrun_driver = {
|
|||
.get = longrun_get,
|
||||
.init = longrun_cpu_init,
|
||||
.name = "longrun",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static const struct x86_cpu_id longrun_ids[] = {
|
||||
|
|
|
@ -158,7 +158,6 @@ static struct freq_attr *loongson2_table_attr[] = {
|
|||
};
|
||||
|
||||
static struct cpufreq_driver loongson2_cpufreq_driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "loongson2",
|
||||
.init = loongson2_cpufreq_cpu_init,
|
||||
.verify = loongson2_cpufreq_verify,
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
#include <linux/completion.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
#define DBG(fmt...) pr_debug(fmt)
|
||||
|
||||
|
@ -190,7 +190,6 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
|
||||
static struct cpufreq_driver maple_cpufreq_driver = {
|
||||
.name = "maple",
|
||||
.owner = THIS_MODULE,
|
||||
.flags = CPUFREQ_CONST_LOOPS,
|
||||
.init = maple_cpufreq_cpu_init,
|
||||
.verify = maple_cpufreq_verify,
|
||||
|
@ -201,7 +200,6 @@ static struct cpufreq_driver maple_cpufreq_driver = {
|
|||
|
||||
static int __init maple_cpufreq_init(void)
|
||||
{
|
||||
struct device_node *cpus;
|
||||
struct device_node *cpunode;
|
||||
unsigned int psize;
|
||||
unsigned long max_freq;
|
||||
|
@ -217,24 +215,11 @@ static int __init maple_cpufreq_init(void)
|
|||
!of_machine_is_compatible("Momentum,Apache"))
|
||||
return 0;
|
||||
|
||||
cpus = of_find_node_by_path("/cpus");
|
||||
if (cpus == NULL) {
|
||||
DBG("No /cpus node !\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Get first CPU node */
|
||||
for (cpunode = NULL;
|
||||
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
|
||||
const u32 *reg = of_get_property(cpunode, "reg", NULL);
|
||||
if (reg == NULL || (*reg) != 0)
|
||||
continue;
|
||||
if (!strcmp(cpunode->type, "cpu"))
|
||||
break;
|
||||
}
|
||||
cpunode = of_cpu_device_node_get(0);
|
||||
if (cpunode == NULL) {
|
||||
printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
|
||||
goto bail_cpus;
|
||||
goto bail_noprops;
|
||||
}
|
||||
|
||||
/* Check 970FX for now */
|
||||
|
@ -290,14 +275,11 @@ static int __init maple_cpufreq_init(void)
|
|||
rc = cpufreq_register_driver(&maple_cpufreq_driver);
|
||||
|
||||
of_node_put(cpunode);
|
||||
of_node_put(cpus);
|
||||
|
||||
return rc;
|
||||
|
||||
bail_noprops:
|
||||
of_node_put(cpunode);
|
||||
bail_cpus:
|
||||
of_node_put(cpus);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
|
|
@ -1,51 +0,0 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "mperf.h"
|
||||
|
||||
static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
|
||||
|
||||
/* Called via smp_call_function_single(), on the target CPU */
|
||||
static void read_measured_perf_ctrs(void *_cur)
|
||||
{
|
||||
struct aperfmperf *am = _cur;
|
||||
|
||||
get_aperfmperf(am);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the measured active (C0) frequency on this CPU since last call
|
||||
* to this function.
|
||||
* Input: cpu number
|
||||
* Return: Average CPU frequency in terms of max frequency (zero on error)
|
||||
*
|
||||
* We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
|
||||
* over a period of time, while CPU is in C0 state.
|
||||
* IA32_MPERF counts at the rate of max advertised frequency
|
||||
* IA32_APERF counts at the rate of actual CPU frequency
|
||||
* Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
|
||||
* no meaning should be associated with absolute values of these MSRs.
|
||||
*/
|
||||
unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
|
||||
unsigned int cpu)
|
||||
{
|
||||
struct aperfmperf perf;
|
||||
unsigned long ratio;
|
||||
unsigned int retval;
|
||||
|
||||
if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
|
||||
return 0;
|
||||
|
||||
ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
|
||||
per_cpu(acfreq_old_perf, cpu) = perf;
|
||||
|
||||
retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
|
||||
|
||||
return retval;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
|
||||
MODULE_LICENSE("GPL");
|
|
@ -1,9 +0,0 @@
|
|||
/*
|
||||
* (c) 2010 Advanced Micro Devices, Inc.
|
||||
* Your use of this code is subject to the terms and conditions of the
|
||||
* GNU general public license version 2. See "COPYING" or
|
||||
* http://www.gnu.org/licenses/gpl.html
|
||||
*/
|
||||
|
||||
unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
|
||||
unsigned int cpu);
|
|
@ -279,7 +279,6 @@ static struct cpufreq_driver p4clockmod_driver = {
|
|||
.exit = cpufreq_p4_cpu_exit,
|
||||
.get = cpufreq_p4_get,
|
||||
.name = "p4-clockmod",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = p4clockmod_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -297,7 +297,6 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
|
|||
|
||||
static struct cpufreq_driver pas_cpufreq_driver = {
|
||||
.name = "pas-cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
.flags = CPUFREQ_CONST_LOOPS,
|
||||
.init = pas_cpufreq_cpu_init,
|
||||
.exit = pas_cpufreq_cpu_exit,
|
||||
|
|
|
@ -587,7 +587,6 @@ static struct cpufreq_driver pcc_cpufreq_driver = {
|
|||
.init = pcc_cpufreq_cpu_init,
|
||||
.exit = pcc_cpufreq_cpu_exit,
|
||||
.name = "pcc-cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init pcc_cpufreq_init(void)
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/irq.h>
|
||||
|
@ -477,7 +478,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
|
|||
.flags = CPUFREQ_PM_NO_WARN,
|
||||
.attr = pmac_cpu_freqs_attr,
|
||||
.name = "powermac",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
|
||||
|
@ -649,8 +649,8 @@ static int __init pmac_cpufreq_setup(void)
|
|||
if (strstr(cmd_line, "nocpufreq"))
|
||||
return 0;
|
||||
|
||||
/* Assume only one CPU */
|
||||
cpunode = of_find_node_by_type(NULL, "cpu");
|
||||
/* Get first CPU node */
|
||||
cpunode = of_cpu_device_node_get(0);
|
||||
if (!cpunode)
|
||||
goto out;
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/irq.h>
|
||||
|
@ -371,7 +372,6 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
|
||||
static struct cpufreq_driver g5_cpufreq_driver = {
|
||||
.name = "powermac",
|
||||
.owner = THIS_MODULE,
|
||||
.flags = CPUFREQ_CONST_LOOPS,
|
||||
.init = g5_cpufreq_cpu_init,
|
||||
.verify = g5_cpufreq_verify,
|
||||
|
@ -383,9 +383,8 @@ static struct cpufreq_driver g5_cpufreq_driver = {
|
|||
|
||||
#ifdef CONFIG_PMAC_SMU
|
||||
|
||||
static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
|
||||
static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
|
||||
{
|
||||
struct device_node *cpunode;
|
||||
unsigned int psize, ssize;
|
||||
unsigned long max_freq;
|
||||
char *freq_method, *volt_method;
|
||||
|
@ -405,20 +404,6 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
|
|||
else
|
||||
return -ENODEV;
|
||||
|
||||
/* Get first CPU node */
|
||||
for (cpunode = NULL;
|
||||
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
|
||||
const u32 *reg = of_get_property(cpunode, "reg", NULL);
|
||||
if (reg == NULL || (*reg) != 0)
|
||||
continue;
|
||||
if (!strcmp(cpunode->type, "cpu"))
|
||||
break;
|
||||
}
|
||||
if (cpunode == NULL) {
|
||||
printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Check 970FX for now */
|
||||
valp = of_get_property(cpunode, "cpu-version", NULL);
|
||||
if (!valp) {
|
||||
|
@ -447,9 +432,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
|
|||
if (!shdr)
|
||||
goto bail_noprops;
|
||||
g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
|
||||
ssize = (shdr->len * sizeof(u32)) -
|
||||
sizeof(struct smu_sdbp_header);
|
||||
g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
|
||||
ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr);
|
||||
g5_fvt_count = ssize / sizeof(*g5_fvt_table);
|
||||
g5_fvt_cur = 0;
|
||||
|
||||
/* Sanity checking */
|
||||
|
@ -537,9 +521,9 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
|
|||
#endif /* CONFIG_PMAC_SMU */
|
||||
|
||||
|
||||
static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
|
||||
static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
|
||||
{
|
||||
struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL;
|
||||
struct device_node *cpuid = NULL, *hwclock = NULL;
|
||||
const u8 *eeprom = NULL;
|
||||
const u32 *valp;
|
||||
u64 max_freq, min_freq, ih, il;
|
||||
|
@ -548,17 +532,6 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
|
|||
DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
|
||||
" RackMac3,1...\n");
|
||||
|
||||
/* Get first CPU node */
|
||||
for (cpunode = NULL;
|
||||
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
|
||||
if (!strcmp(cpunode->type, "cpu"))
|
||||
break;
|
||||
}
|
||||
if (cpunode == NULL) {
|
||||
printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Lookup the cpuid eeprom node */
|
||||
cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
|
||||
if (cpuid != NULL)
|
||||
|
@ -718,25 +691,25 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
|
|||
|
||||
static int __init g5_cpufreq_init(void)
|
||||
{
|
||||
struct device_node *cpus;
|
||||
struct device_node *cpunode;
|
||||
int rc = 0;
|
||||
|
||||
cpus = of_find_node_by_path("/cpus");
|
||||
if (cpus == NULL) {
|
||||
DBG("No /cpus node !\n");
|
||||
/* Get first CPU node */
|
||||
cpunode = of_cpu_device_node_get(0);
|
||||
if (cpunode == NULL) {
|
||||
pr_err("cpufreq: Can't find any CPU node\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (of_machine_is_compatible("PowerMac7,2") ||
|
||||
of_machine_is_compatible("PowerMac7,3") ||
|
||||
of_machine_is_compatible("RackMac3,1"))
|
||||
rc = g5_pm72_cpufreq_init(cpus);
|
||||
rc = g5_pm72_cpufreq_init(cpunode);
|
||||
#ifdef CONFIG_PMAC_SMU
|
||||
else
|
||||
rc = g5_neo2_cpufreq_init(cpus);
|
||||
rc = g5_neo2_cpufreq_init(cpunode);
|
||||
#endif /* CONFIG_PMAC_SMU */
|
||||
|
||||
of_node_put(cpus);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
|
|
@ -207,7 +207,6 @@ static struct cpufreq_driver powernow_k6_driver = {
|
|||
.exit = powernow_k6_cpu_exit,
|
||||
.get = powernow_k6_get,
|
||||
.name = "powernow-k6",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = powernow_k6_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -177,7 +177,7 @@ static int get_ranges(unsigned char *pst)
|
|||
unsigned int speed;
|
||||
u8 fid, vid;
|
||||
|
||||
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
|
||||
powernow_table = kzalloc((sizeof(*powernow_table) *
|
||||
(number_scales + 1)), GFP_KERNEL);
|
||||
if (!powernow_table)
|
||||
return -ENOMEM;
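
The kzalloc() change above is an instance of the sizeof(*ptr) idiom applied throughout this series: the element size is derived from the pointer being assigned, so the call stays correct even if the pointed-to type is later renamed. A generic sketch of the pattern (struct foo and n are placeholders, not from this series):

	struct foo *table;

	/* allocate n + 1 zeroed elements; the size follows the pointer's type */
	table = kzalloc(sizeof(*table) * (n + 1), GFP_KERNEL);
	if (!table)
		return -ENOMEM;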
|
||||
|
@ -309,8 +309,7 @@ static int powernow_acpi_init(void)
|
|||
goto err0;
|
||||
}
|
||||
|
||||
acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance),
|
||||
GFP_KERNEL);
|
||||
acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL);
|
||||
if (!acpi_processor_perf) {
|
||||
retval = -ENOMEM;
|
||||
goto err0;
|
||||
|
@ -346,7 +345,7 @@ static int powernow_acpi_init(void)
|
|||
goto err2;
|
||||
}
|
||||
|
||||
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
|
||||
powernow_table = kzalloc((sizeof(*powernow_table) *
|
||||
(number_scales + 1)), GFP_KERNEL);
|
||||
if (!powernow_table) {
|
||||
retval = -ENOMEM;
|
||||
|
@ -497,7 +496,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
|
|||
"relevant to this CPU).\n",
|
||||
psb->numpst);
|
||||
|
||||
p += sizeof(struct psb_s);
|
||||
p += sizeof(*psb);
|
||||
|
||||
pst = (struct pst_s *) p;
|
||||
|
||||
|
@ -510,12 +509,12 @@ static int powernow_decode_bios(int maxfid, int startvid)
|
|||
(maxfid == pst->maxfid) &&
|
||||
(startvid == pst->startvid)) {
|
||||
print_pst_entry(pst, j);
|
||||
p = (char *)pst + sizeof(struct pst_s);
|
||||
p = (char *)pst + sizeof(*pst);
|
||||
ret = get_ranges(p);
|
||||
return ret;
|
||||
} else {
|
||||
unsigned int k;
|
||||
p = (char *)pst + sizeof(struct pst_s);
|
||||
p = (char *)pst + sizeof(*pst);
|
||||
for (k = 0; k < number_scales; k++)
|
||||
p += 2;
|
||||
}
|
||||
|
@ -717,7 +716,6 @@ static struct cpufreq_driver powernow_driver = {
|
|||
.init = powernow_cpu_init,
|
||||
.exit = powernow_cpu_exit,
|
||||
.name = "powernow-k7",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = powernow_table_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -623,7 +623,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
|
|||
if (check_pst_table(data, pst, maxvid))
|
||||
return -EINVAL;
|
||||
|
||||
powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
|
||||
powernow_table = kmalloc((sizeof(*powernow_table)
|
||||
* (data->numps + 1)), GFP_KERNEL);
|
||||
if (!powernow_table) {
|
||||
printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
|
||||
|
@ -793,7 +793,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
|
|||
}
|
||||
|
||||
/* fill in data->powernow_table */
|
||||
powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
|
||||
powernow_table = kmalloc((sizeof(*powernow_table)
|
||||
* (data->acpi_data.state_count + 1)), GFP_KERNEL);
|
||||
if (!powernow_table) {
|
||||
pr_debug("powernow_table memory alloc failure\n");
|
||||
|
@ -1106,7 +1106,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
|
|||
if (rc)
|
||||
return -ENODEV;
|
||||
|
||||
data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data) {
|
||||
printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
|
||||
return -ENOMEM;
|
||||
|
@ -1240,7 +1240,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
|
|||
.exit = powernowk8_cpu_exit,
|
||||
.get = powernowk8_get,
|
||||
.name = "powernow-k8",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = powernow_k8_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -300,7 +300,6 @@ static struct freq_attr *corenet_cpufreq_attr[] = {
|
|||
|
||||
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
|
||||
.name = "ppc_cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
.flags = CPUFREQ_CONST_LOOPS,
|
||||
.init = corenet_cpufreq_cpu_init,
|
||||
.exit = __exit_p(corenet_cpufreq_cpu_exit),
|
||||
|
|
|
@ -181,7 +181,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
|
|||
.init = cbe_cpufreq_cpu_init,
|
||||
.exit = cbe_cpufreq_cpu_exit,
|
||||
.name = "cbe-cpufreq",
|
||||
.owner = THIS_MODULE,
|
||||
.flags = CPUFREQ_CONST_LOOPS,
|
||||
};
|
||||
|
||||
|
|
|
@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static __init void pxa_cpufreq_init_voltages(void)
|
||||
static void __init pxa_cpufreq_init_voltages(void)
|
||||
{
|
||||
vcc_core = regulator_get(NULL, "vcc_core");
|
||||
if (IS_ERR(vcc_core)) {
|
||||
|
@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __init void pxa_cpufreq_init_voltages(void) { }
|
||||
static void __init pxa_cpufreq_init_voltages(void) { }
|
||||
#endif
|
||||
|
||||
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
|
||||
|
|
|
@ -213,10 +213,12 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
|
|||
policy->cur = policy->min = policy->max;
|
||||
|
||||
if (cpu_is_pxa300() || cpu_is_pxa310())
|
||||
ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs));
|
||||
ret = setup_freqs_table(policy, pxa300_freqs,
|
||||
ARRAY_SIZE(pxa300_freqs));
|
||||
|
||||
if (cpu_is_pxa320())
|
||||
ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs));
|
||||
ret = setup_freqs_table(policy, pxa320_freqs,
|
||||
ARRAY_SIZE(pxa320_freqs));
|
||||
|
||||
if (ret) {
|
||||
pr_err("failed to setup frequency table\n");
|
||||
|
|
|
@ -524,7 +524,6 @@ static struct freq_attr *s3c2416_cpufreq_attr[] = {
|
|||
};
|
||||
|
||||
static struct cpufreq_driver s3c2416_cpufreq_driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.flags = 0,
|
||||
.verify = s3c2416_cpufreq_verify_speed,
|
||||
.target = s3c2416_cpufreq_set_target,
|
||||
|
|
|
@ -392,7 +392,7 @@ static int s3c_cpufreq_init(struct cpufreq_policy *policy)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __init int s3c_cpufreq_initclks(void)
|
||||
static int __init s3c_cpufreq_initclks(void)
|
||||
{
|
||||
_clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
|
||||
_clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
|
||||
|
@ -522,7 +522,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
|
|||
/* Copy the board information so that each board can make this
|
||||
* initdata. */
|
||||
|
||||
ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL);
|
||||
ours = kzalloc(sizeof(*ours), GFP_KERNEL);
|
||||
if (ours == NULL) {
|
||||
printk(KERN_ERR "%s: no memory\n", __func__);
|
||||
return -ENOMEM;
|
||||
|
@ -615,7 +615,7 @@ static int s3c_cpufreq_build_freq(void)
|
|||
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
|
||||
size++;
|
||||
|
||||
ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL);
|
||||
ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL);
|
||||
if (!ftab) {
|
||||
printk(KERN_ERR "%s: no memory for tables\n", __func__);
|
||||
return -ENOMEM;
|
||||
|
@ -691,7 +691,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
|
|||
struct cpufreq_frequency_table *vals;
|
||||
unsigned int size;
|
||||
|
||||
size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1);
|
||||
size = sizeof(*vals) * (plls_no + 1);
|
||||
|
||||
vals = kmalloc(size, GFP_KERNEL);
|
||||
if (vals) {
|
||||
|
|
|
@ -263,7 +263,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
|
|||
}
|
||||
|
||||
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.flags = 0,
|
||||
.verify = s3c64xx_cpufreq_verify_speed,
|
||||
.target = s3c64xx_cpufreq_set_target,
|
||||
|
|
|
@ -147,7 +147,6 @@ static struct cpufreq_driver sc520_freq_driver = {
|
|||
.init = sc520_freq_cpu_init,
|
||||
.exit = sc520_freq_cpu_exit,
|
||||
.name = "sc520_freq",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = sc520_freq_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -160,7 +160,6 @@ static struct freq_attr *sh_freq_attr[] = {
|
|||
};
|
||||
|
||||
static struct cpufreq_driver sh_cpufreq_driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "sh",
|
||||
.get = sh_cpufreq_get,
|
||||
.target = sh_cpufreq_target,
|
||||
|
|
|
@ -351,12 +351,11 @@ static int __init us2e_freq_init(void)
|
|||
struct cpufreq_driver *driver;
|
||||
|
||||
ret = -ENOMEM;
|
||||
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
|
||||
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
|
||||
if (!driver)
|
||||
goto err_out;
|
||||
|
||||
us2e_freq_table = kzalloc(
|
||||
(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
|
||||
us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
|
||||
GFP_KERNEL);
|
||||
if (!us2e_freq_table)
|
||||
goto err_out;
|
||||
|
@ -366,7 +365,6 @@ static int __init us2e_freq_init(void)
|
|||
driver->target = us2e_freq_target;
|
||||
driver->get = us2e_freq_get;
|
||||
driver->exit = us2e_freq_cpu_exit;
|
||||
driver->owner = THIS_MODULE,
|
||||
strcpy(driver->name, "UltraSPARC-IIe");
|
||||
|
||||
cpufreq_us2e_driver = driver;
|
||||
|
|
|
@ -212,12 +212,11 @@ static int __init us3_freq_init(void)
|
|||
struct cpufreq_driver *driver;
|
||||
|
||||
ret = -ENOMEM;
|
||||
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
|
||||
driver = kzalloc(sizeof(*driver), GFP_KERNEL);
|
||||
if (!driver)
|
||||
goto err_out;
|
||||
|
||||
us3_freq_table = kzalloc(
|
||||
(NR_CPUS * sizeof(struct us3_freq_percpu_info)),
|
||||
us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
|
||||
GFP_KERNEL);
|
||||
if (!us3_freq_table)
|
||||
goto err_out;
|
||||
|
@ -227,7 +226,6 @@ static int __init us3_freq_init(void)
|
|||
driver->target = us3_freq_target;
|
||||
driver->get = us3_freq_get;
|
||||
driver->exit = us3_freq_cpu_exit;
|
||||
driver->owner = THIS_MODULE,
|
||||
strcpy(driver->name, "UltraSPARC-III");
|
||||
|
||||
cpufreq_us3_driver = driver;
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
|
@ -223,7 +223,7 @@ static int spear_cpufreq_driver_init(void)
|
|||
const __be32 *val;
|
||||
int cnt, i, ret;
|
||||
|
||||
np = of_find_node_by_path("/cpus/cpu@0");
|
||||
np = of_cpu_device_node_get(0);
|
||||
if (!np) {
|
||||
pr_err("No cpu node found");
|
||||
return -ENODEV;
|
||||
|
|
|
@ -575,7 +575,6 @@ static struct cpufreq_driver centrino_driver = {
|
|||
.target = centrino_target,
|
||||
.get = get_cur_freq,
|
||||
.attr = centrino_attr,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -378,7 +378,6 @@ static struct cpufreq_driver speedstep_driver = {
|
|||
.init = speedstep_cpu_init,
|
||||
.exit = speedstep_cpu_exit,
|
||||
.get = speedstep_get,
|
||||
.owner = THIS_MODULE,
|
||||
.attr = speedstep_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -375,7 +375,6 @@ static struct cpufreq_driver speedstep_driver = {
|
|||
.exit = speedstep_cpu_exit,
|
||||
.get = speedstep_get,
|
||||
.resume = speedstep_resume,
|
||||
.owner = THIS_MODULE,
|
||||
.attr = speedstep_attr,
|
||||
};
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ static struct cpufreq_driver ucv2_driver;
|
|||
/* make sure that only the "userspace" governor is run
|
||||
* -- anything else wouldn't make sense on this platform, anyway.
|
||||
*/
|
||||
int ucv2_verify_speed(struct cpufreq_policy *policy)
|
||||
static int ucv2_verify_speed(struct cpufreq_policy *policy)
|
||||
{
|
||||
if (policy->cpu)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
@ -230,6 +231,100 @@ const void *of_get_property(const struct device_node *np, const char *name,
|
|||
}
|
||||
EXPORT_SYMBOL(of_get_property);
|
||||
|
||||
/*
 * arch_match_cpu_phys_id - Match the given logical CPU and physical id
 *
 * @cpu: logical cpu index of a core/thread
 * @phys_id: physical identifier of a core/thread
 *
 * CPU logical to physical index mapping is architecture specific.
 * However, this __weak function provides a default match of physical
 * id to logical cpu index. The phys_id supplied here is usually a value
 * read from the device tree, which must match the hardware's internal
 * registers.
 *
 * Returns true if the physical identifier and the logical cpu index
 * correspond to the same core/thread, false otherwise.
 */
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (u32)phys_id == cpu;
}
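
Architectures whose physical CPU identifiers are not a plain 0..N index are expected to override this __weak default. A minimal sketch of such an override, with a made-up mask and lookup helper standing in for the architecture-specific pieces:

	bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
	{
		/* MY_HWID_MASK and my_logical_map() are hypothetical */
		return (phys_id & MY_HWID_MASK) == my_logical_map(cpu);
	}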
|
||||
|
||||
/**
 * Checks if the given "prop_name" property holds the physical id of the
 * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
 * NULL, local thread number within the core is returned in it.
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	if (!cell)
		return false;
	prop_len /= sizeof(*cell);
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}

/**
 * of_get_cpu_node - Get device node associated with the given logical CPU
 *
 * @cpu: CPU number (logical index) for which device node is required
 * @thread: if not NULL, local thread number within the physical core is
 *          returned
 *
 * The main purpose of this function is to retrieve the device node for the
 * given logical CPU index. It should be used to initialize the of_node in
 * the cpu device. Once of_node in the cpu device is populated, all further
 * references can use that instead.
 *
 * CPU logical to physical index mapping is architecture specific and is built
 * before booting secondary cores. This function uses arch_match_cpu_phys_id,
 * which can be overridden by an architecture-specific implementation.
 *
 * Returns a node pointer for the logical cpu if found, else NULL.
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	struct device_node *cpun, *cpus;

	cpus = of_find_node_by_path("/cpus");
	if (!cpus) {
		pr_warn("Missing cpus node, bailing out\n");
		return NULL;
	}

	for_each_child_of_node(cpus, cpun) {
		if (of_node_cmp(cpun->type, "cpu"))
			continue;
		/* Check for non-standard "ibm,ppc-interrupt-server#s" property
		 * for thread ids on PowerPC. If it doesn't exist fallback to
		 * standard "reg" property.
		 */
		if (IS_ENABLED(CONFIG_PPC) &&
		    __of_find_n_match_cpu_property(cpun,
				"ibm,ppc-interrupt-server#s", cpu, thread))
			return cpun;
		if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
			return cpun;
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
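
A usage sketch (not taken from this series): a caller that wants the device node and thread index for logical CPU 0 would do something like the following, dropping the node reference when done.

	unsigned int thread;
	struct device_node *cn;

	cn = of_get_cpu_node(0, &thread);
	if (cn) {
		pr_info("cpu0: node %s, thread %u\n", cn->full_name, thread);
		of_node_put(cn);
	}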
|
||||
|
||||
/** Checks if the given "compat" string matches one of the strings in
|
||||
* the device's "compatible" property
|
||||
*/
|
||||
|
|
|
@ -28,6 +28,7 @@ struct cpu {
|
|||
extern int register_cpu(struct cpu *cpu, int num);
|
||||
extern struct device *get_cpu_device(unsigned cpu);
|
||||
extern bool cpu_is_hotpluggable(unsigned cpu);
|
||||
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
|
||||
|
||||
extern int cpu_add_dev_attr(struct device_attribute *attr);
|
||||
extern void cpu_remove_dev_attr(struct device_attribute *attr);
|
||||
|
|
|
@ -11,71 +11,36 @@
|
|||
#ifndef _LINUX_CPUFREQ_H
|
||||
#define _LINUX_CPUFREQ_H
|
||||
|
||||
#include <asm/cputime.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
#define CPUFREQ_NAME_LEN 16
|
||||
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
|
||||
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
|
||||
#include <linux/completion.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/sysfs.h>
|
||||
|
||||
/*********************************************************************
|
||||
* CPUFREQ NOTIFIER INTERFACE *
|
||||
* CPUFREQ INTERFACE *
|
||||
*********************************************************************/
|
||||
|
||||
#define CPUFREQ_TRANSITION_NOTIFIER (0)
|
||||
#define CPUFREQ_POLICY_NOTIFIER (1)
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
|
||||
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
|
||||
extern void disable_cpufreq(void);
|
||||
#else /* CONFIG_CPU_FREQ */
|
||||
static inline int cpufreq_register_notifier(struct notifier_block *nb,
|
||||
unsigned int list)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
|
||||
unsigned int list)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void disable_cpufreq(void) { }
|
||||
#endif /* CONFIG_CPU_FREQ */
|
||||
|
||||
/* if (cpufreq_driver->target) exists, the ->governor decides what frequency
|
||||
* within the limits is used. If (cpufreq_driver->setpolicy) exists, these
|
||||
* two generic policies are available:
|
||||
*/
|
||||
|
||||
#define CPUFREQ_POLICY_POWERSAVE (1)
|
||||
#define CPUFREQ_POLICY_PERFORMANCE (2)
|
||||
|
||||
/* Frequency values here are CPU kHz so that hardware which doesn't run
|
||||
* with some frequencies can complain without having to guess what per
|
||||
* cent / per mille means.
|
||||
/*
|
||||
* Frequency values here are CPU kHz
|
||||
*
|
||||
* Maximum transition latency is in nanoseconds - if it's unknown,
|
||||
* CPUFREQ_ETERNAL shall be used.
|
||||
*/
|
||||
|
||||
#define CPUFREQ_ETERNAL (-1)
|
||||
#define CPUFREQ_NAME_LEN 16
|
||||
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
|
||||
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
|
||||
|
||||
struct cpufreq_governor;
|
||||
|
||||
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
|
||||
extern struct kobject *cpufreq_global_kobject;
|
||||
int cpufreq_get_global_kobject(void);
|
||||
void cpufreq_put_global_kobject(void);
|
||||
int cpufreq_sysfs_create_file(const struct attribute *attr);
|
||||
void cpufreq_sysfs_remove_file(const struct attribute *attr);
|
||||
struct cpufreq_freqs {
|
||||
unsigned int cpu; /* cpu nr */
|
||||
unsigned int old;
|
||||
unsigned int new;
|
||||
u8 flags; /* flags of cpufreq_driver, see below. */
|
||||
};
|
||||
|
||||
#define CPUFREQ_ETERNAL (-1)
|
||||
struct cpufreq_cpuinfo {
|
||||
unsigned int max_freq;
|
||||
unsigned int min_freq;
|
||||
|
@ -117,111 +82,59 @@ struct cpufreq_policy {
|
|||
|
||||
struct cpufreq_real_policy user_policy;
|
||||
|
||||
struct list_head policy_list;
|
||||
struct kobject kobj;
|
||||
struct completion kobj_unregister;
|
||||
int transition_ongoing; /* Tracks transition status */
|
||||
};
|
||||
|
||||
#define CPUFREQ_ADJUST (0)
|
||||
#define CPUFREQ_INCOMPATIBLE (1)
|
||||
#define CPUFREQ_NOTIFY (2)
|
||||
#define CPUFREQ_START (3)
|
||||
#define CPUFREQ_UPDATE_POLICY_CPU (4)
|
||||
|
||||
/* Only for ACPI */
|
||||
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
|
||||
#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
|
||||
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
|
||||
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
|
||||
|
||||
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
|
||||
void cpufreq_cpu_put(struct cpufreq_policy *policy);
|
||||
|
||||
static inline bool policy_is_shared(struct cpufreq_policy *policy)
|
||||
{
|
||||
return cpumask_weight(policy->cpus) > 1;
|
||||
}
|
||||
|
||||
/******************** cpufreq transition notifiers *******************/
|
||||
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
|
||||
extern struct kobject *cpufreq_global_kobject;
|
||||
int cpufreq_get_global_kobject(void);
|
||||
void cpufreq_put_global_kobject(void);
|
||||
int cpufreq_sysfs_create_file(const struct attribute *attr);
|
||||
void cpufreq_sysfs_remove_file(const struct attribute *attr);
|
||||
|
||||
#define CPUFREQ_PRECHANGE (0)
|
||||
#define CPUFREQ_POSTCHANGE (1)
|
||||
#define CPUFREQ_RESUMECHANGE (8)
|
||||
#define CPUFREQ_SUSPENDCHANGE (9)
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
unsigned int cpufreq_get(unsigned int cpu);
|
||||
unsigned int cpufreq_quick_get(unsigned int cpu);
|
||||
unsigned int cpufreq_quick_get_max(unsigned int cpu);
|
||||
void disable_cpufreq(void);
|
||||
|
||||
struct cpufreq_freqs {
|
||||
unsigned int cpu; /* cpu nr */
|
||||
unsigned int old;
|
||||
unsigned int new;
|
||||
u8 flags; /* flags of cpufreq_driver, see below. */
|
||||
};
|
||||
|
||||
/**
|
||||
* cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
|
||||
* safe)
|
||||
* @old: old value
|
||||
* @div: divisor
|
||||
* @mult: multiplier
|
||||
*
|
||||
*
|
||||
* new = old * mult / div
|
||||
*/
|
||||
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
|
||||
u_int mult)
|
||||
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
|
||||
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
|
||||
int cpufreq_update_policy(unsigned int cpu);
|
||||
bool have_governor_per_policy(void);
|
||||
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
|
||||
#else
|
||||
static inline unsigned int cpufreq_get(unsigned int cpu)
|
||||
{
|
||||
#if BITS_PER_LONG == 32
|
||||
|
||||
u64 result = ((u64) old) * ((u64) mult);
|
||||
do_div(result, div);
|
||||
return (unsigned long) result;
|
||||
|
||||
#elif BITS_PER_LONG == 64
|
||||
|
||||
unsigned long result = old * ((u64) mult);
|
||||
result /= div;
|
||||
return result;
|
||||
|
||||
return 0;
|
||||
}
|
||||
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline void disable_cpufreq(void) { }
|
||||
#endif
|
||||
};
|
||||
|
||||
/*********************************************************************
|
||||
* CPUFREQ GOVERNORS *
|
||||
*********************************************************************/
|
||||
|
||||
#define CPUFREQ_GOV_START 1
|
||||
#define CPUFREQ_GOV_STOP 2
|
||||
#define CPUFREQ_GOV_LIMITS 3
|
||||
#define CPUFREQ_GOV_POLICY_INIT 4
|
||||
#define CPUFREQ_GOV_POLICY_EXIT 5
|
||||
|
||||
struct cpufreq_governor {
|
||||
char name[CPUFREQ_NAME_LEN];
|
||||
int initialized;
|
||||
int (*governor) (struct cpufreq_policy *policy,
|
||||
unsigned int event);
|
||||
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
|
||||
char *buf);
|
||||
int (*store_setspeed) (struct cpufreq_policy *policy,
|
||||
unsigned int freq);
|
||||
unsigned int max_transition_latency; /* HW must be able to switch to
|
||||
next freq faster than this value in nano secs or we
|
||||
will fallback to performance governor */
|
||||
struct list_head governor_list;
|
||||
struct module *owner;
|
||||
};
|
||||
|
||||
/*
|
||||
* Pass a target to the cpufreq driver.
|
||||
*/
|
||||
extern int cpufreq_driver_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation);
|
||||
extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation);
|
||||
|
||||
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
|
||||
unsigned int cpu);
|
||||
|
||||
int cpufreq_register_governor(struct cpufreq_governor *governor);
|
||||
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
|
||||
|
||||
/*********************************************************************
|
||||
* CPUFREQ DRIVER INTERFACE *
|
||||
|
@ -230,76 +143,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
|
|||
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
|
||||
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
|
||||
|
||||
struct freq_attr;
|
||||
|
||||
struct cpufreq_driver {
|
||||
struct module *owner;
|
||||
char name[CPUFREQ_NAME_LEN];
|
||||
u8 flags;
|
||||
/*
|
||||
* This should be set by platforms having multiple clock-domains, i.e.
|
||||
* supporting multiple policies. With this sysfs directories of governor
|
||||
* would be created in cpu/cpu<num>/cpufreq/ directory and so they can
|
||||
* use the same governor with different tunables for different clusters.
|
||||
*/
|
||||
bool have_governor_per_policy;
|
||||
|
||||
/* needed by all drivers */
|
||||
int (*init) (struct cpufreq_policy *policy);
|
||||
int (*verify) (struct cpufreq_policy *policy);
|
||||
|
||||
/* define one out of two */
|
||||
int (*setpolicy) (struct cpufreq_policy *policy);
|
||||
int (*target) (struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation);
|
||||
|
||||
/* should be defined, if possible */
|
||||
unsigned int (*get) (unsigned int cpu);
|
||||
|
||||
/* optional */
|
||||
unsigned int (*getavg) (struct cpufreq_policy *policy,
|
||||
unsigned int cpu);
|
||||
int (*bios_limit) (int cpu, unsigned int *limit);
|
||||
|
||||
int (*exit) (struct cpufreq_policy *policy);
|
||||
int (*suspend) (struct cpufreq_policy *policy);
|
||||
int (*resume) (struct cpufreq_policy *policy);
|
||||
struct freq_attr **attr;
|
||||
};
|
||||
|
||||
/* flags */
|
||||
|
||||
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
|
||||
* all ->init() calls failed */
|
||||
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
|
||||
* "constants" aren't affected by
|
||||
* frequency transitions */
|
||||
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
|
||||
* mismatches */
|
||||
|
||||
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
|
||||
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
|
||||
|
||||
void cpufreq_notify_transition(struct cpufreq_policy *policy,
|
||||
struct cpufreq_freqs *freqs, unsigned int state);
|
||||
|
||||
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
|
||||
unsigned int min, unsigned int max)
|
||||
{
|
||||
if (policy->min < min)
|
||||
policy->min = min;
|
||||
if (policy->max < min)
|
||||
policy->max = min;
|
||||
if (policy->min > max)
|
||||
policy->min = max;
|
||||
if (policy->max > max)
|
||||
policy->max = max;
|
||||
if (policy->min > policy->max)
|
||||
policy->min = policy->max;
|
||||
return;
|
||||
}
|
||||
|
||||
struct freq_attr {
|
||||
struct attribute attr;
|
||||
ssize_t (*show)(struct cpufreq_policy *, char *);
|
||||
|
@ -334,52 +177,181 @@ __ATTR(_name, 0444, show_##_name, NULL)
|
|||
static struct global_attr _name = \
|
||||
__ATTR(_name, 0644, show_##_name, store_##_name)
|
||||
|
||||
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
|
||||
void cpufreq_cpu_put(struct cpufreq_policy *data);
|
||||
|
||||
struct cpufreq_driver {
	char			name[CPUFREQ_NAME_LEN];
	u8			flags;
	/*
	 * This should be set by platforms having multiple clock-domains, i.e.
	 * supporting multiple policies. With this sysfs directories of governor
	 * would be created in cpu/cpu<num>/cpufreq/ directory and so they can
	 * use the same governor with different tunables for different clusters.
	 */
	bool			have_governor_per_policy;

	/* needed by all drivers */
	int	(*init)		(struct cpufreq_policy *policy);
	int	(*verify)	(struct cpufreq_policy *policy);

	/* define one out of two */
	int	(*setpolicy)	(struct cpufreq_policy *policy);
	int	(*target)	(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation);

	/* should be defined, if possible */
	unsigned int	(*get)	(unsigned int cpu);

	/* optional */
	int	(*bios_limit)	(int cpu, unsigned int *limit);

	int	(*exit)		(struct cpufreq_policy *policy);
	int	(*suspend)	(struct cpufreq_policy *policy);
	int	(*resume)	(struct cpufreq_policy *policy);
	struct freq_attr	**attr;
};
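
With the owner field gone, a minimal driver registration against this trimmed structure looks roughly like the sketch below; the driver name and callbacks are placeholders, not anything defined in this series.

static struct cpufreq_driver my_cpufreq_driver = {
	.name	= "my-cpufreq",			/* placeholder */
	.flags	= CPUFREQ_CONST_LOOPS,
	.init	= my_cpufreq_init,		/* hypothetical callbacks */
	.verify	= my_cpufreq_verify,
	.target	= my_cpufreq_target,
	.get	= my_cpufreq_get,
};

static int __init my_cpufreq_module_init(void)
{
	return cpufreq_register_driver(&my_cpufreq_driver);
}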
|
||||
|
||||
/* flags */
|
||||
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
|
||||
* all ->init() calls failed */
|
||||
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
|
||||
* "constants" aren't affected by
|
||||
* frequency transitions */
|
||||
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
|
||||
* mismatches */
|
||||
|
||||
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
|
||||
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
|
||||
|
||||
const char *cpufreq_get_current_driver(void);
|
||||
|
||||
/*********************************************************************
|
||||
* CPUFREQ 2.6. INTERFACE *
|
||||
*********************************************************************/
|
||||
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
|
||||
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
|
||||
int cpufreq_update_policy(unsigned int cpu);
|
||||
bool have_governor_per_policy(void);
|
||||
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
/*
|
||||
* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it
|
||||
*/
|
||||
unsigned int cpufreq_get(unsigned int cpu);
|
||||
#else
|
||||
static inline unsigned int cpufreq_get(unsigned int cpu)
|
||||
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
|
||||
unsigned int min, unsigned int max)
|
||||
{
|
||||
return 0;
|
||||
if (policy->min < min)
|
||||
policy->min = min;
|
||||
if (policy->max < min)
|
||||
policy->max = min;
|
||||
if (policy->min > max)
|
||||
policy->min = max;
|
||||
if (policy->max > max)
|
||||
policy->max = max;
|
||||
if (policy->min > policy->max)
|
||||
policy->min = policy->max;
|
||||
return;
|
||||
}
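
To make the clamping above concrete (numbers purely illustrative): with hardware limits of 800000..2000000 kHz, a policy asking for 500000..2500000 kHz ends up clamped to the hardware range.

	struct cpufreq_policy pol = { .min = 500000, .max = 2500000 };

	cpufreq_verify_within_limits(&pol, 800000, 2000000);
	/* pol.min == 800000, pol.max == 2000000 */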
|
||||
#endif
|
||||
|
||||
/*
|
||||
* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
|
||||
*/
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
unsigned int cpufreq_quick_get(unsigned int cpu);
|
||||
unsigned int cpufreq_quick_get_max(unsigned int cpu);
|
||||
#else
|
||||
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*********************************************************************
|
||||
* CPUFREQ DEFAULT GOVERNOR *
|
||||
* CPUFREQ NOTIFIER INTERFACE *
|
||||
*********************************************************************/
|
||||
|
||||
#define CPUFREQ_TRANSITION_NOTIFIER (0)
|
||||
#define CPUFREQ_POLICY_NOTIFIER (1)
|
||||
|
||||
/* Transition notifiers */
|
||||
#define CPUFREQ_PRECHANGE (0)
|
||||
#define CPUFREQ_POSTCHANGE (1)
|
||||
#define CPUFREQ_RESUMECHANGE (8)
|
||||
#define CPUFREQ_SUSPENDCHANGE (9)
|
||||
|
||||
/* Policy Notifiers */
|
||||
#define CPUFREQ_ADJUST (0)
|
||||
#define CPUFREQ_INCOMPATIBLE (1)
|
||||
#define CPUFREQ_NOTIFY (2)
|
||||
#define CPUFREQ_START (3)
|
||||
#define CPUFREQ_UPDATE_POLICY_CPU (4)
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
|
||||
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
|
||||
|
||||
void cpufreq_notify_transition(struct cpufreq_policy *policy,
|
||||
struct cpufreq_freqs *freqs, unsigned int state);
|
||||
|
||||
#else /* CONFIG_CPU_FREQ */
|
||||
static inline int cpufreq_register_notifier(struct notifier_block *nb,
|
||||
unsigned int list)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
|
||||
unsigned int list)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* !CONFIG_CPU_FREQ */
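
For reference, a client of the transition notifier registers a callback roughly as below; the function and variable names are illustrative only.

static int my_freq_transition(struct notifier_block *nb,
			      unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block my_freq_nb = {
	.notifier_call = my_freq_transition,
};

/* at init time: cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER); */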
|
||||
|
||||
/**
 * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
 * safe)
 * @old: old value
 * @div: divisor
 * @mult: multiplier
 *
 * new = old * mult / div
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
		u_int mult)
{
#if BITS_PER_LONG == 32
	u64 result = ((u64) old) * ((u64) mult);
	do_div(result, div);
	return (unsigned long) result;

#elif BITS_PER_LONG == 64
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
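
A quick worked example of the helper (values illustrative): scaling loops_per_jiffy from an old frequency of 500000 kHz to a new one of 1000000 kHz doubles it, since new = old * mult / div.

	unsigned long lpj = 2494800;

	/* div = old freq in kHz, mult = new freq in kHz */
	unsigned long new_lpj = cpufreq_scale(lpj, 500000, 1000000);
	/* new_lpj == 4989600 */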
|
||||
|
||||
/*********************************************************************
|
||||
* CPUFREQ GOVERNORS *
|
||||
*********************************************************************/
|
||||
|
||||
/*
|
||||
* If (cpufreq_driver->target) exists, the ->governor decides what frequency
|
||||
* within the limits is used. If (cpufreq_driver->setpolicy) exists, these
|
||||
* two generic policies are available:
|
||||
*/
|
||||
#define CPUFREQ_POLICY_POWERSAVE (1)
|
||||
#define CPUFREQ_POLICY_PERFORMANCE (2)
|
||||
|
||||
/* Governor Events */
|
||||
#define CPUFREQ_GOV_START 1
|
||||
#define CPUFREQ_GOV_STOP 2
|
||||
#define CPUFREQ_GOV_LIMITS 3
|
||||
#define CPUFREQ_GOV_POLICY_INIT 4
|
||||
#define CPUFREQ_GOV_POLICY_EXIT 5
|
||||
|
||||
struct cpufreq_governor {
|
||||
char name[CPUFREQ_NAME_LEN];
|
||||
int initialized;
|
||||
int (*governor) (struct cpufreq_policy *policy,
|
||||
unsigned int event);
|
||||
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
|
||||
char *buf);
|
||||
int (*store_setspeed) (struct cpufreq_policy *policy,
|
||||
unsigned int freq);
|
||||
unsigned int max_transition_latency; /* HW must be able to switch to
|
||||
next freq faster than this value in nano secs or we
|
||||
will fallback to performance governor */
|
||||
struct list_head governor_list;
|
||||
struct module *owner;
|
||||
};
|
||||
|
||||
/* Pass a target to the cpufreq driver */
|
||||
int cpufreq_driver_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation);
|
||||
int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation);
|
||||
int cpufreq_register_governor(struct cpufreq_governor *governor);
|
||||
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
|
||||
|
||||
/* CPUFREQ DEFAULT GOVERNOR */
|
||||
/*
|
||||
 * Performance governor is the fallback governor if any other governor fails to
 * auto-load due to latency restrictions.
|
||||
|
@ -428,18 +400,16 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
|
|||
unsigned int relation,
|
||||
unsigned int *index);
|
||||
|
||||
/* the following 3 functions are for cpufreq core use only */
|
||||
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
|
||||
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
|
||||
|
||||
/* the following function is for cpufreq core use only */
|
||||
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
|
||||
|
||||
/* the following are really really optional */
|
||||
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
|
||||
|
||||
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
|
||||
unsigned int cpu);
|
||||
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
|
||||
|
||||
void cpufreq_frequency_table_put_attr(unsigned int cpu);
|
||||
|
||||
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
|
||||
|
||||
#endif /* _LINUX_CPUFREQ_H */
|
||||
|
|
|
@ -266,6 +266,7 @@ extern int of_device_is_available(const struct device_node *device);
|
|||
extern const void *of_get_property(const struct device_node *node,
|
||||
const char *name,
|
||||
int *lenp);
|
||||
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
|
||||
#define for_each_property_of_node(dn, pp) \
|
||||
for (pp = dn->properties; pp != NULL; pp = pp->next)
|
||||
|
||||
|
@ -459,6 +460,12 @@ static inline const void *of_get_property(const struct device_node *node,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct device_node *of_get_cpu_node(int cpu,
|
||||
unsigned int *thread)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int of_property_read_u64(const struct device_node *np,
|
||||
const char *propname, u64 *out_value)
|
||||
{
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
#ifndef _LINUX_OF_DEVICE_H
|
||||
#define _LINUX_OF_DEVICE_H
|
||||
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/of_platform.h> /* temporary until merge */
|
||||
|
||||
|
@ -43,6 +44,15 @@ static inline void of_device_node_put(struct device *dev)
|
|||
of_node_put(dev->of_node);
|
||||
}
|
||||
|
||||
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
	struct device *cpu_dev;
	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return NULL;
	return of_node_get(cpu_dev->of_node);
}
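
A usage sketch of the new helper (not taken from this series), as a platform driver might use it once CPU devices are registered and their of_node pointers populated:

	struct device_node *np = of_cpu_device_node_get(0);

	if (!np)
		return -ENODEV;
	/* parse clocks, OPPs, etc. from np here */
	of_node_put(np);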
|
||||
|
||||
#else /* CONFIG_OF */
|
||||
|
||||
static inline int of_driver_match_device(struct device *dev,
|
||||
|
@ -67,6 +77,11 @@ static inline const struct of_device_id *of_match_device(
|
|||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct device_node *of_cpu_device_node_get(int cpu)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif /* CONFIG_OF */
|
||||
|
||||
#endif /* _LINUX_OF_DEVICE_H */