linux/arch/arm/kernel/perf_event_cpu.c


/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2012 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
*/
#define pr_fmt(fmt) "CPU PMU: " fmt
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
/*
* Despite the names, these two functions are CPU-specific and are used
* by the OProfile/perf code.
*/
const char *perf_pmu_name(void)
{
	if (!cpu_pmu)
		return NULL;

	return cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (cpu_pmu != NULL)
		max_events = cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"
static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
	return this_cpu_ptr(&cpu_hw_events);
}
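/*
 * Enable/disable helpers for a per-cpu (PPI) PMU interrupt; these run on
 * each CPU via on_each_cpu(), with the IRQ number passed through *data.
 */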
static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}
static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}
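/*
 * Release the PMU interrupt(s): a single per-cpu IRQ is disabled on all
 * CPUs and freed once; otherwise each per-CPU interrupt that was actually
 * requested (tracked in active_irqs) is freed individually.
 */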
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, cpu_pmu);
		}
	}
}
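/*
 * Claim the PMU interrupt(s): either a single per-cpu IRQ shared by all
 * CPUs, or one SPI per CPU with its affinity pinned to that CPU.
 */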
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, i);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  cpu_pmu);
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
					irq);
				return err;
			}

			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}
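/*
 * Hook the per-CPU event structures and the IRQ request/free callbacks
 * into the arm_pmu, then reset the PMU hardware on every CPU.
 */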
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		per_cpu(percpu_pmu, cpu) = cpu_pmu;
	}

	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
	cpu_pmu->request_irq = cpu_pmu_request_irq;
	cpu_pmu->free_irq = cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
* UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
* junk values out of them.
*/
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
			  void *hcpu)
{
	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	if (cpu_pmu && cpu_pmu->reset)
		cpu_pmu->reset(cpu_pmu);
	else
		return NOTIFY_DONE;

	return NOTIFY_OK;
}
static struct notifier_block cpu_pmu_hotplug_notifier = {
	.notifier_call = cpu_pmu_notify,
};
/*
* PMU platform driver and devicetree bindings.
*/
static struct of_device_id cpu_pmu_of_device_ids[] = {
{.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init},
{.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
{.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
{.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
{.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
{.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
{.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
{.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
{.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
{.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
{.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
{},
};
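/* Name-based matching for boards that register the PMU device without devicetree. */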
static struct platform_device_id cpu_pmu_plat_device_ids[] = {
{.name = "arm-pmu"},
{.name = "armv6-pmu"},
{.name = "armv7-pmu"},
{.name = "xscale-pmu"},
{},
};
/*
* CPU PMU identification and probing.
*/
static int probe_current_pmu(struct arm_pmu *pmu)
{
	int cpu = get_cpu();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	switch (read_cpuid_part()) {
	/* ARM Ltd CPUs. */
	case ARM_CPU_PART_ARM1136:
		ret = armv6_1136_pmu_init(pmu);
		break;
	case ARM_CPU_PART_ARM1156:
		ret = armv6_1156_pmu_init(pmu);
		break;
	case ARM_CPU_PART_ARM1176:
		ret = armv6_1176_pmu_init(pmu);
		break;
	case ARM_CPU_PART_ARM11MPCORE:
		ret = armv6mpcore_pmu_init(pmu);
		break;
	case ARM_CPU_PART_CORTEX_A8:
		ret = armv7_a8_pmu_init(pmu);
		break;
	case ARM_CPU_PART_CORTEX_A9:
		ret = armv7_a9_pmu_init(pmu);
		break;

	default:
		if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
			switch (xscale_cpu_arch_version()) {
			case ARM_CPU_XSCALE_ARCH_V1:
				ret = xscale1pmu_init(pmu);
				break;
			case ARM_CPU_XSCALE_ARCH_V2:
				ret = xscale2pmu_init(pmu);
				break;
			}
		}
		break;
	}

	put_cpu();
	return ret;
}
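/*
 * Probe entry point: pick the PMU init function from the devicetree match
 * table when a node is present, otherwise probe the CPUID of the current CPU.
 */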
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	if (cpu_pmu) {
		pr_info("attempt to register multiple PMU devices!");
		return -ENOSPC;
	}

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!");
		return -ENOMEM;
	}

	cpu_pmu = pmu;
	cpu_pmu->plat_device = pdev;

	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
		init_fn = of_id->data;
		ret = init_fn(pmu);
	} else {
		ret = probe_current_pmu(pmu);
	}

	if (ret) {
		pr_info("failed to probe PMU!");
		goto out_free;
	}

	cpu_pmu_init(cpu_pmu);
	ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);

	if (!ret)
		return 0;

out_free:
	pr_info("failed to register PMU devices!");
	kfree(pmu);
	return ret;
}
static struct platform_driver cpu_pmu_driver = {
	.driver = {
		.name = "arm-pmu",
		.pm = &armpmu_dev_pm_ops,
		.of_match_table = cpu_pmu_of_device_ids,
	},
	.probe = cpu_pmu_device_probe,
	.id_table = cpu_pmu_plat_device_ids,
};
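/*
 * Register the CPU hotplug notifier and the platform driver; the notifier
 * is unregistered again if driver registration fails.
 */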
static int __init register_pmu_driver(void)
{
	int err;

	err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
	if (err)
		return err;

	err = platform_driver_register(&cpu_pmu_driver);
	if (err)
		unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);

	return err;
}
device_initcall(register_pmu_driver);