linux/arch/powerpc/platforms/85xx/smp.c


/*
 * Author: Andy Fleming <afleming@freescale.com>
 *	   Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
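
/* Secondary-core entry point; implemented in the FSL BookE head assembly
 * (head_fsl_booke.S in kernels of this era). */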
extern void __early_start(void);
#define BOOT_ENTRY_ADDR_UPPER 0
#define BOOT_ENTRY_ADDR_LOWER 1
#define BOOT_ENTRY_R3_UPPER 2
#define BOOT_ENTRY_R3_LOWER 3
#define BOOT_ENTRY_RESV 4
#define BOOT_ENTRY_PIR 5
#define BOOT_ENTRY_R6_UPPER 6
#define BOOT_ENTRY_R6_LOWER 7
#define NUM_BOOT_ENTRY 8
#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32))
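
/*
 * The BOOT_ENTRY_* indices above describe one spin-table entry as eight
 * u32 words: a 64-bit entry address (upper/lower), a 64-bit r3 value
 * (upper/lower), a reserved word, the PIR to assign to the core, and a
 * 64-bit r6 value (upper/lower). A parked secondary core spins watching
 * its entry until the entry address is written.
 */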
static int __init
smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	__iomem u32 *bptr_vaddr;
	struct device_node *np;
	int n = 0;
	int ioremappable;

	WARN_ON(nr < 0 || nr >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return -ENOENT;
	}
	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
	else
		bptr_vaddr = phys_to_virt(*cpu_rel_addr);
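
	/*
	 * Each CPU node's cpu-release-addr points at that CPU's own
	 * spin-table entry, so mapping SIZE_BOOT_ENTRY bytes is enough.
	 */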
	local_irq_save(flags);

	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
#ifdef CONFIG_PPC32
	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));

	if (!ioremappable)
		flush_dcache_range((ulong)bptr_vaddr,
				(ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));

	/* Wait a bit for the CPU to ack. */
	while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
		mdelay(1);
#else
	smp_generic_kick_cpu(nr);

	out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER),
		__pa((u64)*((unsigned long long *)generic_secondary_smp_init)));

	if (!ioremappable)
		flush_dcache_range((ulong)bptr_vaddr,
				(ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(bptr_vaddr);
pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
return 0;
}
static void __init
smp_85xx_setup_cpu(int cpu_nr)
{
	mpic_setup_this_cpu();

	if (cpu_has_feature(CPU_FTR_DBELL))
		doorbell_setup_this_cpu();
}
struct smp_ops_t smp_85xx_ops = {
	.kick_cpu = smp_85xx_kick_cpu,
#ifdef CONFIG_KEXEC
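	/*
	 * Under kexec the secondary cores are reset and re-kicked, so the
	 * timebase has to be handed off and re-synchronized; the generic
	 * give/take helpers cover that.
	 */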
	.give_timebase = smp_generic_give_timebase,
	.take_timebase = smp_generic_take_timebase,
#endif
};
#ifdef CONFIG_KEXEC
atomic_t kexec_down_cpus = ATOMIC_INIT(0);
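
/*
 * Park a CPU for kexec: interrupts are disabled and, on a secondary core,
 * kexec_down_cpus is bumped so the boot CPU can tell how many secondaries
 * have quiesced before it resets them.
 */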
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1);
	}
}
static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}
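
/*
 * Flush one physical page out of the data cache; kmap() is used so that
 * pages sitting in highmem can be reached as well.
 */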
static void map_and_flush(unsigned long paddr)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long kaddr = (unsigned long)kmap(page);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
	kunmap(page);
}
/*
 * Before we reset the other cores, we need to flush relevant cache
 * out to memory so we don't get anything corrupted. Some of these flushes
 * are performed out of an overabundance of caution, as interrupts are not
 * disabled yet and we can still switch cores.
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned long paddr;
	int i;

	if (image->type == KEXEC_TYPE_DEFAULT) {
		/* normal kexec images are stored in temporary pages */
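		/*
		 * Walk the kimage entry list: IND_INDIRECTION entries chain
		 * to the next page of the list, and IND_DESTINATION entries
		 * are only target addresses, so everything else (source and
		 * indirection pages) is what actually gets flushed.
		 */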
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (!(entry & IND_DESTINATION)) {
map_and_flush(entry);
}
}
/* flush out last IND_DONE page */
map_and_flush(entry);
	} else {
		/* crash type kexec images are copied to the crash region */
		for (i = 0; i < image->nr_segments; i++) {
			struct kexec_segment *seg = &image->segment[i];

			for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
			     paddr += PAGE_SIZE) {
				map_and_flush(paddr);
			}
		}
	}
	/* also flush the kimage struct to be passed in as well */
	flush_dcache_range((unsigned long)image,
			(unsigned long)image + sizeof(*image));
}
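
/*
 * kexec entry for 85xx: flush everything the new kernel will touch out of
 * the data caches, pull the secondary cores into their parking loop (for
 * normal kexec; on a crash they have already been brought down), wait for
 * them to check in, reset them through the MPIC, then hand off to the
 * generic code.
 */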
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
mpc85xx_smp_flush_dcache_kexec(image);
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
while ( (atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
( timeout > 0 ) )
{
timeout--;
}
if ( !timeout )
printk(KERN_ERR "Unable to bring down secondary cpu(s)");
for (i = 0; i < num_cpus; i++)
{
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
}
default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */
void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	}
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/* .message_pass defaults to smp_muxed_ipi_message_pass */
		smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
	}

	BUG_ON(!smp_85xx_ops.message_pass);

	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}