Merge remote-tracking branch 'origin/irq/ipi-as-irq' into irq/irqchip-next
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 7e62dd911a
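Before the hunks themselves, a note on the shape of the change: the series merged here (irq/ipi-as-irq) makes IPIs ordinary per-CPU interrupts. Each converted irqchip allocates its IPIs from a dedicated IRQ domain and hands the resulting Linux interrupt range to the architecture through the new set_smp_ipi_range(); the arch code then requests each IPI with request_percpu_irq() and raises them with __ipi_send_mask(). A condensed driver-side sketch assembled from the APIs used in the hunks below — the my_* names and MY_NR_IPIS are illustrative, not from the patch:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/smp.h>

	#define MY_NR_IPIS	8	/* illustrative: one hwirq per IPI */

	static struct irq_domain *my_ipi_domain;

	static struct irq_chip my_ipi_chip = {
		.name	= "my-IPI",
		/* a real chip also provides .irq_mask/.irq_unmask/.irq_eoi
		 * and .ipi_send_mask, as in the armada and bcm2836 hunks */
	};

	static int my_ipi_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *args)
	{
		int i;

		for (i = 0; i < nr_irqs; i++) {
			/* IPIs are per-CPU and use the new fasteoi IPI flow */
			irq_set_percpu_devid(virq + i);
			irq_domain_set_info(d, virq + i, i, &my_ipi_chip,
					    d->host_data,
					    handle_percpu_devid_fasteoi_ipi,
					    NULL, NULL);
		}

		return 0;
	}

	static void my_ipi_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
	{
		/* IPIs are never freed, as in the drivers below */
	}

	static const struct irq_domain_ops my_ipi_domain_ops = {
		.alloc	= my_ipi_alloc,
		.free	= my_ipi_free,
	};

	static void __init my_ipi_init(struct fwnode_handle *fwnode)
	{
		int base_ipi;

		my_ipi_domain = irq_domain_create_linear(fwnode, MY_NR_IPIS,
							 &my_ipi_domain_ops, NULL);
		if (WARN_ON(!my_ipi_domain))
			return;

		irq_domain_update_bus_token(my_ipi_domain, DOMAIN_BUS_IPI);
		base_ipi = __irq_domain_alloc_irqs(my_ipi_domain, -1, MY_NR_IPIS,
						   NUMA_NO_NODE, NULL, false, NULL);
		if (WARN_ON(base_ipi <= 0))
			return;

		/* Hand the Linux interrupt range over to the arch SMP code */
		set_smp_ipi_range(base_ipi, MY_NR_IPIS);
	}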
arch/arm/Kconfig
@@ -49,6 +49,7 @@ config ARM
 	select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY
 	select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select GENERIC_IRQ_IPI if SMP
 	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IDLE_POLL_SETUP
arch/arm/include/asm/hardirq.h
@@ -6,29 +6,12 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
-#define NR_IPI	7
-
 typedef struct {
 	unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
-	unsigned int ipi_irqs[NR_IPI];
-#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
-#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
-
-#ifdef CONFIG_SMP
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#else
-#define smp_irq_stat_cpu(cpu)	0
-#endif
-
-#define arch_irq_stat_cpu	smp_irq_stat_cpu
-
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
 #endif /* __ASM_HARDIRQ_H */
arch/arm/include/asm/smp.h
@@ -39,11 +39,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs);
  */
 extern void smp_init_cpus(void);
 
-
 /*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
  */
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
 
 /*
  * Called from platform specific assembly code, this is the
arch/arm/kernel/irq.c
@@ -18,7 +18,6 @@
  * IRQ's are in fact implemented a bit like signal handlers for the kernel.
  * Naturally it's not a 1:1 relation, but there are similarities.
  */
-#include <linux/kernel_stat.h>
 #include <linux/signal.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
arch/arm/kernel/smp.c
@@ -26,6 +26,7 @@
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
 #include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
 
 #include <linux/atomic.h>
 #include <asm/bugs.h>
@@ -65,18 +66,27 @@ enum ipi_msg_type {
 	IPI_CPU_STOP,
 	IPI_IRQ_WORK,
 	IPI_COMPLETION,
+	NR_IPI,
 	/*
 	 * CPU_BACKTRACE is special and not included in NR_IPI
 	 * or tracable with trace_ipi_*
 	 */
-	IPI_CPU_BACKTRACE,
+	IPI_CPU_BACKTRACE = NR_IPI,
 	/*
 	 * SGI8-15 can be reserved by secure firmware, and thus may
 	 * not be usable by the kernel. Please keep the above limited
 	 * to at most 8 entries.
 	 */
+	MAX_IPI
 };
 
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+static void ipi_teardown(int cpu);
+
 static DECLARE_COMPLETION(cpu_running);
 
 static struct smp_operations smp_ops __ro_after_init;
@@ -247,6 +257,7 @@ int __cpu_disable(void)
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
 	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -422,6 +433,8 @@ asmlinkage void secondary_start_kernel(void)
 
 	notify_cpu_starting(cpu);
 
+	ipi_setup(cpu);
+
 	calibrate_delay();
 
 	smp_store_cpu_info(cpu);
@@ -500,14 +513,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*)(const struct cpumask *, unsigned int) fn)
-{
-	if (!__smp_cross_call)
-		__smp_cross_call = fn;
-}
-
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -519,38 +524,23 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	S(IPI_COMPLETION, "completion interrupts"),
 };
 
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
 
 	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
 }
 
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
-}
-
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	smp_cross_call(mask, IPI_CALL_FUNC);
@@ -627,15 +617,12 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 	handle_IPI(ipinr, regs);
 }
 
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
 	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}
 
 	switch (ipinr) {
 	case IPI_WAKEUP:
@@ -643,9 +630,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
-		irq_enter();
 		tick_receive_broadcast();
-		irq_exit();
 		break;
 #endif
 
@@ -654,36 +639,26 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		break;
 
 	case IPI_CALL_FUNC:
-		irq_enter();
 		generic_smp_call_function_interrupt();
-		irq_exit();
 		break;
 
 	case IPI_CPU_STOP:
-		irq_enter();
 		ipi_cpu_stop(cpu);
-		irq_exit();
 		break;
 
 #ifdef CONFIG_IRQ_WORK
 	case IPI_IRQ_WORK:
-		irq_enter();
 		irq_work_run();
-		irq_exit();
 		break;
 #endif
 
 	case IPI_COMPLETION:
-		irq_enter();
 		ipi_complete(cpu);
-		irq_exit();
 		break;
 
 	case IPI_CPU_BACKTRACE:
 		printk_nmi_enter();
-		irq_enter();
-		nmi_cpu_backtrace(regs);
-		irq_exit();
+		nmi_cpu_backtrace(get_irq_regs());
 		printk_nmi_exit();
 		break;
 
@@ -695,9 +670,78 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	do_handle_IPI(ipinr);
+	irq_exit();
+
 	set_irq_regs(old_regs);
 }
 
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &irq_stat);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
+}
+
 void smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
@@ -805,7 +849,7 @@ core_initcall(register_cpufreq_notifier);
 
 static void raise_nmi(cpumask_t *mask)
 {
-	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
arch/arm64/Kconfig
@@ -106,6 +106,7 @@ config ARM64
 	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IDLE_POLL_SETUP
+	select GENERIC_IRQ_IPI
 	select GENERIC_IRQ_MULTI_HANDLER
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
arch/arm64/include/asm/hardirq.h
@@ -13,21 +13,12 @@
 #include <asm/kvm_arm.h>
 #include <asm/sysreg.h>
 
-#define NR_IPI	7
-
 typedef struct {
 	unsigned int __softirq_pending;
-	unsigned int ipi_irqs[NR_IPI];
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
-#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
-
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#define arch_irq_stat_cpu	smp_irq_stat_cpu
-
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
 struct nmi_ctx {
arch/arm64/include/asm/irq_work.h
@@ -2,11 +2,9 @@
 #ifndef __ASM_IRQ_WORK_H
 #define __ASM_IRQ_WORK_H
 
-#include <asm/smp.h>
-
 static inline bool arch_irq_work_has_interrupt(void)
 {
-	return !!__smp_cross_call;
+	return true;
 }
 
 #endif /* __ASM_IRQ_WORK_H */
arch/arm64/include/asm/smp.h
@@ -55,16 +55,6 @@ static inline void set_cpu_logical_map(int cpu, u64 hwid)
 
 struct seq_file;
 
-/*
- * generate IPI list text
- */
-extern void show_ipi_list(struct seq_file *p, int prec);
-
-/*
- * Called from C code, this handles an IPI.
- */
-extern void handle_IPI(int ipinr, struct pt_regs *regs);
-
 /*
  * Discover the set of possible CPUs and determine their
  * SMP operations.
@@ -72,11 +62,9 @@ extern void handle_IPI(int ipinr, struct pt_regs *regs);
 extern void smp_init_cpus(void);
 
 /*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
  */
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
 
-extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
 /*
  * Called from the secondary holding pen, this is the secondary CPU entry point.
arch/arm64/kernel/irq.c
@@ -10,10 +10,10 @@
  * Copyright (C) 2012 ARM Ltd.
  */
 
-#include <linux/kernel_stat.h>
 #include <linux/irq.h>
 #include <linux/memory.h>
 #include <linux/smp.h>
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/kprobes.h>
@@ -22,20 +22,11 @@
 #include <asm/daifflags.h>
 #include <asm/vmap_stack.h>
 
-unsigned long irq_err_count;
-
 /* Only access this in an NMI enter/exit */
 DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
 
 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-int arch_show_interrupts(struct seq_file *p, int prec)
-{
-	show_ipi_list(p, prec);
-	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
-	return 0;
-}
-
 #ifdef CONFIG_VMAP_STACK
 static void init_irq_stacks(void)
 {
arch/arm64/kernel/smp.c
@@ -30,6 +30,7 @@
 #include <linux/completion.h>
 #include <linux/of.h>
 #include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
 #include <linux/kexec.h>
 #include <linux/kvm_host.h>
 
@@ -72,9 +73,17 @@ enum ipi_msg_type {
 	IPI_CPU_CRASH_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
-	IPI_WAKEUP
+	IPI_WAKEUP,
+	NR_IPI
 };
 
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+static void ipi_teardown(int cpu);
+
 #ifdef CONFIG_HOTPLUG_CPU
 static int op_cpu_kill(unsigned int cpu);
 #else
@@ -237,6 +246,8 @@ asmlinkage notrace void secondary_start_kernel(void)
 	 */
 	notify_cpu_starting(cpu);
 
+	ipi_setup(cpu);
+
 	store_cpu_topology(cpu);
 	numa_add_cpu(cpu);
 
@@ -302,6 +313,7 @@ int __cpu_disable(void)
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
 	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -772,13 +784,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	__smp_cross_call = fn;
-}
-
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
@@ -790,35 +795,25 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	S(IPI_WAKEUP, "CPU wake-up interrupts"),
 };
 
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
-void show_ipi_list(struct seq_file *p, int prec)
+unsigned long irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
 
 	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
-}
 
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
+	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+	return 0;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -841,8 +836,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
-	if (__smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+	smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
 }
 #endif
 
@@ -890,15 +884,12 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 /*
  * Main handler for inter-processor interrupts
  */
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
 	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}
 
 	switch (ipinr) {
 	case IPI_RESCHEDULE:
@@ -906,21 +897,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		break;
 
 	case IPI_CALL_FUNC:
-		irq_enter();
 		generic_smp_call_function_interrupt();
-		irq_exit();
 		break;
 
 	case IPI_CPU_STOP:
-		irq_enter();
 		local_cpu_stop();
-		irq_exit();
 		break;
 
 	case IPI_CPU_CRASH_STOP:
 		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
-			irq_enter();
-			ipi_cpu_crash_stop(cpu, regs);
+			ipi_cpu_crash_stop(cpu, get_irq_regs());
 
 			unreachable();
 		}
@@ -928,17 +914,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
-		irq_enter();
 		tick_receive_broadcast();
-		irq_exit();
 		break;
 #endif
 
 #ifdef CONFIG_IRQ_WORK
 	case IPI_IRQ_WORK:
-		irq_enter();
 		irq_work_run();
-		irq_exit();
 		break;
 #endif
 
@@ -957,7 +939,64 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
-	set_irq_regs(old_regs);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < NR_IPI);
+	nr_ipi = min(n, NR_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &cpu_number);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
 }
 
 void smp_send_reschedule(int cpu)
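The arm and arm64 smp.c conversions above yield the same receive path on both architectures. A schematic paraphrasing the code in the hunks above, rather than adding to it:

	/*
	 * sender:   smp_cross_call(target, ipinr)
	 *             -> __ipi_send_mask(ipi_desc[ipinr], target)
	 *                  -> irqchip ->ipi_send_mask()  (SGI / doorbell write)
	 *
	 * receiver: per-CPU IRQ fires
	 *             -> handle_percpu_devid_fasteoi_ipi()  (genirq flow handler)
	 *                  -> ipi_handler(irq, dev_id)      (requested per IPI)
	 *                       -> do_handle_IPI(irq - ipi_irq_base)
	 *
	 * The normal IRQ entry path now provides the irq_enter()/irq_exit()
	 * bracketing, which is why the per-case irq_enter()/irq_exit() pairs
	 * disappear from the IPI switch statements above; on 32-bit ARM,
	 * handle_IPI() survives only as a legacy wrapper for irqchips that
	 * have not yet been converted.
	 */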
drivers/irqchip/irq-armada-370-xp.c
@@ -310,7 +310,134 @@ static inline int armada_370_xp_msi_init(struct device_node *node,
 }
 #endif
 
+static void armada_xp_mpic_perf_init(void)
+{
+	unsigned long cpuid = cpu_logical_map(smp_processor_id());
+
+	/* Enable Performance Counter Overflow interrupts */
+	writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
+	       per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
+}
+
 #ifdef CONFIG_SMP
+static struct irq_domain *ipi_domain;
+
+static void armada_370_xp_ipi_mask(struct irq_data *d)
+{
+	u32 reg;
+	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+	reg &= ~BIT(d->hwirq);
+	writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+}
+
+static void armada_370_xp_ipi_unmask(struct irq_data *d)
+{
+	u32 reg;
+	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+	reg |= BIT(d->hwirq);
+	writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+}
+
+static void armada_370_xp_ipi_send_mask(struct irq_data *d,
+					const struct cpumask *mask)
+{
+	unsigned long map = 0;
+	int cpu;
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= 1 << cpu_logical_map(cpu);
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	dsb();
+
+	/* submit softirq */
+	writel((map << 8) | d->hwirq, main_int_base +
+		ARMADA_370_XP_SW_TRIG_INT_OFFS);
+}
+
+static void armada_370_xp_ipi_eoi(struct irq_data *d)
+{
+	writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+}
+
+static struct irq_chip ipi_irqchip = {
+	.name		= "IPI",
+	.irq_mask	= armada_370_xp_ipi_mask,
+	.irq_unmask	= armada_370_xp_ipi_unmask,
+	.irq_eoi	= armada_370_xp_ipi_eoi,
+	.ipi_send_mask	= armada_370_xp_ipi_send_mask,
+};
+
+static int armada_370_xp_ipi_alloc(struct irq_domain *d,
+				   unsigned int virq,
+				   unsigned int nr_irqs, void *args)
+{
+	int i;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_set_percpu_devid(virq + i);
+		irq_domain_set_info(d, virq + i, i, &ipi_irqchip,
+				    d->host_data,
+				    handle_percpu_devid_fasteoi_ipi,
+				    NULL, NULL);
+	}
+
+	return 0;
+}
+
+static void armada_370_xp_ipi_free(struct irq_domain *d,
+				   unsigned int virq,
+				   unsigned int nr_irqs)
+{
+	/* Not freeing IPIs */
+}
+
+static const struct irq_domain_ops ipi_domain_ops = {
+	.alloc	= armada_370_xp_ipi_alloc,
+	.free	= armada_370_xp_ipi_free,
+};
+
+static void ipi_resume(void)
+{
+	int i;
+
+	for (i = 0; i < IPI_DOORBELL_END; i++) {
+		int irq;
+
+		irq = irq_find_mapping(ipi_domain, i);
+		if (irq <= 0)
+			continue;
+		if (irq_percpu_is_enabled(irq)) {
+			struct irq_data *d;
+			d = irq_domain_get_irq_data(ipi_domain, irq);
+			armada_370_xp_ipi_unmask(d);
+		}
+	}
+}
+
+static __init void armada_xp_ipi_init(struct device_node *node)
+{
+	int base_ipi;
+
+	ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+					      IPI_DOORBELL_END,
+					      &ipi_domain_ops, NULL);
+	if (WARN_ON(!ipi_domain))
+		return;
+
+	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, IPI_DOORBELL_END,
+					   NUMA_NO_NODE, NULL, false, NULL);
+	if (WARN_ON(!base_ipi))
+		return;
+
+	set_smp_ipi_range(base_ipi, IPI_DOORBELL_END);
+}
+
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
 static int armada_xp_set_affinity(struct irq_data *d,
@@ -334,6 +461,70 @@ static int armada_xp_set_affinity(struct irq_data *d,
 
 	return IRQ_SET_MASK_OK;
 }
+
+static void armada_xp_mpic_smp_cpu_init(void)
+{
+	u32 control;
+	int nr_irqs, i;
+
+	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+	nr_irqs = (control >> 2) & 0x3ff;
+
+	for (i = 0; i < nr_irqs; i++)
+		writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+
+	/* Disable all IPIs */
+	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+	/* Clear pending IPIs */
+	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+	/* Unmask IPI interrupt */
+	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+static void armada_xp_mpic_reenable_percpu(void)
+{
+	unsigned int irq;
+
+	/* Re-enable per-CPU interrupts that were enabled before suspend */
+	for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
+		struct irq_data *data;
+		int virq;
+
+		virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
+		if (virq == 0)
+			continue;
+
+		data = irq_get_irq_data(virq);
+
+		if (!irq_percpu_is_enabled(virq))
+			continue;
+
+		armada_370_xp_irq_unmask(data);
+	}
+
+	ipi_resume();
+}
+
+static int armada_xp_mpic_starting_cpu(unsigned int cpu)
+{
+	armada_xp_mpic_perf_init();
+	armada_xp_mpic_smp_cpu_init();
+	armada_xp_mpic_reenable_percpu();
+	return 0;
+}
+
+static int mpic_cascaded_starting_cpu(unsigned int cpu)
+{
+	armada_xp_mpic_perf_init();
+	armada_xp_mpic_reenable_percpu();
+	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+	return 0;
+}
+#else
+static void armada_xp_mpic_smp_cpu_init(void) {}
+static void ipi_resume(void) {}
 #endif
 
 static struct irq_chip armada_370_xp_irq_chip = {
@@ -372,98 +563,6 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 	return 0;
 }
 
-static void armada_xp_mpic_smp_cpu_init(void)
-{
-	u32 control;
-	int nr_irqs, i;
-
-	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
-	nr_irqs = (control >> 2) & 0x3ff;
-
-	for (i = 0; i < nr_irqs; i++)
-		writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
-
-	/* Clear pending IPIs */
-	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
-
-	/* Enable first 8 IPIs */
-	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
-		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
-
-	/* Unmask IPI interrupt */
-	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-}
-
-static void armada_xp_mpic_perf_init(void)
-{
-	unsigned long cpuid = cpu_logical_map(smp_processor_id());
-
-	/* Enable Performance Counter Overflow interrupts */
-	writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
-	       per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
-}
-
-#ifdef CONFIG_SMP
-static void armada_mpic_send_doorbell(const struct cpumask *mask,
-				      unsigned int irq)
-{
-	int cpu;
-	unsigned long map = 0;
-
-	/* Convert our logical CPU mask into a physical one. */
-	for_each_cpu(cpu, mask)
-		map |= 1 << cpu_logical_map(cpu);
-
-	/*
-	 * Ensure that stores to Normal memory are visible to the
-	 * other CPUs before issuing the IPI.
-	 */
-	dsb();
-
-	/* submit softirq */
-	writel((map << 8) | irq, main_int_base +
-		ARMADA_370_XP_SW_TRIG_INT_OFFS);
-}
-
-static void armada_xp_mpic_reenable_percpu(void)
-{
-	unsigned int irq;
-
-	/* Re-enable per-CPU interrupts that were enabled before suspend */
-	for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
-		struct irq_data *data;
-		int virq;
-
-		virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
-		if (virq == 0)
-			continue;
-
-		data = irq_get_irq_data(virq);
-
-		if (!irq_percpu_is_enabled(virq))
-			continue;
-
-		armada_370_xp_irq_unmask(data);
-	}
-}
-
-static int armada_xp_mpic_starting_cpu(unsigned int cpu)
-{
-	armada_xp_mpic_perf_init();
-	armada_xp_mpic_smp_cpu_init();
-	armada_xp_mpic_reenable_percpu();
-	return 0;
-}
-
-static int mpic_cascaded_starting_cpu(unsigned int cpu)
-{
-	armada_xp_mpic_perf_init();
-	armada_xp_mpic_reenable_percpu();
-	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
-	return 0;
-}
-#endif
-
 static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
 	.map = armada_370_xp_mpic_irq_map,
 	.xlate = irq_domain_xlate_onecell,
@@ -562,22 +661,15 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
 #ifdef CONFIG_SMP
 		/* IPI Handling */
 		if (irqnr == 0) {
-			u32 ipimask, ipinr;
+			unsigned long ipimask;
+			int ipi;
 
 			ipimask = readl_relaxed(per_cpu_int_base +
 						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
 				& IPI_DOORBELL_MASK;
 
-			writel(~ipimask, per_cpu_int_base +
-			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
-
-			/* Handle all pending doorbells */
-			for (ipinr = IPI_DOORBELL_START;
-			     ipinr < IPI_DOORBELL_END; ipinr++) {
-				if (ipimask & (0x1 << ipinr))
-					handle_IPI(ipinr, regs);
-			}
-			continue;
+			for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
+				handle_domain_irq(ipi_domain, ipi, regs);
 		}
 #endif
 
@@ -636,6 +728,8 @@ static void armada_370_xp_mpic_resume(void)
 	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 	if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
 		writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
+	ipi_resume();
 }
 
 static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
@@ -691,7 +785,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 	irq_set_default_host(armada_370_xp_mpic_domain);
 	set_handle_irq(armada_370_xp_handle_irq);
 #ifdef CONFIG_SMP
-	set_smp_cross_call(armada_mpic_send_doorbell);
+	armada_xp_ipi_init(node);
 	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
 				  "irqchip/armada/ipi:starting",
 				  armada_xp_mpic_starting_cpu, NULL);
drivers/irqchip/irq-bcm2836.c
@@ -10,6 +10,7 @@
 #include <linux/of_irq.h>
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/irq-bcm2836.h>
 
 #include <asm/exception.h>
@@ -89,12 +90,24 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
 	.irq_unmask	= bcm2836_arm_irqchip_unmask_gpu_irq,
 };
 
+static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d)
+{
+}
+
+static struct irq_chip bcm2836_arm_irqchip_dummy = {
+	.name		= "bcm2836-dummy",
+	.irq_eoi	= bcm2836_arm_irqchip_dummy_op,
+};
+
 static int bcm2836_map(struct irq_domain *d, unsigned int irq,
 		       irq_hw_number_t hw)
 {
 	struct irq_chip *chip;
 
 	switch (hw) {
+	case LOCAL_IRQ_MAILBOX0:
+		chip = &bcm2836_arm_irqchip_dummy;
+		break;
 	case LOCAL_IRQ_CNTPSIRQ:
 	case LOCAL_IRQ_CNTPNSIRQ:
 	case LOCAL_IRQ_CNTHPIRQ:
@@ -127,17 +140,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
 	u32 stat;
 
 	stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
-	if (stat & BIT(LOCAL_IRQ_MAILBOX0)) {
-#ifdef CONFIG_SMP
-		void __iomem *mailbox0 = (intc.base +
-					  LOCAL_MAILBOX0_CLR0 + 16 * cpu);
-		u32 mbox_val = readl(mailbox0);
-		u32 ipi = ffs(mbox_val) - 1;
-
-		writel(1 << ipi, mailbox0);
-		handle_IPI(ipi, regs);
-#endif
-	} else if (stat) {
+	if (stat) {
 		u32 hwirq = ffs(stat) - 1;
 
 		handle_domain_irq(intc.domain, hwirq, regs);
@@ -145,8 +148,35 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_SMP
-static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
-					 unsigned int ipi)
+static struct irq_domain *ipi_domain;
+
+static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	int cpu = smp_processor_id();
+	u32 mbox_val;
+
+	chained_irq_enter(chip, desc);
+
+	mbox_val = readl_relaxed(intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+	if (mbox_val) {
+		int hwirq = ffs(mbox_val) - 1;
+		generic_handle_irq(irq_find_mapping(ipi_domain, hwirq));
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+{
+	int cpu = smp_processor_id();
+
+	writel_relaxed(BIT(d->hwirq),
+		       intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+}
+
+static void bcm2836_arm_irqchip_ipi_send_mask(struct irq_data *d,
+					      const struct cpumask *mask)
 {
 	int cpu;
 	void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0;
@@ -157,11 +187,47 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
 	 */
 	smp_wmb();
 
-	for_each_cpu(cpu, mask) {
-		writel(1 << ipi, mailbox0_base + 16 * cpu);
-	}
+	for_each_cpu(cpu, mask)
+		writel_relaxed(BIT(d->hwirq), mailbox0_base + 16 * cpu);
 }
 
+static struct irq_chip bcm2836_arm_irqchip_ipi = {
+	.name		= "IPI",
+	.irq_mask	= bcm2836_arm_irqchip_dummy_op,
+	.irq_unmask	= bcm2836_arm_irqchip_dummy_op,
+	.irq_eoi	= bcm2836_arm_irqchip_ipi_eoi,
+	.ipi_send_mask	= bcm2836_arm_irqchip_ipi_send_mask,
+};
+
+static int bcm2836_arm_irqchip_ipi_alloc(struct irq_domain *d,
+					 unsigned int virq,
+					 unsigned int nr_irqs, void *args)
+{
+	int i;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_set_percpu_devid(virq + i);
+		irq_domain_set_info(d, virq + i, i, &bcm2836_arm_irqchip_ipi,
+				    d->host_data,
+				    handle_percpu_devid_fasteoi_ipi,
+				    NULL, NULL);
+	}
+
+	return 0;
+}
+
+static void bcm2836_arm_irqchip_ipi_free(struct irq_domain *d,
+					 unsigned int virq,
+					 unsigned int nr_irqs)
+{
+	/* Not freeing IPIs */
+}
+
+static const struct irq_domain_ops ipi_domain_ops = {
+	.alloc	= bcm2836_arm_irqchip_ipi_alloc,
+	.free	= bcm2836_arm_irqchip_ipi_free,
+};
+
 static int bcm2836_cpu_starting(unsigned int cpu)
 {
 	bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
@@ -175,6 +241,52 @@ static int bcm2836_cpu_dying(unsigned int cpu)
 					       cpu);
 	return 0;
 }
+
+#define BITS_PER_MBOX	32
+
+static void bcm2836_arm_irqchip_smp_init(void)
+{
+	struct irq_fwspec ipi_fwspec = {
+		.fwnode		= intc.domain->fwnode,
+		.param_count	= 1,
+		.param = {
+			[0] = LOCAL_IRQ_MAILBOX0,
+		},
+	};
+	int base_ipi, mux_irq;
+
+	mux_irq = irq_create_fwspec_mapping(&ipi_fwspec);
+	if (WARN_ON(mux_irq <= 0))
+		return;
+
+	ipi_domain = irq_domain_create_linear(intc.domain->fwnode,
+					      BITS_PER_MBOX, &ipi_domain_ops,
+					      NULL);
+	if (WARN_ON(!ipi_domain))
+		return;
+
+	ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
+	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+
+	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, BITS_PER_MBOX,
+					   NUMA_NO_NODE, NULL,
+					   false, NULL);
+
+	if (WARN_ON(!base_ipi))
+		return;
+
+	set_smp_ipi_range(base_ipi, BITS_PER_MBOX);
+
+	irq_set_chained_handler_and_data(mux_irq,
+					 bcm2836_arm_irqchip_handle_ipi, NULL);
+
+	/* Unmask IPIs to the boot CPU. */
+	cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
+			  "irqchip/bcm2836:starting", bcm2836_cpu_starting,
+			  bcm2836_cpu_dying);
+}
+#else
+#define bcm2836_arm_irqchip_smp_init()	do { } while(0)
 #endif
 
 static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
@@ -182,19 +294,6 @@ static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
 	.map = bcm2836_map,
 };
 
-static void
-bcm2836_arm_irqchip_smp_init(void)
-{
-#ifdef CONFIG_SMP
-	/* Unmask IPIs to the boot CPU. */
-	cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
-			  "irqchip/bcm2836:starting", bcm2836_cpu_starting,
-			  bcm2836_cpu_dying);
-
-	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
-#endif
-}
-
 /*
  * The LOCAL_IRQ_CNT* timer firings are based off of the external
  * oscillator with some scaling.  The firmware sets up CNTFRQ to
@@ -232,6 +331,8 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
 	if (!intc.domain)
 		panic("%pOF: unable to create IRQ domain\n", node);
 
+	irq_domain_update_bus_token(intc.domain, DOMAIN_BUS_WIRED);
+
 	bcm2836_arm_irqchip_smp_init();
 
 	set_handle_irq(bcm2836_arm_irqchip_handle_irq);
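Of the three conversions, bcm2836 is the odd one out: its mailbox is a single muxed line, so the 32 IPIs become a chained domain behind LOCAL_IRQ_MAILBOX0 (flagged IRQ_DOMAIN_FLAG_IPI_SINGLE) rather than first-class per-CPU hardware interrupts. A minimal sketch of that demux shape, condensed from bcm2836_arm_irqchip_handle_ipi() above — my_read_pending_bits() and the my_* names are hypothetical placeholders:

	static void my_mux_handle_ipi(struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		u32 pending;

		chained_irq_enter(chip, desc);	/* ack/mask the parent line */

		/* hypothetical register read; bcm2836 reads its mailbox here */
		pending = my_read_pending_bits();
		if (pending)
			generic_handle_irq(irq_find_mapping(my_ipi_domain,
							    ffs(pending) - 1));

		chained_irq_exit(chip, desc);	/* eoi/unmask the parent line */
	}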
drivers/irqchip/irq-gic-common.c
@@ -152,9 +152,6 @@ void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void))
 		writel_relaxed(GICD_INT_DEF_PRI_X4,
 					base + GIC_DIST_PRI + i * 4 / 4);
 
-	/* Ensure all SGI interrupts are now enabled */
-	writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
-
 	if (sync_access)
 		sync_access();
 }
@ -36,6 +36,8 @@
|
||||||
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
|
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
|
||||||
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
|
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
|
||||||
|
|
||||||
|
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
|
||||||
|
|
||||||
struct redist_region {
|
struct redist_region {
|
||||||
void __iomem *redist_base;
|
void __iomem *redist_base;
|
||||||
phys_addr_t phys_base;
|
phys_addr_t phys_base;
|
||||||
|
@ -113,6 +115,7 @@ static DEFINE_PER_CPU(bool, has_rss);
|
||||||
#define DEFAULT_PMR_VALUE 0xf0
|
#define DEFAULT_PMR_VALUE 0xf0
|
||||||
|
|
||||||
enum gic_intid_range {
|
enum gic_intid_range {
|
||||||
|
SGI_RANGE,
|
||||||
PPI_RANGE,
|
PPI_RANGE,
|
||||||
SPI_RANGE,
|
SPI_RANGE,
|
||||||
EPPI_RANGE,
|
EPPI_RANGE,
|
||||||
|
@ -124,6 +127,8 @@ enum gic_intid_range {
|
||||||
static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
|
static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
|
||||||
{
|
{
|
||||||
switch (hwirq) {
|
switch (hwirq) {
|
||||||
|
case 0 ... 15:
|
||||||
|
return SGI_RANGE;
|
||||||
case 16 ... 31:
|
case 16 ... 31:
|
||||||
return PPI_RANGE;
|
return PPI_RANGE;
|
||||||
case 32 ... 1019:
|
case 32 ... 1019:
|
||||||
|
@ -149,15 +154,22 @@ static inline unsigned int gic_irq(struct irq_data *d)
|
||||||
return d->hwirq;
|
return d->hwirq;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int gic_irq_in_rdist(struct irq_data *d)
|
static inline bool gic_irq_in_rdist(struct irq_data *d)
|
||||||
{
|
{
|
||||||
enum gic_intid_range range = get_intid_range(d);
|
switch (get_intid_range(d)) {
|
||||||
return range == PPI_RANGE || range == EPPI_RANGE;
|
case SGI_RANGE:
|
||||||
|
case PPI_RANGE:
|
||||||
|
case EPPI_RANGE:
|
||||||
|
return true;
|
||||||
|
default:
|
||||||
|
return false;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __iomem *gic_dist_base(struct irq_data *d)
|
static inline void __iomem *gic_dist_base(struct irq_data *d)
|
||||||
{
|
{
|
||||||
switch (get_intid_range(d)) {
|
switch (get_intid_range(d)) {
|
||||||
|
case SGI_RANGE:
|
||||||
case PPI_RANGE:
|
case PPI_RANGE:
|
||||||
case EPPI_RANGE:
|
case EPPI_RANGE:
|
||||||
/* SGI+PPI -> SGI_base for this CPU */
|
/* SGI+PPI -> SGI_base for this CPU */
|
||||||
|
@ -254,6 +266,7 @@ static void gic_enable_redist(bool enable)
|
||||||
 static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
 {
 	switch (get_intid_range(d)) {
+	case SGI_RANGE:
 	case PPI_RANGE:
 	case SPI_RANGE:
 		*index = d->hwirq;
@@ -373,7 +386,7 @@ static int gic_irq_set_irqchip_state(struct irq_data *d,
 {
 	u32 reg;
 
-	if (d->hwirq >= 8192) /* PPI/SPI only */
+	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
 		return -EINVAL;
 
 	switch (which) {
@@ -540,12 +553,12 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 	u32 offset, index;
 	int ret;
 
-	/* Interrupt configuration for SGIs can't be changed */
-	if (irq < 16)
-		return -EINVAL;
-
 	range = get_intid_range(d);
 
+	/* Interrupt configuration for SGIs can't be changed */
+	if (range == SGI_RANGE)
+		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
+
 	/* SPIs have restrictions on the supported types */
 	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
 	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
@@ -573,6 +586,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 {
+	if (get_intid_range(d) == SGI_RANGE)
+		return -EINVAL;
+
 	if (vcpu)
 		irqd_set_forwarded_to_vcpu(d);
 	else
@@ -647,38 +663,14 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 	if ((irqnr >= 1020 && irqnr <= 1023))
 		return;
 
-	/* Treat anything but SGIs in a uniform way */
-	if (likely(irqnr > 15)) {
-		int err;
-
-		if (static_branch_likely(&supports_deactivate_key))
-			gic_write_eoir(irqnr);
-		else
-			isb();
-
-		err = handle_domain_irq(gic_data.domain, irqnr, regs);
-		if (err) {
-			WARN_ONCE(true, "Unexpected interrupt received!\n");
-			gic_deactivate_unhandled(irqnr);
-		}
-		return;
-	}
-	if (irqnr < 16) {
+	if (static_branch_likely(&supports_deactivate_key))
 		gic_write_eoir(irqnr);
-		if (static_branch_likely(&supports_deactivate_key))
-			gic_write_dir(irqnr);
-#ifdef CONFIG_SMP
-		/*
-		 * Unlike GICv2, we don't need an smp_rmb() here.
-		 * The control dependency from gic_read_iar to
-		 * the ISB in gic_write_eoir is enough to ensure
-		 * that any shared data read by handle_IPI will
-		 * be read after the ACK.
-		 */
-		handle_IPI(irqnr, regs);
-#else
-		WARN_ONCE(true, "Unexpected SGI received!\n");
-#endif
+	else
+		isb();
+
+	if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
+		WARN_ONCE(true, "Unexpected interrupt received!\n");
+		gic_deactivate_unhandled(irqnr);
 	}
 }
@@ -1132,11 +1124,11 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
 	gic_write_sgi1r(val);
 }
 
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 {
 	int cpu;
 
-	if (WARN_ON(irq >= 16))
+	if (WARN_ON(d->hwirq >= 16))
 		return;
 
 	/*
@@ -1150,7 +1142,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 		u16 tlist;
 
 		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
-		gic_send_sgi(cluster_id, tlist, irq);
+		gic_send_sgi(cluster_id, tlist, d->hwirq);
 	}
 
 	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
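With SGIs turned into ordinary Linux IRQs, the architecture code no longer calls into the driver through set_smp_cross_call(); it raises an IPI via the generic __ipi_send_mask() helper, which dispatches to the chip's .ipi_send_mask callback (gic_ipi_send_mask() above). A minimal sketch of that arch-side path; the ipi_desc[] bookkeeping and NR_IPI count are illustrative placeholders, not part of this patch:

#include <linux/irq.h>
#include <linux/irqdesc.h>

#define NR_IPI 7                                /* illustrative count */
static struct irq_desc *ipi_desc[NR_IPI];       /* filled at IPI setup time */

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        /* Resolves to the irqchip's .ipi_send_mask, i.e. gic_ipi_send_mask() */
        __ipi_send_mask(ipi_desc[ipinr], target);
}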
@@ -1159,10 +1151,24 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
 static void __init gic_smp_init(void)
 {
-	set_smp_cross_call(gic_raise_softirq);
+	struct irq_fwspec sgi_fwspec = {
+		.fwnode		= gic_data.fwnode,
+		.param_count	= 1,
+	};
+	int base_sgi;
+
 	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
 				  "irqchip/arm/gicv3:starting",
 				  gic_starting_cpu, NULL);
+
+	/* Register all 8 non-secure SGIs */
+	base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
+					   NUMA_NO_NODE, &sgi_fwspec,
+					   false, NULL);
+	if (WARN_ON(base_sgi <= 0))
+		return;
+
+	set_smp_ipi_range(base_sgi, 8);
 }
 
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
@@ -1211,6 +1217,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 }
 #else
 #define gic_set_affinity	NULL
+#define gic_ipi_send_mask	NULL
 #define gic_smp_init()		do { } while(0)
 #endif
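set_smp_ipi_range() hands the freshly allocated virq range back to the architecture, which then binds its IPI handlers with the stock per-CPU IRQ API instead of a private hook. A sketch of what the consuming side typically looks like; ipi_handler() and the per-CPU dummy cookie are assumed names for illustration, not from this patch:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, ipi_dummy_dev);  /* per-CPU cookie for request_percpu_irq() */
static int ipi_irq_base;

static irqreturn_t ipi_handler(int irq, void *data)
{
        /* dispatch on (irq - ipi_irq_base)... */
        return IRQ_HANDLED;
}

void __init set_smp_ipi_range(int ipi_base, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                int err = request_percpu_irq(ipi_base + i, ipi_handler,
                                             "IPI", &ipi_dummy_dev);
                WARN_ON(err);
        }

        ipi_irq_base = ipi_base;
        /* each CPU then calls enable_percpu_irq() on its way online */
}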
@@ -1253,6 +1260,7 @@ static struct irq_chip gic_chip = {
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
 	.irq_nmi_setup		= gic_irq_nmi_setup,
 	.irq_nmi_teardown	= gic_irq_nmi_teardown,
+	.ipi_send_mask		= gic_ipi_send_mask,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
 				  IRQCHIP_MASK_ON_SUSPEND,
@@ -1270,6 +1278,7 @@ static struct irq_chip gic_eoimode1_chip = {
 	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
 	.irq_nmi_setup		= gic_irq_nmi_setup,
 	.irq_nmi_teardown	= gic_irq_nmi_teardown,
+	.ipi_send_mask		= gic_ipi_send_mask,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
 				  IRQCHIP_MASK_ON_SUSPEND,
@@ -1284,6 +1293,13 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 		chip = &gic_eoimode1_chip;
 
 	switch (__get_intid_range(hw)) {
+	case SGI_RANGE:
+		irq_set_percpu_devid(irq);
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
+				    handle_percpu_devid_fasteoi_ipi,
+				    NULL, NULL);
+		break;
+
 	case PPI_RANGE:
 	case EPPI_RANGE:
 		irq_set_percpu_devid(irq);
@@ -1313,13 +1329,17 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 	return 0;
 }
 
-#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
-
 static int gic_irq_domain_translate(struct irq_domain *d,
 				    struct irq_fwspec *fwspec,
 				    unsigned long *hwirq,
 				    unsigned int *type)
 {
+	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
+		*hwirq = fwspec->param[0];
+		*type = IRQ_TYPE_EDGE_RISING;
+		return 0;
+	}
+
 	if (is_of_node(fwspec->fwnode)) {
 		if (fwspec->param_count < 3)
 			return -EINVAL;
|
||||||
|
|
||||||
gic_update_rdist_properties();
|
gic_update_rdist_properties();
|
||||||
|
|
||||||
gic_smp_init();
|
|
||||||
gic_dist_init();
|
gic_dist_init();
|
||||||
gic_cpu_init();
|
gic_cpu_init();
|
||||||
|
gic_smp_init();
|
||||||
gic_cpu_pm_init();
|
gic_cpu_pm_init();
|
||||||
|
|
||||||
if (gic_dist_supports_lpis()) {
|
if (gic_dist_supports_lpis()) {
|
||||||
|
|
|
drivers/irqchip/irq-gic.c

@@ -83,9 +83,6 @@ struct gic_chip_data {
 #endif
 	struct irq_domain *domain;
 	unsigned int gic_irqs;
-#ifdef CONFIG_GIC_NON_BANKED
-	void __iomem *(*get_base)(union gic_base *);
-#endif
 };
 
 #ifdef CONFIG_BL_SWITCHER
@@ -124,36 +121,30 @@ static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
 
 static struct gic_kvm_info gic_v2_kvm_info;
 
+static DEFINE_PER_CPU(u32, sgi_intid);
+
 #ifdef CONFIG_GIC_NON_BANKED
-static void __iomem *gic_get_percpu_base(union gic_base *base)
-{
-	return raw_cpu_read(*base->percpu_base);
-}
+static DEFINE_STATIC_KEY_FALSE(frankengic_key);
 
-static void __iomem *gic_get_common_base(union gic_base *base)
+static void enable_frankengic(void)
 {
-	return base->common_base;
+	static_branch_enable(&frankengic_key);
 }
 
-static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+static inline void __iomem *__get_base(union gic_base *base)
 {
-	return data->get_base(&data->dist_base);
-}
+	if (static_branch_unlikely(&frankengic_key))
+		return raw_cpu_read(*base->percpu_base);
 
-static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
-{
-	return data->get_base(&data->cpu_base);
+	return base->common_base;
 }
 
-static inline void gic_set_base_accessor(struct gic_chip_data *data,
-					 void __iomem *(*f)(union gic_base *))
-{
-	data->get_base = f;
-}
+#define gic_data_dist_base(d)	__get_base(&(d)->dist_base)
+#define gic_data_cpu_base(d)	__get_base(&(d)->cpu_base)
 #else
 #define gic_data_dist_base(d)	((d)->dist_base.common_base)
 #define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
-#define gic_set_base_accessor(d, f)
+#define enable_frankengic()	do { } while(0)
 #endif
 
 static inline void __iomem *gic_dist_base(struct irq_data *d)
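The GIC_NON_BANKED path above trades the old per-GIC get_base function pointer for a static key: the common case compiles down to a patched branch instead of an indirect call on every distributor/CPU-interface access. The pattern in isolation, with illustrative names only:

#include <linux/init.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(quirk_key);

static int read_reg_common(void) { return 0; }  /* stand-ins for the real */
static int read_reg_percpu(void) { return 1; }  /* register accessors     */

static inline int read_reg(void)
{
        if (static_branch_unlikely(&quirk_key))
                return read_reg_percpu();       /* rare, quirky hardware */

        return read_reg_common();               /* default fast path */
}

static void __init detect_quirk(void)
{
        /* flipped once at probe time; every read_reg() call site is patched */
        static_branch_enable(&quirk_key);
}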
@@ -226,16 +217,26 @@ static void gic_unmask_irq(struct irq_data *d)
 
 static void gic_eoi_irq(struct irq_data *d)
 {
-	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+	u32 hwirq = gic_irq(d);
+
+	if (hwirq < 16)
+		hwirq = this_cpu_read(sgi_intid);
+
+	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
 }
 
 static void gic_eoimode1_eoi_irq(struct irq_data *d)
 {
+	u32 hwirq = gic_irq(d);
+
 	/* Do not deactivate an IRQ forwarded to a vcpu. */
 	if (irqd_is_forwarded_to_vcpu(d))
 		return;
 
-	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+	if (hwirq < 16)
+		hwirq = this_cpu_read(sgi_intid);
+
+	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
 }
 
 static int gic_irq_set_irqchip_state(struct irq_data *d,
@@ -295,7 +296,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
 	/* Interrupt configuration for SGIs can't be changed */
 	if (gicirq < 16)
-		return -EINVAL;
+		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
 
 	/* SPIs have restrictions on the supported types */
 	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
@@ -315,7 +316,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 {
 	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
-	if (cascading_gic_irq(d))
+	if (cascading_gic_irq(d) || gic_irq(d) < 16)
 		return -EINVAL;
 
 	if (vcpu)
@@ -325,28 +326,6 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 	return 0;
 }
 
-#ifdef CONFIG_SMP
-static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
-			    bool force)
-{
-	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
-	unsigned int cpu;
-
-	if (!force)
-		cpu = cpumask_any_and(mask_val, cpu_online_mask);
-	else
-		cpu = cpumask_first(mask_val);
-
-	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
-		return -EINVAL;
-
-	writeb_relaxed(gic_cpu_map[cpu], reg);
-	irq_data_update_effective_affinity(d, cpumask_of(cpu));
-
-	return IRQ_SET_MASK_OK_DONE;
-}
-#endif
-
 static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
 	u32 irqstat, irqnr;
@@ -357,31 +336,33 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
 		irqnr = irqstat & GICC_IAR_INT_ID_MASK;
 
-		if (likely(irqnr > 15 && irqnr < 1020)) {
-			if (static_branch_likely(&supports_deactivate_key))
-				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-			isb();
-			handle_domain_irq(gic->domain, irqnr, regs);
-			continue;
-		}
-		if (irqnr < 16) {
+		if (unlikely(irqnr >= 1020))
+			break;
+
+		if (static_branch_likely(&supports_deactivate_key))
 			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-			if (static_branch_likely(&supports_deactivate_key))
-				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
-#ifdef CONFIG_SMP
-			/*
-			 * Ensure any shared data written by the CPU sending
-			 * the IPI is read after we've read the ACK register
-			 * on the GIC.
-			 *
-			 * Pairs with the write barrier in gic_raise_softirq
-			 */
+		isb();
+
+		/*
+		 * Ensure any shared data written by the CPU sending the IPI
+		 * is read after we've read the ACK register on the GIC.
+		 *
+		 * Pairs with the write barrier in gic_ipi_send_mask
+		 */
+		if (irqnr <= 15) {
 			smp_rmb();
-			handle_IPI(irqnr, regs);
-#endif
-			continue;
+
+			/*
+			 * The GIC encodes the source CPU in GICC_IAR,
+			 * leading to the deactivation to fail if not
+			 * written back as is to GICC_EOI. Stash the INTID
+			 * away for gic_eoi_irq() to write back. This only
+			 * works because we don't nest SGIs...
+			 */
+			this_cpu_write(sgi_intid, irqstat);
 		}
-		break;
+
+		handle_domain_irq(gic->domain, irqnr, regs);
 	} while (1);
 }
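The smp_rmb() on the receive side pairs with the dmb(ishst) issued in gic_ipi_send_mask() before the SOFTINT write, so any payload the sender prepared is visible once the IPI has been acknowledged. The pairing, reduced to its generic shape; raise_ipi() and ack_irq() are hypothetical stand-ins for the GIC register accesses:

#include <asm/barrier.h>

static int ipi_payload;

static void raise_ipi(void) { /* write GIC_DIST_SOFTINT */ }
static void ack_irq(void)   { /* read GICC_IAR */ }

static void sender(int value)
{
        ipi_payload = value;    /* 1: publish the payload */
        smp_wmb();              /* 2: order it before the IPI trigger */
        raise_ipi();            /* 3: make the SGI pending on the target */
}

static int receiver(void)
{
        ack_irq();              /* 1: acknowledge the SGI */
        smp_rmb();              /* 2: pairs with smp_wmb() in sender() */
        return ipi_payload;     /* 3: guaranteed to observe 'value' */
}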
@@ -728,11 +709,6 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 	int i;
 
 	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
-#ifdef CONFIG_GIC_NON_BANKED
-		/* Skip over unused GICs */
-		if (!gic_data[i].get_base)
-			continue;
-#endif
 		switch (cmd) {
 		case CPU_PM_ENTER:
 			gic_cpu_save(&gic_data[i]);
@@ -795,14 +771,34 @@ static int gic_pm_init(struct gic_chip_data *gic)
 #endif
 
 #ifdef CONFIG_SMP
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+	unsigned int cpu;
+
+	if (!force)
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	writeb_relaxed(gic_cpu_map[cpu], reg);
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 {
 	int cpu;
 	unsigned long flags, map = 0;
 
 	if (unlikely(nr_cpu_ids == 1)) {
 		/* Only one CPU? let's do a self-IPI... */
-		writel_relaxed(2 << 24 | irq,
+		writel_relaxed(2 << 24 | d->hwirq,
 			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 		return;
 	}
@@ -820,10 +816,41 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	dmb(ishst);
 
 	/* this always happens on GIC0 */
-	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+	writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 
 	gic_unlock_irqrestore(flags);
 }
 
+static int gic_starting_cpu(unsigned int cpu)
+{
+	gic_cpu_init(&gic_data[0]);
+	return 0;
+}
+
+static __init void gic_smp_init(void)
+{
+	struct irq_fwspec sgi_fwspec = {
+		.fwnode		= gic_data[0].domain->fwnode,
+		.param_count	= 1,
+	};
+	int base_sgi;
+
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+				  "irqchip/arm/gic:starting",
+				  gic_starting_cpu, NULL);
+
+	base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
+					   NUMA_NO_NODE, &sgi_fwspec,
+					   false, NULL);
+	if (WARN_ON(base_sgi <= 0))
+		return;
+
+	set_smp_ipi_range(base_sgi, 8);
+}
+#else
+#define gic_smp_init()		do { } while(0)
+#define gic_set_affinity	NULL
+#define gic_ipi_send_mask	NULL
 #endif
 
 #ifdef CONFIG_BL_SWITCHER
@ -970,15 +997,24 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
|
||||||
{
|
{
|
||||||
struct gic_chip_data *gic = d->host_data;
|
struct gic_chip_data *gic = d->host_data;
|
||||||
|
|
||||||
if (hw < 32) {
|
switch (hw) {
|
||||||
|
case 0 ... 15:
|
||||||
|
irq_set_percpu_devid(irq);
|
||||||
|
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
|
||||||
|
handle_percpu_devid_fasteoi_ipi,
|
||||||
|
NULL, NULL);
|
||||||
|
break;
|
||||||
|
case 16 ... 31:
|
||||||
irq_set_percpu_devid(irq);
|
irq_set_percpu_devid(irq);
|
||||||
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
|
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
|
||||||
handle_percpu_devid_irq, NULL, NULL);
|
handle_percpu_devid_irq, NULL, NULL);
|
||||||
} else {
|
break;
|
||||||
|
default:
|
||||||
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
|
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
|
||||||
handle_fasteoi_irq, NULL, NULL);
|
handle_fasteoi_irq, NULL, NULL);
|
||||||
irq_set_probe(irq);
|
irq_set_probe(irq);
|
||||||
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
|
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -992,19 +1028,26 @@ static int gic_irq_domain_translate(struct irq_domain *d,
|
||||||
unsigned long *hwirq,
|
unsigned long *hwirq,
|
||||||
unsigned int *type)
|
unsigned int *type)
|
||||||
{
|
{
|
||||||
|
if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
|
||||||
|
*hwirq = fwspec->param[0];
|
||||||
|
*type = IRQ_TYPE_EDGE_RISING;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
if (is_of_node(fwspec->fwnode)) {
|
if (is_of_node(fwspec->fwnode)) {
|
||||||
if (fwspec->param_count < 3)
|
if (fwspec->param_count < 3)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
/* Get the interrupt number and add 16 to skip over SGIs */
|
switch (fwspec->param[0]) {
|
||||||
*hwirq = fwspec->param[1] + 16;
|
case 0: /* SPI */
|
||||||
|
*hwirq = fwspec->param[1] + 32;
|
||||||
/*
|
break;
|
||||||
* For SPIs, we need to add 16 more to get the GIC irq
|
case 1: /* PPI */
|
||||||
* ID number
|
*hwirq = fwspec->param[1] + 16;
|
||||||
*/
|
break;
|
||||||
if (!fwspec->param[0])
|
default:
|
||||||
*hwirq += 16;
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
|
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
|
||||||
|
|
||||||
|
@ -1027,12 +1070,6 @@ static int gic_irq_domain_translate(struct irq_domain *d,
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int gic_starting_cpu(unsigned int cpu)
|
|
||||||
{
|
|
||||||
gic_cpu_init(&gic_data[0]);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
||||||
unsigned int nr_irqs, void *arg)
|
unsigned int nr_irqs, void *arg)
|
||||||
{
|
{
|
||||||
|
@ -1079,10 +1116,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
|
||||||
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
|
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
if (gic == &gic_data[0]) {
|
||||||
if (gic == &gic_data[0])
|
|
||||||
gic->chip.irq_set_affinity = gic_set_affinity;
|
gic->chip.irq_set_affinity = gic_set_affinity;
|
||||||
#endif
|
gic->chip.ipi_send_mask = gic_ipi_send_mask;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int gic_init_bases(struct gic_chip_data *gic,
|
static int gic_init_bases(struct gic_chip_data *gic,
|
||||||
|
@ -1112,7 +1149,7 @@ static int gic_init_bases(struct gic_chip_data *gic,
|
||||||
gic->raw_cpu_base + offset;
|
gic->raw_cpu_base + offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
gic_set_base_accessor(gic, gic_get_percpu_base);
|
enable_frankengic();
|
||||||
} else {
|
} else {
|
||||||
/* Normal, sane GIC... */
|
/* Normal, sane GIC... */
|
||||||
WARN(gic->percpu_offset,
|
WARN(gic->percpu_offset,
|
||||||
|
@ -1120,7 +1157,6 @@ static int gic_init_bases(struct gic_chip_data *gic,
|
||||||
gic->percpu_offset);
|
gic->percpu_offset);
|
||||||
gic->dist_base.common_base = gic->raw_dist_base;
|
gic->dist_base.common_base = gic->raw_dist_base;
|
||||||
gic->cpu_base.common_base = gic->raw_cpu_base;
|
gic->cpu_base.common_base = gic->raw_cpu_base;
|
||||||
gic_set_base_accessor(gic, gic_get_common_base);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1199,12 +1235,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
|
||||||
*/
|
*/
|
||||||
for (i = 0; i < NR_GIC_CPU_IF; i++)
|
for (i = 0; i < NR_GIC_CPU_IF; i++)
|
||||||
gic_cpu_map[i] = 0xff;
|
gic_cpu_map[i] = 0xff;
|
||||||
#ifdef CONFIG_SMP
|
|
||||||
set_smp_cross_call(gic_raise_softirq);
|
|
||||||
#endif
|
|
||||||
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
|
|
||||||
"irqchip/arm/gic:starting",
|
|
||||||
gic_starting_cpu, NULL);
|
|
||||||
set_handle_irq(gic_handle_irq);
|
set_handle_irq(gic_handle_irq);
|
||||||
if (static_branch_likely(&supports_deactivate_key))
|
if (static_branch_likely(&supports_deactivate_key))
|
||||||
pr_info("GIC: Using split EOI/Deactivate mode\n");
|
pr_info("GIC: Using split EOI/Deactivate mode\n");
|
||||||
|
@ -1221,6 +1252,8 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
|
||||||
ret = gic_init_bases(gic, handle);
|
ret = gic_init_bases(gic, handle);
|
||||||
if (ret)
|
if (ret)
|
||||||
kfree(name);
|
kfree(name);
|
||||||
|
else if (gic == &gic_data[0])
|
||||||
|
gic_smp_init();
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
|
drivers/irqchip/irq-hip04.c

@@ -171,6 +171,29 @@ static int hip04_irq_set_affinity(struct irq_data *d,
 
 	return IRQ_SET_MASK_OK;
 }
+
+static void hip04_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+	int cpu;
+	unsigned long flags, map = 0;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= hip04_cpu_map[cpu];
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before they observe us issuing the IPI.
+	 */
+	dmb(ishst);
+
+	/* this always happens on GIC0 */
+	writel_relaxed(map << 8 | d->hwirq, hip04_data.dist_base + GIC_DIST_SOFTINT);
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
 #endif
 
 static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
@@ -182,19 +205,9 @@ static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
 		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
 		irqnr = irqstat & GICC_IAR_INT_ID_MASK;
 
-		if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) {
+		if (irqnr <= HIP04_MAX_IRQS)
 			handle_domain_irq(hip04_data.domain, irqnr, regs);
-			continue;
-		}
-		if (irqnr < 16) {
-			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-#ifdef CONFIG_SMP
-			handle_IPI(irqnr, regs);
-#endif
-			continue;
-		}
-		break;
-	} while (1);
+	} while (irqnr > HIP04_MAX_IRQS);
 }
 
 static struct irq_chip hip04_irq_chip = {
@@ -205,6 +218,7 @@ static struct irq_chip hip04_irq_chip = {
 	.irq_set_type		= hip04_irq_set_type,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= hip04_irq_set_affinity,
+	.ipi_send_mask		= hip04_ipi_send_mask,
 #endif
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
@@ -279,39 +293,17 @@ static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
 	writel_relaxed(1, base + GIC_CPU_CTRL);
 }
 
-#ifdef CONFIG_SMP
-static void hip04_raise_softirq(const struct cpumask *mask, unsigned int irq)
-{
-	int cpu;
-	unsigned long flags, map = 0;
-
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
-
-	/* Convert our logical CPU mask into a physical one. */
-	for_each_cpu(cpu, mask)
-		map |= hip04_cpu_map[cpu];
-
-	/*
-	 * Ensure that stores to Normal memory are visible to the
-	 * other CPUs before they observe us issuing the IPI.
-	 */
-	dmb(ishst);
-
-	/* this always happens on GIC0 */
-	writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT);
-
-	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-#endif
-
 static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
 				irq_hw_number_t hw)
 {
-	if (hw < 32) {
+	if (hw < 16) {
+		irq_set_percpu_devid(irq);
+		irq_set_chip_and_handler(irq, &hip04_irq_chip,
+					 handle_percpu_devid_fasteoi_ipi);
+	} else if (hw < 32) {
 		irq_set_percpu_devid(irq);
 		irq_set_chip_and_handler(irq, &hip04_irq_chip,
 					 handle_percpu_devid_irq);
-		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	} else {
 		irq_set_chip_and_handler(irq, &hip04_irq_chip,
 					 handle_fasteoi_irq);
@@ -328,10 +320,13 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
 				  unsigned long *out_hwirq,
 				  unsigned int *out_type)
 {
-	unsigned long ret = 0;
-
 	if (irq_domain_get_of_node(d) != controller)
 		return -EINVAL;
+	if (intsize == 1 && intspec[0] < 16) {
+		*out_hwirq = intspec[0];
+		*out_type = IRQ_TYPE_EDGE_RISING;
+		return 0;
+	}
 	if (intsize < 3)
 		return -EINVAL;
 
@@ -344,7 +339,7 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
 
 	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
 
-	return ret;
+	return 0;
 }
 
 static int hip04_irq_starting_cpu(unsigned int cpu)
@@ -361,7 +356,6 @@ static const struct irq_domain_ops hip04_irq_domain_ops = {
 static int __init
 hip04_of_init(struct device_node *node, struct device_node *parent)
 {
-	irq_hw_number_t hwirq_base = 16;
 	int nr_irqs, irq_base, i;
 
 	if (WARN_ON(!node))
@@ -390,24 +384,21 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
 		nr_irqs = HIP04_MAX_IRQS;
 	hip04_data.nr_irqs = nr_irqs;
 
-	nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
-
-	irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
+	irq_base = irq_alloc_descs(-1, 0, nr_irqs, numa_node_id());
 	if (irq_base < 0) {
 		pr_err("failed to allocate IRQ numbers\n");
 		return -EINVAL;
 	}
 
 	hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
-						  hwirq_base,
+						  0,
 						  &hip04_irq_domain_ops,
 						  &hip04_data);
 
 	if (WARN_ON(!hip04_data.domain))
 		return -EINVAL;
 
 #ifdef CONFIG_SMP
-	set_smp_cross_call(hip04_raise_softirq);
+	set_smp_ipi_range(irq_base, 16);
 #endif
 	set_handle_irq(hip04_handle_irq);
include/linux/irq.h

@@ -71,6 +71,7 @@ enum irqchip_irq_state;
  *				  it from the spurious interrupt detection
  *				  mechanism and from core side polling.
  * IRQ_DISABLE_UNLAZY		- Disable lazy irq disable
+ * IRQ_HIDDEN			- Don't show up in /proc/interrupts
  */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@@ -97,13 +98,14 @@ enum {
 	IRQ_PER_CPU_DEVID	= (1 << 17),
 	IRQ_IS_POLLED		= (1 << 18),
 	IRQ_DISABLE_UNLAZY	= (1 << 19),
+	IRQ_HIDDEN		= (1 << 20),
 };
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
 	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
 
 #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
 
@@ -634,6 +636,7 @@ static inline int irq_set_parent(int irq, int parent_irq)
  */
 extern void handle_level_irq(struct irq_desc *desc);
 extern void handle_fasteoi_irq(struct irq_desc *desc);
+extern void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc);
 extern void handle_edge_irq(struct irq_desc *desc);
 extern void handle_edge_eoi_irq(struct irq_desc *desc);
 extern void handle_simple_irq(struct irq_desc *desc);
kernel/irq/chip.c

@@ -944,6 +944,33 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
 		chip->irq_eoi(&desc->irq_data);
 }
 
+/**
+ * handle_percpu_devid_fasteoi_ipi - Per CPU local IPI handler with per cpu
+ *				     dev ids
+ * @desc:	the interrupt description structure for this irq
+ *
+ * The biggest difference with the IRQ version is that the interrupt is
+ * EOIed early, as the IPI could result in a context switch, and we need to
+ * make sure the IPI can fire again. We also assume that the arch code has
+ * registered an action. If not, we are positively doomed.
+ */
+void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irqaction *action = desc->action;
+	unsigned int irq = irq_desc_get_irq(desc);
+	irqreturn_t res;
+
+	__kstat_incr_irqs_this_cpu(desc);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+
+	trace_irq_handler_entry(irq, action);
+	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
+	trace_irq_handler_exit(irq, action, res);
+}
+
 /**
  * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
  *				     dev ids
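Note the ordering relative to handle_percpu_devid_irq(): the EOI is performed before the action runs, not after. That matters for actions that can context-switch. A hedged sketch of such an action, run by this flow (the registration itself is arch code, not shown here):

#include <linux/interrupt.h>
#include <linux/sched.h>

/*
 * Since the SGI is already EOIed by the flow handler, another CPU can
 * re-raise this IPI while the handler -- or the context switch it
 * triggers -- is still in flight, without the event being lost behind
 * an un-EOIed interrupt.
 */
static irqreturn_t resched_ipi_handler(int irq, void *dev_id)
{
        scheduler_ipi();        /* may end up in a context switch */
        return IRQ_HANDLED;
}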
kernel/irq/debugfs.c

@@ -136,6 +136,7 @@ static const struct irq_bit_descr irqdesc_states[] = {
 	BIT_MASK_DESCR(_IRQ_PER_CPU_DEVID),
 	BIT_MASK_DESCR(_IRQ_IS_POLLED),
 	BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
+	BIT_MASK_DESCR(_IRQ_HIDDEN),
 };
 
 static const struct irq_bit_descr irqdesc_istates[] = {
kernel/irq/proc.c

@@ -485,7 +485,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	rcu_read_lock();
 	desc = irq_to_desc(i);
-	if (!desc)
+	if (!desc || irq_settings_is_hidden(desc))
 		goto outsparse;
 
 	if (desc->kstat_irqs)
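With IRQ_HIDDEN in place, an architecture can keep its per-IPI lines out of the generic /proc/interrupts listing (they are typically reported through arch_show_interrupts() instead). Marking an IRQ hidden is a one-liner; a sketch, assuming virq is an already-allocated IPI:

#include <linux/irq.h>

static void hide_ipi(unsigned int virq)
{
        /* show_interrupts() above now skips this descriptor */
        irq_set_status_flags(virq, IRQ_HIDDEN);
}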
kernel/irq/settings.h

@@ -17,6 +17,7 @@ enum {
 	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
 	_IRQ_IS_POLLED		= IRQ_IS_POLLED,
 	_IRQ_DISABLE_UNLAZY	= IRQ_DISABLE_UNLAZY,
+	_IRQ_HIDDEN		= IRQ_HIDDEN,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
@@ -31,6 +32,7 @@ enum {
 #define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
 #define IRQ_IS_POLLED		GOT_YOU_MORON
 #define IRQ_DISABLE_UNLAZY	GOT_YOU_MORON
+#define IRQ_HIDDEN		GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@@ -167,3 +169,8 @@ static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc)
 {
 	desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY;
 }
+
+static inline bool irq_settings_is_hidden(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_HIDDEN;
+}