mirror of https://gitee.com/openkylin/linux.git
irqchip/sifive-plic: Add support for multiple PLICs
Currently, the PLIC driver supports only one PLIC on the board. However, multiple PLICs can be present on a two-socket RISC-V system. Modify the driver so that each PLIC handler carries information about its own PLIC registers and the irqdomain associated with it.

Tested on a two-socket RISC-V system based on VCU118 FPGAs connected via the OmniXtend protocol.

Signed-off-by: Atish Patra <atish.patra@wdc.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20200302231146.15530-3-atish.patra@wdc.com
This commit is contained in:
parent ccbe80bad5
commit f1ad1133b1
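For orientation before the full diff: the core of the change is a new per-PLIC private structure plus a back-pointer from each per-hart handler to its owning PLIC, replacing the former globals plic_regs and plic_irqdomain. The sketch below is condensed from the diff itself (only the members relevant to this patch are shown; the remaining struct plic_handler fields from the existing driver are elided):

	#include <linux/cpumask.h>
	#include <linux/irqdomain.h>
	#include <linux/spinlock.h>
	#include <linux/io.h>

	/*
	 * One plic_priv is allocated per PLIC device-tree node, so several
	 * PLICs can coexist instead of sharing a single global plic_regs.
	 */
	struct plic_priv {
		struct cpumask lmask;		/* harts (CPUs) wired to this PLIC */
		struct irq_domain *irqdomain;	/* per-PLIC irqdomain; chip data points back to priv */
		void __iomem *regs;		/* this PLIC's MMIO register window */
	};

	/* Per-hart handler context; unrelated members are elided in this sketch. */
	struct plic_handler {
		bool present;
		raw_spinlock_t enable_lock;
		void __iomem *enable_base;
		struct plic_priv *priv;		/* back-pointer to the owning PLIC */
	};

With the back-pointer in place, plic_irq_toggle() and plic_handle_irq() resolve the correct register window and irqdomain through handler->priv rather than the old globals, and plic_irq_toggle() only touches handlers whose CPU is in the owning PLIC's lmask.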
drivers/irqchip/irq-sifive-plic.c

@@ -59,7 +59,11 @@
 #define PLIC_DISABLE_THRESHOLD		0xf
 #define PLIC_ENABLE_THRESHOLD		0
 
-static void __iomem *plic_regs;
+struct plic_priv {
+	struct cpumask lmask;
+	struct irq_domain *irqdomain;
+	void __iomem *regs;
+};
 
 struct plic_handler {
 	bool			present;
@@ -70,6 +74,7 @@ struct plic_handler {
 	 */
 	raw_spinlock_t		enable_lock;
 	void __iomem		*enable_base;
+	struct plic_priv	*priv;
 };
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
@@ -88,31 +93,40 @@ static inline void plic_toggle(struct plic_handler *handler,
 }
 
 static inline void plic_irq_toggle(const struct cpumask *mask,
-				   int hwirq, int enable)
+				   struct irq_data *d, int enable)
 {
 	int cpu;
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
 
-	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
 	for_each_cpu(cpu, mask) {
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
-		if (handler->present)
-			plic_toggle(handler, hwirq, enable);
+		if (handler->present &&
+		    cpumask_test_cpu(cpu, &handler->priv->lmask))
+			plic_toggle(handler, d->hwirq, enable);
 	}
 }
 
 static void plic_irq_unmask(struct irq_data *d)
 {
-	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
-					   cpu_online_mask);
+	struct cpumask amask;
+	unsigned int cpu;
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
+	cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+			      &amask);
 	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
 		return;
-	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	plic_irq_toggle(cpumask_of(cpu), d, 1);
 }
 
 static void plic_irq_mask(struct irq_data *d)
 {
-	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+	plic_irq_toggle(&priv->lmask, d, 0);
 }
 
 #ifdef CONFIG_SMP
@@ -120,17 +134,21 @@ static int plic_set_affinity(struct irq_data *d,
 			     const struct cpumask *mask_val, bool force)
 {
 	unsigned int cpu;
+	struct cpumask amask;
+	struct plic_priv *priv = irq_get_chip_data(d->irq);
+
+	cpumask_and(&amask, &priv->lmask, mask_val);
 
 	if (force)
-		cpu = cpumask_first(mask_val);
+		cpu = cpumask_first(&amask);
 	else
-		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+		cpu = cpumask_any_and(&amask, cpu_online_mask);
 
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
-	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	plic_irq_toggle(&priv->lmask, d, 0);
+	plic_irq_toggle(cpumask_of(cpu), d, 1);
 
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -191,8 +209,6 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
 	.free		= irq_domain_free_irqs_top,
 };
 
-static struct irq_domain *plic_irqdomain;
-
 /*
  * Handling an interrupt is a two-step process: first you claim the interrupt
  * by reading the claim register, then you complete the interrupt by writing
@@ -209,7 +225,7 @@ static void plic_handle_irq(struct pt_regs *regs)
 
 	csr_clear(CSR_IE, IE_EIE);
 	while ((hwirq = readl(claim))) {
-		int irq = irq_find_mapping(plic_irqdomain, hwirq);
+		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
 
 		if (unlikely(irq <= 0))
 			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
@@ -265,16 +281,18 @@ static int __init plic_init(struct device_node *node,
 {
 	int error = 0, nr_contexts, nr_handlers = 0, i;
 	u32 nr_irqs;
+	struct plic_priv *priv;
 
-	if (plic_regs) {
-		pr_warn("PLIC already present.\n");
-		return -ENXIO;
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regs = of_iomap(node, 0);
+	if (WARN_ON(!priv->regs)) {
+		error = -EIO;
+		goto out_free_priv;
 	}
 
-	plic_regs = of_iomap(node, 0);
-	if (WARN_ON(!plic_regs))
-		return -EIO;
-
 	error = -EINVAL;
 	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
 	if (WARN_ON(!nr_irqs))
@@ -287,9 +305,9 @@ static int __init plic_init(struct device_node *node,
 		goto out_iounmap;
 
 	error = -ENOMEM;
-	plic_irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
-			&plic_irqdomain_ops, NULL);
-	if (WARN_ON(!plic_irqdomain))
+	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
+			&plic_irqdomain_ops, priv);
+	if (WARN_ON(!priv->irqdomain))
 		goto out_iounmap;
 
 	for (i = 0; i < nr_contexts; i++) {
@@ -334,13 +352,14 @@ static int __init plic_init(struct device_node *node,
 			goto done;
 		}
 
+		cpumask_set_cpu(cpu, &priv->lmask);
 		handler->present = true;
 		handler->hart_base =
-			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
 		raw_spin_lock_init(&handler->enable_lock);
 		handler->enable_base =
-			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
+			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
+		handler->priv = priv;
 done:
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
 			plic_toggle(handler, hwirq, 0);
@@ -356,7 +375,9 @@ static int __init plic_init(struct device_node *node,
 	return 0;
 
 out_iounmap:
-	iounmap(plic_regs);
+	iounmap(priv->regs);
+out_free_priv:
+	kfree(priv);
 	return error;
 }
 