Merge git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
commit 80f77e54f1
@@ -1713,6 +1713,13 @@
    irqaffinity=    [SMP] Set the default irq affinity mask
            The argument is a cpu list, as described above.

    irqchip.gicv2_force_probe=
            [ARM, ARM64]
            Format: <bool>
            Force the kernel to look for the second 4kB page
            of a GICv2 controller even if the memory range
            exposed by the device tree is too small.
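            A user would opt in from the boot command line,
            for example (illustrative usage, not part of the
            patch text): irqchip.gicv2_force_probe=1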

    irqfixup    [HW]
            When an interrupt is not handled search all handlers
            for it. Intended to get systems with badly broken
@@ -70,6 +70,7 @@ stable kernels.
| | | | |
| Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 |
| Hisilicon | Hip0{6,7} | #161010701 | N/A |
| Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
| | | | |
| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
@@ -0,0 +1,36 @@
Amlogic meson GPIO interrupt controller

Meson SoCs contain an interrupt controller which is able to watch the SoC
pads and generate an interrupt on edge or level. The controller is essentially
a 256 pads to 8 GIC interrupt multiplexer, with a filter block to select edge
or level and polarity. It does not expose all 256 mux inputs because the
documentation shows that the upper part is not mapped to any pad. The actual
number of interrupts exposed depends on the SoC.

Required properties:

- compatible : must have "amlogic,meson-gpio-intc" and either
   "amlogic,meson8-gpio-intc" for meson8 SoCs (S802) or
   "amlogic,meson8b-gpio-intc" for meson8b SoCs (S805) or
   "amlogic,meson-gxbb-gpio-intc" for GXBB SoCs (S905) or
   "amlogic,meson-gxl-gpio-intc" for GXL SoCs (S905X, S912)
- interrupt-parent : a phandle to the GIC the interrupts are routed to.
   Usually this is provided at the root level of the device tree as it is
   common to most of the SoC.
- reg : Specifies base physical address and size of the registers.
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
   interrupt source. The value must be 2.
- meson,channel-interrupts: Array with the 8 upstream hwirq numbers. These
   are the hwirqs used on the parent interrupt controller.

Example:

gpio_interrupt: interrupt-controller@9880 {
    compatible = "amlogic,meson-gxbb-gpio-intc",
             "amlogic,meson-gpio-intc";
    reg = <0x0 0x9880 0x0 0x10>;
    interrupt-controller;
    #interrupt-cells = <2>;
    meson,channel-interrupts = <64 65 66 67 68 69 70 71>;
};
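Given the two-cell specifier, a consumer device could then reference the
multiplexer like this (a hypothetical consumer node, not part of this patch;
IRQ_TYPE_EDGE_RISING comes from dt-bindings/interrupt-controller/irq.h):

some_device {
    interrupt-parent = <&gpio_interrupt>;
    interrupts = <25 IRQ_TYPE_EDGE_RISING>; /* pad hwirq 25, rising edge */
};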

@@ -75,6 +75,10 @@ These nodes must have the following properties:
- reg: Specifies the base physical address and size of the ITS
  registers.

Optional:
- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
  address and size of the pre-ITS window.

The main GIC node must contain the appropriate #address-cells,
#size-cells and ranges properties for the reg property of all ITS
nodes.
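For illustration, the optional property might appear in an ITS node as
follows (addresses and sizes are hypothetical, not taken from this patch):

    its: interrupt-controller@30020000 {
        compatible = "arm,gic-v3-its";
        msi-controller;
        reg = <0x0 0x30020000 0x0 0x20000>;
        socionext,synquacer-pre-its = <0x58000000 0x200000>;
    };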

@@ -2,7 +2,8 @@ Broadcom Generic Level 2 Interrupt Controller

Required properties:

- compatible: should be "brcm,l2-intc"
- compatible: should be "brcm,l2-intc" for latched interrupt controllers
              should be "brcm,bcm7271-l2-intc" for level interrupt controllers
- reg: specifies the base physical address and size of the registers
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an

@@ -13,6 +13,9 @@ Required properties:
    - "renesas,irqc-r8a7793" (R-Car M2-N)
    - "renesas,irqc-r8a7794" (R-Car E2)
    - "renesas,intc-ex-r8a7795" (R-Car H3)
    - "renesas,intc-ex-r8a7796" (R-Car M3-W)
    - "renesas,intc-ex-r8a77970" (R-Car V3M)
    - "renesas,intc-ex-r8a77995" (R-Car D3)
- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
  interrupts.txt in this directory
- clocks: Must contain a reference to the functional clock.

@@ -196,6 +196,11 @@ static inline void gic_write_ctlr(u32 val)
    isb();
}

static inline u32 gic_read_ctlr(void)
{
    return read_sysreg(ICC_CTLR);
}

static inline void gic_write_grpen1(u32 val)
{
    write_sysreg(val, ICC_IGRPEN1);

@@ -539,6 +539,25 @@ config QCOM_QDF2400_ERRATUM_0065

      If unsure, say Y.

config SOCIONEXT_SYNQUACER_PREITS
    bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
    default y
    help
      Socionext Synquacer SoCs implement a separate h/w block to generate
      MSI doorbell writes with non-zero values for the device ID.

      If unsure, say Y.

config HISILICON_ERRATUM_161600802
    bool "Hip07 161600802: Erroneous redistributor VLPI base"
    default y
    help
      The HiSilicon Hip07 SoC uses the wrong redistributor base
      when issued ITS commands such as VMOVP and VMAPP, and requires
      a 128kB offset to be applied to the target address in these commands.

      If unsure, say Y.

endmenu

@@ -87,6 +87,11 @@ static inline void gic_write_ctlr(u32 val)
    isb();
}

static inline u32 gic_read_ctlr(void)
{
    return read_sysreg_s(SYS_ICC_CTLR_EL1);
}

static inline void gic_write_grpen1(u32 val)
{
    write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1);

@@ -41,8 +41,8 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
                  unsigned int nr_irqs, void *arg);
extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
                  unsigned int nr_irqs);
extern void mp_irqdomain_activate(struct irq_domain *domain,
                  struct irq_data *irq_data);
extern int mp_irqdomain_activate(struct irq_domain *domain,
                 struct irq_data *irq_data, bool early);
extern void mp_irqdomain_deactivate(struct irq_domain *domain,
                    struct irq_data *irq_data);
extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
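The prototype change above reflects the reworked irq_domain activate
callback: it now returns an int so that activation can fail, and takes an
"early" flag for activations performed before the system is fully up. A
minimal sketch of a callback under the new shape (the name and the helper
are hypothetical, not from this patch):

static int demo_domain_activate(struct irq_domain *domain,
                                struct irq_data *irq_data, bool early)
{
    /* demo_reserve_resource() is a hypothetical helper */
    if (!demo_reserve_resource(irq_data->hwirq))
        return -ENOSPC; /* activation may now report failure */
    return 0;
}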

@@ -112,8 +112,8 @@ static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
    irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static void htirq_domain_activate(struct irq_domain *domain,
                  struct irq_data *irq_data)
static int htirq_domain_activate(struct irq_domain *domain,
                 struct irq_data *irq_data, bool early)
{
    struct ht_irq_msg msg;
    struct irq_cfg *cfg = irqd_cfg(irq_data);

@@ -132,6 +132,7 @@ static void htirq_domain_activate(struct irq_domain *domain,
            HT_IRQ_LOW_MT_ARBITRATED) |
        HT_IRQ_LOW_IRQ_MASKED;
    write_ht_irq_msg(irq_data->irq, &msg);
    return 0;
}

static void htirq_domain_deactivate(struct irq_domain *domain,
@@ -2096,7 +2096,7 @@ static inline void __init check_timer(void)
        unmask_ioapic_irq(irq_get_irq_data(0));
    }
    irq_domain_deactivate_irq(irq_data);
    irq_domain_activate_irq(irq_data);
    irq_domain_activate_irq(irq_data, false);
    if (timer_irq_works()) {
        if (disable_timer_pin_1 > 0)
            clear_IO_APIC_pin(0, pin1);

@@ -2118,7 +2118,7 @@ static inline void __init check_timer(void)
         */
        replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
        irq_domain_deactivate_irq(irq_data);
        irq_domain_activate_irq(irq_data);
        irq_domain_activate_irq(irq_data, false);
        legacy_pic->unmask(0);
        if (timer_irq_works()) {
            apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");

@@ -2977,8 +2977,8 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
    irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

void mp_irqdomain_activate(struct irq_domain *domain,
               struct irq_data *irq_data)
int mp_irqdomain_activate(struct irq_domain *domain,
              struct irq_data *irq_data, bool early)
{
    unsigned long flags;
    struct irq_pin_list *entry;

@@ -2988,6 +2988,7 @@ void mp_irqdomain_activate(struct irq_domain *domain,
    for_each_irq_pin(entry, data->irq_2_pin)
        __ioapic_write_entry(entry->apic, entry->pin, data->entry);
    raw_spin_unlock_irqrestore(&ioapic_lock, flags);
    return 0;
}

void mp_irqdomain_deactivate(struct irq_domain *domain,
@@ -127,10 +127,11 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static void uv_domain_activate(struct irq_domain *domain,
                   struct irq_data *irq_data)
static int uv_domain_activate(struct irq_domain *domain,
                  struct irq_data *irq_data, bool early)
{
    uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
    return 0;
}

/*
@@ -140,8 +140,9 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
    return irq_create_fwspec_mapping(&fwspec);
}

static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
                      struct irq_data *irq_data)
static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
                     struct irq_data *irq_data,
                     bool early)
{
    struct xgene_gpio_sb *priv = d->host_data;
    u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);

@@ -150,11 +151,12 @@ static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
        dev_err(priv->gc.parent,
            "Unable to configure XGene GPIO standby pin %d as IRQ\n",
            gpio);
        return;
        return -ENOSPC;
    }

    xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
               gpio * 2, 1);
    return 0;
}

static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
@@ -4170,8 +4170,8 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
    irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void irq_remapping_activate(struct irq_domain *domain,
                   struct irq_data *irq_data)
static int irq_remapping_activate(struct irq_domain *domain,
                  struct irq_data *irq_data, bool early)
{
    struct amd_ir_data *data = irq_data->chip_data;
    struct irq_2_irte *irte_info = &data->irq_2_irte;

@@ -4180,6 +4180,7 @@ static void irq_remapping_activate(struct irq_domain *domain,
    if (iommu)
        iommu->irte_ops->activate(data->entry, irte_info->devid,
                      irte_info->index);
    return 0;
}

static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -1389,12 +1389,13 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
    irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void intel_irq_remapping_activate(struct irq_domain *domain,
                     struct irq_data *irq_data)
static int intel_irq_remapping_activate(struct irq_domain *domain,
                    struct irq_data *irq_data, bool early)
{
    struct intel_ir_data *data = irq_data->chip_data;

    modify_irte(&data->irq_2_iommu, &data->irte_entry);
    return 0;
}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
@@ -1,3 +1,5 @@
menu "IRQ chip support"

config IRQCHIP
    def_bool y
    depends on OF_IRQ

@@ -321,3 +323,13 @@ config IRQ_UNIPHIER_AIDET
    select IRQ_DOMAIN_HIERARCHY
    help
      Support for the UniPhier AIDET (ARM Interrupt Detector).

config MESON_IRQ_GPIO
    bool "Meson GPIO Interrupt Multiplexer"
    depends on ARCH_MESON
    select IRQ_DOMAIN
    select IRQ_DOMAIN_HIERARCHY
    help
      Support Meson SoC Family GPIO Interrupt Multiplexer

endmenu
@@ -79,3 +79,4 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
@@ -76,8 +76,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
        return -ENOMEM;

    i2c_ic->base = of_iomap(node, 0);
    if (IS_ERR(i2c_ic->base)) {
        ret = PTR_ERR(i2c_ic->base);
    if (!i2c_ic->base) {
        ret = -ENOMEM;
        goto err_free_ic;
    }
@@ -1,7 +1,7 @@
/*
 * Generic Broadcom Set Top Box Level 2 Interrupt controller driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 * Copyright (C) 2014-2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -31,35 +31,82 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

/* Register offsets in the L2 interrupt controller */
#define CPU_STATUS  0x00
#define CPU_SET     0x04
#define CPU_CLEAR   0x08
#define CPU_MASK_STATUS 0x0c
#define CPU_MASK_SET    0x10
#define CPU_MASK_CLEAR  0x14
struct brcmstb_intc_init_params {
    irq_flow_handler_t handler;
    int cpu_status;
    int cpu_clear;
    int cpu_mask_status;
    int cpu_mask_set;
    int cpu_mask_clear;
};

/* Register offsets in the L2 latched interrupt controller */
static const struct brcmstb_intc_init_params l2_edge_intc_init = {
    .handler = handle_edge_irq,
    .cpu_status = 0x00,
    .cpu_clear = 0x08,
    .cpu_mask_status = 0x0c,
    .cpu_mask_set = 0x10,
    .cpu_mask_clear = 0x14
};

/* Register offsets in the L2 level interrupt controller */
static const struct brcmstb_intc_init_params l2_lvl_intc_init = {
    .handler = handle_level_irq,
    .cpu_status = 0x00,
    .cpu_clear = -1, /* Register not present */
    .cpu_mask_status = 0x04,
    .cpu_mask_set = 0x08,
    .cpu_mask_clear = 0x0C
};

/* L2 intc private data structure */
struct brcmstb_l2_intc_data {
    int parent_irq;
    void __iomem *base;
    struct irq_domain *domain;
    struct irq_chip_generic *gc;
    int status_offset;
    int mask_offset;
    bool can_wake;
    u32 saved_mask; /* for suspend/resume */
};

/**
 * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register and pending interrupt is acknowledged by setting a bit.
 *
 * Note: This function is generic and could easily be added to the
 * generic irqchip implementation if there ever becomes a will to do so.
 * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
 *
 * e.g.: https://patchwork.kernel.org/patch/9831047/
 */
static void brcmstb_l2_mask_and_ack(struct irq_data *d)
{
    struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
    struct irq_chip_type *ct = irq_data_get_chip_type(d);
    u32 mask = d->mask;

    irq_gc_lock(gc);
    irq_reg_writel(gc, mask, ct->regs.disable);
    *ct->mask_cache &= ~mask;
    irq_reg_writel(gc, mask, ct->regs.ack);
    irq_gc_unlock(gc);
}

static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
{
    struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
    struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
    struct irq_chip *chip = irq_desc_get_chip(desc);
    unsigned int irq;
    u32 status;

    chained_irq_enter(chip, desc);

    status = irq_reg_readl(gc, CPU_STATUS) &
        ~(irq_reg_readl(gc, CPU_MASK_STATUS));
    status = irq_reg_readl(b->gc, b->status_offset) &
        ~(irq_reg_readl(b->gc, b->mask_offset));

    if (status == 0) {
        raw_spin_lock(&desc->lock);
@@ -70,10 +117,8 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)

    do {
        irq = ffs(status) - 1;
        /* ack at our level */
        irq_reg_writel(gc, 1 << irq, CPU_CLEAR);
        status &= ~(1 << irq);
        generic_handle_irq(irq_find_mapping(b->domain, irq));
        generic_handle_irq(irq_linear_revmap(b->domain, irq));
    } while (status);
out:
    chained_irq_exit(chip, desc);
@@ -82,16 +127,17 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
static void brcmstb_l2_intc_suspend(struct irq_data *d)
{
    struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
    struct irq_chip_type *ct = irq_data_get_chip_type(d);
    struct brcmstb_l2_intc_data *b = gc->private;

    irq_gc_lock(gc);
    /* Save the current mask */
    b->saved_mask = irq_reg_readl(gc, CPU_MASK_STATUS);
    b->saved_mask = irq_reg_readl(gc, ct->regs.mask);

    if (b->can_wake) {
        /* Program the wakeup mask */
        irq_reg_writel(gc, ~gc->wake_active, CPU_MASK_SET);
        irq_reg_writel(gc, gc->wake_active, CPU_MASK_CLEAR);
        irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
        irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
    }
    irq_gc_unlock(gc);
}
@@ -99,49 +145,56 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
static void brcmstb_l2_intc_resume(struct irq_data *d)
{
    struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
    struct irq_chip_type *ct = irq_data_get_chip_type(d);
    struct brcmstb_l2_intc_data *b = gc->private;

    irq_gc_lock(gc);
    /* Clear unmasked non-wakeup interrupts */
    irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, CPU_CLEAR);
    if (ct->chip.irq_ack) {
        /* Clear unmasked non-wakeup interrupts */
        irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
                   ct->regs.ack);
    }

    /* Restore the saved mask */
    irq_reg_writel(gc, b->saved_mask, CPU_MASK_SET);
    irq_reg_writel(gc, ~b->saved_mask, CPU_MASK_CLEAR);
    irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
    irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
    irq_gc_unlock(gc);
}

static int __init brcmstb_l2_intc_of_init(struct device_node *np,
                      struct device_node *parent)
                      struct device_node *parent,
                      const struct brcmstb_intc_init_params
                      *init_params)
{
    unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
    struct brcmstb_l2_intc_data *data;
    struct irq_chip_generic *gc;
    struct irq_chip_type *ct;
    int ret;
    unsigned int flags;
    int parent_irq;
    void __iomem *base;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    data->base = of_iomap(np, 0);
    if (!data->base) {
    base = of_iomap(np, 0);
    if (!base) {
        pr_err("failed to remap intc L2 registers\n");
        ret = -ENOMEM;
        goto out_free;
    }

    /* Disable all interrupts by default */
    writel(0xffffffff, data->base + CPU_MASK_SET);
    writel(0xffffffff, base + init_params->cpu_mask_set);

    /* Wakeup interrupts may be retained from S5 (cold boot) */
    data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
    if (!data->can_wake)
        writel(0xffffffff, data->base + CPU_CLEAR);
    if (!data->can_wake && (init_params->cpu_clear >= 0))
        writel(0xffffffff, base + init_params->cpu_clear);

    data->parent_irq = irq_of_parse_and_map(np, 0);
    if (!data->parent_irq) {
    parent_irq = irq_of_parse_and_map(np, 0);
    if (!parent_irq) {
        pr_err("failed to find parent interrupt\n");
        ret = -EINVAL;
        goto out_unmap;
@@ -163,29 +216,39 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,

    /* Allocate a single Generic IRQ chip for this node */
    ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
            np->full_name, handle_edge_irq, clr, 0, flags);
            np->full_name, init_params->handler, clr, 0, flags);
    if (ret) {
        pr_err("failed to allocate generic irq chip\n");
        goto out_free_domain;
    }

    /* Set the IRQ chaining logic */
    irq_set_chained_handler_and_data(data->parent_irq,
    irq_set_chained_handler_and_data(parent_irq,
                     brcmstb_l2_intc_irq_handle, data);

    gc = irq_get_domain_generic_chip(data->domain, 0);
    gc->reg_base = data->base;
    gc->private = data;
    ct = gc->chip_types;
    data->gc = irq_get_domain_generic_chip(data->domain, 0);
    data->gc->reg_base = base;
    data->gc->private = data;
    data->status_offset = init_params->cpu_status;
    data->mask_offset = init_params->cpu_mask_status;

    ct->chip.irq_ack = irq_gc_ack_set_bit;
    ct->regs.ack = CPU_CLEAR;
    ct = data->gc->chip_types;

    if (init_params->cpu_clear >= 0) {
        ct->regs.ack = init_params->cpu_clear;
        ct->chip.irq_ack = irq_gc_ack_set_bit;
        ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
    } else {
        /* No Ack - but still slightly more efficient to define this */
        ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
    }

    ct->chip.irq_mask = irq_gc_mask_disable_reg;
    ct->regs.disable = CPU_MASK_SET;
    ct->regs.disable = init_params->cpu_mask_set;
    ct->regs.mask = init_params->cpu_mask_status;

    ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
    ct->regs.enable = CPU_MASK_CLEAR;
    ct->regs.enable = init_params->cpu_mask_clear;

    ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
    ct->chip.irq_resume = brcmstb_l2_intc_resume;
@@ -195,21 +258,35 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
        /* This IRQ chip can wake the system, set all child interrupts
         * in wake_enabled mask
         */
        gc->wake_enabled = 0xffffffff;
        data->gc->wake_enabled = 0xffffffff;
        ct->chip.irq_set_wake = irq_gc_set_wake;
    }

    pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
            data->base, data->parent_irq);
            base, parent_irq);

    return 0;

out_free_domain:
    irq_domain_remove(data->domain);
out_unmap:
    iounmap(data->base);
    iounmap(base);
out_free:
    kfree(data);
    return ret;
}
IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init);

int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
                    struct device_node *parent)
{
    return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
}
IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);

int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
                       struct device_node *parent)
{
    return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
}
IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
        brcmstb_l2_lvl_intc_of_init);
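A device tree node for the new level-triggered flavour could look like this
(register address and parent interrupt are hypothetical, not from the patch):

    irq0_intc: interrupt-controller@f0406800 {
        compatible = "brcm,bcm7271-l2-intc";
        reg = <0xf0406800 0x10>;
        interrupt-controller;
        #interrupt-cells = <1>;
        interrupt-parent = <&intc>;
        interrupts = <0x0>;
    };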

@@ -40,8 +40,9 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
    for (; quirks->desc; quirks++) {
        if (quirks->iidr != (quirks->mask & iidr))
            continue;
        quirks->init(data);
        pr_info("GIC: enabling workaround for %s\n", quirks->desc);
        if (quirks->init(data))
            pr_info("GIC: enabling workaround for %s\n",
                quirks->desc);
    }
}
@@ -23,7 +23,7 @@

struct gic_quirk {
    const char *desc;
    void (*init)(void *data);
    bool (*init)(void *data);
    u32 iidr;
    u32 mask;
};
@@ -83,6 +83,8 @@ struct its_baser {
    u32 psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the

@@ -97,12 +99,18 @@ struct its_node {
    struct its_cmd_block *cmd_write;
    struct its_baser tables[GITS_BASER_NR_REGS];
    struct its_collection *collections;
    struct fwnode_handle *fwnode_handle;
    u64 (*get_msi_base)(struct its_device *its_dev);
    struct list_head its_device_list;
    u64 flags;
    unsigned long list_nr;
    u32 ite_size;
    u32 device_ids;
    int numa_node;
    unsigned int msi_domain_flags;
    u32 pre_its_base; /* for Socionext Synquacer */
    bool is_v4;
    int vlpi_redist_offset;
};

#define ITS_ITT_ALIGN SZ_256

@@ -148,12 +156,6 @@ static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

/*
 * We have a maximum number of 16 ITSs in the whole system if we're
 * using the ITSList mechanism
 */
#define ITS_LIST_MAX 16

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);
@@ -268,10 +270,12 @@ struct its_cmd_block {
#define ITS_CMD_QUEUE_SZ SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
                            struct its_cmd_block *,
                            struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
                          struct its_cmd_block *,
                          struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
@@ -375,7 +379,8 @@ static inline void its_fixup_cmd(struct its_cmd_block *cmd)
    cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
                         struct its_cmd_block *cmd,
                         struct its_cmd_desc *desc)
{
    unsigned long itt_addr;

@@ -395,7 +400,8 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
    return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_mapc_cmd(struct its_node *its,
                         struct its_cmd_block *cmd,
                         struct its_cmd_desc *desc)
{
    its_encode_cmd(cmd, GITS_CMD_MAPC);

@@ -408,7 +414,8 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
    return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_mapti_cmd(struct its_node *its,
                          struct its_cmd_block *cmd,
                          struct its_cmd_desc *desc)
{
    struct its_collection *col;

@@ -427,7 +434,8 @@ static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
    return col;
}

static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_movi_cmd(struct its_node *its,
                         struct its_cmd_block *cmd,
                         struct its_cmd_desc *desc)
{
    struct its_collection *col;

@@ -445,7 +453,8 @@ static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
    return col;
}

static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_discard_cmd(struct its_node *its,
                            struct its_cmd_block *cmd,
                            struct its_cmd_desc *desc)
{
    struct its_collection *col;

@@ -462,7 +471,8 @@ static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
    return col;
}

static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_inv_cmd(struct its_node *its,
                        struct its_cmd_block *cmd,
                        struct its_cmd_desc *desc)
{
    struct its_collection *col;

@@ -479,7 +489,8 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
    return col;
}

static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_int_cmd(struct its_node *its,
                        struct its_cmd_block *cmd,
                        struct its_cmd_desc *desc)
{
    struct its_collection *col;

@@ -496,7 +507,8 @@ static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
    return col;
}

static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_clear_cmd(struct its_node *its,
                          struct its_cmd_block *cmd,
                          struct its_cmd_desc *desc)
{
    struct its_collection *col;

@@ -513,7 +525,8 @@ static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
    return col;
}

static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
static struct its_collection *its_build_invall_cmd(struct its_node *its,
                           struct its_cmd_block *cmd,
                           struct its_cmd_desc *desc)
{
    its_encode_cmd(cmd, GITS_CMD_INVALL);

@@ -524,7 +537,8 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
    return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
                         struct its_cmd_block *cmd,
                         struct its_cmd_desc *desc)
{
    its_encode_cmd(cmd, GITS_CMD_VINVALL);

@@ -535,17 +549,20 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
    return desc->its_vinvall_cmd.vpe;
}

static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
                       struct its_cmd_block *cmd,
                       struct its_cmd_desc *desc)
{
    unsigned long vpt_addr;
    u64 target;

    vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
    target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

    its_encode_cmd(cmd, GITS_CMD_VMAPP);
    its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
    its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
    its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
    its_encode_target(cmd, target);
    its_encode_vpt_addr(cmd, vpt_addr);
    its_encode_vpt_size(cmd, LPI_NRBITS - 1);

@@ -554,7 +571,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
    return desc->its_vmapp_cmd.vpe;
}

static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
                        struct its_cmd_block *cmd,
                        struct its_cmd_desc *desc)
{
    u32 db;

@@ -576,7 +594,8 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
    return desc->its_vmapti_cmd.vpe;
}

static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
                       struct its_cmd_block *cmd,
                       struct its_cmd_desc *desc)
{
    u32 db;

@@ -598,14 +617,18 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
    return desc->its_vmovi_cmd.vpe;
}

static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
                       struct its_cmd_block *cmd,
                       struct its_cmd_desc *desc)
{
    u64 target;

    target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
    its_encode_cmd(cmd, GITS_CMD_VMOVP);
    its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
    its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
    its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
    its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
    its_encode_target(cmd, target);

    its_fixup_cmd(cmd);
@@ -684,9 +707,9 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
    dsb(ishst);
}

static void its_wait_for_range_completion(struct its_node *its,
                      struct its_cmd_block *from,
                      struct its_cmd_block *to)
static int its_wait_for_range_completion(struct its_node *its,
                     struct its_cmd_block *from,
                     struct its_cmd_block *to)
{
    u64 rd_idx, from_idx, to_idx;
    u32 count = 1000000;    /* 1s! */

@@ -707,12 +730,15 @@ static void its_wait_for_range_completion(struct its_node *its,

        count--;
        if (!count) {
            pr_err_ratelimited("ITS queue timeout\n");
            return;
            pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
                       from_idx, to_idx, rd_idx);
            return -1;
        }
        cpu_relax();
        udelay(1);
    }

    return 0;
}

/* Warning, macro hell follows */
@@ -732,7 +758,7 @@ void name(struct its_node *its, \
        raw_spin_unlock_irqrestore(&its->lock, flags); \
        return; \
    } \
    sync_obj = builder(cmd, desc); \
    sync_obj = builder(its, cmd, desc); \
    its_flush_cmd(its, cmd); \
    \
    if (sync_obj) { \

@@ -740,7 +766,7 @@ void name(struct its_node *its, \
        if (!sync_cmd) \
            goto post; \
        \
        buildfn(sync_cmd, sync_obj); \
        buildfn(its, sync_cmd, sync_obj); \
        its_flush_cmd(its, sync_cmd); \
    } \
    \

@@ -748,10 +774,12 @@ post: \
    next_cmd = its_post_commands(its); \
    raw_spin_unlock_irqrestore(&its->lock, flags); \
    \
    its_wait_for_range_completion(its, cmd, next_cmd); \
    if (its_wait_for_range_completion(its, cmd, next_cmd)) \
        pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
}

static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
static void its_build_sync_cmd(struct its_node *its,
                   struct its_cmd_block *sync_cmd,
                   struct its_collection *sync_col)
{
    its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
@@ -763,7 +791,8 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
                 struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
static void its_build_vsync_cmd(struct its_node *its,
                struct its_cmd_block *sync_cmd,
                struct its_vpe *sync_vpe)
{
    its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);

@@ -895,21 +924,16 @@ static void its_send_vmovi(struct its_device *dev, u32 id)
    its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_vpe *vpe, bool valid)
static void its_send_vmapp(struct its_node *its,
               struct its_vpe *vpe, bool valid)
{
    struct its_cmd_desc desc;
    struct its_node *its;

    desc.its_vmapp_cmd.vpe = vpe;
    desc.its_vmapp_cmd.valid = valid;
    desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

    list_for_each_entry(its, &its_nodes, entry) {
        if (!its->is_v4)
            continue;

        desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
        its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
    }
    its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
@@ -947,6 +971,9 @@ static void its_send_vmovp(struct its_vpe *vpe)
        if (!its->is_v4)
            continue;

        if (!vpe->its_vm->vlpi_count[its->list_nr])
            continue;

        desc.its_vmovp_cmd.col = &its->collections[col_id];
        its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
    }

@@ -954,18 +981,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
    raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_vpe *vpe)
static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
    struct its_cmd_desc desc;
    struct its_node *its;

    desc.its_vinvall_cmd.vpe = vpe;

    list_for_each_entry(its, &its_nodes, entry) {
        if (!its->is_v4)
            continue;
        its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
    }
    its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

/*
@@ -987,9 +1008,15 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
    if (irqd_is_forwarded_to_vcpu(d)) {
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        struct its_vlpi_map *map;

        prop_page = its_dev->event_map.vm->vprop_page;
        hwirq = its_dev->event_map.vlpi_maps[event].vintid;
        map = &its_dev->event_map.vlpi_maps[event];
        hwirq = map->vintid;

        /* Remember the updated property */
        map->properties &= ~clr;
        map->properties |= set | LPI_PROP_GROUP1;
    } else {
        prop_page = gic_rdists->prop_page;
        hwirq = d->hwirq;

@@ -1095,6 +1122,13 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
    return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
    struct its_node *its = its_dev->its;

    return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
    struct its_device *its_dev = irq_data_get_irq_chip_data(d);

@@ -1102,7 +1136,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
    u64 addr;

    its = its_dev->its;
    addr = its->phys_base + GITS_TRANSLATER;
    addr = its->get_msi_base(its_dev);

    msg->address_lo = lower_32_bits(addr);
    msg->address_hi = upper_32_bits(addr);
@@ -1129,6 +1163,60 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
    return 0;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
    unsigned long flags;

    /* Not using the ITS list? Everything is always mapped. */
    if (!its_list_map)
        return;

    raw_spin_lock_irqsave(&vmovp_lock, flags);

    /*
     * If the VM wasn't mapped yet, iterate over the vpes and get
     * them mapped now.
     */
    vm->vlpi_count[its->list_nr]++;

    if (vm->vlpi_count[its->list_nr] == 1) {
        int i;

        for (i = 0; i < vm->nr_vpes; i++) {
            struct its_vpe *vpe = vm->vpes[i];
            struct irq_data *d = irq_get_irq_data(vpe->irq);

            /* Map the VPE to the first possible CPU */
            vpe->col_idx = cpumask_first(cpu_online_mask);
            its_send_vmapp(its, vpe, true);
            its_send_vinvall(its, vpe);
            irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
        }
    }

    raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
    unsigned long flags;

    /* Not using the ITS list? Everything is always mapped. */
    if (!its_list_map)
        return;

    raw_spin_lock_irqsave(&vmovp_lock, flags);

    if (!--vm->vlpi_count[its->list_nr]) {
        int i;

        for (i = 0; i < vm->nr_vpes; i++)
            its_send_vmapp(its, vm->vpes[i], false);
    }

    raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
    struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1164,12 +1252,23 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
        /* Already mapped, move it around */
        its_send_vmovi(its_dev, event);
    } else {
        /* Ensure all the VPEs are mapped on this ITS */
        its_map_vm(its_dev->its, info->map->vm);

        /*
         * Flag the interrupt as forwarded so that we can
         * start poking the virtual property table.
         */
        irqd_set_forwarded_to_vcpu(d);

        /* Write out the property to the prop table */
        lpi_write_config(d, 0xff, info->map->properties);

        /* Drop the physical mapping */
        its_send_discard(its_dev, event);

        /* and install the virtual one */
        its_send_vmapti(its_dev, event);
        irqd_set_forwarded_to_vcpu(d);

        /* Increment the number of VLPIs */
        its_dev->event_map.nr_vlpis++;

@@ -1225,6 +1324,9 @@ static int its_vlpi_unmap(struct irq_data *d)
                    LPI_PROP_ENABLED |
                    LPI_PROP_GROUP1));

    /* Potentially unmap the VM from this ITS */
    its_unmap_vm(its_dev->its, its_dev->event_map.vm);

    /*
     * Drop the refcount and make the device available again if
     * this was the last VLPI.
@@ -1650,23 +1752,14 @@ static void its_free_tables(struct its_node *its)

static int its_alloc_tables(struct its_node *its)
{
    u64 typer = gic_read_typer(its->base + GITS_TYPER);
    u32 ids = GITS_TYPER_DEVBITS(typer);
    u64 shr = GITS_BASER_InnerShareable;
    u64 cache = GITS_BASER_RaWaWb;
    u32 psz = SZ_64K;
    int err, i;

    if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
        /*
         * erratum 22375: only alloc 8MB table size
         * erratum 24313: ignore memory access type
         */
        cache = GITS_BASER_nCnB;
        ids = 0x14; /* 20 bits, 8MB */
    }

    its->device_ids = ids;
    if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
        /* erratum 24313: ignore memory access type */
        cache = GITS_BASER_nCnB;

    for (i = 0; i < GITS_BASER_NR_REGS; i++) {
        struct its_baser *baser = its->tables + i;
@@ -2186,8 +2279,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
    return 0;
}

static void its_irq_domain_activate(struct irq_domain *domain,
                    struct irq_data *d)
static int its_irq_domain_activate(struct irq_domain *domain,
                   struct irq_data *d, bool early)
{
    struct its_device *its_dev = irq_data_get_irq_chip_data(d);
    u32 event = its_get_event_id(d);

@@ -2205,6 +2298,7 @@ static void its_irq_domain_activate(struct irq_domain *domain,

    /* Map the GIC IRQ and event to the device */
    its_send_mapti(its_dev, d->hwirq, event);
    return 0;
}

static void its_irq_domain_deactivate(struct irq_domain *domain,

@@ -2371,6 +2465,8 @@ static int its_vpe_set_affinity(struct irq_data *d,
        its_vpe_db_proxy_move(vpe, from, cpu);
    }

    irq_data_update_effective_affinity(d, cpumask_of(cpu));

    return IRQ_SET_MASK_OK_DONE;
}
@@ -2438,6 +2534,26 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
    }
}

static void its_vpe_invall(struct its_vpe *vpe)
{
    struct its_node *its;

    list_for_each_entry(its, &its_nodes, entry) {
        if (!its->is_v4)
            continue;

        if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
            continue;

        /*
         * Sending a VINVALL to a single ITS is enough, as all
         * we need is to reach the redistributors.
         */
        its_send_vinvall(its, vpe);
        return;
    }
}

static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
    struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

@@ -2453,7 +2569,7 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
        return 0;

    case INVALL_VPE:
        its_send_vinvall(vpe);
        its_vpe_invall(vpe);
        return 0;

    default:
@@ -2678,23 +2794,51 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
    return err;
}

static void its_vpe_irq_domain_activate(struct irq_domain *domain,
                    struct irq_data *d)
static int its_vpe_irq_domain_activate(struct irq_domain *domain,
                       struct irq_data *d, bool early)
{
    struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
    struct its_node *its;

    /* If we use the list map, we issue VMAPP on demand... */
    if (its_list_map)
        return true;

    /* Map the VPE to the first possible CPU */
    vpe->col_idx = cpumask_first(cpu_online_mask);
    its_send_vmapp(vpe, true);
    its_send_vinvall(vpe);

    list_for_each_entry(its, &its_nodes, entry) {
        if (!its->is_v4)
            continue;

        its_send_vmapp(its, vpe, true);
        its_send_vinvall(its, vpe);
    }

    irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

    return 0;
}

static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
                      struct irq_data *d)
{
    struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
    struct its_node *its;

    its_send_vmapp(vpe, false);
    /*
     * If we use the list map, we unmap the VPE once no VLPIs are
     * associated with the VM.
     */
    if (its_list_map)
        return;

    list_for_each_entry(its, &its_nodes, entry) {
        if (!its->is_v4)
            continue;

        its_send_vmapp(its, vpe, false);
    }
}

static const struct irq_domain_ops its_vpe_domain_ops = {
@@ -2737,26 +2881,85 @@ static int its_force_quiescent(void __iomem *base)
    }
}

static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
    struct its_node *its = data;

    /* erratum 22375: only alloc 8MB table size */
    its->device_ids = 0x14;     /* 20 bits, 8MB */
    its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

    return true;
}

static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
    struct its_node *its = data;

    its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;

    return true;
}

static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
    struct its_node *its = data;

    /* On QDF2400, the size of the ITE is 16Bytes */
    its->ite_size = 16;

    return true;
}

static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
{
    struct its_node *its = its_dev->its;

    /*
     * The Socionext Synquacer SoC has a so-called 'pre-ITS',
     * which maps 32-bit writes targeted at a separate window of
     * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
     * with device ID taken from bits [device_id_bits + 1:2] of
     * the window offset.
     */
    return its->pre_its_base + (its_dev->device_id << 2);
}
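To make the mapping concrete: with a hypothetical pre_its_base of
0x58000000, a device ID of 5 shifted left by 2 gives offset 0x14, so that
device's doorbell writes would target 0x58000014 instead of the real
GITS_TRANSLATER.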

static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
{
    struct its_node *its = data;
    u32 pre_its_window[2];
    u32 ids;

    if (!fwnode_property_read_u32_array(its->fwnode_handle,
                        "socionext,synquacer-pre-its",
                        pre_its_window,
                        ARRAY_SIZE(pre_its_window))) {

        its->pre_its_base = pre_its_window[0];
        its->get_msi_base = its_irq_get_msi_base_pre_its;

        ids = ilog2(pre_its_window[1]) - 2;
        if (its->device_ids > ids)
            its->device_ids = ids;

        /* the pre-ITS breaks isolation, so disable MSI remapping */
        its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
        return true;
    }
    return false;
}

static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
    struct its_node *its = data;

    /*
     * Hip07 insists on using the wrong address for the VLPI
     * page. Trick it into doing the right thing...
     */
    its->vlpi_redist_offset = SZ_128K;
    return true;
}

static const struct gic_quirk its_quirks[] = {
@@ -2783,6 +2986,27 @@ static const struct gic_quirk its_quirks[] = {
        .mask   = 0xffffffff,
        .init   = its_enable_quirk_qdf2400_e0065,
    },
#endif
#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
    {
        /*
         * The Socionext Synquacer SoC incorporates ARM's own GIC-500
         * implementation, but with a 'pre-ITS' added that requires
         * special handling in software.
         */
        .desc   = "ITS: Socionext Synquacer pre-ITS",
        .iidr   = 0x0001143b,
        .mask   = 0xffffffff,
        .init   = its_enable_quirk_socionext_synquacer,
    },
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161600802
    {
        .desc   = "ITS: Hip07 erratum 161600802",
        .iidr   = 0x00000004,
        .mask   = 0xffffffff,
        .init   = its_enable_quirk_hip07_161600802,
    },
#endif
    {
    }
@@ -2812,7 +3036,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)

    inner_domain->parent = its_parent;
    irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
    inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
    inner_domain->flags |= its->msi_domain_flags;
    info->ops = &its_msi_domain_ops;
    info->data = its;
    inner_domain->host_data = info;

@@ -2873,8 +3097,8 @@ static int __init its_compute_its_list_map(struct resource *res,
     * locking. Should this change, we should address
     * this.
     */
    its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
    if (its_number >= ITS_LIST_MAX) {
    its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
    if (its_number >= GICv4_ITS_LIST_MAX) {
        pr_err("ITS@%pa: No ITSList entry available!\n",
               &res->start);
        return -EINVAL;

@@ -2942,6 +3166,7 @@ static int __init its_probe_one(struct resource *res,
    its->base = its_base;
    its->phys_base = res->start;
    its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
    its->device_ids = GITS_TYPER_DEVBITS(typer);
    its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
    if (its->is_v4) {
        if (!(typer & GITS_TYPER_VMOVP)) {

@@ -2949,6 +3174,8 @@ static int __init its_probe_one(struct resource *res,
            if (err < 0)
                goto out_free_its;

            its->list_nr = err;

            pr_info("ITS@%pa: Using ITS number %d\n",
                &res->start, err);
        } else {

@@ -2965,6 +3192,9 @@ static int __init its_probe_one(struct resource *res,
        goto out_free_its;
    }
    its->cmd_write = its->cmd_base;
    its->fwnode_handle = handle;
    its->get_msi_base = its_irq_get_msi_base;
    its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

    its_enable_quirks(its);
@@ -55,6 +55,7 @@ struct gic_chip_data {
    struct irq_domain *domain;
    u64 redist_stride;
    u32 nr_redist_regions;
    bool has_rss;
    unsigned int irq_nr;
    struct partition_desc *ppi_descs[16];
};

@@ -63,7 +64,9 @@ static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_kvm_info gic_v3_kvm_info;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)         (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()        (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()    (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()   (gic_data_rdist_rd_base() + SZ_64K)

@@ -526,6 +529,10 @@ static void gic_update_vlpi_properties(void)

static void gic_cpu_sys_reg_init(void)
{
    int i, cpu = smp_processor_id();
    u64 mpidr = cpu_logical_map(cpu);
    u64 need_rss = MPIDR_RS(mpidr);

    /*
     * Need to check that the SRE bit has actually been set. If
     * not, it means that SRE is disabled at EL2. We're going to

@@ -557,6 +564,30 @@ static void gic_cpu_sys_reg_init(void)

    /* ... and let's hit the road... */
    gic_write_grpen1(1);

    /* Keep the RSS capability status in per_cpu variable */
    per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

    /* Check all the CPUs are capable of sending SGIs to other CPUs */
    for_each_online_cpu(i) {
        bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

        need_rss |= MPIDR_RS(cpu_logical_map(i));
        if (need_rss && (!have_rss))
            pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
                cpu, (unsigned long)mpidr,
                i, (unsigned long)cpu_logical_map(i));
    }

    /**
     * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
     * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
     * UNPREDICTABLE choice of :
     *   - The write is ignored.
     *   - The RS field is treated as 0.
     */
    if (need_rss && (!gic_data.has_rss))
        pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static int gic_dist_supports_lpis(void)
@ -591,6 +622,9 @@ static void gic_cpu_init(void)
|
|||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
|
||||
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
|
||||
|
||||
static int gic_starting_cpu(unsigned int cpu)
|
||||
{
|
||||
gic_cpu_init();
|
||||
|
@ -605,13 +639,6 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
|
|||
u16 tlist = 0;
|
||||
|
||||
while (cpu < nr_cpu_ids) {
|
||||
/*
|
||||
* If we ever get a cluster of more than 16 CPUs, just
|
||||
* scream and skip that CPU.
|
||||
*/
|
||||
if (WARN_ON((mpidr & 0xff) >= 16))
|
||||
goto out;
|
||||
|
||||
tlist |= 1 << (mpidr & 0xf);
|
||||
|
||||
next_cpu = cpumask_next(cpu, mask);
|
||||
|
@ -621,7 +648,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
|
|||
|
||||
mpidr = cpu_logical_map(cpu);
|
||||
|
||||
if (cluster_id != (mpidr & ~0xffUL)) {
|
||||
if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
|
||||
cpu--;
|
||||
goto out;
|
||||
}
|
||||
|
@ -643,6 +670,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
|
|||
MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
|
||||
irq << ICC_SGI1R_SGI_ID_SHIFT |
|
||||
MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
|
||||
MPIDR_TO_SGI_RS(cluster_id) |
|
||||
tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
|
||||
|
||||
pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
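Worked example of the value assembled above, with a hypothetical MPIDR 0x121 (Aff1 = 1, Aff0 = 0x21) and SGI 3; the shift constants mirror the ICC_SGI1R_EL1 definitions added to arm-gic-v3.h further down in this merge:

#include <stdio.h>
#include <stdint.h>

#define RS_SHIFT	44	/* ICC_SGI1R_RS_SHIFT */
#define AFF1_SHIFT	16	/* ICC_SGI1R_AFFINITY_1_SHIFT */
#define SGI_ID_SHIFT	24	/* ICC_SGI1R_SGI_ID_SHIFT */

int main(void)
{
	uint64_t mpidr = 0x121;			/* made-up target CPU */
	uint64_t rs    = (mpidr & 0xf0) >> 4;	/* range selector: 2 */
	uint64_t tlist = 1ULL << (mpidr & 0xf);	/* target-list bit 1 */
	uint64_t aff1  = (mpidr >> 8) & 0xff;	/* cluster affinity: 1 */
	uint64_t val   = (rs << RS_SHIFT) | (3ULL << SGI_ID_SHIFT) |
			 (aff1 << AFF1_SHIFT) | tlist;

	printf("ICC_SGI1R_EL1 = %#llx\n", (unsigned long long)val); /* 0x200003010002 */
	return 0;
}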
|
||||
|
@@ -663,7 +691,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
|
|||
smp_wmb();
|
||||
|
||||
for_each_cpu(cpu, mask) {
|
||||
unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
|
||||
u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
|
||||
u16 tlist;
|
||||
|
||||
tlist = gic_compute_target_list(&cpu, mask, cluster_id);
|
||||
|
@@ -1007,6 +1035,10 @@ static int __init gic_init_bases(void __iomem *dist_base,
|
|||
goto out_free;
|
||||
}
|
||||
|
||||
gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
|
||||
pr_info("Distributor has %sRange Selector support\n",
|
||||
gic_data.has_rss ? "" : "no ");
|
||||
|
||||
set_handle_irq(gic_handle_irq);
|
||||
|
||||
gic_update_vlpi_properties();
|
||||
|
|
|
@@ -1256,6 +1256,19 @@ static void gic_teardown(struct gic_chip_data *gic)
|
|||
|
||||
#ifdef CONFIG_OF
|
||||
static int gic_cnt __initdata;
|
||||
static bool gicv2_force_probe;
|
||||
|
||||
static int __init gicv2_force_probe_cfg(char *buf)
|
||||
{
|
||||
return strtobool(buf, &gicv2_force_probe);
|
||||
}
|
||||
early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
|
||||
|
||||
static bool gic_check_gicv2(void __iomem *base)
|
||||
{
|
||||
u32 val = readl_relaxed(base + GIC_CPU_IDENT);
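/*
 * GICC_IIDR check: implementer 0x43B (ARM) in bits [11:0] and
 * architecture version 2 in bits [19:16]; the 0xff0fff mask drops
 * the revision field so any GICv2 revision matches.
 */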
|
||||
return (val & 0xff0fff) == 0x02043B;
|
||||
}
|
||||
|
||||
static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
|
||||
{
|
||||
|
@@ -1265,20 +1278,60 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
|
|||
|
||||
if (!is_hyp_mode_available())
|
||||
return false;
|
||||
if (resource_size(&cpuif_res) < SZ_8K)
|
||||
return false;
|
||||
if (resource_size(&cpuif_res) == SZ_128K) {
|
||||
u32 val_low, val_high;
|
||||
if (resource_size(&cpuif_res) < SZ_8K) {
|
||||
void __iomem *alt;
|
||||
/*
|
||||
* Check for a stupid firmware that only exposes the
|
||||
* first page of a GICv2.
|
||||
*/
|
||||
if (!gic_check_gicv2(*base))
|
||||
return false;
|
||||
|
||||
if (!gicv2_force_probe) {
|
||||
pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
alt = ioremap(cpuif_res.start, SZ_8K);
|
||||
if (!alt)
|
||||
return false;
|
||||
if (!gic_check_gicv2(alt + SZ_4K)) {
|
||||
/*
|
||||
* The first page was that of a GICv2, and
|
||||
* the second was *something*. Let's trust it
|
||||
* to be a GICv2, and update the mapping.
|
||||
*/
|
||||
pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
|
||||
&cpuif_res.start);
|
||||
iounmap(*base);
|
||||
*base = alt;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Verify that we have the first 4kB of a GIC400
|
||||
* We detected *two* initial GICv2 pages in a
|
||||
* row. Could be a GICv2 aliased over two 64kB
|
||||
* pages. Update the resource, map the iospace, and
|
||||
* pray.
|
||||
*/
|
||||
iounmap(alt);
|
||||
alt = ioremap(cpuif_res.start, SZ_128K);
|
||||
if (!alt)
|
||||
return false;
|
||||
pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
|
||||
&cpuif_res.start);
|
||||
cpuif_res.end = cpuif_res.start + SZ_128K - 1;
|
||||
iounmap(*base);
|
||||
*base = alt;
|
||||
}
|
||||
if (resource_size(&cpuif_res) == SZ_128K) {
|
||||
/*
|
||||
* Verify that we have the first 4kB of a GICv2
|
||||
* aliased over the first 64kB by checking the
|
||||
* GICC_IIDR register on both ends.
|
||||
*/
|
||||
val_low = readl_relaxed(*base + GIC_CPU_IDENT);
|
||||
val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
|
||||
if ((val_low & 0xffff0fff) != 0x0202043B ||
|
||||
val_low != val_high)
|
||||
if (!gic_check_gicv2(*base) ||
|
||||
!gic_check_gicv2(*base + 0xf000))
|
||||
return false;
|
||||
|
||||
/*
|
||||
|
|
|
@@ -0,0 +1,419 @@
|
|||
/*
|
||||
* Copyright (c) 2015 Endless Mobile, Inc.
|
||||
* Author: Carlo Caione <carlo@endlessm.com>
|
||||
* Copyright (c) 2016 BayLibre, SAS.
|
||||
* Author: Jerome Brunet <jbrunet@baylibre.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called COPYING.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
|
||||
#define NUM_CHANNEL 8
|
||||
#define MAX_INPUT_MUX 256
|
||||
|
||||
#define REG_EDGE_POL 0x00
|
||||
#define REG_PIN_03_SEL 0x04
|
||||
#define REG_PIN_47_SEL 0x08
|
||||
#define REG_FILTER_SEL 0x0c
|
||||
|
||||
#define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x)))
|
||||
#define REG_EDGE_POL_EDGE(x) BIT(x)
|
||||
#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
|
||||
#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
|
||||
#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
|
||||
|
||||
struct meson_gpio_irq_params {
|
||||
unsigned int nr_hwirq;
|
||||
};
|
||||
|
||||
static const struct meson_gpio_irq_params meson8_params = {
|
||||
.nr_hwirq = 134,
|
||||
};
|
||||
|
||||
static const struct meson_gpio_irq_params meson8b_params = {
|
||||
.nr_hwirq = 119,
|
||||
};
|
||||
|
||||
static const struct meson_gpio_irq_params gxbb_params = {
|
||||
.nr_hwirq = 133,
|
||||
};
|
||||
|
||||
static const struct meson_gpio_irq_params gxl_params = {
|
||||
.nr_hwirq = 110,
|
||||
};
|
||||
|
||||
static const struct of_device_id meson_irq_gpio_matches[] = {
|
||||
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
|
||||
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
|
||||
{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
|
||||
{ .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
|
||||
{ }
|
||||
};
|
||||
|
||||
struct meson_gpio_irq_controller {
|
||||
unsigned int nr_hwirq;
|
||||
void __iomem *base;
|
||||
u32 channel_irqs[NUM_CHANNEL];
|
||||
DECLARE_BITMAP(channel_map, NUM_CHANNEL);
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int reg, u32 mask, u32 val)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
tmp = readl_relaxed(ctl->base + reg);
|
||||
tmp &= ~mask;
|
||||
tmp |= val;
|
||||
writel_relaxed(tmp, ctl->base + reg);
|
||||
}
|
||||
|
||||
static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
|
||||
{
|
||||
return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
|
||||
}
|
||||
|
||||
static int
|
||||
meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned long hwirq,
|
||||
u32 **channel_hwirq)
|
||||
{
|
||||
unsigned int reg, idx;
|
||||
|
||||
spin_lock(&ctl->lock);
|
||||
|
||||
/* Find a free channel */
|
||||
idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
|
||||
if (idx >= NUM_CHANNEL) {
|
||||
spin_unlock(&ctl->lock);
|
||||
pr_err("No channel available\n");
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
/* Mark the channel as used */
|
||||
set_bit(idx, ctl->channel_map);
|
||||
|
||||
/*
|
||||
* Set up the mux of the channel to route the signal of the pad
|
||||
* to the appropriate input of the GIC
|
||||
*/
|
||||
reg = meson_gpio_irq_channel_to_reg(idx);
|
||||
meson_gpio_irq_update_bits(ctl, reg,
|
||||
0xff << REG_PIN_SEL_SHIFT(idx),
|
||||
hwirq << REG_PIN_SEL_SHIFT(idx));
|
||||
|
||||
/*
|
||||
* Get the hwirq number assigned to this channel through
|
||||
* a pointer into the channel_irqs table. The added benefit of this
|
||||
* method is that we can also retrieve the channel index with
|
||||
* it, using the table base.
|
||||
*/
|
||||
*channel_hwirq = &(ctl->channel_irqs[idx]);
|
||||
|
||||
spin_unlock(&ctl->lock);
|
||||
|
||||
pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
|
||||
hwirq, idx, **channel_hwirq);
|
||||
|
||||
return 0;
|
||||
}
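To make the mux write above concrete, a user-space sketch of the register math (pad and channel numbers are made up); each select register packs four 8-bit pad numbers:

#include <stdio.h>

#define REG_PIN_03_SEL		0x04
#define REG_PIN_47_SEL		0x08
#define REG_PIN_SEL_SHIFT(x)	(((x) % 4) * 8)

int main(void)
{
	unsigned int hwirq = 45, idx = 5;	/* hypothetical pad 45 on channel 5 */
	unsigned int reg  = (idx < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
	unsigned int mask = 0xff << REG_PIN_SEL_SHIFT(idx);
	unsigned int val  = hwirq << REG_PIN_SEL_SHIFT(idx);

	/* channel 5 lives in byte 1 of REG_PIN_47_SEL: reg=0x8 mask=0xff00 val=0x2d00 */
	printf("reg=%#x mask=%#x val=%#x\n", reg, mask, val);
	return 0;
}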
|
||||
|
||||
static unsigned int
|
||||
meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl,
|
||||
u32 *channel_hwirq)
|
||||
{
|
||||
return channel_hwirq - ctl->channel_irqs;
|
||||
}
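The subtraction is plain pointer arithmetic; a trivial stand-alone illustration (channel values mirror the binding example):

#include <stdio.h>

int main(void)
{
	unsigned int channel_irqs[8] = { 64, 65, 66, 67, 68, 69, 70, 71 };
	unsigned int *ch = &channel_irqs[5];

	/* the index falls out of the pointer alone: prints idx=5 hwirq=69 */
	printf("idx=%td hwirq=%u\n", ch - channel_irqs, *ch);
	return 0;
}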
|
||||
|
||||
static void
|
||||
meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
|
||||
u32 *channel_hwirq)
|
||||
{
|
||||
unsigned int idx;
|
||||
|
||||
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
|
||||
clear_bit(idx, ctl->channel_map);
|
||||
}
|
||||
|
||||
static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int type,
|
||||
u32 *channel_hwirq)
|
||||
{
|
||||
u32 val = 0;
|
||||
unsigned int idx;
|
||||
|
||||
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
|
||||
|
||||
/*
|
||||
* The controller has a filter block to operate in either LEVEL or
|
||||
* EDGE mode, then the signal is sent to the GIC. To enable LEVEL_LOW and
|
||||
* EDGE_FALLING support (which the GIC does not support), the filter
|
||||
* block is also able to invert the input signal it gets before
|
||||
* providing it to the GIC.
|
||||
*/
|
||||
type &= IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
if (type == IRQ_TYPE_EDGE_BOTH)
|
||||
return -EINVAL;
|
||||
|
||||
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
|
||||
val |= REG_EDGE_POL_EDGE(idx);
|
||||
|
||||
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
|
||||
val |= REG_EDGE_POL_LOW(idx);
|
||||
|
||||
spin_lock(&ctl->lock);
|
||||
|
||||
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
|
||||
REG_EDGE_POL_MASK(idx), val);
|
||||
|
||||
spin_unlock(&ctl->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int meson_gpio_irq_type_output(unsigned int type)
|
||||
{
|
||||
unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
type &= ~IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
/*
|
||||
* The polarity of the signal provided to the GIC should always
|
||||
* be high.
|
||||
*/
|
||||
if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
|
||||
type |= IRQ_TYPE_LEVEL_HIGH;
|
||||
else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
|
||||
type |= IRQ_TYPE_EDGE_RISING;
|
||||
|
||||
return type;
|
||||
}
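A user-space sketch of this sense translation, using the trigger-type values from include/linux/irq.h: the filter block absorbs any inversion, so the GIC only ever sees rising-edge or high-level.

#include <stdio.h>

#define IRQ_TYPE_EDGE_RISING	0x1
#define IRQ_TYPE_EDGE_FALLING	0x2
#define IRQ_TYPE_LEVEL_HIGH	0x4
#define IRQ_TYPE_LEVEL_LOW	0x8
#define IRQ_TYPE_SENSE_MASK	0xf

/* same mapping as meson_gpio_irq_type_output() above */
static unsigned int to_gic_type(unsigned int type)
{
	unsigned int sense = type & IRQ_TYPE_SENSE_MASK;

	type &= ~IRQ_TYPE_SENSE_MASK;
	if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		type |= IRQ_TYPE_LEVEL_HIGH;
	else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		type |= IRQ_TYPE_EDGE_RISING;
	return type;
}

int main(void)
{
	/* falling edge in, rising edge out: prints 0x2 -> 0x1 */
	printf("%#x -> %#x\n", IRQ_TYPE_EDGE_FALLING,
	       to_gic_type(IRQ_TYPE_EDGE_FALLING));
	return 0;
}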
|
||||
|
||||
static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
|
||||
{
|
||||
struct meson_gpio_irq_controller *ctl = data->domain->host_data;
|
||||
u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
|
||||
int ret;
|
||||
|
||||
ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return irq_chip_set_type_parent(data,
|
||||
meson_gpio_irq_type_output(type));
|
||||
}
|
||||
|
||||
static struct irq_chip meson_gpio_irq_chip = {
|
||||
.name = "meson-gpio-irqchip",
|
||||
.irq_mask = irq_chip_mask_parent,
|
||||
.irq_unmask = irq_chip_unmask_parent,
|
||||
.irq_eoi = irq_chip_eoi_parent,
|
||||
.irq_set_type = meson_gpio_irq_set_type,
|
||||
.irq_retrigger = irq_chip_retrigger_hierarchy,
|
||||
#ifdef CONFIG_SMP
|
||||
.irq_set_affinity = irq_chip_set_affinity_parent,
|
||||
#endif
|
||||
.flags = IRQCHIP_SET_TYPE_MASKED,
|
||||
};
|
||||
|
||||
static int meson_gpio_irq_domain_translate(struct irq_domain *domain,
|
||||
struct irq_fwspec *fwspec,
|
||||
unsigned long *hwirq,
|
||||
unsigned int *type)
|
||||
{
|
||||
if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
|
||||
*hwirq = fwspec->param[0];
|
||||
*type = fwspec->param[1];
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain,
|
||||
unsigned int virq,
|
||||
u32 hwirq,
|
||||
unsigned int type)
|
||||
{
|
||||
struct irq_fwspec fwspec;
|
||||
|
||||
fwspec.fwnode = domain->parent->fwnode;
|
||||
fwspec.param_count = 3;
|
||||
fwspec.param[0] = 0; /* SPI */
|
||||
fwspec.param[1] = hwirq;
|
||||
fwspec.param[2] = meson_gpio_irq_type_output(type);
|
||||
|
||||
return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
|
||||
}
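For a concrete picture of the two specifier formats (all numbers illustrative): a consumer's two-cell <25 IRQ_TYPE_EDGE_RISING> on this controller, once pad 25 has been muxed to a channel whose upstream hwirq is 64, is re-emitted towards the GIC as:

/* kernel-context sketch; 25 and 64 are invented example numbers */
struct irq_fwspec gic_fwspec = {
	.fwnode      = parent_fwnode,	/* i.e. domain->parent->fwnode */
	.param_count = 3,
	.param       = { 0,		/* 0 = SPI */
			 64,		/* *channel_hwirq */
			 IRQ_TYPE_EDGE_RISING },
};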
|
||||
|
||||
static int meson_gpio_irq_domain_alloc(struct irq_domain *domain,
|
||||
unsigned int virq,
|
||||
unsigned int nr_irqs,
|
||||
void *data)
|
||||
{
|
||||
struct irq_fwspec *fwspec = data;
|
||||
struct meson_gpio_irq_controller *ctl = domain->host_data;
|
||||
unsigned long hwirq;
|
||||
u32 *channel_hwirq;
|
||||
unsigned int type;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(nr_irqs != 1))
|
||||
return -EINVAL;
|
||||
|
||||
ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = meson_gpio_irq_allocate_gic_irq(domain, virq,
|
||||
*channel_hwirq, type);
|
||||
if (ret < 0) {
|
||||
pr_err("failed to allocate gic irq %u\n", *channel_hwirq);
|
||||
meson_gpio_irq_release_channel(ctl, channel_hwirq);
|
||||
return ret;
|
||||
}
|
||||
|
||||
irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
|
||||
&meson_gpio_irq_chip, channel_hwirq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void meson_gpio_irq_domain_free(struct irq_domain *domain,
|
||||
unsigned int virq,
|
||||
unsigned int nr_irqs)
|
||||
{
|
||||
struct meson_gpio_irq_controller *ctl = domain->host_data;
|
||||
struct irq_data *irq_data;
|
||||
u32 *channel_hwirq;
|
||||
|
||||
if (WARN_ON(nr_irqs != 1))
|
||||
return;
|
||||
|
||||
irq_domain_free_irqs_parent(domain, virq, 1);
|
||||
|
||||
irq_data = irq_domain_get_irq_data(domain, virq);
|
||||
channel_hwirq = irq_data_get_irq_chip_data(irq_data);
|
||||
|
||||
meson_gpio_irq_release_channel(ctl, channel_hwirq);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
|
||||
.alloc = meson_gpio_irq_domain_alloc,
|
||||
.free = meson_gpio_irq_domain_free,
|
||||
.translate = meson_gpio_irq_domain_translate,
|
||||
};
|
||||
|
||||
static int __init meson_gpio_irq_parse_dt(struct device_node *node,
|
||||
struct meson_gpio_irq_controller *ctl)
|
||||
{
|
||||
const struct of_device_id *match;
|
||||
const struct meson_gpio_irq_params *params;
|
||||
int ret;
|
||||
|
||||
match = of_match_node(meson_irq_gpio_matches, node);
|
||||
if (!match)
|
||||
return -ENODEV;
|
||||
|
||||
params = match->data;
|
||||
ctl->nr_hwirq = params->nr_hwirq;
|
||||
|
||||
ret = of_property_read_variable_u32_array(node,
|
||||
"amlogic,channel-interrupts",
|
||||
ctl->channel_irqs,
|
||||
NUM_CHANNEL,
|
||||
NUM_CHANNEL);
|
||||
if (ret < 0) {
|
||||
pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init meson_gpio_irq_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
struct irq_domain *domain, *parent_domain;
|
||||
struct meson_gpio_irq_controller *ctl;
|
||||
int ret;
|
||||
|
||||
if (!parent) {
|
||||
pr_err("missing parent interrupt node\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
parent_domain = irq_find_host(parent);
|
||||
if (!parent_domain) {
|
||||
pr_err("unable to obtain parent domain\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
|
||||
if (!ctl)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&ctl->lock);
|
||||
|
||||
ctl->base = of_iomap(node, 0);
|
||||
if (!ctl->base) {
|
||||
ret = -ENOMEM;
|
||||
goto free_ctl;
|
||||
}
|
||||
|
||||
ret = meson_gpio_irq_parse_dt(node, ctl);
|
||||
if (ret)
|
||||
goto free_channel_irqs;
|
||||
|
||||
domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq,
|
||||
of_node_to_fwnode(node),
|
||||
&meson_gpio_irq_domain_ops,
|
||||
ctl);
|
||||
if (!domain) {
|
||||
pr_err("failed to add domain\n");
|
||||
ret = -ENODEV;
|
||||
goto free_channel_irqs;
|
||||
}
|
||||
|
||||
pr_info("%d to %d gpio interrupt mux initialized\n",
|
||||
ctl->nr_hwirq, NUM_CHANNEL);
|
||||
|
||||
return 0;
|
||||
|
||||
free_channel_irqs:
|
||||
iounmap(ctl->base);
|
||||
free_ctl:
|
||||
kfree(ctl);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
|
||||
meson_gpio_irq_of_init);
|
|
@@ -8,6 +8,7 @@
|
|||
*/
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/cpuhotplug.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
|
@@ -48,12 +49,16 @@ static DEFINE_SPINLOCK(gic_lock);
|
|||
static struct irq_domain *gic_irq_domain;
|
||||
static struct irq_domain *gic_ipi_domain;
|
||||
static int gic_shared_intrs;
|
||||
static int gic_vpes;
|
||||
static unsigned int gic_cpu_pin;
|
||||
static unsigned int timer_cpu_pin;
|
||||
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
|
||||
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
|
||||
DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
|
||||
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
|
||||
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
|
||||
|
||||
static struct gic_all_vpes_chip_data {
|
||||
u32 map;
|
||||
bool mask;
|
||||
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
|
||||
|
||||
static void gic_clear_pcpu_masks(unsigned int intr)
|
||||
{
|
||||
|
@@ -194,46 +199,46 @@ static void gic_ack_irq(struct irq_data *d)
|
|||
|
||||
static int gic_set_type(struct irq_data *d, unsigned int type)
|
||||
{
|
||||
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
|
||||
unsigned int irq, pol, trig, dual;
|
||||
unsigned long flags;
|
||||
bool is_edge;
|
||||
|
||||
irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
switch (type & IRQ_TYPE_SENSE_MASK) {
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
change_gic_pol(irq, GIC_POL_FALLING_EDGE);
|
||||
change_gic_trig(irq, GIC_TRIG_EDGE);
|
||||
change_gic_dual(irq, GIC_DUAL_SINGLE);
|
||||
is_edge = true;
|
||||
pol = GIC_POL_FALLING_EDGE;
|
||||
trig = GIC_TRIG_EDGE;
|
||||
dual = GIC_DUAL_SINGLE;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_RISING:
|
||||
change_gic_pol(irq, GIC_POL_RISING_EDGE);
|
||||
change_gic_trig(irq, GIC_TRIG_EDGE);
|
||||
change_gic_dual(irq, GIC_DUAL_SINGLE);
|
||||
is_edge = true;
|
||||
pol = GIC_POL_RISING_EDGE;
|
||||
trig = GIC_TRIG_EDGE;
|
||||
dual = GIC_DUAL_SINGLE;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_BOTH:
|
||||
/* polarity is irrelevant in this case */
|
||||
change_gic_trig(irq, GIC_TRIG_EDGE);
|
||||
change_gic_dual(irq, GIC_DUAL_DUAL);
|
||||
is_edge = true;
|
||||
pol = 0; /* Doesn't matter */
|
||||
trig = GIC_TRIG_EDGE;
|
||||
dual = GIC_DUAL_DUAL;
|
||||
break;
|
||||
case IRQ_TYPE_LEVEL_LOW:
|
||||
change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
|
||||
change_gic_trig(irq, GIC_TRIG_LEVEL);
|
||||
change_gic_dual(irq, GIC_DUAL_SINGLE);
|
||||
is_edge = false;
|
||||
pol = GIC_POL_ACTIVE_LOW;
|
||||
trig = GIC_TRIG_LEVEL;
|
||||
dual = GIC_DUAL_SINGLE;
|
||||
break;
|
||||
case IRQ_TYPE_LEVEL_HIGH:
|
||||
default:
|
||||
change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
|
||||
change_gic_trig(irq, GIC_TRIG_LEVEL);
|
||||
change_gic_dual(irq, GIC_DUAL_SINGLE);
|
||||
is_edge = false;
|
||||
pol = GIC_POL_ACTIVE_HIGH;
|
||||
trig = GIC_TRIG_LEVEL;
|
||||
dual = GIC_DUAL_SINGLE;
|
||||
break;
|
||||
}
|
||||
|
||||
if (is_edge)
|
||||
change_gic_pol(irq, pol);
|
||||
change_gic_trig(irq, trig);
|
||||
change_gic_dual(irq, dual);
|
||||
|
||||
if (trig == GIC_TRIG_EDGE)
|
||||
irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
|
||||
handle_edge_irq, NULL);
|
||||
else
|
||||
|
@@ -338,13 +343,17 @@ static struct irq_chip gic_local_irq_controller = {
|
|||
|
||||
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
|
||||
{
|
||||
int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
|
||||
int i;
|
||||
struct gic_all_vpes_chip_data *cd;
|
||||
unsigned long flags;
|
||||
int intr, cpu;
|
||||
|
||||
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
|
||||
cd = irq_data_get_irq_chip_data(d);
|
||||
cd->mask = false;
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
for (i = 0; i < gic_vpes; i++) {
|
||||
write_gic_vl_other(mips_cm_vp_id(i));
|
||||
for_each_online_cpu(cpu) {
|
||||
write_gic_vl_other(mips_cm_vp_id(cpu));
|
||||
write_gic_vo_rmask(BIT(intr));
|
||||
}
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
@@ -352,22 +361,40 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
|
|||
|
||||
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
|
||||
{
|
||||
int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
|
||||
int i;
|
||||
struct gic_all_vpes_chip_data *cd;
|
||||
unsigned long flags;
|
||||
int intr, cpu;
|
||||
|
||||
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
|
||||
cd = irq_data_get_irq_chip_data(d);
|
||||
cd->mask = true;
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
for (i = 0; i < gic_vpes; i++) {
|
||||
write_gic_vl_other(mips_cm_vp_id(i));
|
||||
for_each_online_cpu(cpu) {
|
||||
write_gic_vl_other(mips_cm_vp_id(cpu));
|
||||
write_gic_vo_smask(BIT(intr));
|
||||
}
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
}
|
||||
|
||||
static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
|
||||
{
|
||||
struct gic_all_vpes_chip_data *cd;
|
||||
unsigned int intr;
|
||||
|
||||
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
|
||||
cd = irq_data_get_irq_chip_data(d);
|
||||
|
||||
write_gic_vl_map(intr, cd->map);
|
||||
if (cd->mask)
|
||||
write_gic_vl_smask(BIT(intr));
|
||||
}
|
||||
|
||||
static struct irq_chip gic_all_vpes_local_irq_controller = {
|
||||
.name = "MIPS GIC Local",
|
||||
.irq_mask = gic_mask_local_irq_all_vpes,
|
||||
.irq_unmask = gic_unmask_local_irq_all_vpes,
|
||||
.name = "MIPS GIC Local",
|
||||
.irq_mask = gic_mask_local_irq_all_vpes,
|
||||
.irq_unmask = gic_unmask_local_irq_all_vpes,
|
||||
.irq_cpu_online = gic_all_vpes_irq_cpu_online,
|
||||
};
|
||||
|
||||
static void __gic_irq_dispatch(void)
|
||||
|
@@ -382,39 +409,6 @@ static void gic_irq_dispatch(struct irq_desc *desc)
|
|||
gic_handle_shared_int(true);
|
||||
}
|
||||
|
||||
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
int intr = GIC_HWIRQ_TO_LOCAL(hw);
|
||||
int i;
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
if (!gic_local_irq_is_routable(intr))
|
||||
return -EPERM;
|
||||
|
||||
if (intr > GIC_LOCAL_INT_FDC) {
|
||||
pr_err("Invalid local IRQ %d\n", intr);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (intr == GIC_LOCAL_INT_TIMER) {
|
||||
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
|
||||
val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
|
||||
} else {
|
||||
val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
for (i = 0; i < gic_vpes; i++) {
|
||||
write_gic_vl_other(mips_cm_vp_id(i));
|
||||
write_gic_vo_map(intr, val);
|
||||
}
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
||||
irq_hw_number_t hw, unsigned int cpu)
|
||||
{
|
||||
|
@@ -457,7 +451,11 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
|
|||
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
int err;
|
||||
struct gic_all_vpes_chip_data *cd;
|
||||
unsigned long flags;
|
||||
unsigned int intr;
|
||||
int err, cpu;
|
||||
u32 map;
|
||||
|
||||
if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
|
||||
/* verify that shared irqs don't conflict with an IPI irq */
|
||||
|
@@ -474,8 +472,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
|||
return gic_shared_irq_domain_map(d, virq, hwirq, 0);
|
||||
}
|
||||
|
||||
switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
|
||||
intr = GIC_HWIRQ_TO_LOCAL(hwirq);
|
||||
map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
|
||||
|
||||
switch (intr) {
|
||||
case GIC_LOCAL_INT_TIMER:
|
||||
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
|
||||
map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
|
||||
/* fall-through */
|
||||
case GIC_LOCAL_INT_PERFCTR:
|
||||
case GIC_LOCAL_INT_FDC:
|
||||
/*
|
||||
|
@@ -483,9 +487,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
|||
* the rest of the MIPS kernel code does not use the
|
||||
* percpu IRQ API for them.
|
||||
*/
|
||||
cd = &gic_all_vpes_chip_data[intr];
|
||||
cd->map = map;
|
||||
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
|
||||
&gic_all_vpes_local_irq_controller,
|
||||
NULL);
|
||||
cd);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@@ -504,7 +510,17 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
|||
break;
|
||||
}
|
||||
|
||||
return gic_local_irq_domain_map(d, virq, hwirq);
|
||||
if (!gic_local_irq_is_routable(intr))
|
||||
return -EPERM;
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
for_each_online_cpu(cpu) {
|
||||
write_gic_vl_other(mips_cm_vp_id(cpu));
|
||||
write_gic_vo_map(intr, map);
|
||||
}
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
|
||||
|
@@ -636,11 +652,25 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
|
|||
.match = gic_ipi_domain_match,
|
||||
};
|
||||
|
||||
static int gic_cpu_startup(unsigned int cpu)
|
||||
{
|
||||
/* Enable or disable EIC */
|
||||
change_gic_vl_ctl(GIC_VX_CTL_EIC,
|
||||
cpu_has_veic ? GIC_VX_CTL_EIC : 0);
|
||||
|
||||
/* Clear all local IRQ masks (i.e. disable all local interrupts) */
|
||||
write_gic_vl_rmask(~0);
|
||||
|
||||
/* Invoke irq_cpu_online callbacks to enable desired interrupts */
|
||||
irq_cpu_online();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init gic_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
unsigned int cpu_vec, i, j, gicconfig, cpu, v[2];
|
||||
unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
|
||||
unsigned long reserved;
|
||||
phys_addr_t gic_base;
|
||||
struct resource res;
|
||||
|
@@ -690,17 +720,7 @@ static int __init gic_of_init(struct device_node *node,
|
|||
gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
|
||||
gic_shared_intrs = (gic_shared_intrs + 1) * 8;
|
||||
|
||||
gic_vpes = gicconfig & GIC_CONFIG_PVPS;
|
||||
gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
|
||||
gic_vpes = gic_vpes + 1;
|
||||
|
||||
if (cpu_has_veic) {
|
||||
/* Set EIC mode for all VPEs */
|
||||
for_each_present_cpu(cpu) {
|
||||
write_gic_vl_other(mips_cm_vp_id(cpu));
|
||||
write_gic_vo_ctl(GIC_VX_CTL_EIC);
|
||||
}
|
||||
|
||||
/* Always use vector 1 in EIC mode */
|
||||
gic_cpu_pin = 0;
|
||||
timer_cpu_pin = gic_cpu_pin;
|
||||
|
@@ -756,10 +776,12 @@ static int __init gic_of_init(struct device_node *node,
|
|||
!of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
|
||||
bitmap_set(ipi_resrv, v[0], v[1]);
|
||||
} else {
|
||||
/* Make the last 2 * gic_vpes available for IPIs */
|
||||
bitmap_set(ipi_resrv,
|
||||
gic_shared_intrs - 2 * gic_vpes,
|
||||
2 * gic_vpes);
|
||||
/*
|
||||
* Reserve 2 interrupts per possible CPU/VP for use as IPIs,
|
||||
* meeting the requirements of arch/mips SMP.
|
||||
*/
|
||||
num_ipis = 2 * num_possible_cpus();
|
||||
bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
|
||||
}
|
||||
|
||||
bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
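Illustrative arithmetic for the fallback branch above (platform numbers invented): the IPIs come off the top of the shared-interrupt range.

#include <stdio.h>

int main(void)
{
	unsigned int gic_shared_intrs = 256;	/* hypothetical controller */
	unsigned int num_ipis = 2 * 4;		/* 4 possible CPUs/VPs */

	/* same range bitmap_set() marks above: 248..255 */
	printf("IPIs reserved at %u..%u\n",
	       gic_shared_intrs - num_ipis, gic_shared_intrs - 1);
	return 0;
}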
|
||||
|
@@ -773,15 +795,8 @@ static int __init gic_of_init(struct device_node *node,
|
|||
write_gic_rmask(i);
|
||||
}
|
||||
|
||||
for (i = 0; i < gic_vpes; i++) {
|
||||
write_gic_vl_other(mips_cm_vp_id(i));
|
||||
for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
|
||||
if (!gic_local_irq_is_routable(j))
|
||||
continue;
|
||||
write_gic_vo_rmask(BIT(j));
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
|
||||
"irqchip/mips/gic:starting",
|
||||
gic_cpu_startup, NULL);
|
||||
}
|
||||
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
|
||||
|
|
|
@@ -25,10 +25,6 @@
|
|||
|
||||
#include <linux/irqchip/irq-omap-intc.h>
|
||||
|
||||
/* Define these here for now until we drop all board-files */
|
||||
#define OMAP24XX_IC_BASE 0x480fe000
|
||||
#define OMAP34XX_IC_BASE 0x48200000
|
||||
|
||||
/* selected INTC register offsets */
|
||||
|
||||
#define INTC_REVISION 0x0000
|
||||
|
@@ -70,8 +66,8 @@ static struct omap_intc_regs intc_context;
|
|||
|
||||
static struct irq_domain *domain;
|
||||
static void __iomem *omap_irq_base;
|
||||
static int omap_nr_pending = 3;
|
||||
static int omap_nr_irqs = 96;
|
||||
static int omap_nr_pending;
|
||||
static int omap_nr_irqs;
|
||||
|
||||
static void intc_writel(u32 reg, u32 val)
|
||||
{
|
||||
|
@@ -364,14 +360,6 @@ omap_intc_handle_irq(struct pt_regs *regs)
|
|||
handle_domain_irq(domain, irqnr, regs);
|
||||
}
|
||||
|
||||
void __init omap3_init_irq(void)
|
||||
{
|
||||
omap_nr_irqs = 96;
|
||||
omap_nr_pending = 3;
|
||||
omap_init_irq(OMAP34XX_IC_BASE, NULL);
|
||||
set_handle_irq(omap_intc_handle_irq);
|
||||
}
|
||||
|
||||
static int __init intc_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
|
|
|
@@ -389,9 +389,8 @@ MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
|
|||
|
||||
static int intc_irqpin_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct intc_irqpin_config *config = NULL;
|
||||
const struct intc_irqpin_config *config;
|
||||
struct device *dev = &pdev->dev;
|
||||
const struct of_device_id *of_id;
|
||||
struct intc_irqpin_priv *p;
|
||||
struct intc_irqpin_iomem *i;
|
||||
struct resource *io[INTC_IRQPIN_REG_NR];
|
||||
|
@@ -422,11 +421,9 @@ static int intc_irqpin_probe(struct platform_device *pdev)
|
|||
p->pdev = pdev;
|
||||
platform_set_drvdata(pdev, p);
|
||||
|
||||
of_id = of_match_device(intc_irqpin_dt_ids, dev);
|
||||
if (of_id && of_id->data) {
|
||||
config = of_id->data;
|
||||
config = of_device_get_match_data(dev);
|
||||
if (config)
|
||||
p->needs_clk = config->needs_clk;
|
||||
}
|
||||
|
||||
p->clk = devm_clk_get(dev, NULL);
|
||||
if (IS_ERR(p->clk)) {
|
||||
|
|
|
@@ -289,13 +289,14 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void stm32_gpio_domain_activate(struct irq_domain *d,
|
||||
struct irq_data *irq_data)
|
||||
static int stm32_gpio_domain_activate(struct irq_domain *d,
|
||||
struct irq_data *irq_data, bool early)
|
||||
{
|
||||
struct stm32_gpio_bank *bank = d->host_data;
|
||||
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
|
||||
|
||||
regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int stm32_gpio_domain_alloc(struct irq_domain *d,
|
||||
|
|
|
@@ -98,6 +98,7 @@ enum cpuhp_state {
|
|||
CPUHP_AP_IRQ_HIP04_STARTING,
|
||||
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
|
||||
CPUHP_AP_IRQ_BCM2836_STARTING,
|
||||
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
|
||||
CPUHP_AP_ARM_MVEBU_COHERENCY,
|
||||
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
|
||||
CPUHP_AP_PERF_X86_STARTING,
|
||||
|
|
|
@@ -1113,6 +1113,28 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
|
|||
return readl(gc->reg_base + reg_offset);
|
||||
}
|
||||
|
||||
struct irq_matrix;
|
||||
struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
|
||||
unsigned int alloc_start,
|
||||
unsigned int alloc_end);
|
||||
void irq_matrix_online(struct irq_matrix *m);
|
||||
void irq_matrix_offline(struct irq_matrix *m);
|
||||
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
|
||||
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
|
||||
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
|
||||
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
|
||||
void irq_matrix_reserve(struct irq_matrix *m);
|
||||
void irq_matrix_remove_reserved(struct irq_matrix *m);
|
||||
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
|
||||
bool reserved, unsigned int *mapped_cpu);
|
||||
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
|
||||
unsigned int bit, bool managed);
|
||||
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
|
||||
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
|
||||
unsigned int irq_matrix_allocated(struct irq_matrix *m);
|
||||
unsigned int irq_matrix_reserved(struct irq_matrix *m);
|
||||
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
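A hedged sketch of how an architecture might drive this allocator; the my_* names and vector numbers are invented, only the irq_matrix_* calls come from the declarations above:

/* kernel-context sketch, not a real in-tree user */
static struct irq_matrix *vector_matrix;

static int __init my_vectors_init(void)
{
	/* 256 bits per CPU, of which 32..255 are allocatable */
	vector_matrix = irq_alloc_matrix(256, 32, 256);
	if (!vector_matrix)
		return -ENOMEM;

	/* pin a vector that must stay identical on every CPU */
	irq_matrix_assign_system(vector_matrix, 32, false);
	return 0;
}

static int my_assign_vector(const struct cpumask *msk, unsigned int *cpu)
{
	/* returns the allocated bit (vector) or a negative error code */
	return irq_matrix_alloc(vector_matrix, msk, false, cpu);
}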
|
||||
|
||||
/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
|
||||
#define INVALID_HWIRQ (~0UL)
|
||||
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
|
||||
|
|
|
@@ -68,6 +68,7 @@
|
|||
#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
|
||||
#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
|
||||
|
||||
#define GICD_TYPER_RSS (1U << 26)
|
||||
#define GICD_TYPER_LPIS (1U << 17)
|
||||
#define GICD_TYPER_MBIS (1U << 16)
|
||||
|
||||
|
@@ -459,6 +460,7 @@
|
|||
#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
|
||||
#define ICC_CTLR_EL1_A3V_SHIFT 15
|
||||
#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT)
|
||||
#define ICC_CTLR_EL1_RSS (0x1 << 18)
|
||||
#define ICC_PMR_EL1_SHIFT 0
|
||||
#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT)
|
||||
#define ICC_BPR0_EL1_SHIFT 0
|
||||
|
@@ -547,6 +549,8 @@
|
|||
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
|
||||
#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
|
||||
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
|
||||
#define ICC_SGI1R_RS_SHIFT 44
|
||||
#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT)
|
||||
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
|
||||
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
|
||||
|
||||
|
|
|
@@ -20,6 +20,12 @@
|
|||
|
||||
struct its_vpe;
|
||||
|
||||
/*
|
||||
* Maximum number of ITTs when GITS_TYPER.VMOVP == 0, using the
|
||||
* ITSList mechanism to perform inter-ITS synchronization.
|
||||
*/
|
||||
#define GICv4_ITS_LIST_MAX 16
|
||||
|
||||
/* Embedded in kvm.arch */
|
||||
struct its_vm {
|
||||
struct fwnode_handle *fwnode;
|
||||
|
@@ -30,6 +36,7 @@ struct its_vm {
|
|||
irq_hw_number_t db_lpi_base;
|
||||
unsigned long *db_bitmap;
|
||||
int nr_db_lpis;
|
||||
u32 vlpi_count[GICv4_ITS_LIST_MAX];
|
||||
};
|
||||
|
||||
/* Embedded in kvm_vcpu.arch */
|
||||
|
@@ -64,12 +71,14 @@ struct its_vpe {
|
|||
* @vm: Pointer to the GICv4 notion of a VM
|
||||
* @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
|
||||
* @vintid: Virtual LPI number
|
||||
* @properties: Priority and enable bits (as written in the prop table)
|
||||
* @db_enabled: Is the VPE doorbell to be generated?
|
||||
*/
|
||||
struct its_vlpi_map {
|
||||
struct its_vm *vm;
|
||||
struct its_vpe *vpe;
|
||||
u32 vintid;
|
||||
u8 properties;
|
||||
bool db_enabled;
|
||||
};
|
||||
|
||||
|
|
|
@@ -18,8 +18,6 @@
|
|||
#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
|
||||
#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
|
||||
|
||||
void omap3_init_irq(void);
|
||||
|
||||
int omap_irq_pending(void);
|
||||
void omap_intc_save_context(void);
|
||||
void omap_intc_restore_context(void);
|
||||
|
|
|
@@ -93,6 +93,7 @@ struct irq_desc {
|
|||
#endif
|
||||
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
|
||||
struct dentry *debugfs_file;
|
||||
const char *dev_name;
|
||||
#endif
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
struct rcu_head rcu;
|
||||
|
|
|
@@ -32,6 +32,7 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/irqhandler.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/radix-tree.h>
|
||||
|
||||
struct device_node;
|
||||
|
@@ -40,6 +41,7 @@ struct of_device_id;
|
|||
struct irq_chip;
|
||||
struct irq_data;
|
||||
struct cpumask;
|
||||
struct seq_file;
|
||||
|
||||
/* Number of irqs reserved for a legacy isa controller */
|
||||
#define NUM_ISA_INTERRUPTS 16
|
||||
|
@@ -104,18 +106,21 @@ struct irq_domain_ops {
|
|||
int (*xlate)(struct irq_domain *d, struct device_node *node,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq, unsigned int *out_type);
|
||||
|
||||
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
|
||||
/* extended V2 interfaces to support hierarchy irq_domains */
|
||||
int (*alloc)(struct irq_domain *d, unsigned int virq,
|
||||
unsigned int nr_irqs, void *arg);
|
||||
void (*free)(struct irq_domain *d, unsigned int virq,
|
||||
unsigned int nr_irqs);
|
||||
void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
|
||||
int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
|
||||
void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
|
||||
int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
|
||||
unsigned long *out_hwirq, unsigned int *out_type);
|
||||
#endif
|
||||
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
|
||||
void (*debug_show)(struct seq_file *m, struct irq_domain *d,
|
||||
struct irq_data *irqd, int ind);
|
||||
#endif
|
||||
};
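With the new signature, ->activate may fail and receives an early flag (true on the reserve-only pass used for early MSI activation). A hedged sketch of a hierarchy driver adapting; my_hw_route(), my_domain_alloc() and my_domain_free() are invented stand-ins:

static int my_domain_activate(struct irq_domain *d, struct irq_data *irqd,
			      bool early)
{
	/* nothing is committed to hardware on the early/reserve pass */
	if (early)
		return 0;

	/* real programming may fail; the core rolls back on error */
	return my_hw_route(d->host_data, irqd->hwirq);
}

static const struct irq_domain_ops my_domain_ops = {
	.alloc    = my_domain_alloc,
	.free     = my_domain_free,
	.activate = my_domain_activate,
};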
|
||||
|
||||
extern struct irq_domain_ops irq_generic_chip_ops;
|
||||
|
@@ -133,8 +138,8 @@ struct irq_domain_chip_generic;
|
|||
* @mapcount: The number of mapped interrupts
|
||||
*
|
||||
* Optional elements
|
||||
* @of_node: Pointer to device tree nodes associated with the irq_domain. Used
|
||||
* when decoding device tree interrupt specifiers.
|
||||
* @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
|
||||
* to swap it for the of_node via the irq_domain_get_of_node accessor
|
||||
* @gc: Pointer to a list of generic chips. There is a helper function for
|
||||
* setting up one or more generic chips for interrupt controllers
|
||||
* drivers using the generic chip library which uses this pointer.
|
||||
|
@@ -172,6 +177,7 @@ struct irq_domain {
|
|||
unsigned int revmap_direct_max_irq;
|
||||
unsigned int revmap_size;
|
||||
struct radix_tree_root revmap_tree;
|
||||
struct mutex revmap_tree_mutex;
|
||||
unsigned int linear_revmap[];
|
||||
};
|
||||
|
||||
|
@@ -437,7 +443,7 @@ extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
|
|||
unsigned int nr_irqs, int node, void *arg,
|
||||
bool realloc, const struct cpumask *affinity);
|
||||
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
|
||||
extern void irq_domain_activate_irq(struct irq_data *irq_data);
|
||||
extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
|
||||
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
|
||||
|
||||
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
|
||||
|
@@ -507,8 +513,6 @@ static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
|
|||
extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
|
||||
|
||||
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
|
||||
static inline void irq_domain_activate_irq(struct irq_data *data) { }
|
||||
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
|
||||
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
|
||||
unsigned int nr_irqs, int node, void *arg)
|
||||
{
|
||||
|
@@ -557,8 +561,6 @@ irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
|
|||
|
||||
#else /* CONFIG_IRQ_DOMAIN */
|
||||
static inline void irq_dispose_mapping(unsigned int virq) { }
|
||||
static inline void irq_domain_activate_irq(struct irq_data *data) { }
|
||||
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
|
||||
static inline struct irq_domain *irq_find_matching_fwnode(
|
||||
struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
|
||||
{
|
||||
|
|
|
@@ -283,6 +283,11 @@ enum {
|
|||
MSI_FLAG_PCI_MSIX = (1 << 3),
|
||||
/* Needs early activate, required for PCI */
|
||||
MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
|
||||
/*
|
||||
* Must reactivate when irq is started even when
|
||||
* MSI_FLAG_ACTIVATE_EARLY has been set.
|
||||
*/
|
||||
MSI_FLAG_MUST_REACTIVATE = (1 << 5),
|
||||
};
|
||||
|
||||
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
|
||||
|
|
|
@@ -0,0 +1,201 @@
|
|||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM irq_matrix
|
||||
|
||||
#if !defined(_TRACE_IRQ_MATRIX_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _TRACE_IRQ_MATRIX_H
|
||||
|
||||
#include <linux/tracepoint.h>
|
||||
|
||||
struct irq_matrix;
|
||||
struct cpumap;
|
||||
|
||||
DECLARE_EVENT_CLASS(irq_matrix_global,
|
||||
|
||||
TP_PROTO(struct irq_matrix *matrix),
|
||||
|
||||
TP_ARGS(matrix),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( unsigned int, online_maps )
|
||||
__field( unsigned int, global_available )
|
||||
__field( unsigned int, global_reserved )
|
||||
__field( unsigned int, total_allocated )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->online_maps = matrix->online_maps;
|
||||
__entry->global_available = matrix->global_available;
|
||||
__entry->global_reserved = matrix->global_reserved;
|
||||
__entry->total_allocated = matrix->total_allocated;
|
||||
),
|
||||
|
||||
TP_printk("online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u",
|
||||
__entry->online_maps, __entry->global_available,
|
||||
__entry->global_reserved, __entry->total_allocated)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(irq_matrix_global_update,
|
||||
|
||||
TP_PROTO(int bit, struct irq_matrix *matrix),
|
||||
|
||||
TP_ARGS(bit, matrix),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( int, bit )
|
||||
__field( unsigned int, online_maps )
|
||||
__field( unsigned int, global_available )
|
||||
__field( unsigned int, global_reserved )
|
||||
__field( unsigned int, total_allocated )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->bit = bit;
|
||||
__entry->online_maps = matrix->online_maps;
|
||||
__entry->global_available = matrix->global_available;
|
||||
__entry->global_reserved = matrix->global_reserved;
|
||||
__entry->total_allocated = matrix->total_allocated;
|
||||
),
|
||||
|
||||
TP_printk("bit=%d online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u",
|
||||
__entry->bit, __entry->online_maps,
|
||||
__entry->global_available, __entry->global_reserved,
|
||||
__entry->total_allocated)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(irq_matrix_cpu,
|
||||
|
||||
TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix,
|
||||
struct cpumap *cmap),
|
||||
|
||||
TP_ARGS(bit, cpu, matrix, cmap),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( int, bit )
|
||||
__field( unsigned int, cpu )
|
||||
__field( bool, online )
|
||||
__field( unsigned int, available )
|
||||
__field( unsigned int, allocated )
|
||||
__field( unsigned int, managed )
|
||||
__field( unsigned int, online_maps )
|
||||
__field( unsigned int, global_available )
|
||||
__field( unsigned int, global_reserved )
|
||||
__field( unsigned int, total_allocated )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->bit = bit;
|
||||
__entry->cpu = cpu;
|
||||
__entry->online = cmap->online;
|
||||
__entry->available = cmap->available;
|
||||
__entry->allocated = cmap->allocated;
|
||||
__entry->managed = cmap->managed;
|
||||
__entry->online_maps = matrix->online_maps;
|
||||
__entry->global_available = matrix->global_available;
|
||||
__entry->global_reserved = matrix->global_reserved;
|
||||
__entry->total_allocated = matrix->total_allocated;
|
||||
),
|
||||
|
||||
TP_printk("bit=%d cpu=%u online=%d avl=%u alloc=%u managed=%u online_maps=%u global_avl=%u, global_rsvd=%u, total_alloc=%u",
|
||||
__entry->bit, __entry->cpu, __entry->online,
|
||||
__entry->available, __entry->allocated,
|
||||
__entry->managed, __entry->online_maps,
|
||||
__entry->global_available, __entry->global_reserved,
|
||||
__entry->total_allocated)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_global, irq_matrix_online,
|
||||
|
||||
TP_PROTO(struct irq_matrix *matrix),
|
||||
|
||||
TP_ARGS(matrix)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_global, irq_matrix_offline,
|
||||
|
||||
TP_PROTO(struct irq_matrix *matrix),
|
||||
|
||||
TP_ARGS(matrix)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_global, irq_matrix_reserve,
|
||||
|
||||
TP_PROTO(struct irq_matrix *matrix),
|
||||
|
||||
TP_ARGS(matrix)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_global, irq_matrix_remove_reserved,
|
||||
|
||||
TP_PROTO(struct irq_matrix *matrix),
|
||||
|
||||
TP_ARGS(matrix)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_global_update, irq_matrix_assign_system,
|
||||
|
||||
TP_PROTO(int bit, struct irq_matrix *matrix),
|
||||
|
||||
TP_ARGS(bit, matrix)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_reserved,
|
||||
|
||||
TP_PROTO(int bit, unsigned int cpu,
|
||||
struct irq_matrix *matrix, struct cpumap *cmap),
|
||||
|
||||
TP_ARGS(bit, cpu, matrix, cmap)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_reserve_managed,
|
||||
|
||||
TP_PROTO(int bit, unsigned int cpu,
|
||||
struct irq_matrix *matrix, struct cpumap *cmap),
|
||||
|
||||
TP_ARGS(bit, cpu, matrix, cmap)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_remove_managed,
|
||||
|
||||
TP_PROTO(int bit, unsigned int cpu,
|
||||
struct irq_matrix *matrix, struct cpumap *cmap),
|
||||
|
||||
TP_ARGS(bit, cpu, matrix, cmap)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_managed,
|
||||
|
||||
TP_PROTO(int bit, unsigned int cpu,
|
||||
struct irq_matrix *matrix, struct cpumap *cmap),
|
||||
|
||||
TP_ARGS(bit, cpu, matrix, cmap)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_assign,
|
||||
|
||||
TP_PROTO(int bit, unsigned int cpu,
|
||||
struct irq_matrix *matrix, struct cpumap *cmap),
|
||||
|
||||
TP_ARGS(bit, cpu, matrix, cmap)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc,
|
||||
|
||||
TP_PROTO(int bit, unsigned int cpu,
|
||||
struct irq_matrix *matrix, struct cpumap *cmap),
|
||||
|
||||
TP_ARGS(bit, cpu, matrix, cmap)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_free,
|
||||
|
||||
TP_PROTO(int bit, unsigned int cpu,
|
||||
struct irq_matrix *matrix, struct cpumap *cmap),
|
||||
|
||||
TP_ARGS(bit, cpu, matrix, cmap)
|
||||
);
|
||||
|
||||
|
||||
#endif /* _TRACE_IRQ_MATRIX_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
#include <trace/define_trace.h>
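These event classes only become callable once some .c file instantiates them; a sketch of the expected pattern (the real call sites live in the new kernel/irq/matrix.c added by this merge):

/* kernel-context sketch */
#define CREATE_TRACE_POINTS
#include <trace/events/irq_matrix.h>

static void example(struct irq_matrix *m, struct cpumap *cm,
		    int bit, unsigned int cpu)
{
	/* one trace_<name>() stub is generated per DEFINE_EVENT above */
	trace_irq_matrix_alloc(bit, cpu, m, cm);
}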
|
|
@@ -97,6 +97,9 @@ config HANDLE_DOMAIN_IRQ
|
|||
config IRQ_TIMINGS
|
||||
bool
|
||||
|
||||
config GENERIC_IRQ_MATRIX_ALLOCATOR
|
||||
bool
|
||||
|
||||
config IRQ_DOMAIN_DEBUG
|
||||
bool "Expose hardware/virtual IRQ mapping via debugfs"
|
||||
depends on IRQ_DOMAIN && DEBUG_FS
|
||||
|
|
|
@@ -13,3 +13,4 @@ obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
|
|||
obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
|
||||
obj-$(CONFIG_SMP) += affinity.o
|
||||
obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
|
||||
obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o
|
||||
|
|
|
@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
|
|||
if (desc->irq_data.chip->irq_set_type)
|
||||
desc->irq_data.chip->irq_set_type(&desc->irq_data,
|
||||
IRQ_TYPE_PROBE);
|
||||
irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE);
|
||||
irq_activate_and_startup(desc, IRQ_NORESEND);
|
||||
}
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
|
|
@@ -207,20 +207,24 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
|
|||
* Catch code which fiddles with enable_irq() on a managed
|
||||
* and potentially shutdown IRQ. Chained interrupt
|
||||
* installment or irq auto probing should not happen on
|
||||
* managed irqs either. Emit a warning, break the affinity
|
||||
* and start it up as a normal interrupt.
|
||||
* managed irqs either.
|
||||
*/
|
||||
if (WARN_ON_ONCE(force))
|
||||
return IRQ_STARTUP_NORMAL;
|
||||
return IRQ_STARTUP_ABORT;
|
||||
/*
|
||||
* The interrupt was requested, but there is no online CPU
|
||||
* in it's affinity mask. Put it into managed shutdown
|
||||
* state and let the cpu hotplug mechanism start it up once
|
||||
* a CPU in the mask becomes available.
|
||||
*/
|
||||
irqd_set_managed_shutdown(d);
|
||||
return IRQ_STARTUP_ABORT;
|
||||
}
|
||||
/*
|
||||
* Managed interrupts have reserved resources, so this should not
|
||||
* happen.
|
||||
*/
|
||||
if (WARN_ON(irq_domain_activate_irq(d, false)))
|
||||
return IRQ_STARTUP_ABORT;
|
||||
return IRQ_STARTUP_MANAGED;
|
||||
}
|
||||
#else
|
||||
|
@@ -236,7 +240,9 @@ static int __irq_startup(struct irq_desc *desc)
|
|||
struct irq_data *d = irq_desc_get_irq_data(desc);
|
||||
int ret = 0;
|
||||
|
||||
irq_domain_activate_irq(d);
|
||||
/* Warn if this interrupt is not activated but try nevertheless */
|
||||
WARN_ON_ONCE(!irqd_is_activated(d));
|
||||
|
||||
if (d->chip->irq_startup) {
|
||||
ret = d->chip->irq_startup(d);
|
||||
irq_state_clr_disabled(desc);
|
||||
|
@@ -269,6 +275,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
|
|||
irq_set_affinity_locked(d, aff, false);
|
||||
break;
|
||||
case IRQ_STARTUP_ABORT:
|
||||
irqd_set_managed_shutdown(d);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@@ -278,6 +285,22 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int irq_activate(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_data *d = irq_desc_get_irq_data(desc);
|
||||
|
||||
if (!irqd_affinity_is_managed(d))
|
||||
return irq_domain_activate_irq(d, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void irq_activate_and_startup(struct irq_desc *desc, bool resend)
|
||||
{
|
||||
if (WARN_ON(irq_activate(desc)))
|
||||
return;
|
||||
irq_startup(desc, resend, IRQ_START_FORCE);
|
||||
}
|
||||
|
||||
static void __irq_disable(struct irq_desc *desc, bool mask);
|
||||
|
||||
void irq_shutdown(struct irq_desc *desc)
|
||||
|
@@ -953,7 +976,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
|
|||
irq_settings_set_norequest(desc);
|
||||
irq_settings_set_nothread(desc);
|
||||
desc->action = &chained_action;
|
||||
irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
|
||||
irq_activate_and_startup(desc, IRQ_RESEND);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -81,6 +81,8 @@ irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
|
|||
data->domain ? data->domain->name : "");
|
||||
seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
|
||||
irq_debug_show_chip(m, data, ind + 1);
|
||||
if (data->domain && data->domain->ops && data->domain->ops->debug_show)
|
||||
data->domain->ops->debug_show(m, NULL, data, ind + 1);
|
||||
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
|
||||
if (!data->parent_data)
|
||||
return;
|
||||
|
@@ -149,6 +151,7 @@ static int irq_debug_show(struct seq_file *m, void *p)
|
|||
raw_spin_lock_irq(&desc->lock);
|
||||
data = irq_desc_get_irq_data(desc);
|
||||
seq_printf(m, "handler: %pf\n", desc->handle_irq);
|
||||
seq_printf(m, "device: %s\n", desc->dev_name);
|
||||
seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
|
||||
irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
|
||||
ARRAY_SIZE(irqdesc_states));
|
||||
|
@@ -226,6 +229,15 @@ static const struct file_operations dfs_irq_ops = {
|
|||
.release = single_release,
|
||||
};
|
||||
|
||||
void irq_debugfs_copy_devname(int irq, struct device *dev)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
const char *name = dev_name(dev);
|
||||
|
||||
if (name)
|
||||
desc->dev_name = kstrdup(name, GFP_KERNEL);
|
||||
}
|
||||
|
||||
void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
char name[10];
|
||||
|
|
|
@@ -74,6 +74,8 @@ extern void __enable_irq(struct irq_desc *desc);
|
|||
#define IRQ_START_FORCE true
|
||||
#define IRQ_START_COND false
|
||||
|
||||
extern int irq_activate(struct irq_desc *desc);
|
||||
extern void irq_activate_and_startup(struct irq_desc *desc, bool resend);
|
||||
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
|
||||
|
||||
extern void irq_shutdown(struct irq_desc *desc);
|
||||
|
@@ -436,6 +438,18 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
|
|||
}
|
||||
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
|
||||
|
||||
#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
|
||||
static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
|
||||
{
|
||||
irqd_set_activated(data);
|
||||
return 0;
|
||||
}
|
||||
static inline void irq_domain_deactivate_irq(struct irq_data *data)
|
||||
{
|
||||
irqd_clr_activated(data);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
|
||||
#include <linux/debugfs.h>
|
||||
|
||||
|
@@ -443,7 +457,9 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
|
|||
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
|
||||
{
|
||||
debugfs_remove(desc->debugfs_file);
|
||||
kfree(desc->dev_name);
|
||||
}
|
||||
void irq_debugfs_copy_devname(int irq, struct device *dev);
|
||||
# ifdef CONFIG_IRQ_DOMAIN
|
||||
void irq_domain_debugfs_init(struct dentry *root);
|
||||
# else
|
||||
|
@@ -458,4 +474,7 @@ static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
|
|||
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
|
||||
{
|
||||
}
|
||||
static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
|
||||
|
|
|
@@ -27,7 +27,7 @@ static struct lock_class_key irq_desc_lock_class;
|
|||
#if defined(CONFIG_SMP)
|
||||
static int __init irq_affinity_setup(char *str)
|
||||
{
|
||||
zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
|
||||
alloc_bootmem_cpumask_var(&irq_default_affinity);
|
||||
cpulist_parse(str, irq_default_affinity);
|
||||
/*
|
||||
* Set at least the boot cpu. We don't want to end up with
|
||||
|
@@ -40,10 +40,8 @@ __setup("irqaffinity=", irq_affinity_setup);
|
|||
|
||||
static void __init init_irq_default_affinity(void)
|
||||
{
|
||||
#ifdef CONFIG_CPUMASK_OFFSTACK
|
||||
if (!irq_default_affinity)
|
||||
if (!cpumask_available(irq_default_affinity))
|
||||
zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
|
||||
#endif
|
||||
if (cpumask_empty(irq_default_affinity))
|
||||
cpumask_setall(irq_default_affinity);
|
||||
}
|
||||
|
@@ -448,7 +446,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
|
|||
}
|
||||
}
|
||||
|
||||
flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
|
||||
flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
|
||||
mask = NULL;
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
|
@@ -462,6 +460,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
|
|||
goto err;
|
||||
irq_insert_desc(start + i, desc);
|
||||
irq_sysfs_add(start + i, desc);
|
||||
irq_add_debugfs_entry(start + i, desc);
|
||||
}
|
||||
bitmap_set(allocated_irqs, start, cnt);
|
||||
return start;
|
||||
|
|
|
@@ -21,7 +21,6 @@
|
|||
static LIST_HEAD(irq_domain_list);
|
||||
static DEFINE_MUTEX(irq_domain_mutex);
|
||||
|
||||
static DEFINE_MUTEX(revmap_trees_mutex);
|
||||
static struct irq_domain *irq_default_domain;
|
||||
|
||||
static void irq_domain_check_hierarchy(struct irq_domain *domain);
|
||||
|
@@ -211,6 +210,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
|
|||
|
||||
/* Fill structure */
|
||||
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
|
||||
mutex_init(&domain->revmap_tree_mutex);
|
||||
domain->ops = ops;
|
||||
domain->host_data = host_data;
|
||||
domain->hwirq_max = hwirq_max;
|
||||
|
@@ -462,9 +462,9 @@ static void irq_domain_clear_mapping(struct irq_domain *domain,
|
|||
if (hwirq < domain->revmap_size) {
|
||||
domain->linear_revmap[hwirq] = 0;
|
||||
} else {
|
||||
mutex_lock(&revmap_trees_mutex);
|
||||
mutex_lock(&domain->revmap_tree_mutex);
|
||||
radix_tree_delete(&domain->revmap_tree, hwirq);
|
||||
mutex_unlock(&revmap_trees_mutex);
|
||||
mutex_unlock(&domain->revmap_tree_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -475,9 +475,9 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
|
|||
if (hwirq < domain->revmap_size) {
|
||||
domain->linear_revmap[hwirq] = irq_data->irq;
|
||||
} else {
|
||||
mutex_lock(&revmap_trees_mutex);
|
||||
mutex_lock(&domain->revmap_tree_mutex);
|
||||
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
|
||||
mutex_unlock(&revmap_trees_mutex);
|
||||
mutex_unlock(&domain->revmap_tree_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1459,11 +1459,11 @@ static void irq_domain_fix_revmap(struct irq_data *d)
                return; /* Not using radix tree. */

        /* Fix up the revmap. */
        mutex_lock(&revmap_trees_mutex);
        mutex_lock(&d->domain->revmap_tree_mutex);
        slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
        if (slot)
                radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
        mutex_unlock(&revmap_trees_mutex);
        mutex_unlock(&d->domain->revmap_tree_mutex);
}

/**
@@ -1682,18 +1682,6 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);

static void __irq_domain_activate_irq(struct irq_data *irq_data)
{
        if (irq_data && irq_data->domain) {
                struct irq_domain *domain = irq_data->domain;

                if (irq_data->parent_data)
                        __irq_domain_activate_irq(irq_data->parent_data);
                if (domain->ops->activate)
                        domain->ops->activate(domain, irq_data);
        }
}

static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
{
        if (irq_data && irq_data->domain) {
@@ -1706,6 +1694,26 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
        }
}

static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
{
        int ret = 0;

        if (irqd && irqd->domain) {
                struct irq_domain *domain = irqd->domain;

                if (irqd->parent_data)
                        ret = __irq_domain_activate_irq(irqd->parent_data,
                                                        early);
                if (!ret && domain->ops->activate) {
                        ret = domain->ops->activate(domain, irqd, early);
                        /* Rollback in case of error */
                        if (ret && irqd->parent_data)
                                __irq_domain_deactivate_irq(irqd->parent_data);
                }
        }
        return ret;
}

/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 *                           interrupt
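With this hunk the activate callback returns an error code and takes an early-activation flag. A minimal sketch of a driver callback adapted to the new signature; my_chip, my_chip_reserve_vector() and my_chip_program_vector() are illustrative names, not part of this series:

static int my_domain_activate(struct irq_domain *d, struct irq_data *irqd,
                              bool early)
{
        struct my_chip *chip = d->host_data;    /* hypothetical driver data */

        /* Early activation only associates resources, no HW writes yet. */
        if (early)
                return my_chip_reserve_vector(chip, irqd->hwirq);

        /* Full activation programs the vector and may fail, e.g. -ENOSPC. */
        return my_chip_program_vector(chip, irqd->hwirq);
}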
@@ -1714,12 +1722,15 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 */
void irq_domain_activate_irq(struct irq_data *irq_data)
int irq_domain_activate_irq(struct irq_data *irq_data, bool early)
{
        if (!irqd_is_activated(irq_data)) {
                __irq_domain_activate_irq(irq_data);
        int ret = 0;

        if (!irqd_is_activated(irq_data))
                ret = __irq_domain_activate_irq(irq_data, early);
        if (!ret)
                irqd_set_activated(irq_data);
        }
        return ret;
}

/**
@@ -1810,6 +1821,8 @@ irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
                   d->revmap_size + d->revmap_direct_max_irq);
        seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
        seq_printf(m, "%*sflags: 0x%08x\n", ind + 1, "", d->flags);
        if (d->ops && d->ops->debug_show)
                d->ops->debug_show(m, d, NULL, ind + 1);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
        if (!d->parent)
                return;

@@ -381,7 +381,8 @@ int irq_select_affinity_usr(unsigned int irq)
/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data
 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *             specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
@@ -519,7 +520,7 @@ void __enable_irq(struct irq_desc *desc)
                 * time. If it was already started up, then irq_startup()
                 * will invoke irq_enable() under the hood.
                 */
                irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
                irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
                break;
        }
        default:
@@ -1325,6 +1326,21 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                        goto out_unlock;
                }

                /*
                 * Activate the interrupt. That activation must happen
                 * independently of IRQ_NOAUTOEN. request_irq() can fail
                 * and the callers are supposed to handle
                 * that. enable_irq() of an interrupt requested with
                 * IRQ_NOAUTOEN is not supposed to fail. The activation
                 * keeps it in shutdown mode, it merely associates
                 * resources if necessary and if that's not possible it
                 * fails. Interrupts which are in managed shutdown mode
                 * will simply ignore that activation request.
                 */
                ret = irq_activate(desc);
                if (ret)
                        goto out_unlock;

                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
                                  IRQS_ONESHOT | IRQS_WAITING);
                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
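Since activation can now fail, request_irq() propagates the error to its caller. A caller sketch; my_device, my_handler and my_probe are illustrative:

static int my_probe(struct my_device *my_dev, int irq)
{
        int err = request_irq(irq, my_handler, 0, "my_dev", my_dev);

        if (err)        /* now also covers activation failures, e.g. vector exhaustion */
                return err;
        return 0;
}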
@@ -1400,7 +1416,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                wake_up_process(new->secondary->thread);

        register_irq_proc(irq, desc);
        irq_add_debugfs_entry(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);
        return 0;

@@ -0,0 +1,443 @@
/*
 * Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
 *
 * SPDX-License-Identifier: GPL-2.0
 */
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))

struct cpumap {
        unsigned int            available;
        unsigned int            allocated;
        unsigned int            managed;
        bool                    online;
        unsigned long           alloc_map[IRQ_MATRIX_SIZE];
        unsigned long           managed_map[IRQ_MATRIX_SIZE];
};

struct irq_matrix {
        unsigned int            matrix_bits;
        unsigned int            alloc_start;
        unsigned int            alloc_end;
        unsigned int            alloc_size;
        unsigned int            global_available;
        unsigned int            global_reserved;
        unsigned int            systembits_inalloc;
        unsigned int            total_allocated;
        unsigned int            online_maps;
        struct cpumap __percpu  *maps;
        unsigned long           scratch_map[IRQ_MATRIX_SIZE];
        unsigned long           system_map[IRQ_MATRIX_SIZE];
};

#define CREATE_TRACE_POINTS
#include <trace/events/irq_matrix.h>

/**
 * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it
 * @matrix_bits:        Number of matrix bits must be <= IRQ_MATRIX_BITS
 * @alloc_start:        From which bit the allocation search starts
 * @alloc_end:          At which bit the allocation search ends, i.e. the
 *                      first invalid bit
 */
__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
                                           unsigned int alloc_start,
                                           unsigned int alloc_end)
{
        struct irq_matrix *m;

        if (matrix_bits > IRQ_MATRIX_BITS)
                return NULL;

        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (!m)
                return NULL;

        m->matrix_bits = matrix_bits;
        m->alloc_start = alloc_start;
        m->alloc_end = alloc_end;
        m->alloc_size = alloc_end - alloc_start;
        m->maps = alloc_percpu(*m->maps);
        if (!m->maps) {
                kfree(m);
                return NULL;
        }
        return m;
}

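A minimal usage sketch for the allocator above; the 256/32 split and the names are illustrative and assume IRQ_MATRIX_BITS is at least 256:

static struct irq_matrix *vector_matrix;

static int __init my_vector_domain_init(void)
{
        /* 256 vectors total, vectors 0-31 excluded from dynamic allocation */
        vector_matrix = irq_alloc_matrix(256, 32, 256);
        if (!vector_matrix)
                return -ENOMEM;
        return 0;
}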
/**
 * irq_matrix_online - Bring the local CPU matrix online
 * @m:          Matrix pointer
 */
void irq_matrix_online(struct irq_matrix *m)
{
        struct cpumap *cm = this_cpu_ptr(m->maps);

        BUG_ON(cm->online);

        bitmap_zero(cm->alloc_map, m->matrix_bits);
        cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
        cm->allocated = 0;
        m->global_available += cm->available;
        cm->online = true;
        m->online_maps++;
        trace_irq_matrix_online(m);
}

/**
 * irq_matrix_offline - Bring the local CPU matrix offline
 * @m:          Matrix pointer
 */
void irq_matrix_offline(struct irq_matrix *m)
{
        struct cpumap *cm = this_cpu_ptr(m->maps);

        /* Update the global available size */
        m->global_available -= cm->available;
        cm->online = false;
        m->online_maps--;
        trace_irq_matrix_offline(m);
}

static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
                                      unsigned int num, bool managed)
{
        unsigned int area, start = m->alloc_start;
        unsigned int end = m->alloc_end;

        bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
        bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
        area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
        if (area >= end)
                return area;
        if (managed)
                bitmap_set(cm->managed_map, area, num);
        else
                bitmap_set(cm->alloc_map, area, num);
        return area;
}

/**
 * irq_matrix_assign_system - Assign system wide entry in the matrix
 * @m:          Matrix pointer
 * @bit:        Which bit to reserve
 * @replace:    Replace an already allocated vector with a system
 *              vector at the same bit position.
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the
 * early boot process, then the chance to survive is about zero.
 * If this happens when the system is live, it's not much better.
 */
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
                              bool replace)
{
        struct cpumap *cm = this_cpu_ptr(m->maps);

        BUG_ON(bit > m->matrix_bits);
        BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

        set_bit(bit, m->system_map);
        if (replace) {
                BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
                cm->allocated--;
                m->total_allocated--;
        }
        if (bit >= m->alloc_start && bit < m->alloc_end)
                m->systembits_inalloc++;

        trace_irq_matrix_assign_system(bit, m);
}

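A sketch of how an architecture might carve out a fixed system vector at boot; MY_TIMER_VECTOR and the function name are hypothetical:

#define MY_TIMER_VECTOR 48      /* illustrative, inside the allocatable range */

static void __init my_reserve_system_vectors(void)
{
        /* Never hand out the timer vector to regular allocations. */
        irq_matrix_assign_system(vector_matrix, MY_TIMER_VECTOR, false);
}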
/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:          Matrix pointer
 * @msk:        On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
        unsigned int cpu, failed_cpu;

        for_each_cpu(cpu, msk) {
                struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
                unsigned int bit;

                bit = matrix_alloc_area(m, cm, 1, true);
                if (bit >= m->alloc_end)
                        goto cleanup;
                cm->managed++;
                if (cm->online) {
                        cm->available--;
                        m->global_available--;
                }
                trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
        }
        return 0;
cleanup:
        failed_cpu = cpu;
        for_each_cpu(cpu, msk) {
                if (cpu == failed_cpu)
                        break;
                irq_matrix_remove_managed(m, cpumask_of(cpu));
        }
        return -ENOSPC;
}

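A sketch of the managed-affinity lifecycle built from the calls above: reserve one bit per CPU up front, then bind the reservation on a concrete CPU at activation time. Everything except the irq_matrix API is illustrative:

static int my_setup_managed_irq(struct irq_matrix *m, const struct cpumask *msk)
{
        int bit, ret;

        ret = irq_matrix_reserve_managed(m, msk);       /* one bit per CPU in msk */
        if (ret)
                return ret;                             /* -ENOSPC, nothing reserved */

        bit = irq_matrix_alloc_managed(m, cpumask_first(msk));
        if (bit < 0)                                    /* undo the reservations */
                irq_matrix_remove_managed(m, msk);
        return bit;
}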
/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:          Matrix pointer
 * @msk:        On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs
 *
 * This removes not allocated managed interrupts from the map. It does
 * not matter which one because the managed interrupts free their
 * allocation when they shut down. If not, the accounting is screwed,
 * but all that can be done at this point is warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
        unsigned int cpu;

        for_each_cpu(cpu, msk) {
                struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
                unsigned int bit, end = m->alloc_end;

                if (WARN_ON_ONCE(!cm->managed))
                        continue;

                /* Get managed bits which are not allocated */
                bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

                bit = find_first_bit(m->scratch_map, end);
                if (WARN_ON_ONCE(bit >= end))
                        continue;

                clear_bit(bit, cm->managed_map);

                cm->managed--;
                if (cm->online) {
                        cm->available++;
                        m->global_available++;
                }
                trace_irq_matrix_remove_managed(bit, cpu, m, cm);
        }
}

/**
 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
 * @m:          Matrix pointer
 * @cpu:        On which CPU the interrupt should be allocated
 */
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
{
        struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
        unsigned int bit, end = m->alloc_end;

        /* Get managed bits which are not allocated */
        bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
        bit = find_first_bit(m->scratch_map, end);
        if (bit >= end)
                return -ENOSPC;
        set_bit(bit, cm->alloc_map);
        cm->allocated++;
        m->total_allocated++;
        trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
        return bit;
}

/**
 * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
 * @m:          Matrix pointer
 * @bit:        Which bit to mark
 *
 * This should only be used to mark preallocated vectors
 */
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
        struct cpumap *cm = this_cpu_ptr(m->maps);

        if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
                return;
        if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
                return;
        cm->allocated++;
        m->total_allocated++;
        cm->available--;
        m->global_available--;
        trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}

/**
 * irq_matrix_reserve - Reserve interrupts
 * @m:          Matrix pointer
 *
 * This is merely a book keeping call. It increments the number of globally
 * reserved interrupt bits w/o actually allocating them. This allows setting
 * up interrupt descriptors w/o assigning low level resources to them.
 * The actual allocation happens when the interrupt gets activated.
 */
void irq_matrix_reserve(struct irq_matrix *m)
{
        if (m->global_reserved <= m->global_available &&
            m->global_reserved + 1 > m->global_available)
                pr_warn("Interrupt reservation exceeds available resources\n");

        m->global_reserved++;
        trace_irq_matrix_reserve(m);
}

/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m:          Matrix pointer
 *
 * This is merely a book keeping call. It decrements the number of globally
 * reserved interrupt bits. This is used to undo irq_matrix_reserve() when
 * the interrupt was never in use; an actual vector allocation would already
 * have undone the reservation.
 */
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
        m->global_reserved--;
        trace_irq_matrix_remove_reserved(m);
}

/**
 * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
 * @m:          Matrix pointer
 * @msk:        Which CPUs to search in
 * @reserved:   Allocate previously reserved interrupts
 * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
 */
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
                     bool reserved, unsigned int *mapped_cpu)
{
        unsigned int cpu;

        for_each_cpu(cpu, msk) {
                struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
                unsigned int bit;

                if (!cm->online)
                        continue;

                bit = matrix_alloc_area(m, cm, 1, false);
                if (bit < m->alloc_end) {
                        cm->allocated++;
                        cm->available--;
                        m->total_allocated++;
                        m->global_available--;
                        if (reserved)
                                m->global_reserved--;
                        *mapped_cpu = cpu;
                        trace_irq_matrix_alloc(bit, cpu, m, cm);
                        return bit;
                }
        }
        return -ENOSPC;
}

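A sketch of the regular allocate/free pairing; the my_cpu/my_bit bookkeeping and function names are illustrative (irq_matrix_free() is defined just below):

static unsigned int my_cpu;
static int my_bit = -1;

static int my_vector_alloc(struct irq_matrix *m)
{
        my_bit = irq_matrix_alloc(m, cpu_online_mask, false, &my_cpu);
        return my_bit < 0 ? my_bit : 0;         /* -ENOSPC on exhaustion */
}

static void my_vector_free(struct irq_matrix *m)
{
        if (my_bit >= 0)                        /* release on the owning CPU */
                irq_matrix_free(m, my_cpu, my_bit, false);
}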
/**
 * irq_matrix_free - Free allocated interrupt in the matrix
 * @m:          Matrix pointer
 * @cpu:        Which CPU map needs to be updated
 * @bit:        The bit to remove
 * @managed:    If true, the interrupt is managed and not accounted
 *              as available.
 */
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
                     unsigned int bit, bool managed)
{
        struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

        if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
                return;

        if (cm->online) {
                clear_bit(bit, cm->alloc_map);
                cm->allocated--;
                m->total_allocated--;
                if (!managed) {
                        cm->available++;
                        m->global_available++;
                }
        }
        trace_irq_matrix_free(bit, cpu, m, cm);
}

/**
 * irq_matrix_available - Get the number of globally available irqs
 * @m:          Pointer to the matrix to query
 * @cpudown:    If true, the local CPU is about to go down, adjust
 *              the number of available irqs accordingly
 */
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
{
        struct cpumap *cm = this_cpu_ptr(m->maps);

        /* Parenthesized so the ternary applies to the subtrahend only */
        return m->global_available - (cpudown ? cm->available : 0);
}

/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:          Pointer to the matrix to query
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
        return m->global_reserved;
}

/**
 * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
 * @m:          Pointer to the matrix to search
 *
 * This returns number of allocated irqs
 */
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
        struct cpumap *cm = this_cpu_ptr(m->maps);

        return cm->allocated;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:         Pointer to the seq_file to print to
 * @m:          Pointer to the matrix allocator
 * @ind:        Indentation for the print format
 *
 * Note, this is a lockless snapshot.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
        unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
        int cpu;

        seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
        seq_printf(sf, "Global available: %6u\n", m->global_available);
        seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
        seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
        seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
                   m->system_map);
        seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
        cpus_read_lock();
        for_each_online_cpu(cpu) {
                struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

                seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
                           cpu, cm->available, cm->managed, cm->allocated,
                           m->matrix_bits, cm->alloc_map);
        }
        cpus_read_unlock();
}
#endif

@@ -16,6 +16,8 @@
#include <linux/msi.h>
#include <linux/slab.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate and initialize an msi_entry
 * @dev:        Pointer to the device for which this is allocated
@@ -100,13 +102,14 @@ int msi_domain_set_affinity(struct irq_data *irq_data,
        return ret;
}

static void msi_domain_activate(struct irq_domain *domain,
                                struct irq_data *irq_data)
static int msi_domain_activate(struct irq_domain *domain,
                               struct irq_data *irq_data, bool early)
{
        struct msi_msg msg;

        BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
        irq_chip_write_msi_msg(irq_data, &msg);
        return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
@@ -373,8 +376,10 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                        return ret;
                }

                for (i = 0; i < desc->nvec_used; i++)
                for (i = 0; i < desc->nvec_used; i++) {
                        irq_set_msi_desc_off(virq, i, desc);
                        irq_debugfs_copy_devname(virq + i, dev);
                }
        }

        if (ops->msi_finish)
@@ -396,11 +401,28 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                        struct irq_data *irq_data;

                        irq_data = irq_domain_get_irq_data(domain, desc->irq);
                        irq_domain_activate_irq(irq_data);
                        ret = irq_domain_activate_irq(irq_data, true);
                        if (ret)
                                goto cleanup;
                        if (info->flags & MSI_FLAG_MUST_REACTIVATE)
                                irqd_clr_activated(irq_data);
                }
        }

        return 0;

cleanup:
        for_each_msi_entry(desc, dev) {
                struct irq_data *irqd;

                if (desc->irq == virq)
                        break;

                irqd = irq_domain_get_irq_data(domain, desc->irq);
                if (irqd_is_activated(irqd))
                        irq_domain_deactivate_irq(irqd);
        }
        msi_domain_free_irqs(domain, dev);
        return ret;
}

/**

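MSI_FLAG_MUST_REACTIVATE lets a domain request early, reserve-only activation that is redone when the interrupt is actually requested. A hypothetical domain info sketch; my_msi_ops and my_msi_chip are illustrative:

static struct msi_domain_info my_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                 MSI_FLAG_MUST_REACTIVATE,
        .ops   = &my_msi_ops,           /* illustrative msi_domain_ops */
        .chip  = &my_msi_chip,          /* illustrative irq_chip */
};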
@@ -138,11 +138,7 @@ static void irq_work_run_list(struct llist_head *list)
                return;

        llnode = llist_del_all(list);
        while (llnode != NULL) {
                work = llist_entry(llnode, struct irq_work, llnode);

                llnode = llist_next(llnode);

        llist_for_each_entry(work, llnode, llnode) {
                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
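The same llist pattern in isolation, for reference: llist_del_all() detaches the whole lock-less list atomically, llist_for_each_entry() then walks the detached chain without further atomics. my_list, my_run_list and do_work are illustrative:

#include <linux/llist.h>

static LLIST_HEAD(my_list);

static void do_work(struct irq_work *work)
{
        /* illustrative handler */
}

static void my_run_list(void)
{
        struct llist_node *llnode = llist_del_all(&my_list);
        struct irq_work *work;

        llist_for_each_entry(work, llnode, llnode)      /* walks ->llnode chain */
                do_work(work);
}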