Merge branches 'pci/host-generic', 'pci/host-imx6', 'pci/host-iproc' and 'pci/host-rcar' into next
* pci/host-generic:
  PCI: generic,versatile: Remove unused pci_sys_data structures

* pci/host-imx6:
  PCI: imx6: Add support for active-low reset GPIO
  PCI: imx6: Use gpio_set_value_cansleep()

* pci/host-iproc:
  PCI: iproc: Add iProc PCIe MSI support
  PCI: iproc: Add iProc PCIe MSI device tree binding
  PCI: iproc: Add PAXC interface support
  PCI: iproc: Update iProc PCIe device tree binding
  PCI: iproc: Do not use 0x in front of %pap
  PCI: iproc: Hide CONFIG_PCIE_IPROC

* pci/host-rcar:
  PCI: rcar: Add gen2 fallback compatibility string for pcie-rcar
  PCI: rcar: Add gen2 fallback compatibility string for pci-rcar-gen2
  PCI: rcar: Add support for R-Car H3 to pcie-rcar
  Revert "PCI: rcar: Build pcie-rcar.c only on ARM"
  PCI: rcar: Convert to DT resource parsing API
  PCI: rcar: Allow DT to override default window settings
commit 65d5b109d1
@@ -1,7 +1,10 @@
* Broadcom iProc PCIe controller with the platform bus interface

Required properties:
- compatible: Must be "brcm,iproc-pcie"
- compatible: Must be "brcm,iproc-pcie" for PAXB, or "brcm,iproc-pcie-paxc"
  for PAXC. PAXB-based root complex is used for external endpoint devices.
  PAXC-based root complex is connected to emulated endpoint devices
  internal to the ASIC
- reg: base address and length of the PCIe controller I/O register space
- #interrupt-cells: set to <1>
- interrupt-map-mask and interrupt-map, standard PCI properties to define the

@@ -32,6 +35,28 @@ Optional:
- brcm,pcie-ob-oarr-size: Some iProc SoCs need the OARR size bit to be set to
  increase the outbound window size

MSI support (optional):

For older platforms without MSI integrated in the GIC, iProc PCIe core provides
an event queue based MSI support. The iProc MSI uses host memories to store
MSI posted writes in the event queues

- msi-parent: Link to the device node of the MSI controller. On newer iProc
  platforms, the MSI controller may be gicv2m or gicv3-its. On older iProc
  platforms without MSI support in its interrupt controller, one may use the
  event queue based MSI support integrated within the iProc PCIe core.

When the iProc event queue based MSI is used, one needs to define the
following properties in the MSI device node:
- compatible: Must be "brcm,iproc-msi"
- msi-controller: claims itself as an MSI controller
- interrupt-parent: Link to its parent interrupt device
- interrupts: List of interrupt IDs from its parent interrupt device

Optional properties:
- brcm,pcie-msi-inten: Needs to be present for some older iProc platforms that
  require the interrupt enable registers to be set explicitly to enable MSI

Example:
    pcie0: pcie@18012000 {
        compatible = "brcm,iproc-pcie";

@@ -58,6 +83,19 @@ Example:
        brcm,pcie-ob-oarr-size;
        brcm,pcie-ob-axi-offset = <0x00000000>;
        brcm,pcie-ob-window-size = <256>;

        msi-parent = <&msi0>;

        /* iProc event queue based MSI */
        msi0: msi@18012000 {
            compatible = "brcm,iproc-msi";
            msi-controller;
            interrupt-parent = <&gic>;
            interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
                         <GIC_SPI 97 IRQ_TYPE_NONE>,
                         <GIC_SPI 98 IRQ_TYPE_NONE>,
                         <GIC_SPI 99 IRQ_TYPE_NONE>,
        };
    };

    pcie1: pcie@18013000 {
@@ -8,7 +8,14 @@ OHCI and EHCI controllers.
Required properties:
- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
              "renesas,pci-r8a7791" for the R8A7791 SoC;
              "renesas,pci-r8a7794" for the R8A7794 SoC.
              "renesas,pci-r8a7794" for the R8A7794 SoC;
              "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device

              When compatible with the generic version, nodes must list the
              SoC-specific version corresponding to the platform first
              followed by the generic version.

- reg: A list of physical regions to access the device: the first is
       the operational registers for the OHCI/EHCI controllers and the
       second is for the bridge configuration and control registers.

@@ -24,10 +31,15 @@ Required properties:
- interrupt-map-mask: standard property that helps to define the interrupt
  mapping.

Optional properties:
- dma-ranges: a single range for the inbound memory region. If not supplied,
  defaults to 1GiB at 0x40000000. Note there are hardware restrictions on the
  allowed combinations of address and size.

Example SoC configuration:

    pci0: pci@ee090000 {
        compatible = "renesas,pci-r8a7790";
        compatible = "renesas,pci-r8a7790", "renesas,pci-rcar-gen2";
        clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
        reg = <0x0 0xee090000 0x0 0xc00>,
              <0x0 0xee080000 0x0 0x1100>;

@@ -38,6 +50,7 @@ Example SoC configuration:
        #address-cells = <3>;
        #size-cells = <2>;
        #interrupt-cells = <1>;
        dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
        interrupt-map-mask = <0xff00 0 0 0x7>;
        interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
                         0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
@@ -1,8 +1,16 @@
* Renesas RCar PCIe interface

Required properties:
- compatible: should contain one of the following
      "renesas,pcie-r8a7779", "renesas,pcie-r8a7790", "renesas,pcie-r8a7791"
compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
            "renesas,pcie-r8a7790" for the R8A7790 SoC;
            "renesas,pcie-r8a7791" for the R8A7791 SoC;
            "renesas,pcie-r8a7795" for the R8A7795 SoC;
            "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.

            When compatible with the generic version, nodes must list the
            SoC-specific version corresponding to the platform first
            followed by the generic version.

- reg: base address and length of the pcie controller registers.
- #address-cells: set to <3>
- #size-cells: set to <2>

@@ -25,7 +33,7 @@ Example:
SoC specific DT Entry:

    pcie: pcie@fe000000 {
        compatible = "renesas,pcie-r8a7791";
        compatible = "renesas,pcie-r8a7791", "renesas,pcie-rcar-gen2";
        reg = <0 0xfe000000 0 0x80000>;
        #address-cells = <3>;
        #size-cells = <2>;
@@ -48,8 +48,7 @@ config PCI_RCAR_GEN2

config PCI_RCAR_GEN2_PCIE
    bool "Renesas R-Car PCIe controller"
    depends on ARM
    depends on ARCH_SHMOBILE || COMPILE_TEST
    depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
    help
      Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.

@@ -118,13 +117,11 @@ config PCI_VERSATILE
    depends on ARCH_VERSATILE

config PCIE_IPROC
    tristate "Broadcom iProc PCIe controller"
    depends on OF && (ARM || ARM64)
    default n
    tristate
    help
      This enables the iProc PCIe core controller support for Broadcom's
      iProc family of SoCs. An appropriate bus interface driver also needs
      to be enabled
      iProc family of SoCs. An appropriate bus interface driver needs
      to be enabled to select this.

config PCIE_IPROC_PLATFORM
    tristate "Broadcom iProc PCIe platform bus driver"

@@ -147,6 +144,16 @@ config PCIE_IPROC_BCMA
      Say Y here if you want to use the Broadcom iProc PCIe controller
      through the BCMA bus interface

config PCIE_IPROC_MSI
    bool "Broadcom iProc PCIe MSI support"
    depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
    depends on PCI_MSI
    select PCI_MSI_IRQ_DOMAIN
    default ARCH_BCM_IPROC
    help
      Say Y here if you want to enable MSI support for Broadcom's iProc
      PCIe controller

config PCIE_ALTERA
    bool "Altera PCIe controller"
    depends on ARM || NIOS2
@@ -15,6 +15,7 @@ obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
@@ -38,16 +38,7 @@ struct gen_pci_cfg_windows {
    struct gen_pci_cfg_bus_ops *ops;
};

/*
 * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI
 * sysdata. Add pci_sys_data as the first element in struct gen_pci so
 * that when we use a gen_pci pointer as sysdata, it is also a pointer to
 * a struct pci_sys_data.
 */
struct gen_pci {
#ifdef CONFIG_ARM
    struct pci_sys_data sys;
#endif
    struct pci_host_bridge host;
    struct gen_pci_cfg_windows cfg;
    struct list_head resources;
@@ -32,7 +32,7 @@
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)

struct imx6_pcie {
    int reset_gpio;
    struct gpio_desc *reset_gpio;
    struct clk *pcie_bus;
    struct clk *pcie_phy;
    struct clk *pcie;

@@ -287,10 +287,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
    usleep_range(200, 500);

    /* Some boards don't have PCIe reset GPIO. */
    if (gpio_is_valid(imx6_pcie->reset_gpio)) {
        gpio_set_value(imx6_pcie->reset_gpio, 0);
    if (imx6_pcie->reset_gpio) {
        gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
        msleep(100);
        gpio_set_value(imx6_pcie->reset_gpio, 1);
        gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
    }
    return 0;

@@ -560,7 +560,6 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
{
    struct imx6_pcie *imx6_pcie;
    struct pcie_port *pp;
    struct device_node *np = pdev->dev.of_node;
    struct resource *dbi_base;
    int ret;

@@ -581,15 +580,8 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
        return PTR_ERR(pp->dbi_base);

    /* Fetch GPIOs */
    imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
    if (gpio_is_valid(imx6_pcie->reset_gpio)) {
        ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
                                    GPIOF_OUT_INIT_LOW, "PCIe reset");
        if (ret) {
            dev_err(&pdev->dev, "unable to get reset gpio\n");
            return ret;
        }
    }
    imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
                                                    GPIOD_OUT_LOW);

    /* Fetch clocks */
    imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
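For readers following the gpiod conversion above: descriptor-based GPIO calls take logical values, so writing "1" drives the line to its active level and gpiolib applies the polarity declared in the device tree (low for a GPIO_ACTIVE_LOW line). A minimal sketch under that assumption; the device node, property values, and delay below are hypothetical and not taken from this driver:

    /* DT (hypothetical): reset-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>; */
    struct gpio_desc *rst = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);

    if (!IS_ERR_OR_NULL(rst)) {
        gpiod_set_value_cansleep(rst, 1); /* logical "active"; pin goes low here */
        msleep(20);                       /* board-specific hold time */
        gpiod_set_value_cansleep(rst, 0); /* logical "inactive"; pin released high */
    }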
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

@@ -102,6 +103,8 @@ struct rcar_pci_priv {
    unsigned busnr;
    int irq;
    unsigned long window_size;
    unsigned long window_addr;
    unsigned long window_pci;
};

/* PCI configuration space operations */

@@ -239,8 +242,8 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
          RCAR_PCI_ARBITER_PCIBP_MODE;
    iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);

    /* PCI-AHB mapping: 0x40000000 base */
    iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
    /* PCI-AHB mapping */
    iowrite32(priv->window_addr | RCAR_PCIAHB_PREFETCH16,
              reg + RCAR_PCIAHB_WIN1_CTR_REG);

    /* AHB-PCI mapping: OHCI/EHCI registers */

@@ -251,7 +254,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
    iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
              reg + RCAR_AHBPCI_WIN1_CTR_REG);
    /* Set PCI-AHB Window1 address */
    iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH,
    iowrite32(priv->window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH,
              reg + PCI_BASE_ADDRESS_1);
    /* Set AHB-PCI bridge PCI communication area address */
    val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;

@@ -284,6 +287,64 @@ static struct pci_ops rcar_pci_ops = {
    .write = pci_generic_config_write,
};

static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
                                     struct device_node *node)
{
    const int na = 3, ns = 2;
    int rlen;

    parser->node = node;
    parser->pna = of_n_addr_cells(node);
    parser->np = parser->pna + na + ns;

    parser->range = of_get_property(node, "dma-ranges", &rlen);
    if (!parser->range)
        return -ENOENT;

    parser->end = parser->range + rlen / sizeof(__be32);
    return 0;
}

static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
                                         struct device_node *np)
{
    struct of_pci_range range;
    struct of_pci_range_parser parser;
    int index = 0;

    /* Failure to parse is ok as we fall back to defaults */
    if (pci_dma_range_parser_init(&parser, np))
        return 0;

    /* Get the dma-ranges from DT */
    for_each_of_pci_range(&parser, &range) {
        /* Hardware only allows one inbound 32-bit range */
        if (index)
            return -EINVAL;

        pci->window_addr = (unsigned long)range.cpu_addr;
        pci->window_pci = (unsigned long)range.pci_addr;
        pci->window_size = (unsigned long)range.size;

        /* Catch HW limitations */
        if (!(range.flags & IORESOURCE_PREFETCH)) {
            dev_err(pci->dev, "window must be prefetchable\n");
            return -EINVAL;
        }
        if (pci->window_addr) {
            u32 lowaddr = 1 << (ffs(pci->window_addr) - 1);

            if (lowaddr < pci->window_size) {
                dev_err(pci->dev, "invalid window size/addr\n");
                return -EINVAL;
            }
        }
        index++;
    }

    return 0;
}
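A quick worked illustration of the alignment check in rcar_pci_parse_map_dma_ranges() above; the addresses and sizes are hypothetical, not taken from any board file:

    /*
     * cpu_addr = 0x40000000 -> ffs() = 31 -> lowaddr = 0x40000000 (1 GiB):
     *     any window_size up to 1 GiB is accepted.
     * cpu_addr = 0x48000000 -> ffs() = 28 -> lowaddr = 0x08000000 (128 MiB):
     *     a 256 MiB window_size would be rejected as "invalid window size/addr",
     *     because the window must not span past the alignment of its base.
     */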

static int rcar_pci_probe(struct platform_device *pdev)
{
    struct resource *cfg_res, *mem_res;

@@ -329,6 +390,9 @@ static int rcar_pci_probe(struct platform_device *pdev)
        return priv->irq;
    }

    /* default window addr and size if not specified in DT */
    priv->window_addr = 0x40000000;
    priv->window_pci = 0x40000000;
    priv->window_size = SZ_1G;

    if (pdev->dev.of_node) {

@@ -344,6 +408,12 @@ static int rcar_pci_probe(struct platform_device *pdev)
        priv->busnr = busnr.start;
        if (busnr.end != busnr.start)
            dev_warn(&pdev->dev, "only one bus number supported\n");

        ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node);
        if (ret < 0) {
            dev_err(&pdev->dev, "failed to parse dma-range\n");
            return ret;
        }
    } else {
        priv->busnr = pdev->id;
    }

@@ -360,6 +430,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
}

static struct of_device_id rcar_pci_of_match[] = {
    { .compatible = "renesas,pci-rcar-gen2", },
    { .compatible = "renesas,pci-r8a7790", },
    { .compatible = "renesas,pci-r8a7791", },
    { .compatible = "renesas,pci-r8a7794", },
@@ -125,9 +125,6 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
    return err;
}

/* Unused, temporary to satisfy ARM arch code */
struct pci_sys_data sys;

static int versatile_pci_probe(struct platform_device *pdev)
{
    struct resource *res;

@@ -208,7 +205,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
    pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
    pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);

    bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, &sys, &pci_res);
    bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, NULL, &pci_res);
    if (!bus)
        return -ENOMEM;
@@ -55,6 +55,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
    bcma_set_drvdata(bdev, pcie);

    pcie->base = bdev->io_addr;
    pcie->base_addr = bdev->addr;

    res_mem.start = bdev->addr_s[0];
    res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
@ -0,0 +1,675 @@
|
|||
/*
|
||||
* Copyright (C) 2015 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_pci.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include "pcie-iproc.h"
|
||||
|
||||
#define IPROC_MSI_INTR_EN_SHIFT 11
|
||||
#define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT)
|
||||
#define IPROC_MSI_INT_N_EVENT_SHIFT 1
|
||||
#define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
|
||||
#define IPROC_MSI_EQ_EN_SHIFT 0
|
||||
#define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT)
|
||||
|
||||
#define IPROC_MSI_EQ_MASK 0x3f
|
||||
|
||||
/* Max number of GIC interrupts */
|
||||
#define NR_HW_IRQS 6
|
||||
|
||||
/* Number of entries in each event queue */
|
||||
#define EQ_LEN 64
|
||||
|
||||
/* Size of each event queue memory region */
|
||||
#define EQ_MEM_REGION_SIZE SZ_4K
|
||||
|
||||
/* Size of each MSI address region */
|
||||
#define MSI_MEM_REGION_SIZE SZ_4K
|
||||
|
||||
enum iproc_msi_reg {
|
||||
IPROC_MSI_EQ_PAGE = 0,
|
||||
IPROC_MSI_EQ_PAGE_UPPER,
|
||||
IPROC_MSI_PAGE,
|
||||
IPROC_MSI_PAGE_UPPER,
|
||||
IPROC_MSI_CTRL,
|
||||
IPROC_MSI_EQ_HEAD,
|
||||
IPROC_MSI_EQ_TAIL,
|
||||
IPROC_MSI_INTS_EN,
|
||||
IPROC_MSI_REG_SIZE,
|
||||
};
|
||||
|
||||
struct iproc_msi;
|
||||
|
||||
/**
|
||||
* iProc MSI group
|
||||
*
|
||||
* One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
|
||||
* event queue.
|
||||
*
|
||||
* @msi: pointer to iProc MSI data
|
||||
* @gic_irq: GIC interrupt
|
||||
* @eq: Event queue number
|
||||
*/
|
||||
struct iproc_msi_grp {
|
||||
struct iproc_msi *msi;
|
||||
int gic_irq;
|
||||
unsigned int eq;
|
||||
};
|
||||
|
||||
/**
|
||||
* iProc event queue based MSI
|
||||
*
|
||||
* Only meant to be used on platforms without MSI support integrated into the
|
||||
* GIC.
|
||||
*
|
||||
* @pcie: pointer to iProc PCIe data
|
||||
* @reg_offsets: MSI register offsets
|
||||
* @grps: MSI groups
|
||||
* @nr_irqs: number of total interrupts connected to GIC
|
||||
* @nr_cpus: number of toal CPUs
|
||||
* @has_inten_reg: indicates the MSI interrupt enable register needs to be
|
||||
* set explicitly (required for some legacy platforms)
|
||||
* @bitmap: MSI vector bitmap
|
||||
* @bitmap_lock: lock to protect access to the MSI bitmap
|
||||
* @nr_msi_vecs: total number of MSI vectors
|
||||
* @inner_domain: inner IRQ domain
|
||||
* @msi_domain: MSI IRQ domain
|
||||
* @nr_eq_region: required number of 4K aligned memory region for MSI event
|
||||
* queues
|
||||
* @nr_msi_region: required number of 4K aligned address region for MSI posted
|
||||
* writes
|
||||
* @eq_cpu: pointer to allocated memory region for MSI event queues
|
||||
* @eq_dma: DMA address of MSI event queues
|
||||
* @msi_addr: MSI address
|
||||
*/
|
||||
struct iproc_msi {
|
||||
struct iproc_pcie *pcie;
|
||||
const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
|
||||
struct iproc_msi_grp *grps;
|
||||
int nr_irqs;
|
||||
int nr_cpus;
|
||||
bool has_inten_reg;
|
||||
unsigned long *bitmap;
|
||||
struct mutex bitmap_lock;
|
||||
unsigned int nr_msi_vecs;
|
||||
struct irq_domain *inner_domain;
|
||||
struct irq_domain *msi_domain;
|
||||
unsigned int nr_eq_region;
|
||||
unsigned int nr_msi_region;
|
||||
void *eq_cpu;
|
||||
dma_addr_t eq_dma;
|
||||
phys_addr_t msi_addr;
|
||||
};
|
||||
|
||||
static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
|
||||
{ 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
|
||||
{ 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
|
||||
{ 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
|
||||
{ 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
|
||||
{ 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
|
||||
{ 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
|
||||
};
|
||||
|
||||
static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
|
||||
{ 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
|
||||
{ 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
|
||||
{ 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
|
||||
{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
|
||||
};
|
||||
|
||||
static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
|
||||
enum iproc_msi_reg reg,
|
||||
unsigned int eq)
|
||||
{
|
||||
struct iproc_pcie *pcie = msi->pcie;
|
||||
|
||||
return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
|
||||
}
|
||||
|
||||
static inline void iproc_msi_write_reg(struct iproc_msi *msi,
|
||||
enum iproc_msi_reg reg,
|
||||
int eq, u32 val)
|
||||
{
|
||||
struct iproc_pcie *pcie = msi->pcie;
|
||||
|
||||
writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
|
||||
}
|
||||
|
||||
static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
|
||||
{
|
||||
return (hwirq % msi->nr_irqs);
|
||||
}
|
||||
|
||||
static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
|
||||
unsigned long hwirq)
|
||||
{
|
||||
if (msi->nr_msi_region > 1)
|
||||
return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
|
||||
else
|
||||
return hwirq_to_group(msi, hwirq) * sizeof(u32);
|
||||
}
|
||||
|
||||
static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
|
||||
{
|
||||
if (msi->nr_eq_region > 1)
|
||||
return eq * EQ_MEM_REGION_SIZE;
|
||||
else
|
||||
return eq * EQ_LEN * sizeof(u32);
|
||||
}
|
||||
|
||||
static struct irq_chip iproc_msi_irq_chip = {
|
||||
.name = "iProc-MSI",
|
||||
};
|
||||
|
||||
static struct msi_domain_info iproc_msi_domain_info = {
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX,
|
||||
.chip = &iproc_msi_irq_chip,
|
||||
};
|
||||
|
||||
/*
|
||||
* In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
|
||||
* dedicated event queue. Each MSI group can support up to 64 MSI vectors.
|
||||
*
|
||||
* The number of MSI groups varies between different iProc SoCs. The total
|
||||
* number of CPU cores also varies. To support MSI IRQ affinity, we
|
||||
* distribute GIC interrupts across all available CPUs. MSI vector is moved
|
||||
* from one GIC interrupt to another to steer to the target CPU.
|
||||
*
|
||||
* Assuming:
|
||||
* - the number of MSI groups is M
|
||||
* - the number of CPU cores is N
|
||||
* - M is always a multiple of N
|
||||
*
|
||||
* Total number of raw MSI vectors = M * 64
|
||||
* Total number of supported MSI vectors = (M * 64) / N
|
||||
*/
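To make the vector arithmetic above concrete, a worked example with hypothetical values (not tied to a specific SoC):

    /*
     * Example: M = 6 MSI groups (GIC interrupts), N = 2 CPUs.
     *   raw vectors    = M * 64       = 384
     *   usable vectors = (M * 64) / N = 192
     * Each allocation reserves N consecutive hwirqs, so a vector can be
     * steered to any CPU by shifting within that group of N.
     */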
|
||||
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
|
||||
{
|
||||
return (hwirq % msi->nr_cpus);
|
||||
}
|
||||
|
||||
static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
|
||||
unsigned long hwirq)
|
||||
{
|
||||
return (hwirq - hwirq_to_cpu(msi, hwirq));
|
||||
}
|
||||
|
||||
static int iproc_msi_irq_set_affinity(struct irq_data *data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
|
||||
int target_cpu = cpumask_first(mask);
|
||||
int curr_cpu;
|
||||
|
||||
curr_cpu = hwirq_to_cpu(msi, data->hwirq);
|
||||
if (curr_cpu == target_cpu)
|
||||
return IRQ_SET_MASK_OK_DONE;
|
||||
|
||||
/* steer MSI to the target CPU */
|
||||
data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
|
||||
|
||||
return IRQ_SET_MASK_OK;
|
||||
}
|
||||
|
||||
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
|
||||
struct msi_msg *msg)
|
||||
{
|
||||
struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
|
||||
dma_addr_t addr;
|
||||
|
||||
addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
|
||||
msg->address_lo = lower_32_bits(addr);
|
||||
msg->address_hi = upper_32_bits(addr);
|
||||
msg->data = data->hwirq;
|
||||
}
|
||||
|
||||
static struct irq_chip iproc_msi_bottom_irq_chip = {
|
||||
.name = "MSI",
|
||||
.irq_set_affinity = iproc_msi_irq_set_affinity,
|
||||
.irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
|
||||
};
|
||||
|
||||
static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
|
||||
unsigned int virq, unsigned int nr_irqs,
|
||||
void *args)
|
||||
{
|
||||
struct iproc_msi *msi = domain->host_data;
|
||||
int hwirq;
|
||||
|
||||
mutex_lock(&msi->bitmap_lock);
|
||||
|
||||
/* Allocate 'nr_cpus' number of MSI vectors each time */
|
||||
hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
|
||||
msi->nr_cpus, 0);
|
||||
if (hwirq < msi->nr_msi_vecs) {
|
||||
bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
|
||||
} else {
|
||||
mutex_unlock(&msi->bitmap_lock);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
mutex_unlock(&msi->bitmap_lock);
|
||||
|
||||
irq_domain_set_info(domain, virq, hwirq, &iproc_msi_bottom_irq_chip,
|
||||
domain->host_data, handle_simple_irq, NULL, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
|
||||
unsigned int virq, unsigned int nr_irqs)
|
||||
{
|
||||
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
|
||||
struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
|
||||
unsigned int hwirq;
|
||||
|
||||
mutex_lock(&msi->bitmap_lock);
|
||||
|
||||
hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
|
||||
bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);
|
||||
|
||||
mutex_unlock(&msi->bitmap_lock);
|
||||
|
||||
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops msi_domain_ops = {
|
||||
.alloc = iproc_msi_irq_domain_alloc,
|
||||
.free = iproc_msi_irq_domain_free,
|
||||
};
|
||||
|
||||
static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
|
||||
{
|
||||
u32 *msg, hwirq;
|
||||
unsigned int offs;
|
||||
|
||||
offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
|
||||
msg = (u32 *)(msi->eq_cpu + offs);
|
||||
hwirq = *msg & IPROC_MSI_EQ_MASK;
|
||||
|
||||
/*
|
||||
* Since we have multiple hwirqs mapped to a single MSI vector,
* now we need to derive the hwirq at CPU0. It can then be used to
* map back to the virq.
|
||||
*/
|
||||
return hwirq_to_canonical_hwirq(msi, hwirq);
|
||||
}
|
||||
|
||||
static void iproc_msi_handler(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct iproc_msi_grp *grp;
|
||||
struct iproc_msi *msi;
|
||||
struct iproc_pcie *pcie;
|
||||
u32 eq, head, tail, nr_events;
|
||||
unsigned long hwirq;
|
||||
int virq;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
grp = irq_desc_get_handler_data(desc);
|
||||
msi = grp->msi;
|
||||
pcie = msi->pcie;
|
||||
eq = grp->eq;
|
||||
|
||||
/*
|
||||
* iProc MSI event queue is tracked by head and tail pointers. Head
|
||||
* pointer indicates the next entry (MSI data) to be consumed by SW in
|
||||
* the queue and needs to be updated by SW. iProc MSI core uses the
|
||||
* tail pointer as the next data insertion point.
|
||||
*
|
||||
* Entries between head and tail pointers contain valid MSI data. MSI
|
||||
* data is guaranteed to be in the event queue memory before the tail
|
||||
* pointer is updated by the iProc MSI core.
|
||||
*/
|
||||
head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
|
||||
eq) & IPROC_MSI_EQ_MASK;
|
||||
do {
|
||||
tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
|
||||
eq) & IPROC_MSI_EQ_MASK;
|
||||
|
||||
/*
|
||||
* Figure out total number of events (MSI data) to be
|
||||
* processed.
|
||||
*/
|
||||
nr_events = (tail < head) ?
|
||||
(EQ_LEN - (head - tail)) : (tail - head);
|
||||
if (!nr_events)
|
||||
break;
|
||||
|
||||
/* process all outstanding events */
|
||||
while (nr_events--) {
|
||||
hwirq = decode_msi_hwirq(msi, eq, head);
|
||||
virq = irq_find_mapping(msi->inner_domain, hwirq);
|
||||
generic_handle_irq(virq);
|
||||
|
||||
head++;
|
||||
head %= EQ_LEN;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now all outstanding events have been processed. Update the
|
||||
* head pointer.
|
||||
*/
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);
|
||||
|
||||
/*
|
||||
* Now go read the tail pointer again to see if there are new
|
||||
* outstanding events that came in during the above window.
|
||||
*/
|
||||
} while (true);
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void iproc_msi_enable(struct iproc_msi *msi)
|
||||
{
|
||||
int i, eq;
|
||||
u32 val;
|
||||
|
||||
/* Program memory region for each event queue */
|
||||
for (i = 0; i < msi->nr_eq_region; i++) {
|
||||
dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);
|
||||
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
|
||||
lower_32_bits(addr));
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
|
||||
upper_32_bits(addr));
|
||||
}
|
||||
|
||||
/* Program address region for MSI posted writes */
|
||||
for (i = 0; i < msi->nr_msi_region; i++) {
|
||||
phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);
|
||||
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
|
||||
lower_32_bits(addr));
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
|
||||
upper_32_bits(addr));
|
||||
}
|
||||
|
||||
for (eq = 0; eq < msi->nr_irqs; eq++) {
|
||||
/* Enable MSI event queue */
|
||||
val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
|
||||
IPROC_MSI_EQ_EN;
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
|
||||
|
||||
/*
|
||||
* Some legacy platforms require the MSI interrupt enable
|
||||
* register to be set explicitly.
|
||||
*/
|
||||
if (msi->has_inten_reg) {
|
||||
val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
|
||||
val |= BIT(eq);
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void iproc_msi_disable(struct iproc_msi *msi)
|
||||
{
|
||||
u32 eq, val;
|
||||
|
||||
for (eq = 0; eq < msi->nr_irqs; eq++) {
|
||||
if (msi->has_inten_reg) {
|
||||
val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
|
||||
val &= ~BIT(eq);
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
|
||||
}
|
||||
|
||||
val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
|
||||
val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
|
||||
IPROC_MSI_EQ_EN);
|
||||
iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
|
||||
}
|
||||
}
|
||||
|
||||
static int iproc_msi_alloc_domains(struct device_node *node,
|
||||
struct iproc_msi *msi)
|
||||
{
|
||||
msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
|
||||
&msi_domain_ops, msi);
|
||||
if (!msi->inner_domain)
|
||||
return -ENOMEM;
|
||||
|
||||
msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
|
||||
&iproc_msi_domain_info,
|
||||
msi->inner_domain);
|
||||
if (!msi->msi_domain) {
|
||||
irq_domain_remove(msi->inner_domain);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iproc_msi_free_domains(struct iproc_msi *msi)
|
||||
{
|
||||
if (msi->msi_domain)
|
||||
irq_domain_remove(msi->msi_domain);
|
||||
|
||||
if (msi->inner_domain)
|
||||
irq_domain_remove(msi->inner_domain);
|
||||
}
|
||||
|
||||
static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
|
||||
irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
|
||||
NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
|
||||
{
|
||||
int i, ret;
|
||||
cpumask_var_t mask;
|
||||
struct iproc_pcie *pcie = msi->pcie;
|
||||
|
||||
for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
|
||||
irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
|
||||
iproc_msi_handler,
|
||||
&msi->grps[i]);
|
||||
/* Dedicate GIC interrupt to each CPU core */
|
||||
if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
|
||||
cpumask_clear(mask);
|
||||
cpumask_set_cpu(cpu, mask);
|
||||
ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
|
||||
if (ret)
|
||||
dev_err(pcie->dev,
|
||||
"failed to set affinity for IRQ%d\n",
|
||||
msi->grps[i].gic_irq);
|
||||
free_cpumask_var(mask);
|
||||
} else {
|
||||
dev_err(pcie->dev, "failed to alloc CPU mask\n");
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
/* Free all configured/unconfigured IRQs */
|
||||
iproc_msi_irq_free(msi, cpu);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
|
||||
{
|
||||
struct iproc_msi *msi;
|
||||
int i, ret;
|
||||
unsigned int cpu;
|
||||
|
||||
if (!of_device_is_compatible(node, "brcm,iproc-msi"))
|
||||
return -ENODEV;
|
||||
|
||||
if (!of_find_property(node, "msi-controller", NULL))
|
||||
return -ENODEV;
|
||||
|
||||
if (pcie->msi)
|
||||
return -EBUSY;
|
||||
|
||||
msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
|
||||
if (!msi)
|
||||
return -ENOMEM;
|
||||
|
||||
msi->pcie = pcie;
|
||||
pcie->msi = msi;
|
||||
msi->msi_addr = pcie->base_addr;
|
||||
mutex_init(&msi->bitmap_lock);
|
||||
msi->nr_cpus = num_possible_cpus();
|
||||
|
||||
msi->nr_irqs = of_irq_count(node);
|
||||
if (!msi->nr_irqs) {
|
||||
dev_err(pcie->dev, "found no MSI GIC interrupt\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (msi->nr_irqs > NR_HW_IRQS) {
|
||||
dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
|
||||
msi->nr_irqs);
|
||||
msi->nr_irqs = NR_HW_IRQS;
|
||||
}
|
||||
|
||||
if (msi->nr_irqs < msi->nr_cpus) {
|
||||
dev_err(pcie->dev,
|
||||
"not enough GIC interrupts for MSI affinity\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (msi->nr_irqs % msi->nr_cpus != 0) {
|
||||
msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
|
||||
dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
|
||||
msi->nr_irqs);
|
||||
}
|
||||
|
||||
switch (pcie->type) {
|
||||
case IPROC_PCIE_PAXB:
|
||||
msi->reg_offsets = iproc_msi_reg_paxb;
|
||||
msi->nr_eq_region = 1;
|
||||
msi->nr_msi_region = 1;
|
||||
break;
|
||||
case IPROC_PCIE_PAXC:
|
||||
msi->reg_offsets = iproc_msi_reg_paxc;
|
||||
msi->nr_eq_region = msi->nr_irqs;
|
||||
msi->nr_msi_region = msi->nr_irqs;
|
||||
break;
|
||||
default:
|
||||
dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
|
||||
msi->has_inten_reg = true;
|
||||
|
||||
msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
|
||||
msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
|
||||
sizeof(*msi->bitmap), GFP_KERNEL);
|
||||
if (!msi->bitmap)
|
||||
return -ENOMEM;
|
||||
|
||||
msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
|
||||
GFP_KERNEL);
|
||||
if (!msi->grps)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < msi->nr_irqs; i++) {
|
||||
unsigned int irq = irq_of_parse_and_map(node, i);
|
||||
|
||||
if (!irq) {
|
||||
dev_err(pcie->dev, "unable to parse/map interrupt\n");
|
||||
ret = -ENODEV;
|
||||
goto free_irqs;
|
||||
}
|
||||
msi->grps[i].gic_irq = irq;
|
||||
msi->grps[i].msi = msi;
|
||||
msi->grps[i].eq = i;
|
||||
}
|
||||
|
||||
/* Reserve memory for event queue and make sure memories are zeroed */
|
||||
msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
|
||||
msi->nr_eq_region * EQ_MEM_REGION_SIZE,
|
||||
&msi->eq_dma, GFP_KERNEL);
|
||||
if (!msi->eq_cpu) {
|
||||
ret = -ENOMEM;
|
||||
goto free_irqs;
|
||||
}
|
||||
|
||||
ret = iproc_msi_alloc_domains(node, msi);
|
||||
if (ret) {
|
||||
dev_err(pcie->dev, "failed to create MSI domains\n");
|
||||
goto free_eq_dma;
|
||||
}
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
ret = iproc_msi_irq_setup(msi, cpu);
|
||||
if (ret)
|
||||
goto free_msi_irq;
|
||||
}
|
||||
|
||||
iproc_msi_enable(msi);
|
||||
|
||||
return 0;
|
||||
|
||||
free_msi_irq:
|
||||
for_each_online_cpu(cpu)
|
||||
iproc_msi_irq_free(msi, cpu);
|
||||
iproc_msi_free_domains(msi);
|
||||
|
||||
free_eq_dma:
|
||||
dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
|
||||
msi->eq_cpu, msi->eq_dma);
|
||||
|
||||
free_irqs:
|
||||
for (i = 0; i < msi->nr_irqs; i++) {
|
||||
if (msi->grps[i].gic_irq)
|
||||
irq_dispose_mapping(msi->grps[i].gic_irq);
|
||||
}
|
||||
pcie->msi = NULL;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(iproc_msi_init);
|
||||
|
||||
void iproc_msi_exit(struct iproc_pcie *pcie)
|
||||
{
|
||||
struct iproc_msi *msi = pcie->msi;
|
||||
unsigned int i, cpu;
|
||||
|
||||
if (!msi)
|
||||
return;
|
||||
|
||||
iproc_msi_disable(msi);
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
iproc_msi_irq_free(msi, cpu);
|
||||
|
||||
iproc_msi_free_domains(msi);
|
||||
|
||||
dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
|
||||
msi->eq_cpu, msi->eq_dma);
|
||||
|
||||
for (i = 0; i < msi->nr_irqs; i++) {
|
||||
if (msi->grps[i].gic_irq)
|
||||
irq_dispose_mapping(msi->grps[i].gic_irq);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(iproc_msi_exit);
|
|
@ -26,8 +26,21 @@
|
|||
|
||||
#include "pcie-iproc.h"
|
||||
|
||||
static const struct of_device_id iproc_pcie_of_match_table[] = {
|
||||
{
|
||||
.compatible = "brcm,iproc-pcie",
|
||||
.data = (int *)IPROC_PCIE_PAXB,
|
||||
}, {
|
||||
.compatible = "brcm,iproc-pcie-paxc",
|
||||
.data = (int *)IPROC_PCIE_PAXC,
|
||||
},
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
|
||||
|
||||
static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct of_device_id *of_id;
|
||||
struct iproc_pcie *pcie;
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
struct resource reg;
|
||||
|
@ -35,11 +48,16 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
|
|||
LIST_HEAD(res);
|
||||
int ret;
|
||||
|
||||
of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev);
|
||||
if (!of_id)
|
||||
return -EINVAL;
|
||||
|
||||
pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
|
||||
if (!pcie)
|
||||
return -ENOMEM;
|
||||
|
||||
pcie->dev = &pdev->dev;
|
||||
pcie->type = (enum iproc_pcie_type)of_id->data;
|
||||
platform_set_drvdata(pdev, pcie);
|
||||
|
||||
ret = of_address_to_resource(np, 0, ®);
|
||||
|
@ -53,6 +71,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
|
|||
dev_err(pcie->dev, "unable to map controller registers\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
pcie->base_addr = reg.start;
|
||||
|
||||
if (of_property_read_bool(np, "brcm,pcie-ob")) {
|
||||
u32 val;
|
||||
|
@ -114,12 +133,6 @@ static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
|
|||
return iproc_pcie_remove(pcie);
|
||||
}
|
||||
|
||||
static const struct of_device_id iproc_pcie_of_match_table[] = {
|
||||
{ .compatible = "brcm,iproc-pcie", },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
|
||||
|
||||
static struct platform_driver iproc_pcie_pltfm_driver = {
|
||||
.driver = {
|
||||
.name = "iproc-pcie",
|
||||
|
|
|
@ -30,20 +30,16 @@
|
|||
|
||||
#include "pcie-iproc.h"
|
||||
|
||||
#define CLK_CONTROL_OFFSET 0x000
|
||||
#define EP_PERST_SOURCE_SELECT_SHIFT 2
|
||||
#define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT)
|
||||
#define EP_MODE_SURVIVE_PERST_SHIFT 1
|
||||
#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT)
|
||||
#define RC_PCIE_RST_OUTPUT_SHIFT 0
|
||||
#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
|
||||
#define PAXC_RESET_MASK 0x7f
|
||||
|
||||
#define CFG_IND_ADDR_OFFSET 0x120
|
||||
#define CFG_IND_ADDR_MASK 0x00001ffc
|
||||
|
||||
#define CFG_IND_DATA_OFFSET 0x124
|
||||
|
||||
#define CFG_ADDR_OFFSET 0x1f8
|
||||
#define CFG_ADDR_BUS_NUM_SHIFT 20
|
||||
#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000
|
||||
#define CFG_ADDR_DEV_NUM_SHIFT 15
|
||||
|
@ -55,12 +51,8 @@
|
|||
#define CFG_ADDR_CFG_TYPE_SHIFT 0
|
||||
#define CFG_ADDR_CFG_TYPE_MASK 0x00000003
|
||||
|
||||
#define CFG_DATA_OFFSET 0x1fc
|
||||
|
||||
#define SYS_RC_INTX_EN 0x330
|
||||
#define SYS_RC_INTX_MASK 0xf
|
||||
|
||||
#define PCIE_LINK_STATUS_OFFSET 0xf0c
|
||||
#define PCIE_PHYLINKUP_SHIFT 3
|
||||
#define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT)
|
||||
#define PCIE_DL_ACTIVE_SHIFT 2
|
||||
|
@ -71,12 +63,54 @@
|
|||
#define OARR_SIZE_CFG_SHIFT 1
|
||||
#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
|
||||
|
||||
#define OARR_LO(window) (0xd20 + (window) * 8)
|
||||
#define OARR_HI(window) (0xd24 + (window) * 8)
|
||||
#define OMAP_LO(window) (0xd40 + (window) * 8)
|
||||
#define OMAP_HI(window) (0xd44 + (window) * 8)
|
||||
|
||||
#define MAX_NUM_OB_WINDOWS 2
|
||||
#define MAX_NUM_PAXC_PF 4
|
||||
|
||||
#define IPROC_PCIE_REG_INVALID 0xffff
|
||||
|
||||
enum iproc_pcie_reg {
|
||||
IPROC_PCIE_CLK_CTRL = 0,
|
||||
IPROC_PCIE_CFG_IND_ADDR,
|
||||
IPROC_PCIE_CFG_IND_DATA,
|
||||
IPROC_PCIE_CFG_ADDR,
|
||||
IPROC_PCIE_CFG_DATA,
|
||||
IPROC_PCIE_INTX_EN,
|
||||
IPROC_PCIE_OARR_LO,
|
||||
IPROC_PCIE_OARR_HI,
|
||||
IPROC_PCIE_OMAP_LO,
|
||||
IPROC_PCIE_OMAP_HI,
|
||||
IPROC_PCIE_LINK_STATUS,
|
||||
};
|
||||
|
||||
/* iProc PCIe PAXB registers */
|
||||
static const u16 iproc_pcie_reg_paxb[] = {
|
||||
[IPROC_PCIE_CLK_CTRL] = 0x000,
|
||||
[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
|
||||
[IPROC_PCIE_CFG_IND_DATA] = 0x124,
|
||||
[IPROC_PCIE_CFG_ADDR] = 0x1f8,
|
||||
[IPROC_PCIE_CFG_DATA] = 0x1fc,
|
||||
[IPROC_PCIE_INTX_EN] = 0x330,
|
||||
[IPROC_PCIE_OARR_LO] = 0xd20,
|
||||
[IPROC_PCIE_OARR_HI] = 0xd24,
|
||||
[IPROC_PCIE_OMAP_LO] = 0xd40,
|
||||
[IPROC_PCIE_OMAP_HI] = 0xd44,
|
||||
[IPROC_PCIE_LINK_STATUS] = 0xf0c,
|
||||
};
|
||||
|
||||
/* iProc PCIe PAXC v1 registers */
|
||||
static const u16 iproc_pcie_reg_paxc[] = {
|
||||
[IPROC_PCIE_CLK_CTRL] = 0x000,
|
||||
[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
|
||||
[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
|
||||
[IPROC_PCIE_CFG_ADDR] = 0x1f8,
|
||||
[IPROC_PCIE_CFG_DATA] = 0x1fc,
|
||||
[IPROC_PCIE_INTX_EN] = IPROC_PCIE_REG_INVALID,
|
||||
[IPROC_PCIE_OARR_LO] = IPROC_PCIE_REG_INVALID,
|
||||
[IPROC_PCIE_OARR_HI] = IPROC_PCIE_REG_INVALID,
|
||||
[IPROC_PCIE_OMAP_LO] = IPROC_PCIE_REG_INVALID,
|
||||
[IPROC_PCIE_OMAP_HI] = IPROC_PCIE_REG_INVALID,
|
||||
[IPROC_PCIE_LINK_STATUS] = IPROC_PCIE_REG_INVALID,
|
||||
};
|
||||
|
||||
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
|
||||
{
|
||||
|
@ -91,6 +125,65 @@ static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
|
|||
return pcie;
|
||||
}
|
||||
|
||||
static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
|
||||
{
|
||||
return !!(reg_offset == IPROC_PCIE_REG_INVALID);
|
||||
}
|
||||
|
||||
static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
|
||||
enum iproc_pcie_reg reg)
|
||||
{
|
||||
return pcie->reg_offsets[reg];
|
||||
}
|
||||
|
||||
static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
|
||||
enum iproc_pcie_reg reg)
|
||||
{
|
||||
u16 offset = iproc_pcie_reg_offset(pcie, reg);
|
||||
|
||||
if (iproc_pcie_reg_is_invalid(offset))
|
||||
return 0;
|
||||
|
||||
return readl(pcie->base + offset);
|
||||
}
|
||||
|
||||
static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
|
||||
enum iproc_pcie_reg reg, u32 val)
|
||||
{
|
||||
u16 offset = iproc_pcie_reg_offset(pcie, reg);
|
||||
|
||||
if (iproc_pcie_reg_is_invalid(offset))
|
||||
return;
|
||||
|
||||
writel(val, pcie->base + offset);
|
||||
}
|
||||
|
||||
static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
|
||||
enum iproc_pcie_reg reg,
|
||||
unsigned window, u32 val)
|
||||
{
|
||||
u16 offset = iproc_pcie_reg_offset(pcie, reg);
|
||||
|
||||
if (iproc_pcie_reg_is_invalid(offset))
|
||||
return;
|
||||
|
||||
writel(val, pcie->base + offset + (window * 8));
|
||||
}
|
||||
|
||||
static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
|
||||
unsigned int slot,
|
||||
unsigned int fn)
|
||||
{
|
||||
if (slot > 0)
|
||||
return false;
|
||||
|
||||
/* PAXC can only support limited number of functions */
|
||||
if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Note that access to the configuration registers is protected at the higher layer
|
||||
* by 'pci_lock' in drivers/pci/access.c
|
||||
|
@ -104,28 +197,34 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
|
|||
unsigned fn = PCI_FUNC(devfn);
|
||||
unsigned busno = bus->number;
|
||||
u32 val;
|
||||
u16 offset;
|
||||
|
||||
if (!iproc_pcie_device_is_valid(pcie, slot, fn))
|
||||
return NULL;
|
||||
|
||||
/* root complex access */
|
||||
if (busno == 0) {
|
||||
if (slot >= 1)
|
||||
iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
|
||||
where & CFG_IND_ADDR_MASK);
|
||||
offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
|
||||
if (iproc_pcie_reg_is_invalid(offset))
|
||||
return NULL;
|
||||
writel(where & CFG_IND_ADDR_MASK,
|
||||
pcie->base + CFG_IND_ADDR_OFFSET);
|
||||
return (pcie->base + CFG_IND_DATA_OFFSET);
|
||||
else
|
||||
return (pcie->base + offset);
|
||||
}
|
||||
|
||||
if (fn > 1)
|
||||
return NULL;
|
||||
|
||||
/* EP device access */
|
||||
val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
|
||||
(slot << CFG_ADDR_DEV_NUM_SHIFT) |
|
||||
(fn << CFG_ADDR_FUNC_NUM_SHIFT) |
|
||||
(where & CFG_ADDR_REG_NUM_MASK) |
|
||||
(1 & CFG_ADDR_CFG_TYPE_MASK);
|
||||
writel(val, pcie->base + CFG_ADDR_OFFSET);
|
||||
|
||||
return (pcie->base + CFG_DATA_OFFSET);
|
||||
iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
|
||||
offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
|
||||
if (iproc_pcie_reg_is_invalid(offset))
|
||||
return NULL;
|
||||
else
|
||||
return (pcie->base + offset);
|
||||
}
|
||||
|
||||
static struct pci_ops iproc_pcie_ops = {
|
||||
|
@ -138,18 +237,29 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
|
|||
{
|
||||
u32 val;
|
||||
|
||||
if (pcie->type == IPROC_PCIE_PAXC) {
|
||||
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
|
||||
val &= ~PAXC_RESET_MASK;
|
||||
iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
|
||||
udelay(100);
|
||||
val |= PAXC_RESET_MASK;
|
||||
iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
|
||||
udelay(100);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Select perst_b signal as reset source. Put the device into reset,
|
||||
* and then bring it out of reset
|
||||
*/
|
||||
val = readl(pcie->base + CLK_CONTROL_OFFSET);
|
||||
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
|
||||
val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
|
||||
~RC_PCIE_RST_OUTPUT;
|
||||
writel(val, pcie->base + CLK_CONTROL_OFFSET);
|
||||
iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
|
||||
udelay(250);
|
||||
|
||||
val |= RC_PCIE_RST_OUTPUT;
|
||||
writel(val, pcie->base + CLK_CONTROL_OFFSET);
|
||||
iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
|
||||
msleep(100);
|
||||
}
|
||||
|
||||
|
@ -160,7 +270,14 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
|
|||
u16 pos, link_status;
|
||||
bool link_is_active = false;
|
||||
|
||||
val = readl(pcie->base + PCIE_LINK_STATUS_OFFSET);
|
||||
/*
|
||||
* PAXC connects to emulated endpoint devices directly and does not
|
||||
* have a Serdes. Therefore skip the link detection logic here.
|
||||
*/
|
||||
if (pcie->type == IPROC_PCIE_PAXC)
|
||||
return 0;
|
||||
|
||||
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
|
||||
if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
|
||||
dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
|
||||
return -ENODEV;
|
||||
|
@ -221,7 +338,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
|
|||
|
||||
static void iproc_pcie_enable(struct iproc_pcie *pcie)
|
||||
{
|
||||
writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN);
|
||||
iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -245,7 +362,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
|
|||
|
||||
if (size > max_size) {
|
||||
dev_err(pcie->dev,
|
||||
"res size 0x%pap exceeds max supported size 0x%llx\n",
|
||||
"res size %pap exceeds max supported size 0x%llx\n",
|
||||
&size, max_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -272,11 +389,15 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
|
|||
axi_addr -= ob->axi_offset;
|
||||
|
||||
for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
|
||||
writel(lower_32_bits(axi_addr) | OARR_VALID |
|
||||
(ob->set_oarr_size ? 1 : 0), pcie->base + OARR_LO(i));
|
||||
writel(upper_32_bits(axi_addr), pcie->base + OARR_HI(i));
|
||||
writel(lower_32_bits(pci_addr), pcie->base + OMAP_LO(i));
|
||||
writel(upper_32_bits(pci_addr), pcie->base + OMAP_HI(i));
|
||||
iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_LO, i,
|
||||
lower_32_bits(axi_addr) | OARR_VALID |
|
||||
(ob->set_oarr_size ? 1 : 0));
|
||||
iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_HI, i,
|
||||
upper_32_bits(axi_addr));
|
||||
iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_LO, i,
|
||||
lower_32_bits(pci_addr));
|
||||
iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_HI, i,
|
||||
upper_32_bits(pci_addr));
|
||||
|
||||
size -= ob->window_size;
|
||||
if (size == 0)
|
||||
|
@ -319,6 +440,26 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
|
||||
{
|
||||
struct device_node *msi_node;
|
||||
|
||||
msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
|
||||
if (!msi_node)
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* If another MSI controller is being used, the call below should fail
|
||||
* but that is okay
|
||||
*/
|
||||
return iproc_msi_init(pcie, msi_node);
|
||||
}
|
||||
|
||||
static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
|
||||
{
|
||||
iproc_msi_exit(pcie);
|
||||
}
|
||||
|
||||
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
|
||||
{
|
||||
int ret;
|
||||
|
@ -340,6 +481,19 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
|
|||
goto err_exit_phy;
|
||||
}
|
||||
|
||||
switch (pcie->type) {
|
||||
case IPROC_PCIE_PAXB:
|
||||
pcie->reg_offsets = iproc_pcie_reg_paxb;
|
||||
break;
|
||||
case IPROC_PCIE_PAXC:
|
||||
pcie->reg_offsets = iproc_pcie_reg_paxc;
|
||||
break;
|
||||
default:
|
||||
dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
|
||||
ret = -EINVAL;
|
||||
goto err_power_off_phy;
|
||||
}
|
||||
|
||||
iproc_pcie_reset(pcie);
|
||||
|
||||
if (pcie->need_ob_cfg) {
|
||||
|
@ -373,6 +527,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
|
|||
|
||||
iproc_pcie_enable(pcie);
|
||||
|
||||
if (IS_ENABLED(CONFIG_PCI_MSI))
|
||||
if (iproc_pcie_msi_enable(pcie))
|
||||
dev_info(pcie->dev, "not using iProc MSI\n");
|
||||
|
||||
pci_scan_child_bus(bus);
|
||||
pci_assign_unassigned_bus_resources(bus);
|
||||
pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
|
||||
|
@ -397,6 +555,8 @@ int iproc_pcie_remove(struct iproc_pcie *pcie)
|
|||
pci_stop_root_bus(pcie->root_bus);
|
||||
pci_remove_root_bus(pcie->root_bus);
|
||||
|
||||
iproc_pcie_msi_disable(pcie);
|
||||
|
||||
phy_power_off(pcie->phy);
|
||||
phy_exit(pcie->phy);
|
||||
|
||||
|
|
|
@ -14,6 +14,20 @@
|
|||
#ifndef _PCIE_IPROC_H
|
||||
#define _PCIE_IPROC_H
|
||||
|
||||
/**
|
||||
* iProc PCIe interface type
|
||||
*
|
||||
* PAXB is the wrapper used in root complex that can be connected to an
|
||||
* external endpoint device.
|
||||
*
|
||||
* PAXC is the wrapper used in root complex dedicated for internal emulated
|
||||
* endpoint devices.
|
||||
*/
|
||||
enum iproc_pcie_type {
|
||||
IPROC_PCIE_PAXB = 0,
|
||||
IPROC_PCIE_PAXC,
|
||||
};
|
||||
|
||||
/**
|
||||
* iProc PCIe outbound mapping
|
||||
* @set_oarr_size: indicates the OARR size bit needs to be set
|
||||
|
@ -27,21 +41,30 @@ struct iproc_pcie_ob {
|
|||
resource_size_t window_size;
|
||||
};
|
||||
|
||||
struct iproc_msi;
|
||||
|
||||
/**
|
||||
* iProc PCIe device
|
||||
*
|
||||
* @dev: pointer to device data structure
|
||||
* @type: iProc PCIe interface type
|
||||
* @reg_offsets: register offsets
|
||||
* @base: PCIe host controller I/O register base
|
||||
* @base_addr: PCIe host controller register base physical address
|
||||
* @sysdata: Per PCI controller data (ARM-specific)
|
||||
* @root_bus: pointer to root bus
|
||||
* @phy: optional PHY device that controls the Serdes
|
||||
* @irqs: interrupt IDs
|
||||
* @map_irq: function callback to map interrupts
|
||||
* @need_ob_cfg: indidates SW needs to configure the outbound mapping window
|
||||
* @need_ob_cfg: indicates SW needs to configure the outbound mapping window
|
||||
* @ob: outbound mapping parameters
|
||||
* @msi: MSI data
|
||||
*/
|
||||
struct iproc_pcie {
|
||||
struct device *dev;
|
||||
enum iproc_pcie_type type;
|
||||
const u16 *reg_offsets;
|
||||
void __iomem *base;
|
||||
phys_addr_t base_addr;
|
||||
#ifdef CONFIG_ARM
|
||||
struct pci_sys_data sysdata;
|
||||
#endif
|
||||
|
@ -50,9 +73,24 @@ struct iproc_pcie {
|
|||
int (*map_irq)(const struct pci_dev *, u8, u8);
|
||||
bool need_ob_cfg;
|
||||
struct iproc_pcie_ob ob;
|
||||
struct iproc_msi *msi;
|
||||
};
|
||||
|
||||
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
|
||||
int iproc_pcie_remove(struct iproc_pcie *pcie);
|
||||
|
||||
#ifdef CONFIG_PCIE_IPROC_MSI
|
||||
int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node);
|
||||
void iproc_msi_exit(struct iproc_pcie *pcie);
|
||||
#else
|
||||
static inline int iproc_msi_init(struct iproc_pcie *pcie,
|
||||
struct device_node *node)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static inline void iproc_msi_exit(struct iproc_pcie *pcie)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _PCIE_IPROC_H */
|
||||
|
|
|
@ -108,8 +108,6 @@
|
|||
#define RCAR_PCI_MAX_RESOURCES 4
|
||||
#define MAX_NR_INBOUND_MAPS 6
|
||||
|
||||
static unsigned long global_io_offset;
|
||||
|
||||
struct rcar_msi {
|
||||
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
|
||||
struct irq_domain *domain;
|
||||
|
@ -138,8 +136,7 @@ struct rcar_pcie {
|
|||
#endif
|
||||
struct device *dev;
|
||||
void __iomem *base;
|
||||
struct resource res[RCAR_PCI_MAX_RESOURCES];
|
||||
struct resource busn;
|
||||
struct list_head resources;
|
||||
int root_bus_nr;
|
||||
struct clk *clk;
|
||||
struct clk *bus_clk;
|
||||
|
@ -323,10 +320,9 @@ static struct pci_ops rcar_pcie_ops = {
|
|||
.write = rcar_pcie_write_conf,
|
||||
};
|
||||
|
||||
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
|
||||
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
|
||||
struct resource *res)
|
||||
{
|
||||
struct resource *res = &pcie->res[win];
|
||||
|
||||
/* Setup PCIe address space mappings for each resource */
|
||||
resource_size_t size;
|
||||
resource_size_t res_start;
|
||||
|
@ -359,31 +355,33 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
|
|||
rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
|
||||
}
|
||||
|
||||
static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pcie)
|
||||
static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
|
||||
{
|
||||
struct resource *res;
|
||||
int i;
|
||||
|
||||
pcie->root_bus_nr = pcie->busn.start;
|
||||
struct resource_entry *win;
|
||||
int i = 0;
|
||||
|
||||
/* Setup PCI resources */
|
||||
for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
|
||||
resource_list_for_each_entry(win, &pci->resources) {
|
||||
struct resource *res = win->res;
|
||||
|
||||
res = &pcie->res[i];
|
||||
if (!res->flags)
|
||||
continue;
|
||||
|
||||
rcar_pcie_setup_window(i, pcie);
|
||||
|
||||
if (res->flags & IORESOURCE_IO) {
|
||||
phys_addr_t io_start = pci_pio_to_address(res->start);
|
||||
pci_ioremap_io(global_io_offset, io_start);
|
||||
global_io_offset += SZ_64K;
|
||||
switch (resource_type(res)) {
|
||||
case IORESOURCE_IO:
|
||||
case IORESOURCE_MEM:
|
||||
rcar_pcie_setup_window(i, pci, res);
|
||||
i++;
|
||||
break;
|
||||
case IORESOURCE_BUS:
|
||||
pci->root_bus_nr = res->start;
|
||||
break;
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
|
||||
pci_add_resource(resource, res);
|
||||
}
|
||||
pci_add_resource(resource, &pcie->busn);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -917,20 +915,71 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
|
|||
|
||||
static const struct of_device_id rcar_pcie_of_match[] = {
|
||||
{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
|
||||
{ .compatible = "renesas,pcie-rcar-gen2", .data = rcar_pcie_hw_init },
|
||||
{ .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init },
|
||||
{ .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init },
|
||||
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);
|
||||
|
||||
static void rcar_pcie_release_of_pci_ranges(struct rcar_pcie *pci)
|
||||
{
|
||||
pci_free_resource_list(&pci->resources);
|
||||
}
|
||||
|
||||
static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
|
||||
{
|
||||
int err;
|
||||
struct device *dev = pci->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
resource_size_t iobase;
|
||||
struct resource_entry *win;
|
||||
|
||||
err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
resource_list_for_each_entry(win, &pci->resources) {
|
||||
struct resource *parent, *res = win->res;
|
||||
|
||||
switch (resource_type(res)) {
|
||||
case IORESOURCE_IO:
|
||||
parent = &ioport_resource;
|
||||
err = pci_remap_iospace(res, iobase);
|
||||
if (err) {
|
||||
dev_warn(dev, "error %d: failed to map resource %pR\n",
|
||||
err, res);
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
case IORESOURCE_MEM:
|
||||
parent = &iomem_resource;
|
||||
break;
|
||||
|
||||
case IORESOURCE_BUS:
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
|
||||
err = devm_request_resource(dev, parent, res);
|
||||
if (err)
|
||||
goto out_release_res;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_release_res:
|
||||
rcar_pcie_release_of_pci_ranges(pci);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int rcar_pcie_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct rcar_pcie *pcie;
|
||||
unsigned int data;
|
||||
struct of_pci_range range;
|
||||
struct of_pci_range_parser parser;
|
||||
const struct of_device_id *of_id;
|
||||
int err, win = 0;
|
||||
int err;
|
||||
int (*hw_init_fn)(struct rcar_pcie *);
|
||||
|
||||
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
|
||||
|
@ -940,16 +989,9 @@ static int rcar_pcie_probe(struct platform_device *pdev)
|
|||
pcie->dev = &pdev->dev;
|
||||
platform_set_drvdata(pdev, pcie);
|
||||
|
||||
/* Get the bus range */
|
||||
if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) {
|
||||
dev_err(&pdev->dev, "failed to parse bus-range property\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
INIT_LIST_HEAD(&pcie->resources);
|
||||
|
||||
if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) {
|
||||
dev_err(&pdev->dev, "missing ranges property\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
rcar_pcie_parse_request_of_pci_ranges(pcie);
|
||||
|
||||
err = rcar_pcie_get_resources(pdev, pcie);
|
||||
if (err < 0) {
|
||||
|
@ -957,16 +999,6 @@ static int rcar_pcie_probe(struct platform_device *pdev)
|
|||
return err;
|
||||
}
|
||||
|
||||
for_each_of_pci_range(&parser, &range) {
|
||||
err = of_pci_range_to_resource(&range, pdev->dev.of_node,
|
||||
&pcie->res[win++]);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (win > RCAR_PCI_MAX_RESOURCES)
|
||||
break;
|
||||
}
|
||||
|
||||
err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
|
||||
if (err)
|
||||
return err;
|
||||
|