Merge remote-tracking branch 'lorenzo/pci/cadence' into next

* lorenzo/pci/cadence:
  PCI: cadence: Add EndPoint Controller driver for Cadence PCIe controller
  dt-bindings: PCI: cadence: Add DT bindings for Cadence PCIe endpoint controller
  PCI: endpoint: Fix EPF device name to support multi-function devices
  PCI: endpoint: Add the function number as argument to EPC ops
  PCI: cadence: Add host driver for Cadence PCIe controller
  dt-bindings: PCI: cadence: Add DT bindings for Cadence PCIe host controller
  PCI: Add vendor ID for Cadence
  PCI: Add generic function to probe PCI host controllers
  PCI: generic: fix missing call of pci_free_resource_list()
  PCI: OF: Add generic function to parse and allocate PCI resources
  PCI: Regroup all PCI related entries into drivers/pci/Makefile

Conflicts:
	drivers/pci/of.c
	include/linux/pci.h
Bjorn Helgaas, 2018-01-31 10:21:33 -06:00 (committed by Bjorn Helgaas)
commit c7f75aecb2
24 changed files with 1695 additions and 149 deletions

Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.txt

@ -0,0 +1,22 @@
* Cadence PCIe endpoint controller
Required properties:
- compatible: Should contain "cdns,cdns-pcie-ep" to identify the IP used.
- reg: Should contain the controller register base address and AXI interface
region base address respectively.
- reg-names: Must be "reg" and "mem" respectively.
- cdns,max-outbound-regions: Set to the maximum number of outbound regions.
Optional properties:
- max-functions: Maximum number of functions that can be configured (default 1).
Example:
pcie@fc000000 {
compatible = "cdns,cdns-pcie-ep";
reg = <0x0 0xfc000000 0x0 0x01000000>,
<0x0 0x80000000 0x0 0x40000000>;
reg-names = "reg", "mem";
cdns,max-outbound-regions = <16>;
max-functions = /bits/ 8 <8>;
};

Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.txt

@ -0,0 +1,60 @@
* Cadence PCIe host controller
This PCIe controller inherits the base properties defined in
host-generic-pci.txt.
Required properties:
- compatible: Should contain "cdns,cdns-pcie-host" to identify the IP used.
- reg: Should contain the controller register base address, PCIe configuration
window base address, and AXI interface region base address respectively.
- reg-names: Must be "reg", "cfg" and "mem" respectively.
- #address-cells: Set to <3>
- #size-cells: Set to <2>
- device_type: Set to "pci"
- ranges: Ranges for the PCI memory and I/O regions
- #interrupt-cells: Set to <1>
- interrupt-map-mask and interrupt-map: Standard PCI properties to define the
mapping of the PCIe interface to interrupt numbers.
Optional properties:
- cdns,max-outbound-regions: Set to maximum number of outbound regions
(default 32)
- cdns,no-bar-match-nbits: Written to the "no BAR match" register to configure
the number of least significant bits kept during inbound (PCIe -> AXI)
address translations (default 32)
- vendor-id: The PCI vendor ID (16 bits, default is design dependent)
- device-id: The PCI device ID (16 bits, default is design dependent)
Example:
pcie@fb000000 {
compatible = "cdns,cdns-pcie-host";
device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
bus-range = <0x0 0xff>;
linux,pci-domain = <0>;
cdns,max-outbound-regions = <16>;
cdns,no-bar-match-nbits = <32>;
vendor-id = /bits/ 16 <0x17cd>;
device-id = /bits/ 16 <0x0200>;
reg = <0x0 0xfb000000 0x0 0x01000000>,
<0x0 0x41000000 0x0 0x00001000>,
<0x0 0x40000000 0x0 0x04000000>;
reg-names = "reg", "cfg", "mem";
ranges = <0x02000000 0x0 0x42000000 0x0 0x42000000 0x0 0x1000000>,
<0x01000000 0x0 0x43000000 0x0 0x43000000 0x0 0x0010000>;
#interrupt-cells = <0x1>;
interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 14 0x1
0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 15 0x1
0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 16 0x1
0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 17 0x1>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
msi-parent = <&its_pci>;
};

MAINTAINERS

@ -10402,6 +10402,13 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
F: drivers/pci/dwc/pcie-armada8k.c
+PCI DRIVER FOR CADENCE PCIE IP
+M: Alan Douglas <adouglas@cadence.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/cdns,*.txt
+F: drivers/pci/cadence/pcie-cadence*
PCI DRIVER FOR FREESCALE LAYERSCAPE
M: Minghuan Lian <minghuan.Lian@freescale.com>
M: Mingkai Hu <mingkai.hu@freescale.com>

drivers/Makefile

@ -16,10 +16,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
obj-y += pwm/
-obj-$(CONFIG_PCI) += pci/
-obj-$(CONFIG_PCI_ENDPOINT) += pci/endpoint/
-# PCI dwc controller drivers
-obj-y += pci/dwc/
+obj-y += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/

drivers/pci/Kconfig

@ -125,6 +125,7 @@ config PCI_PASID
config PCI_LABEL
def_bool y if (DMI || ACPI)
+depends on PCI
select NLS
config PCI_HYPERV
@ -135,6 +136,7 @@ config PCI_HYPERV
PCI devices from a PCI backend to support PCI driver domains.
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/cadence/Kconfig"
source "drivers/pci/dwc/Kconfig"
source "drivers/pci/host/Kconfig"
source "drivers/pci/endpoint/Kconfig"

drivers/pci/Makefile

@ -3,12 +3,15 @@
# Makefile for the PCI bus specific drivers.
#
-obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
+obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o
+ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_OF) += of.o
+endif
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
@ -44,10 +47,15 @@ obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
-obj-$(CONFIG_OF) += of.o
ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
+# PCI host controller drivers
obj-y += host/
obj-y += switch/
+obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
+# Endpoint library must be initialized before its users
+obj-$(CONFIG_PCIE_CADENCE) += cadence/
+# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
+obj-y += dwc/

drivers/pci/cadence/Kconfig

@ -0,0 +1,27 @@
menu "Cadence PCIe controllers support"
config PCIE_CADENCE
bool
config PCIE_CADENCE_HOST
bool "Cadence PCIe host controller"
depends on OF
depends on PCI
select IRQ_DOMAIN
select PCIE_CADENCE
help
Say Y here if you want to support the Cadence PCIe controller in host
mode. This PCIe controller may be embedded into many different vendors'
SoCs.
config PCIE_CADENCE_EP
bool "Cadence PCIe endpoint controller"
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE
help
Say Y here if you want to support the Cadence PCIe controller in
endpoint mode. This PCIe controller may be embedded into many
different vendors' SoCs.
endmenu

drivers/pci/cadence/Makefile

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o

drivers/pci/cadence/pcie-cadence-ep.c

@ -0,0 +1,542 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include "pcie-cadence.h"
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
/**
* struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
* @pcie: Cadence PCIe controller
* @max_regions: maximum number of regions supported by hardware
* @ob_region_map: bitmask of mapped outbound regions
* @ob_addr: base addresses in the AXI bus where the outbound regions start
* @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
* dedicated outbound region is mapped.
* @irq_cpu_addr: base address in the CPU space where a write access triggers
* the sending of a memory write (MSI) / normal message (legacy
* IRQ) TLP through the PCIe bus.
* @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
* dedicated outbound region.
* @irq_pci_fn: the latest PCI function that has updated the mapping of
* the MSI/legacy IRQ dedicated outbound region.
* @irq_pending: bitmask of asserted legacy IRQs.
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
u32 max_regions;
unsigned long ob_region_map;
phys_addr_t *ob_addr;
phys_addr_t irq_phys_addr;
void __iomem *irq_cpu_addr;
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
};
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
hdr->subclass_code | hdr->baseclass_code << 8);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
hdr->cache_line_size);
cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);
/*
* Vendor ID can only be modified from function 0, all other functions
* use the same vendor ID as function 0.
*/
if (fn == 0) {
/* Update the vendor IDs. */
u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
}
return 0;
}
static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, enum pci_barno bar,
dma_addr_t bar_phys, size_t size, int flags)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
u64 sz;
/* BAR size is 2^(aperture + 7) */
sz = max_t(size_t, size, CDNS_PCIE_EP_MIN_APERTURE);
/*
* roundup_pow_of_two() returns an unsigned long, which is not suited
* for 64bit values.
*/
sz = 1ULL << fls64(sz - 1);
aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
bool is_64bits = sz > SZ_2G;
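/*
* Apertures above 2 GiB cannot be encoded in a 32-bit BAR; a 64-bit
* BAR spans two consecutive BAR registers, hence the even BAR number
* check below.
*/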
if (is_64bits && (bar & 1))
return -EINVAL;
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
else if (is_64bits)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
else
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
}
addr0 = lower_32_bits(bar_phys);
addr1 = upper_32_bits(bar_phys);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
addr1);
if (bar < BAR_4) {
reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
b = bar;
} else {
reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
b = bar - BAR_4;
}
cfg = cdns_pcie_readl(pcie, reg);
cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
cdns_pcie_writel(pcie, reg, cfg);
return 0;
}
static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
enum pci_barno bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 reg, cfg, b, ctrl;
if (bar < BAR_4) {
reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
b = bar;
} else {
reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
b = bar - BAR_4;
}
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
cfg = cdns_pcie_readl(pcie, reg);
cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
cdns_pcie_writel(pcie, reg, cfg);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
}
static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
u64 pci_addr, size_t size)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 r;
r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
dev_err(&epc->dev, "no free outbound region\n");
return -EINVAL;
}
cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
ep->ob_addr[r] = addr;
return 0;
}
static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
phys_addr_t addr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 r;
for (r = 0; r < ep->max_regions - 1; r++)
if (ep->ob_addr[r] == addr)
break;
if (r == ep->max_regions - 1)
return;
cdns_pcie_reset_outbound_region(pcie, r);
ep->ob_addr[r] = 0;
clear_bit(r, &ep->ob_region_map);
}
static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
/*
* Set the Multiple Message Capable bitfield into the Message Control
* register.
*/
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
flags |= PCI_MSI_FLAGS_64BIT;
flags &= ~PCI_MSI_FLAGS_MASKBIT;
cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);
return 0;
}
static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mmc, mme;
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
/*
* Get the Multiple Message Enable bitfield from the Message Control
* register.
*/
mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
return mme;
}
static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
u8 intx, bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
u32 r = ep->max_regions - 1;
u32 offset;
u16 status;
u8 msg_code;
intx &= 3;
/* Set the outbound region if needed. */
if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
ep->irq_pci_fn != fn)) {
/* Last region was reserved for IRQ writes. */
cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
ep->irq_phys_addr);
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
ep->irq_pci_fn = fn;
}
if (is_asserted) {
ep->irq_pending |= BIT(intx);
msg_code = MSG_CODE_ASSERT_INTA + intx;
} else {
ep->irq_pending &= ~BIT(intx);
msg_code = MSG_CODE_DEASSERT_INTA + intx;
}
status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
status ^= PCI_STATUS_INTERRUPT;
cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
}
offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
CDNS_PCIE_MSG_NO_DATA;
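/*
* The message routing and code are carried by the AXI address offset
* inside the dedicated outbound region, and CDNS_PCIE_MSG_NO_DATA marks
* the message TLP as carrying no payload, so the value written below is
* irrelevant.
*/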
writel(0, ep->irq_cpu_addr + offset);
}
static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
{
u16 cmd;
cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
if (cmd & PCI_COMMAND_INTX_DISABLE)
return -EINVAL;
cdns_pcie_ep_assert_intx(ep, fn, intx, true);
/*
* The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
* from drivers/pci/dwc/pci-dra7xx.c
*/
mdelay(1);
cdns_pcie_ep_assert_intx(ep, fn, intx, false);
return 0;
}
static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme, data, data_mask;
u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
if (!(flags & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
/* Get the number of enabled MSIs */
mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
msi_count = 1 << mme;
if (!interrupt_num || interrupt_num > msi_count)
return -EINVAL;
/* Compute the data value to be written. */
data_mask = msi_count - 1;
data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
/* Get the PCI address to which the data should be written. */
pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
pci_addr <<= 32;
pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
pci_addr &= GENMASK_ULL(63, 2);
/* Set the outbound region if needed. */
if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
ep->irq_pci_fn != fn)) {
/* Last region was reserved for IRQ writes. */
cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
false,
ep->irq_phys_addr,
pci_addr & ~pci_addr_mask,
pci_addr_mask + 1);
ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
ep->irq_pci_fn = fn;
}
writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
return 0;
}
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
enum pci_epc_irq_type type, u8 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
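/* This driver always signals legacy interrupts as INTA (intx = 0). */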
return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
case PCI_EPC_IRQ_MSI:
return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
default:
break;
}
return -EINVAL;
}
static int cdns_pcie_ep_start(struct pci_epc *epc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct pci_epf *epf;
u32 cfg;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
*/
cfg = BIT(0);
list_for_each_entry(epf, &epc->pci_epf, list)
cfg |= BIT(epf->func_no);
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
/*
* The PCIe links are automatically established by the controller once
* and for all at power-up: the software can neither start nor stop
* those links later at runtime.
*
* So we only have to notify the EP core that our links are already
* established. However we don't call pci_epc_linkup() directly, because
* we've already locked the epc->lock.
*/
list_for_each_entry(epf, &epc->pci_epf, list)
pci_epf_linkup(epf);
return 0;
}
static const struct pci_epc_ops cdns_pcie_epc_ops = {
.write_header = cdns_pcie_ep_write_header,
.set_bar = cdns_pcie_ep_set_bar,
.clear_bar = cdns_pcie_ep_clear_bar,
.map_addr = cdns_pcie_ep_map_addr,
.unmap_addr = cdns_pcie_ep_unmap_addr,
.set_msi = cdns_pcie_ep_set_msi,
.get_msi = cdns_pcie_ep_get_msi,
.raise_irq = cdns_pcie_ep_raise_irq,
.start = cdns_pcie_ep_start,
};
static const struct of_device_id cdns_pcie_ep_of_match[] = {
{ .compatible = "cdns,cdns-pcie-ep" },
{ },
};
static int cdns_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct cdns_pcie_ep *ep;
struct cdns_pcie *pcie;
struct pci_epc *epc;
struct resource *res;
int ret;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
if (!ep)
return -ENOMEM;
pcie = &ep->pcie;
pcie->is_rc = false;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
pcie->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->reg_base)) {
dev_err(dev, "missing \"reg\"\n");
return PTR_ERR(pcie->reg_base);
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
if (!res) {
dev_err(dev, "missing \"mem\"\n");
return -EINVAL;
}
pcie->mem_res = res;
ret = of_property_read_u32(np, "cdns,max-outbound-regions",
&ep->max_regions);
if (ret < 0) {
dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
return ret;
}
ep->ob_addr = devm_kzalloc(dev, ep->max_regions * sizeof(*ep->ob_addr),
GFP_KERNEL);
if (!ep->ob_addr)
return -ENOMEM;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "pm_runtime_get_sync() failed\n");
goto err_get_sync;
}
/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "failed to create epc device\n");
ret = PTR_ERR(epc);
goto err_init;
}
epc_set_drvdata(epc, ep);
if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
epc->max_functions = 1;
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
resource_size(pcie->mem_res));
if (ret < 0) {
dev_err(dev, "failed to initialize the memory space\n");
goto err_init;
}
ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
SZ_128K);
if (!ep->irq_cpu_addr) {
dev_err(dev, "failed to reserve memory space for MSI\n");
ret = -ENOMEM;
goto free_epc_mem;
}
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
return 0;
free_epc_mem:
pci_epc_mem_exit(epc);
err_init:
pm_runtime_put_sync(dev);
err_get_sync:
pm_runtime_disable(dev);
return ret;
}
static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
ret = pm_runtime_put_sync(dev);
if (ret < 0)
dev_dbg(dev, "pm_runtime_put_sync failed\n");
pm_runtime_disable(dev);
/* The PCIe controller can't be disabled. */
}
static struct platform_driver cdns_pcie_ep_driver = {
.driver = {
.name = "cdns-pcie-ep",
.of_match_table = cdns_pcie_ep_of_match,
},
.probe = cdns_pcie_ep_probe,
.shutdown = cdns_pcie_ep_shutdown,
};
builtin_platform_driver(cdns_pcie_ep_driver);
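For orientation, here is a minimal, hypothetical sketch (not part of this
commit) of how an endpoint function driver sits on top of the function-aware
EPC API that this controller implements; pci-epf-test further below is the
real in-tree user. The function name my_epf_bind() is invented, and
<linux/pci-epf.h> and <linux/pci-epc.h> are assumed to be included:

static int my_epf_bind(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;
	int ret;

	/* Every EPC operation now takes the PCI function number. */
	ret = pci_epc_write_header(epc, epf->func_no, epf->header);
	if (ret)
		return ret;

	return pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
}

/*
 * Later, once the host has enabled MSI, an interrupt can be raised with:
 * pci_epc_raise_irq(epf->epc, epf->func_no, PCI_EPC_IRQ_MSI, 1);
 */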

drivers/pci/cadence/pcie-cadence-host.c

@ -0,0 +1,336 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe host controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "pcie-cadence.h"
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
* @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
* @bus_range: first/last buses behind the PCIe host controller
* @cfg_base: IO mapped window to access the PCI configuration space of a
* single function at a time
* @max_regions: maximum number of regions supported by the hardware
* @no_bar_nbits: number of bits to keep for inbound (PCIe -> CPU) address
* translation (the nbits value written into the "no BAR match" register)
* @vendor_id: PCI vendor ID
* @device_id: PCI device ID
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
struct device *dev;
struct resource *cfg_res;
struct resource *bus_range;
void __iomem *cfg_base;
u32 max_regions;
u32 no_bar_nbits;
u16 vendor_id;
u16 device_id;
};
static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
struct cdns_pcie *pcie = &rc->pcie;
unsigned int busn = bus->number;
u32 addr0, desc0;
if (busn == rc->bus_range->start) {
/*
* Only the root port (devfn == 0) is connected to this bus.
* All other PCI devices are behind some bridge hence on another
* bus.
*/
if (devfn)
return NULL;
return pcie->reg_base + (where & 0xfff);
}
/* Update Output registers for AXI region 0. */
addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);
/* Configuration Type 0 or Type 1 access. */
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
/*
* The bus number was already set once and for all in desc1 by
* cdns_pcie_host_init_address_translation().
*/
if (busn == rc->bus_range->start + 1)
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
else
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);
return rc->cfg_base + (where & 0xfff);
}
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
};
static const struct of_device_id cdns_pcie_host_of_match[] = {
{ .compatible = "cdns,cdns-pcie-host" },
{ },
};
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
u32 value, ctrl;
/*
* Set the root complex BAR configuration register:
* - disable both BAR0 and BAR1.
* - enable Prefetchable Memory Base and Limit registers in type 1
* config space (64 bits).
* - enable IO Base and Limit registers in type 1 config
* space (32 bits).
*/
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
/* Set root port configuration space */
if (rc->vendor_id != 0xffff)
cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
if (rc->device_id != 0xffff)
cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
return 0;
}
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
struct resource *cfg_res = rc->cfg_res;
struct resource *mem_res = pcie->mem_res;
struct resource *bus_range = rc->bus_range;
struct device *dev = rc->dev;
struct device_node *np = dev->of_node;
struct of_pci_range_parser parser;
struct of_pci_range range;
u32 addr0, addr1, desc1;
u64 cpu_addr;
int r, err;
/*
* Reserve region 0 for PCI configuration space accesses:
* OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
* cdns_pci_map_bus(); the other region registers are set here once and for all.
*/
addr1 = 0; /* Should be programmed to zero. */
desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
cpu_addr = cfg_res->start - mem_res->start;
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
(lower_32_bits(cpu_addr) & GENMASK(31, 8));
addr1 = upper_32_bits(cpu_addr);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);
err = of_pci_range_parser_init(&parser, np);
if (err)
return err;
r = 1;
for_each_of_pci_range(&parser, &range) {
bool is_io;
if (r >= rc->max_regions)
break;
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
is_io = false;
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
is_io = true;
else
continue;
cdns_pcie_set_outbound_region(pcie, 0, r, is_io,
range.cpu_addr,
range.pci_addr,
range.size);
r++;
}
/*
* Set Root Port no BAR match Inbound Translation registers:
* needed for MSI and DMA.
* Root Port BAR0 and BAR1 are disabled, hence no need to set their
* inbound translation registers.
*/
addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
addr1 = 0;
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);
return 0;
}
static int cdns_pcie_host_init(struct device *dev,
struct list_head *resources,
struct cdns_pcie_rc *rc)
{
struct resource *bus_range = NULL;
int err;
/* Parse our PCI ranges and request their resources */
err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
if (err)
return err;
rc->bus_range = bus_range;
rc->pcie.bus = bus_range->start;
err = cdns_pcie_host_init_root_port(rc);
if (err)
goto err_out;
err = cdns_pcie_host_init_address_translation(rc);
if (err)
goto err_out;
return 0;
err_out:
pci_free_resource_list(resources);
return err;
}
static int cdns_pcie_host_probe(struct platform_device *pdev)
{
const char *type;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
struct list_head resources;
struct cdns_pcie_rc *rc;
struct cdns_pcie *pcie;
struct resource *res;
int ret;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
if (!bridge)
return -ENOMEM;
rc = pci_host_bridge_priv(bridge);
rc->dev = dev;
pcie = &rc->pcie;
pcie->is_rc = true;
rc->max_regions = 32;
of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);
rc->no_bar_nbits = 32;
of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);
rc->vendor_id = 0xffff;
of_property_read_u16(np, "vendor-id", &rc->vendor_id);
rc->device_id = 0xffff;
of_property_read_u16(np, "device-id", &rc->device_id);
type = of_get_property(np, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
dev_err(dev, "invalid \"device_type\" %s\n", type);
return -EINVAL;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
pcie->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->reg_base)) {
dev_err(dev, "missing \"reg\"\n");
return PTR_ERR(pcie->reg_base);
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(rc->cfg_base)) {
dev_err(dev, "missing \"cfg\"\n");
return PTR_ERR(rc->cfg_base);
}
rc->cfg_res = res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
if (!res) {
dev_err(dev, "missing \"mem\"\n");
return -EINVAL;
}
pcie->mem_res = res;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "pm_runtime_get_sync() failed\n");
goto err_get_sync;
}
ret = cdns_pcie_host_init(dev, &resources, rc);
if (ret)
goto err_init;
list_splice_init(&resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->busnr = pcie->bus;
bridge->ops = &cdns_pcie_host_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
ret = pci_host_probe(bridge);
if (ret < 0)
goto err_host_probe;
return 0;
err_host_probe:
pci_free_resource_list(&resources);
err_init:
pm_runtime_put_sync(dev);
err_get_sync:
pm_runtime_disable(dev);
return ret;
}
static struct platform_driver cdns_pcie_host_driver = {
.driver = {
.name = "cdns-pcie-host",
.of_match_table = cdns_pcie_host_of_match,
},
.probe = cdns_pcie_host_probe,
};
builtin_platform_driver(cdns_pcie_host_driver);

drivers/pci/cadence/pcie-cadence.c

@ -0,0 +1,126 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
#include "pcie-cadence.h"
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size)
{
/*
* roundup_pow_of_two() returns an unsigned long, which is not suited
* for 64bit values.
*/
u64 sz = 1ULL << fls64(size - 1);
int nbits = ilog2(sz);
u32 addr0, addr1, desc0, desc1;
if (nbits < 8)
nbits = 8;
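/* e.g. size = 0x1200 -> sz = 0x2000 -> nbits = 13 (8 is the minimum). */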
/* Set the PCI address */
addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
(lower_32_bits(pci_addr) & GENMASK(31, 8));
addr1 = upper_32_bits(pci_addr);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);
/* Set the PCIe header descriptor */
if (is_io)
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
else
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
desc1 = 0;
/*
* Whether or not Bit [23] is set in the DESC0 register of the outbound
* PCIe descriptor, the PCI function number must always be set into
* Bits [26:24] of DESC0.
*
* In Root Complex mode, the function number is always 0, but in Endpoint
* mode the PCIe controller may support more than one function. This
* function number needs to be set properly into the outbound PCIe
* descriptor.
*
* Besides, setting Bit [23] is mandatory when in Root Complex mode:
* the driver must then provide the bus number in Bits [7:0] of DESC1
* and the device number in Bits [31:27] of DESC0. Like the function
* number, the device number is always 0 in Root Complex mode.
*
* However, when in Endpoint mode, we can clear Bit [23] of DESC0, so
* the PCIe controller will use the captured values for the bus and
* device numbers.
*/
if (pcie->is_rc) {
/* The device and function numbers are always 0. */
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
} else {
/*
* Use captured values for bus and device numbers but still
* need to set the function number.
*/
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
}
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
/* Set the CPU address */
cpu_addr -= pcie->mem_res->start;
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
(lower_32_bits(cpu_addr) & GENMASK(31, 8));
addr1 = upper_32_bits(cpu_addr);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
u32 r, u64 cpu_addr)
{
u32 addr0, addr1, desc0, desc1;
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
desc1 = 0;
/* See cdns_pcie_set_outbound_region() comments above. */
if (pcie->is_rc) {
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
} else {
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
}
/* Set the CPU address */
cpu_addr -= pcie->mem_res->start;
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
(lower_32_bits(cpu_addr) & GENMASK(31, 8));
addr1 = upper_32_bits(cpu_addr);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}

drivers/pci/cadence/pcie-cadence.h

@ -0,0 +1,311 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#ifndef _PCIE_CADENCE_H
#define _PCIE_CADENCE_H
#include <linux/kernel.h>
#include <linux/pci.h>
/*
* Local Management Registers
*/
#define CDNS_PCIE_LM_BASE 0x00100000
/* Vendor ID Register */
#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
/* Root Port Requestor ID Register */
#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
#define CDNS_PCIE_LM_RP_RID_SHIFT 0
#define CDNS_PCIE_LM_RP_RID_(rid) \
(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
/* Endpoint Bus and Device Number Register */
#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
/* Endpoint Function f BAR b Configuration Registers */
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
(GENMASK(7, 5) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
/* Endpoint Function Configuration Register */
#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)
/* Root Complex BAR Configuration Register */
#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
/* BAR control values applicable to both Endpoint Function and Root Complex */
#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
/*
* Endpoint Function Registers (PCI configuration space for endpoint functions)
*/
#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
/*
* Root Port Registers (PCI configuration space for the root port function)
*/
#define CDNS_PCIE_RP_BASE 0x00200000
/*
* Address Translation Registers
*/
#define CDNS_PCIE_AT_BASE 0x00400000
/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
/* Bit 23 MUST be set in RC mode. */
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
enum cdns_pcie_rp_bar {
RP_BAR0,
RP_BAR1,
RP_NO_BAR
};
/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
/* Normal/Vendor specific message access: offset inside some outbound region */
#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_NO_DATA BIT(16)
enum cdns_pcie_msg_code {
MSG_CODE_ASSERT_INTA = 0x20,
MSG_CODE_ASSERT_INTB = 0x21,
MSG_CODE_ASSERT_INTC = 0x22,
MSG_CODE_ASSERT_INTD = 0x23,
MSG_CODE_DEASSERT_INTA = 0x24,
MSG_CODE_DEASSERT_INTB = 0x25,
MSG_CODE_DEASSERT_INTC = 0x26,
MSG_CODE_DEASSERT_INTD = 0x27,
};
enum cdns_pcie_msg_routing {
/* Route to Root Complex */
MSG_ROUTING_TO_RC,
/* Use Address Routing */
MSG_ROUTING_BY_ADDR,
/* Use ID Routing */
MSG_ROUTING_BY_ID,
/* Route as Broadcast Message from Root Complex */
MSG_ROUTING_BCAST,
/* Local message; terminate at receiver (INTx messages) */
MSG_ROUTING_LOCAL,
/* Gather & route to Root Complex (PME_TO_Ack message) */
MSG_ROUTING_GATHER,
};
/**
* struct cdns_pcie - private data for Cadence PCIe controller drivers
* @reg_base: IO mapped register base
* @mem_res: start/end offsets in the physical system memory to map PCI accesses
* @is_rc: tells whether the PCIe controller mode is Root Complex or Endpoint.
* @bus: In Root Complex mode, the bus number
*/
struct cdns_pcie {
void __iomem *reg_base;
struct resource *mem_res;
bool is_rc;
u8 bus;
};
/* Register access */
static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
{
writeb(value, pcie->reg_base + reg);
}
static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value)
{
writew(value, pcie->reg_base + reg);
}
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
writel(value, pcie->reg_base + reg);
}
static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
return readl(pcie->reg_base + reg);
}
/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
u32 reg, u8 value)
{
writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
}
static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
u32 reg, u16 value)
{
writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
}
/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
u32 reg, u8 value)
{
writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
u32 reg, u16 value)
{
writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
u32 reg, u32 value)
{
writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
u32 r, u64 cpu_addr);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
#endif /* _PCIE_CADENCE_H */
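For illustration only (not from this commit), a caller could map a 1 MiB
outbound memory window with the helpers declared above; the region index and
addresses below are made-up values, cpu_addr is assumed to lie within
pcie->mem_res, and SZ_1M comes from <linux/sizes.h>:

	/* Map AXI address 0x80100000 to PCI address 0x00100000 via region 2. */
	cdns_pcie_set_outbound_region(pcie, 0 /* fn */, 2 /* region */,
				      false /* memory, not I/O */,
				      0x80100000 /* cpu_addr */,
				      0x00100000 /* pci_addr */,
				      SZ_1M /* size: nbits = 20 */);

	/* Tear the mapping down again when it is no longer needed. */
	cdns_pcie_reset_outbound_region(pcie, 2);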

drivers/pci/dwc/Makefile

@ -27,4 +27,6 @@ obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
# ARM64 and use internal ifdefs to only build the pieces we need
# depending on whether ACPI, the DT driver, or both are enabled.
+ifdef CONFIG_PCI
obj-$(CONFIG_ARM64) += pcie-hisi.o
+endif

drivers/pci/dwc/pcie-designware-ep.c

@ -39,7 +39,7 @@ static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
dw_pcie_writel_dbi(pci, reg, 0x0);
}
-static int dw_pcie_ep_write_header(struct pci_epc *epc,
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@ -112,7 +112,8 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
return 0;
}
-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+enum pci_barno bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@ -124,7 +125,8 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
clear_bit(atu_index, &ep->ib_window_map);
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+enum pci_barno bar,
dma_addr_t bar_phys, size_t size, int flags)
{
int ret;
@ -163,7 +165,8 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+phys_addr_t addr)
{
int ret;
u32 atu_index;
@ -178,7 +181,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
clear_bit(atu_index, &ep->ob_window_map);
}
-static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
+phys_addr_t addr,
u64 pci_addr, size_t size)
{
int ret;
@ -194,7 +198,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
return 0;
}
-static int dw_pcie_ep_get_msi(struct pci_epc *epc)
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
{
int val;
u32 lower_addr;
@ -214,7 +218,7 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc)
return val;
}
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
{
int val;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@ -226,7 +230,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
return 0;
}
-static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);

drivers/pci/endpoint/functions/pci-epf-test.c

@ -104,7 +104,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err;
}
-ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
+ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
+reg->size);
if (ret) {
dev_err(dev, "failed to map source address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@ -119,7 +120,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
goto err_src_map_addr;
}
-ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
+ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
+reg->size);
if (ret) {
dev_err(dev, "failed to map destination address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@ -128,13 +130,13 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
memcpy(dst_addr, src_addr, reg->size);
-pci_epc_unmap_addr(epc, dst_phys_addr);
+pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
err_dst_addr:
pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
err_src_map_addr:
-pci_epc_unmap_addr(epc, src_phys_addr);
+pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
err_src_addr:
pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
@ -164,7 +166,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
goto err;
}
-ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
+ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
+reg->size);
if (ret) {
dev_err(dev, "failed to map address\n");
reg->status = STATUS_SRC_ADDR_INVALID;
@ -186,7 +189,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
kfree(buf);
err_map_addr:
-pci_epc_unmap_addr(epc, phys_addr);
+pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
@ -215,7 +218,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
goto err;
}
-ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
+ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
+reg->size);
if (ret) {
dev_err(dev, "failed to map address\n");
reg->status = STATUS_DST_ADDR_INVALID;
@ -242,7 +246,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
kfree(buf);
err_map_addr:
-pci_epc_unmap_addr(epc, phys_addr);
+pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
err_addr:
pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
@ -260,11 +264,11 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
reg->status |= STATUS_IRQ_RAISED;
-msi_count = pci_epc_get_msi(epc);
+msi_count = pci_epc_get_msi(epc, epf->func_no);
if (irq > msi_count || msi_count <= 0)
-pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
else
-pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
+pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
}
static void pci_epf_test_cmd_handler(struct work_struct *work)
@ -291,7 +295,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
-pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
goto reset_handler;
}
@ -326,11 +330,11 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
}
if (command & COMMAND_RAISE_MSI_IRQ) {
-msi_count = pci_epc_get_msi(epc);
+msi_count = pci_epc_get_msi(epc, epf->func_no);
if (irq > msi_count || msi_count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
-pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
+pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
goto reset_handler;
}
@ -358,7 +362,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
for (bar = BAR_0; bar <= BAR_5; bar++) {
if (epf_test->reg[bar]) {
pci_epf_free_space(epf, epf_test->reg[bar], bar);
-pci_epc_clear_bar(epc, bar);
+pci_epc_clear_bar(epc, epf->func_no, bar);
}
}
}
@ -380,7 +384,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
for (bar = BAR_0; bar <= BAR_5; bar++) {
epf_bar = &epf->bar[bar];
-ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
+ret = pci_epc_set_bar(epc, epf->func_no, bar,
+epf_bar->phys_addr,
epf_bar->size, flags);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar);
@ -433,7 +438,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (WARN_ON_ONCE(!epc))
return -EINVAL;
-ret = pci_epc_write_header(epc, header);
+ret = pci_epc_write_header(epc, epf->func_no, header);
if (ret) {
dev_err(dev, "configuration header write failed\n");
return ret;
@ -447,7 +452,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
return ret;
-ret = pci_epc_set_msi(epc, epf->msi_interrupts);
+ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
if (ret)
return ret;

drivers/pci/endpoint/pci-ep-cfs.c

@ -18,18 +18,22 @@
*/
#include <linux/module.h>
+#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>
+static DEFINE_IDR(functions_idr);
+static DEFINE_MUTEX(functions_mutex);
static struct config_group *functions_group;
static struct config_group *controllers_group;
struct pci_epf_group {
struct config_group group;
struct pci_epf *epf;
+int index;
};
struct pci_epc_group {
@ -353,6 +357,9 @@ static void pci_epf_release(struct config_item *item)
{
struct pci_epf_group *epf_group = to_pci_epf_group(item);
+mutex_lock(&functions_mutex);
+idr_remove(&functions_idr, epf_group->index);
+mutex_unlock(&functions_mutex);
pci_epf_destroy(epf_group->epf);
kfree(epf_group);
}
@ -372,22 +379,57 @@ static struct config_group *pci_epf_make(struct config_group *group,
{
struct pci_epf_group *epf_group;
struct pci_epf *epf;
+char *epf_name;
+int index, err;
epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
if (!epf_group)
return ERR_PTR(-ENOMEM);
+mutex_lock(&functions_mutex);
+index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
+mutex_unlock(&functions_mutex);
+if (index < 0) {
+err = index;
+goto free_group;
+}
+epf_group->index = index;
config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
-epf = pci_epf_create(group->cg_item.ci_name);
+epf_name = kasprintf(GFP_KERNEL, "%s.%d",
+group->cg_item.ci_name, epf_group->index);
+if (!epf_name) {
+err = -ENOMEM;
+goto remove_idr;
+}
+epf = pci_epf_create(epf_name);
if (IS_ERR(epf)) {
pr_err("failed to create endpoint function device\n");
-return ERR_PTR(-EINVAL);
+err = -EINVAL;
+goto free_name;
}
epf_group->epf = epf;
+kfree(epf_name);
return &epf_group->group;
+free_name:
+kfree(epf_name);
+remove_idr:
+mutex_lock(&functions_mutex);
+idr_remove(&functions_idr, epf_group->index);
+mutex_unlock(&functions_mutex);
+free_group:
+kfree(epf_group);
+return ERR_PTR(err);
}
static void pci_epf_drop(struct config_group *group, struct config_item *item)

drivers/pci/endpoint/pci-epc-core.c

@ -142,25 +142,26 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
/**
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
+* @func_no: the endpoint function number in the EPC device
* @type: specify the type of interrupt; legacy or MSI
* @interrupt_num: the MSI interrupt number
*
* Invoke to raise an MSI or legacy interrupt
*/
-int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
-u8 interrupt_num)
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
+enum pci_epc_irq_type type, u8 interrupt_num)
{
int ret;
unsigned long flags;
-if (IS_ERR(epc))
+if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
if (!epc->ops->raise_irq)
return 0;
spin_lock_irqsave(&epc->lock, flags);
-ret = epc->ops->raise_irq(epc, type, interrupt_num);
+ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
spin_unlock_irqrestore(&epc->lock, flags);
return ret;
@ -170,22 +171,23 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
/**
* pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
* @epc: the EPC device to which MSI interrupts were requested
+* @func_no: the endpoint function number in the EPC device
*
* Invoke to get the number of MSI interrupts allocated by the RC
*/
-int pci_epc_get_msi(struct pci_epc *epc)
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
{
int interrupt;
unsigned long flags;
-if (IS_ERR(epc))
+if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
if (!epc->ops->get_msi)
return 0;
spin_lock_irqsave(&epc->lock, flags);
-interrupt = epc->ops->get_msi(epc);
+interrupt = epc->ops->get_msi(epc, func_no);
spin_unlock_irqrestore(&epc->lock, flags);
if (interrupt < 0)
@ -200,17 +202,18 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
/**
* pci_epc_set_msi() - set the number of MSI interrupt numbers required
* @epc: the EPC device on which MSI has to be configured
+* @func_no: the endpoint function number in the EPC device
* @interrupts: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
{
int ret;
u8 encode_int;
unsigned long flags;
if (IS_ERR(epc))
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
if (!epc->ops->set_msi)
@@ -219,7 +222,7 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
encode_int = order_base_2(interrupts);
spin_lock_irqsave(&epc->lock, flags);
ret = epc->ops->set_msi(epc, encode_int);
ret = epc->ops->set_msi(epc, func_no, encode_int);
spin_unlock_irqrestore(&epc->lock, flags);
return ret;
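The set/get pair is typically used together; a hedged sketch under the new signatures (hypothetical driver context):

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_epf_setup_msi(struct pci_epf *epf)
{
        int ret, nr;

        /* advertise support for up to 8 MSI vectors on this function */
        ret = pci_epc_set_msi(epf->epc, epf->func_no, 8);
        if (ret)
                return ret;

        /* once the host has configured MSI, query the granted count */
        nr = pci_epc_get_msi(epf->epc, epf->func_no);
        return nr > 0 ? 0 : -EINVAL;
}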
@@ -229,22 +232,24 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated
* @func_no: the endpoint function number in the EPC device
* @phys_addr: physical address of the local system
*
* Invoke to unmap the CPU address from PCI address.
*/
void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr)
{
unsigned long flags;
if (IS_ERR(epc))
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
if (!epc->ops->unmap_addr)
return;
spin_lock_irqsave(&epc->lock, flags);
epc->ops->unmap_addr(epc, phys_addr);
epc->ops->unmap_addr(epc, func_no, phys_addr);
spin_unlock_irqrestore(&epc->lock, flags);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
@@ -252,26 +257,27 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
/**
* pci_epc_map_addr() - map CPU address to PCI address
* @epc: the EPC device on which address is allocated
* @func_no: the endpoint function number in the EPC device
* @phys_addr: physical address of the local system
* @pci_addr: PCI address to which the physical address should be mapped
* @size: the size of the allocation
*
* Invoke to map a CPU address to a PCI address.
*/
int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
u64 pci_addr, size_t size)
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
int ret;
unsigned long flags;
if (IS_ERR(epc))
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
if (!epc->ops->map_addr)
return 0;
spin_lock_irqsave(&epc->lock, flags);
ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
spin_unlock_irqrestore(&epc->lock, flags);
return ret;
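A matching map/unmap pair under the per-function signatures (a sketch; buffer handling elided, names illustrative):

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_epf_access_host(struct pci_epf *epf, phys_addr_t phys,
                                   u64 host_addr, size_t size)
{
        int ret;

        /* route an outbound window from local address phys to the
         * host-side PCI address host_addr for this function */
        ret = pci_epc_map_addr(epf->epc, epf->func_no, phys,
                               host_addr, size);
        if (ret)
                return ret;

        /* ... access the host buffer through the mapped window ... */

        pci_epc_unmap_addr(epf->epc, epf->func_no, phys);
        return 0;
}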
@@ -281,22 +287,23 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
* @func_no: the endpoint function number in the EPC device
* @bar: the BAR number that has to be reset
*
* Invoke to reset the BAR of the endpoint device.
*/
void pci_epc_clear_bar(struct pci_epc *epc, int bar)
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar)
{
unsigned long flags;
if (IS_ERR(epc))
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return;
if (!epc->ops->clear_bar)
return;
spin_lock_irqsave(&epc->lock, flags);
epc->ops->clear_bar(epc, bar);
epc->ops->clear_bar(epc, func_no, bar);
spin_unlock_irqrestore(&epc->lock, flags);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
@@ -304,26 +311,27 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
/**
* pci_epc_set_bar() - configure BAR so that the host can assign PCI address space
* @epc: the EPC device on which BAR has to be configured
* @func_no: the endpoint function number in the EPC device
* @bar: the BAR number that has to be configured
* @size: the size of the addr space
* @flags: specify memory allocation/IO allocation/32-bit address/64-bit address
*
* Invoke to configure the BAR of the endpoint device.
*/
int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, enum pci_barno bar,
dma_addr_t bar_phys, size_t size, int flags)
{
int ret;
unsigned long irq_flags;
if (IS_ERR(epc))
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
if (!epc->ops->set_bar)
return 0;
spin_lock_irqsave(&epc->lock, irq_flags);
ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
ret = epc->ops->set_bar(epc, func_no, bar, bar_phys, size, flags);
spin_unlock_irqrestore(&epc->lock, irq_flags);
return ret;
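And the corresponding set/clear BAR calls (hypothetical driver context; the flag values are the standard PCI base-address encodings):

#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_epf_expose_bar(struct pci_epf *epf, dma_addr_t phys,
                                  size_t size)
{
        /* a 64-bit, non-prefetchable memory BAR for this function */
        int flags = PCI_BASE_ADDRESS_SPACE_MEMORY |
                    PCI_BASE_ADDRESS_MEM_TYPE_64;

        return pci_epc_set_bar(epf->epc, epf->func_no, BAR_0,
                               phys, size, flags);
}

static void example_epf_hide_bar(struct pci_epf *epf)
{
        pci_epc_clear_bar(epf->epc, epf->func_no, BAR_0);
}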
@@ -333,6 +341,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
* @func_no: the endpoint function number in the EPC device
* @header: standard configuration header fields
*
* Invoke to write the configuration header to the endpoint controller. Every
@@ -340,19 +349,20 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
* endpoint controller will have a dedicated location to which the standard
* configuration header would be written. The callback function should write
* the header fields to this dedicated location.
*/
int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *header)
{
int ret;
unsigned long flags;
if (IS_ERR(epc))
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return -EINVAL;
if (!epc->ops->write_header)
return 0;
spin_lock_irqsave(&epc->lock, flags);
ret = epc->ops->write_header(epc, header);
ret = epc->ops->write_header(epc, func_no, header);
spin_unlock_irqrestore(&epc->lock, flags);
return ret;
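A sketch of writing one function's header with the new argument (the IDs below are placeholders):

#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static struct pci_epf_header example_header = {
        .vendorid       = PCI_ANY_ID,           /* placeholder */
        .deviceid       = PCI_ANY_ID,           /* placeholder */
        .baseclass_code = PCI_CLASS_OTHERS,
        .interrupt_pin  = PCI_INTERRUPT_INTA,
};

static int example_epf_bind(struct pci_epf *epf)
{
        /* written to the dedicated header location of epf->func_no */
        return pci_epc_write_header(epf->epc, epf->func_no,
                                    &example_header);
}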


@@ -34,6 +34,8 @@ obj-$(CONFIG_VMD) += vmd.o
# ARM64 and use internal ifdefs to only build the pieces we need
# depending on whether ACPI, the DT driver, or both are enabled.
ifdef CONFIG_PCI
obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
obj-$(CONFIG_ARM64) += pci-thunder-pem.o
obj-$(CONFIG_ARM64) += pci-xgene.o
endif


@@ -24,50 +24,6 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources, struct resource **bus_range)
{
int err, res_valid = 0;
struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
if (err)
return err;
err = devm_request_pci_bus_resources(dev, resources);
if (err)
return err;
resource_list_for_each_entry_safe(win, tmp, resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = pci_remap_iospace(res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
resource_list_destroy_entry(win);
}
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
*bus_range = res;
break;
}
}
if (res_valid)
return 0;
dev_err(dev, "non-prefetchable memory resource required\n");
return -EINVAL;
}
static void gen_pci_unmap_cfg(void *ptr)
{
pci_ecam_free((struct pci_config_window *)ptr);
@@ -82,9 +38,9 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
struct pci_config_window *cfg;
/* Parse our PCI ranges and request their resources */
err = gen_pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
if (err)
goto err_out;
return ERR_PTR(err);
err = of_address_to_resource(dev->of_node, 0, &cfgres);
if (err) {
@@ -116,7 +72,6 @@ int pci_host_common_probe(struct platform_device *pdev,
const char *type;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
struct list_head resources;
@@ -135,7 +90,6 @@ int pci_host_common_probe(struct platform_device *pdev,
of_pci_check_probe_only();
/* Parse and map our Configuration Space windows */
INIT_LIST_HEAD(&resources);
cfg = gen_pci_init(dev, &resources, ops);
if (IS_ERR(cfg))
return PTR_ERR(cfg);
@@ -152,29 +106,11 @@ int pci_host_common_probe(struct platform_device *pdev,
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
ret = pci_scan_root_bus_bridge(bridge);
ret = pci_host_probe(bridge);
if (ret < 0) {
dev_err(dev, "Scanning root bridge failed");
pci_free_resource_list(&resources);
return ret;
}
bus = bridge->bus;
/*
* We insert PCI resources into the iomem_resource and
* ioport_resource trees in either pci_bus_claim_resources()
* or pci_bus_assign_resources().
*/
if (pci_has_flag(PCI_PROBE_ONLY)) {
pci_bus_claim_resources(bus);
} else {
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
pci_bus_add_devices(bus);
return 0;
}
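With the parsing and scanning moved into shared helpers, an ECAM-based host driver reduces to little more than the call below; a hedged sketch (pci_generic_ecam_ops is the existing generic ECAM ops table; the platform_driver boilerplate is elided):

#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

static int example_host_probe(struct platform_device *pdev)
{
        /* DT range parsing, config-space mapping, bus scanning and
         * resource assignment all happen behind this one call */
        return pci_host_common_probe(pdev, &pci_generic_ecam_ops);
}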


@@ -92,7 +92,6 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
#endif
}
static inline int __of_pci_pci_compare(struct device_node *node,
unsigned int data)
{
@@ -598,3 +597,55 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
#endif /* CONFIG_OF_IRQ */
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
struct resource **bus_range)
{
int err, res_valid = 0;
struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(resources);
err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
if (err)
return err;
err = devm_request_pci_bus_resources(dev, resources);
if (err)
goto out_release_res;
resource_list_for_each_entry_safe(win, tmp, resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = pci_remap_iospace(res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
resource_list_destroy_entry(win);
}
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
if (bus_range)
*bus_range = res;
break;
}
}
if (res_valid)
return 0;
dev_err(dev, "non-prefetchable memory resource required\n");
err = -EINVAL;
out_release_res:
pci_free_resource_list(resources);
return err;
}
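Note the helper initialises the resource list itself and frees it on failure, so callers can drop their INIT_LIST_HEAD(). A hypothetical caller:

#include <linux/pci.h>

static int example_parse_ranges(struct device *dev)
{
        struct list_head resources;     /* initialised by the helper */
        struct resource *bus_range = NULL;
        int err;

        err = pci_parse_request_of_pci_ranges(dev, &resources,
                                              &bus_range);
        if (err)
                return err;

        /* hand &resources to the host bridge; on any later error the
         * caller must call pci_free_resource_list(&resources) itself */
        return 0;
}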


@@ -2684,6 +2684,39 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
int pci_host_probe(struct pci_host_bridge *bridge)
{
struct pci_bus *bus, *child;
int ret;
ret = pci_scan_root_bus_bridge(bridge);
if (ret < 0) {
dev_err(bridge->dev.parent, "Scanning root bridge failed");
return ret;
}
bus = bridge->bus;
/*
* We insert PCI resources into the iomem_resource and
* ioport_resource trees in either pci_bus_claim_resources()
* or pci_bus_assign_resources().
*/
if (pci_has_flag(PCI_PROBE_ONLY)) {
pci_bus_claim_resources(bus);
} else {
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
pci_bus_add_devices(bus);
return 0;
}
EXPORT_SYMBOL_GPL(pci_host_probe);
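A hedged sketch of the setup a DT host driver performs before delegating to pci_host_probe() (field values hypothetical; map_irq/swizzle_irq follow the common OF pattern):

#include <linux/of_pci.h>
#include <linux/pci.h>

static int example_register_host(struct device *dev,
                                 struct pci_host_bridge *bridge,
                                 struct list_head *resources,
                                 struct pci_ops *ops)
{
        list_splice_init(resources, &bridge->windows);
        bridge->dev.parent = dev;
        bridge->ops = ops;
        bridge->busnr = 0;
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;

        /* scans the root bus, then claims (PCI_PROBE_ONLY) or sizes
         * and assigns resources, and finally adds the devices */
        return pci_host_probe(bridge);
}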
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
struct resource *res = &b->busn_res;


@@ -39,17 +39,20 @@ enum pci_epc_irq_type {
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
int (*write_header)(struct pci_epc *pci_epc,
int (*write_header)(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr);
int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
int (*set_bar)(struct pci_epc *epc, u8 func_no,
enum pci_barno bar,
dma_addr_t bar_phys, size_t size, int flags);
void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
u64 pci_addr, size_t size);
void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
int (*set_msi)(struct pci_epc *epc, u8 interrupts);
int (*get_msi)(struct pci_epc *epc);
int (*raise_irq)(struct pci_epc *pci_epc,
void (*clear_bar)(struct pci_epc *epc, u8 func_no,
enum pci_barno bar);
int (*map_addr)(struct pci_epc *epc, u8 func_no,
phys_addr_t addr, u64 pci_addr, size_t size);
void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
phys_addr_t addr);
int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
int (*get_msi)(struct pci_epc *epc, u8 func_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
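On the controller side, every per-function callback now receives func_no; a skeletal ops table (callback bodies hypothetical) looks like:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_write_header(struct pci_epc *epc, u8 func_no,
                                struct pci_epf_header *hdr)
{
        /* program hdr into the config space of function func_no */
        return 0;
}

static int example_raise_irq(struct pci_epc *epc, u8 func_no,
                             enum pci_epc_irq_type type, u8 interrupt_num)
{
        /* trigger a legacy or MSI interrupt on behalf of func_no */
        return 0;
}

static const struct pci_epc_ops example_epc_ops = {
        .write_header   = example_write_header,
        .raise_irq      = example_raise_irq,
};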
@@ -124,17 +127,21 @@ void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
void pci_epc_linkup(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
enum pci_barno bar,
dma_addr_t bar_phys, size_t size, int flags);
void pci_epc_clear_bar(struct pci_epc *epc, int bar);
int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar);
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size);
void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
int pci_epc_get_msi(struct pci_epc *epc);
int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
u8 interrupt_num);
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr);
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
struct pci_epc *pci_epc_get(const char *epc_name);


@@ -879,6 +879,7 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
@@ -2171,6 +2172,9 @@ void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
struct resource **bus_range);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2195,6 +2199,12 @@ static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
struct resource **bus_range)
{
return -EINVAL;
}
#endif /* CONFIG_OF */
#ifdef CONFIG_ACPI


@@ -2381,6 +2381,8 @@
#define PCI_VENDOR_ID_LENOVO 0x17aa
#define PCI_VENDOR_ID_CDNS 0x17cd
#define PCI_VENDOR_ID_ARECA 0x17d3
#define PCI_DEVICE_ID_ARECA_1110 0x1110
#define PCI_DEVICE_ID_ARECA_1120 0x1120