pci-v4.16-changes
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJad5lgAAoJEFmIoMA60/r8s2kQAI3PztawDpaCP9Z12pkbBHSt
Ho0xTyk9rCZi9kQJbNjc+a+QrlA3QmTHXIXerB3LSWoh7M+XhsECjem92eHpgLNS
JvYPhTfOrCr0vdiAmOz6hD0AqN/psrbfzgiJhSwomsGEFS77k7kERSJckRv81sxb
Aj5F/WjucAgLorwm4auveAJEQ7atE7/6pkXzoqYm4G6NLOb46jUcRGndrnvXZBlz
fws8fBM4BHyi7i25CYQl24tFq1CGax1rIPgLg+4KnH76bQk/N6Ju0sGVSzfh+hG8
SIerK9bJbzGRAuNKoxB3aO1dyzsK3x9WztE2mG98w5trOISPIR1FqnvC/225FWAU
d6eIXiC7wKnEx+DElNTzCjzfHc7SAJoupO32H7CoiTe5zPUlWlxJ1zLYkK1gt50q
m8PRBiYTglxyznzrO0drtcdjEzvbdZNRrsYnul4wi1vSHzjk6F6XLtzT10XWM1M1
1pXLB8384FTj0Hu4bq6Y3Aivkmz0Sf+eQM2NaOwe+Zj7/1VV0d3lvi4LUXkqzLCA
FoXPJSMxG2Qu+iflCeYRQBJjExaZH3eNLZ3dT6QpcJrjaFVedd9u5DeeFqNL27zV
bhr8TdqrR4p4rc8EBAGoCapw96IxLZROKB3gxbrZVOpfIZpzthwHbElHX6aqUgF4
w/EV1JWs36WXWaxFk8wd
=ttq9
-----END PGP SIGNATURE-----

Merge tag 'pci-v4.16-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

Pull PCI updates from Bjorn Helgaas:

 - skip AER driver error recovery callbacks for correctable errors reported via ACPI APEI, as we already do for errors reported via the native path (Tyler Baicar)
 - fix DPC shared interrupt handling (Alex Williamson)
 - print full DPC interrupt number (Keith Busch)
 - enable DPC only if AER is available (Keith Busch)
 - simplify DPC code (Bjorn Helgaas)
 - calculate ASPM L1 substate parameter instead of hardcoding it (Bjorn Helgaas)
 - enable Latency Tolerance Reporting for ASPM L1 substates (Bjorn Helgaas)
 - move ASPM internal interfaces out of public header (Bjorn Helgaas)
 - allow hot-removal of VGA devices (Mika Westerberg)
 - speed up unplug and shutdown by assuming Thunderbolt controllers don't support Command Completed events (Lukas Wunner)
 - add AtomicOps support for GPU and Infiniband drivers (Felix Kuehling, Jay Cornwall)
 - expose "ari_enabled" in sysfs to help NIC naming (Stuart Hayes)
 - clean up PCI DMA interface usage (Christoph Hellwig)
 - remove PCI pool API (replaced with DMA pool) (Romain Perier)
 - deprecate pci_get_bus_and_slot(), which assumed PCI domain 0 (Sinan Kaya)
 - move DT PCI code from drivers/of/ to drivers/pci/ (Rob Herring)
 - add PCI-specific wrappers for dev_info(), etc (Frederick Lawler)
 - remove warnings on sysfs mmap failure (Bjorn Helgaas)
 - quiet ROM validation messages (Alex Deucher)
 - remove redundant memory alloc failure messages (Markus Elfring)
 - fill in types for compile-time VGA and other I/O port resources (Bjorn Helgaas)
 - make "pci=pcie_scan_all" work for Root Ports as well as Downstream Ports to help AmigaOne X1000 (Bjorn Helgaas)
 - add SPDX tags to all PCI files (Bjorn Helgaas)
 - quirk Marvell 9128 DMA aliases (Alex Williamson)
 - quirk broken INTx disable on Ceton InfiniTV4 (Bjorn Helgaas)
 - fix CONFIG_PCI=n build by adding dummy pci_irqd_intx_xlate() (Niklas Cassel)
 - use DMA API to get MSI address for DesignWare IP (Niklas Cassel)
 - fix endpoint-mode DMA mask configuration (Kishon Vijay Abraham I)
 - fix ARTPEC-6 incorrect IS_ERR() usage (Wei Yongjun)
 - add support for ARTPEC-7 SoC (Niklas Cassel)
 - add endpoint-mode support for ARTPEC (Niklas Cassel)
 - add Cadence PCIe host and endpoint controller driver (Cyrille Pitchen)
 - handle multiple INTx status bits being set in dra7xx (Vignesh R)
 - translate dra7xx hwirq range to fix INTD handling (Vignesh R)
 - remove deprecated Exynos PHY initialization code (Jaehoon Chung)
 - fix MSI erratum workaround for HiSilicon Hip06/Hip07 (Dongdong Liu)
 - fix NULL pointer dereference in iProc BCMA driver (Ray Jui)
 - fix Keystone interrupt-controller-node lookup (Johan Hovold)
 - constify qcom driver structures (Julia Lawall)
 - rework Tegra config space mapping to increase space available for endpoints (Vidya Sagar)
 - simplify Tegra driver by using bus->sysdata (Manikanta Maddireddy)
 - remove PCI_REASSIGN_ALL_BUS usage on Tegra (Manikanta Maddireddy)
 - add support for Global Fabric Manager Server (GFMS) event to Microsemi Switchtec switch driver (Logan Gunthorpe)
 - add IDs for Switchtec PSX 24xG3 and PSX 48xG3 (Kelvin Cao)

* tag 'pci-v4.16-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (140 commits)
  PCI: cadence: Add EndPoint Controller driver for Cadence PCIe controller
  dt-bindings: PCI: cadence: Add DT bindings for Cadence PCIe endpoint controller
  PCI: endpoint: Fix EPF device name to support multi-function devices
  PCI: endpoint: Add the function number as argument to EPC ops
  PCI: cadence: Add host driver for Cadence PCIe controller
  dt-bindings: PCI: cadence: Add DT bindings for Cadence PCIe host controller
  PCI: Add vendor ID for Cadence
  PCI: Add generic function to probe PCI host controllers
  PCI: generic: fix missing call of pci_free_resource_list()
  PCI: OF: Add generic function to parse and allocate PCI resources
  PCI: Regroup all PCI related entries into drivers/pci/Makefile
  PCI/DPC: Reformat DPC register definitions
  PCI/DPC: Add and use DPC Status register field definitions
  PCI/DPC: Squash dpc_rp_pio_get_info() into dpc_process_rp_pio_error()
  PCI/DPC: Remove unnecessary RP PIO register structs
  PCI/DPC: Push dpc->rp_pio_status assignment into dpc_rp_pio_get_info()
  PCI/DPC: Squash dpc_rp_pio_print_error() into dpc_rp_pio_get_info()
  PCI/DPC: Make RP PIO log size check more generic
  PCI/DPC: Rename local "status" to "dpc_status"
  PCI/DPC: Squash dpc_rp_pio_print_tlp_header() into dpc_rp_pio_print_error()
  ...
commit 105cf3c8c6
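Many of the hunks below are mechanical conversions from the now-deprecated pci_get_bus_and_slot() to pci_get_domain_bus_and_slot(). A minimal sketch of the pattern, assuming only a hypothetical caller that already holds a struct pci_dev *pdev (not taken from any one file below):

#include <linux/pci.h>

/* Old style: implicitly assumes PCI domain (segment) 0. */
static struct pci_dev *old_lookup(struct pci_dev *pdev)
{
	return pci_get_bus_and_slot(pdev->bus->number, PCI_DEVFN(0, 0));
}

/* New style: derive the domain from a device we already have. */
static struct pci_dev *new_lookup(struct pci_dev *pdev)
{
	return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					   pdev->bus->number,
					   PCI_DEVFN(0, 0));
}

Both helpers return a referenced device that must be dropped with pci_dev_put(), as the gma500 hunks further down do.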
@@ -3711,7 +3711,11 @@
[KNL, SMP] Set scheduler's default relax_domain_level.
See Documentation/cgroup-v1/cpusets.txt.

reserve= [KNL,BUGS] Force the kernel to ignore some iomem area
reserve= [KNL,BUGS] Force kernel to ignore I/O ports or memory
Format: <base1>,<size1>[,<base2>,<size2>,...]
Reserve I/O ports or memory so the kernel won't use
them. If <base> is less than 0x10000, the region
is assumed to be I/O ports; otherwise it is memory.

reservetop= [X86-32]
Format: nn[KMG]
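As a usage illustration (the values here are made up, not from the patch): booting with reserve=0x320,32,0xfed00000,0x1000 would keep the kernel away from I/O ports 0x320-0x33f and from 4 KiB of memory at 0xfed00000, since the first base is below 0x10000 and the second is not.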
@@ -4,7 +4,10 @@ This PCIe host controller is based on the Synopsys DesignWare PCIe IP
and thus inherits all the common properties defined in designware-pcie.txt.

Required properties:
- compatible: "axis,artpec6-pcie", "snps,dw-pcie"
- compatible: "axis,artpec6-pcie", "snps,dw-pcie" for ARTPEC-6 in RC mode;
              "axis,artpec6-pcie-ep", "snps,dw-pcie" for ARTPEC-6 in EP mode;
              "axis,artpec7-pcie", "snps,dw-pcie" for ARTPEC-7 in RC mode;
              "axis,artpec7-pcie-ep", "snps,dw-pcie" for ARTPEC-7 in EP mode;
- reg: base addresses and lengths of the PCIe controller (DBI),
  the PHY controller, and configuration address space.
- reg-names: Must include the following entries:
@@ -0,0 +1,22 @@
* Cadence PCIe endpoint controller

Required properties:
- compatible: Should contain "cdns,cdns-pcie-ep" to identify the IP used.
- reg: Should contain the controller register base address and AXI interface
  region base address respectively.
- reg-names: Must be "reg" and "mem" respectively.
- cdns,max-outbound-regions: Set to maximum number of outbound regions

Optional properties:
- max-functions: Maximum number of functions that can be configured (default 1).

Example:

pcie@fc000000 {
	compatible = "cdns,cdns-pcie-ep";
	reg = <0x0 0xfc000000 0x0 0x01000000>,
	      <0x0 0x80000000 0x0 0x40000000>;
	reg-names = "reg", "mem";
	cdns,max-outbound-regions = <16>;
	max-functions = /bits/ 8 <8>;
};
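For illustration only (a hedged sketch, not the actual pcie-cadence-ep.c code): an endpoint driver would typically pick up the optional max-functions property with the standard OF helper, keeping the binding's default of 1 when the property is absent:

#include <linux/of.h>
#include <linux/types.h>

static u8 example_ep_max_functions(struct device_node *np)
{
	u8 max_functions = 1;	/* binding default when the property is missing */

	/* of_property_read_u8() leaves max_functions untouched on error */
	of_property_read_u8(np, "max-functions", &max_functions);
	return max_functions;
}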
@@ -0,0 +1,60 @@
* Cadence PCIe host controller

This PCIe controller inherits the base properties defined in
host-generic-pci.txt.

Required properties:
- compatible: Should contain "cdns,cdns-pcie-host" to identify the IP used.
- reg: Should contain the controller register base address, PCIe configuration
  window base address, and AXI interface region base address respectively.
- reg-names: Must be "reg", "cfg" and "mem" respectively.
- #address-cells: Set to <3>
- #size-cells: Set to <2>
- device_type: Set to "pci"
- ranges: Ranges for the PCI memory and I/O regions
- #interrupt-cells: Set to <1>
- interrupt-map-mask and interrupt-map: Standard PCI properties to define the
  mapping of the PCIe interface to interrupt numbers.

Optional properties:
- cdns,max-outbound-regions: Set to maximum number of outbound regions
  (default 32)
- cdns,no-bar-match-nbits: Set into the no BAR match register to configure the
  number of least significant bits kept during inbound (PCIe -> AXI) address
  translations (default 32)
- vendor-id: The PCI vendor ID (16 bits, default is design dependent)
- device-id: The PCI device ID (16 bits, default is design dependent)

Example:

pcie@fb000000 {
	compatible = "cdns,cdns-pcie-host";
	device_type = "pci";
	#address-cells = <3>;
	#size-cells = <2>;
	bus-range = <0x0 0xff>;
	linux,pci-domain = <0>;
	cdns,max-outbound-regions = <16>;
	cdns,no-bar-match-nbits = <32>;
	vendor-id = /bits/ 16 <0x17cd>;
	device-id = /bits/ 16 <0x0200>;

	reg = <0x0 0xfb000000 0x0 0x01000000>,
	      <0x0 0x41000000 0x0 0x00001000>,
	      <0x0 0x40000000 0x0 0x04000000>;
	reg-names = "reg", "cfg", "mem";

	ranges = <0x02000000 0x0 0x42000000 0x0 0x42000000 0x0 0x1000000>,
		 <0x01000000 0x0 0x43000000 0x0 0x43000000 0x0 0x0010000>;

	#interrupt-cells = <0x1>;

	interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0x0 0x0 14 0x1
			 0x0 0x0 0x0 0x2 &gic 0x0 0x0 0x0 15 0x1
			 0x0 0x0 0x0 0x3 &gic 0x0 0x0 0x0 16 0x1
			 0x0 0x0 0x0 0x4 &gic 0x0 0x0 0x0 17 0x1>;

	interrupt-map-mask = <0x0 0x0 0x0 0x7>;

	msi-parent = <&its_pci>;
};
@@ -6,9 +6,6 @@ and thus inherits all the common properties defined in designware-pcie.txt.
Required properties:
- compatible: "samsung,exynos5440-pcie"
- reg: base addresses and lengths of the PCIe controller,
  the PHY controller, additional register for the PHY controller.
  (Registers for the PHY controller are DEPRECATED.
  Use the PHY framework.)
- reg-names : First name should be set to "elbi".
  And use the "config" instead of getting the configuration address space
  from "ranges".

@@ -23,49 +20,8 @@ For other common properties, refer to

Example:

SoC-specific DT Entry:
SoC-specific DT Entry (with using PHY framework):

pcie@290000 {
	compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
	reg = <0x290000 0x1000
		0x270000 0x1000
		0x271000 0x40>;
	interrupts = <0 20 0>, <0 21 0>, <0 22 0>;
	clocks = <&clock 28>, <&clock 27>;
	clock-names = "pcie", "pcie_bus";
	#address-cells = <3>;
	#size-cells = <2>;
	device_type = "pci";
	ranges = <0x00000800 0 0x40000000 0x40000000 0 0x00001000 /* configuration space */
		  0x81000000 0 0 0x40001000 0 0x00010000 /* downstream I/O */
		  0x82000000 0 0x40011000 0x40011000 0 0x1ffef000>; /* non-prefetchable memory */
	#interrupt-cells = <1>;
	interrupt-map-mask = <0 0 0 0>;
	interrupt-map = <0 0 0 0 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
	num-lanes = <4>;
};

pcie@2a0000 {
	compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
	reg = <0x2a0000 0x1000
		0x272000 0x1000
		0x271040 0x40>;
	interrupts = <0 23 0>, <0 24 0>, <0 25 0>;
	clocks = <&clock 29>, <&clock 27>;
	clock-names = "pcie", "pcie_bus";
	#address-cells = <3>;
	#size-cells = <2>;
	device_type = "pci";
	ranges = <0x00000800 0 0x60000000 0x60000000 0 0x00001000 /* configuration space */
		  0x81000000 0 0 0x60001000 0 0x00010000 /* downstream I/O */
		  0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>; /* non-prefetchable memory */
	#interrupt-cells = <1>;
	interrupt-map-mask = <0 0 0 0>;
	interrupt-map = <0 0 0 0 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
	num-lanes = <4>;
};

With using PHY framework:
pcie_phy0: pcie-phy@270000 {
	...
	reg = <0x270000 0x1000>, <0x271000 0x40>;

@@ -74,13 +30,21 @@ With using PHY framework:
};

pcie@290000 {
	...
	compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
	reg = <0x290000 0x1000>, <0x40000000 0x1000>;
	reg-names = "elbi", "config";
	clocks = <&clock 28>, <&clock 27>;
	clock-names = "pcie", "pcie_bus";
	#address-cells = <3>;
	#size-cells = <2>;
	device_type = "pci";
	phys = <&pcie_phy0>;
	ranges = <0x81000000 0 0 0x60001000 0 0x00010000
		  0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>;
	...
	#interrupt-cells = <1>;
	interrupt-map-mask = <0 0 0 0>;
	interrupt-map = <0 0 0 0 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
	num-lanes = <4>;
};

Board-specific DT Entry:
MAINTAINERS
@@ -10513,6 +10513,13 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
F: drivers/pci/dwc/pcie-armada8k.c

PCI DRIVER FOR CADENCE PCIE IP
M: Alan Douglas <adouglas@cadence.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/cdns,*.txt
F: drivers/pci/cadence/pcie-cadence*

PCI DRIVER FOR FREESCALE LAYERSCAPE
M: Minghuan Lian <minghuan.Lian@freescale.com>
M: Mingkai Hu <mingkai.hu@freescale.com>

@@ -10663,8 +10670,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
S: Supported
F: Documentation/devicetree/bindings/pci/
F: Documentation/PCI/
F: drivers/acpi/pci*
F: drivers/pci/
F: include/asm-generic/pci*
F: include/linux/pci*
F: include/uapi/linux/pci*
F: lib/pci*
F: arch/x86/pci/
F: arch/x86/kernel/quirks.c
@@ -21,6 +21,7 @@
struct pci_controller *pci_vga_hose;
static struct resource alpha_vga = {
.name = "alpha-vga+",
.flags = IORESOURCE_IO,
.start = 0x3C0,
.end = 0x3DF
};
@@ -10,10 +10,7 @@ extern unsigned long pcibios_min_io;
extern unsigned long pcibios_min_mem;
#define PCIBIOS_MIN_MEM pcibios_min_mem

static inline int pcibios_assign_all_busses(void)
{
return pci_has_flag(PCI_REASSIGN_ALL_RSRC);
}
#define pcibios_assign_all_busses() pci_has_flag(PCI_REASSIGN_ALL_BUS)

#ifdef CONFIG_PCI_DOMAINS
static inline int pci_proc_domain(struct pci_bus *bus)
@@ -527,7 +527,7 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
struct pci_sys_data *sys;
LIST_HEAD(head);

pci_add_flags(PCI_REASSIGN_ALL_RSRC);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
if (hw->preinit)
hw->preinit();
pcibios_init_hw(parent, hw, &head);
@@ -10,7 +10,6 @@ menuconfig ARCH_MVEBU
select ZONE_DMA if ARM_LPAE
select GPIOLIB
select PCI_QUIRKS if PCI
select OF_ADDRESS_PCI

if ARCH_MVEBU
@@ -32,22 +32,22 @@ static struct resource jazz_io_resources[] = {
.start = 0x00,
.end = 0x1f,
.name = "dma1",
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}, {
.start = 0x40,
.end = 0x5f,
.name = "timer",
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}, {
.start = 0x80,
.end = 0x8f,
.name = "dma page reg",
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}, {
.start = 0xc0,
.end = 0xdf,
.name = "dma2",
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}
};
@@ -47,31 +47,31 @@ static struct resource standard_io_resources[] = {
.name = "dma1",
.start = 0x00,
.end = 0x1f,
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "timer",
.start = 0x40,
.end = 0x5f,
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "keyboard",
.start = 0x60,
.end = 0x6f,
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "dma page reg",
.start = 0x80,
.end = 0x8f,
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
{
.name = "dma2",
.start = 0xc0,
.end = 0xdf,
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
},
};
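The resource hunks above are part of "fill in types for compile-time VGA and other I/O port resources": each legacy port range now carries an explicit IORESOURCE_IO type. A hedged sketch of such a static declaration (device name and range are made up, not from the patch):

#include <linux/ioport.h>

static struct resource example_port_resource = {
	.name	= "example-legacy-port",	/* hypothetical device */
	.start	= 0x2f8,
	.end	= 0x2ff,
	.flags	= IORESOURCE_IO | IORESOURCE_BUSY,
};

/* Registered against the I/O port tree at init time, e.g.:
 *   request_resource(&ioport_resource, &example_port_resource);
 */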
@@ -756,14 +756,14 @@ int eeh_restore_vf_config(struct pci_dn *pdn)
eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
2, devctl);

/* Disable Completion Timeout */
/* Disable Completion Timeout if possible */
eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
4, &cap2);
if (cap2 & 0x10) {
if (cap2 & PCI_EXP_DEVCAP2_COMP_TMOUT_DIS) {
eeh_ops->read_config(pdn,
edev->pcie_cap + PCI_EXP_DEVCTL2,
4, &cap2);
cap2 |= 0x10;
cap2 |= PCI_EXP_DEVCTL2_COMP_TMOUT_DIS;
eeh_ops->write_config(pdn,
edev->pcie_cap + PCI_EXP_DEVCTL2,
4, cap2);
@@ -362,8 +362,7 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
*/
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
struct of_phandle_args oirq;
unsigned int virq;
unsigned int virq = 0;

pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

@@ -371,7 +370,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
memset(&oirq, 0xff, sizeof(oirq));
#endif
/* Try to get a mapping from the device-tree */
if (of_irq_parse_pci(pci_dev, &oirq)) {
if (!of_irq_parse_and_map_pci(pci_dev, 0, 0)) {
u8 line, pin;

/* If that fails, lets fallback to what is in the config

@@ -395,11 +394,6 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
virq = irq_create_mapping(NULL, line);
if (virq)
irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %pOF\n",
oirq.args_count, oirq.args[0], oirq.args[1], oirq.np);

virq = irq_create_of_mapping(&oirq);
}

if (!virq) {
@@ -104,7 +104,7 @@ EXPORT_SYMBOL_GPL(pci_hp_remove_devices);
*/
void pci_hp_add_devices(struct pci_bus *bus)
{
int slotno, mode, pass, max;
int slotno, mode, max;
struct pci_dev *dev;
struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);

@@ -133,13 +133,17 @@ void pci_hp_add_devices(struct pci_bus *bus)
pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
pcibios_setup_bus_devices(bus);
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++) {
list_for_each_entry(dev, &bus->devices, bus_list) {
if (pci_is_bridge(dev))
max = pci_scan_bridge(bus, dev,
max, pass);
}
}
/*
* Scan bridges that are already configured. We don't touch
* them unless they are misconfigured (which will be done in
* the second scan below).
*/
for_each_pci_bridge(dev, bus)
max = pci_scan_bridge(bus, dev, max, 0);

/* Scan bridges that need to be reconfigured */
for_each_pci_bridge(dev, bus)
max = pci_scan_bridge(bus, dev, max, 1);
}
pcibios_finish_adding_to_bus(bus);
}
@@ -96,7 +96,8 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
reg = of_get_property(node, "reg", NULL);
if (!reg)
continue;
dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
dev = pci_get_domain_bus_and_slot(0, pci_bus,
((reg[0] >> 8) & 0xff));
if (!dev || !dev->subordinate) {
pci_dev_put(dev);
continue;
@@ -369,11 +369,8 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
pcibios_setup_bus_devices(bus);

/* Now scan child busses */
list_for_each_entry(dev, &bus->devices, bus_list) {
if (pci_is_bridge(dev)) {
of_scan_pci_bridge(dev);
}
}
for_each_pci_bridge(dev, bus)
of_scan_pci_bridge(dev);
}

/**
@@ -134,7 +134,7 @@ int maple_set_rtc_time(struct rtc_time *tm)

static struct resource rtc_iores = {
.name = "rtc",
.flags = IORESOURCE_BUSY,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

unsigned long __init maple_get_boot_time(void)
@@ -829,7 +829,7 @@ core99_ata100_enable(struct device_node *node, long value)

if (value) {
if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
pdev = pci_get_bus_and_slot(pbus, pid);
pdev = pci_get_domain_bus_and_slot(0, pbus, pid);
if (pdev == NULL)
return 0;
rc = pci_enable_device(pdev);
@@ -145,21 +145,21 @@ static struct resource pic1_iores = {
.name = "8259 (master)",
.start = 0x20,
.end = 0x21,
.flags = IORESOURCE_BUSY,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static struct resource pic2_iores = {
.name = "8259 (slave)",
.start = 0xa0,
.end = 0xa1,
.flags = IORESOURCE_BUSY,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static struct resource pic_edgectrl_iores = {
.name = "8259 edge control",
.start = 0x4d0,
.end = 0x4d1,
.flags = IORESOURCE_BUSY,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static int i8259_host_match(struct irq_domain *h, struct device_node *node,
@@ -37,7 +37,7 @@ static ssize_t mv64x60_hs_reg_read(struct file *filp, struct kobject *kobj,
if (count < MV64X60_VAL_LEN_MAX)
return -EINVAL;

phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
phb = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!phb)
return -ENODEV;
pci_read_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, &v);

@@ -61,7 +61,7 @@ static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj,
if (sscanf(buf, "%i", &v) != 1)
return -EINVAL;

phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
phb = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!phb)
return -ENODEV;
pci_write_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, v);
@@ -839,7 +839,8 @@ static void __init pirq_find_router(struct irq_router *r)
DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n",
rt->rtr_vendor, rt->rtr_device);

pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn);
pirq_router_dev = pci_get_domain_bus_and_slot(0, rt->rtr_bus,
rt->rtr_devfn);
if (!pirq_router_dev) {
DBG(KERN_DEBUG "PCI: Interrupt router not found at "
"%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
@@ -409,10 +409,8 @@ int __init pci_xen_init(void)
pcibios_enable_irq = xen_pcifront_enable_irq;
pcibios_disable_irq = NULL;

#ifdef CONFIG_ACPI
/* Keep ACPI out of the picture */
acpi_noirq = 1;
#endif
acpi_noirq_set();

#ifdef CONFIG_PCI_MSI
x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
@@ -16,10 +16,7 @@ obj-$(CONFIG_PINCTRL)		+= pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
obj-y += pwm/

obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PCI_ENDPOINT) += pci/endpoint/
# PCI dwc controller drivers
obj-y += pci/dwc/
obj-y += pci/

obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
@@ -466,7 +466,8 @@ static void ali_init_chipset(struct pci_dev *pdev)
tmp |= 0x01; /* CD_ROM enable for DMA */
pci_write_config_byte(pdev, 0x53, tmp);
}
north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
north = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0,
PCI_DEVFN(0, 0));
if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) {
/* Configure the ALi bridge logic. For non ALi rely on BIOS.
Set the south bridge enable bit */
@@ -268,17 +268,17 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
void *AllocationPointer = NULL;
void *ScatterGatherCPU = NULL;
dma_addr_t ScatterGatherDMA;
struct pci_pool *ScatterGatherPool;
struct dma_pool *ScatterGatherPool;
void *RequestSenseCPU = NULL;
dma_addr_t RequestSenseDMA;
struct pci_pool *RequestSensePool = NULL;
struct dma_pool *RequestSensePool = NULL;

if (Controller->FirmwareType == DAC960_V1_Controller)
{
CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker);
CommandAllocationGroupSize = DAC960_V1_CommandAllocationGroupSize;
ScatterGatherPool = pci_pool_create("DAC960_V1_ScatterGather",
Controller->PCIDevice,
ScatterGatherPool = dma_pool_create("DAC960_V1_ScatterGather",
&Controller->PCIDevice->dev,
DAC960_V1_ScatterGatherLimit * sizeof(DAC960_V1_ScatterGatherSegment_T),
sizeof(DAC960_V1_ScatterGatherSegment_T), 0);
if (ScatterGatherPool == NULL)

@@ -290,18 +290,18 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
{
CommandAllocationLength = offsetof(DAC960_Command_T, V2.EndMarker);
CommandAllocationGroupSize = DAC960_V2_CommandAllocationGroupSize;
ScatterGatherPool = pci_pool_create("DAC960_V2_ScatterGather",
Controller->PCIDevice,
ScatterGatherPool = dma_pool_create("DAC960_V2_ScatterGather",
&Controller->PCIDevice->dev,
DAC960_V2_ScatterGatherLimit * sizeof(DAC960_V2_ScatterGatherSegment_T),
sizeof(DAC960_V2_ScatterGatherSegment_T), 0);
if (ScatterGatherPool == NULL)
return DAC960_Failure(Controller,
"AUXILIARY STRUCTURE CREATION (SG)");
RequestSensePool = pci_pool_create("DAC960_V2_RequestSense",
Controller->PCIDevice, sizeof(DAC960_SCSI_RequestSense_T),
RequestSensePool = dma_pool_create("DAC960_V2_RequestSense",
&Controller->PCIDevice->dev, sizeof(DAC960_SCSI_RequestSense_T),
sizeof(int), 0);
if (RequestSensePool == NULL) {
pci_pool_destroy(ScatterGatherPool);
dma_pool_destroy(ScatterGatherPool);
return DAC960_Failure(Controller,
"AUXILIARY STRUCTURE CREATION (SG)");
}

@@ -335,16 +335,16 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
Command->Next = Controller->FreeCommands;
Controller->FreeCommands = Command;
Controller->Commands[CommandIdentifier-1] = Command;
ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, GFP_ATOMIC,
ScatterGatherCPU = dma_pool_alloc(ScatterGatherPool, GFP_ATOMIC,
&ScatterGatherDMA);
if (ScatterGatherCPU == NULL)
return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");

if (RequestSensePool != NULL) {
RequestSenseCPU = pci_pool_alloc(RequestSensePool, GFP_ATOMIC,
RequestSenseCPU = dma_pool_alloc(RequestSensePool, GFP_ATOMIC,
&RequestSenseDMA);
if (RequestSenseCPU == NULL) {
pci_pool_free(ScatterGatherPool, ScatterGatherCPU,
dma_pool_free(ScatterGatherPool, ScatterGatherCPU,
ScatterGatherDMA);
return DAC960_Failure(Controller,
"AUXILIARY STRUCTURE CREATION");

@@ -379,8 +379,8 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
{
int i;
struct pci_pool *ScatterGatherPool = Controller->ScatterGatherPool;
struct pci_pool *RequestSensePool = NULL;
struct dma_pool *ScatterGatherPool = Controller->ScatterGatherPool;
struct dma_pool *RequestSensePool = NULL;
void *ScatterGatherCPU;
dma_addr_t ScatterGatherDMA;
void *RequestSenseCPU;

@@ -411,9 +411,9 @@ static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
RequestSenseDMA = Command->V2.RequestSenseDMA;
}
if (ScatterGatherCPU != NULL)
pci_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA);
dma_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA);
if (RequestSenseCPU != NULL)
pci_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA);
dma_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA);

if ((Command->CommandIdentifier
% Controller->CommandAllocationGroupSize) == 1) {

@@ -437,13 +437,11 @@ static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
Controller->CurrentStatusBuffer = NULL;
}

if (ScatterGatherPool != NULL)
pci_pool_destroy(ScatterGatherPool);
dma_pool_destroy(ScatterGatherPool);
if (Controller->FirmwareType == DAC960_V1_Controller)
return;

if (RequestSensePool != NULL)
pci_pool_destroy(RequestSensePool);
dma_pool_destroy(RequestSensePool);

for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
kfree(Controller->V2.LogicalDeviceInformation[i]);

@@ -2316,7 +2316,7 @@ typedef struct DAC960_Controller
bool SuppressEnclosureMessages;
struct timer_list MonitoringTimer;
struct gendisk *disks[DAC960_MaxLogicalDrives];
struct pci_pool *ScatterGatherPool;
struct dma_pool *ScatterGatherPool;
DAC960_Command_T *FreeCommands;
unsigned char *CombinedStatusBuffer;
unsigned char *CurrentStatusBuffer;

@@ -2429,7 +2429,7 @@ typedef struct DAC960_Controller
bool NeedDeviceSerialNumberInformation;
bool StartLogicalDeviceInformationScan;
bool StartPhysicalDeviceInformationScan;
struct pci_pool *RequestSensePool;
struct dma_pool *RequestSensePool;

dma_addr_t FirstCommandMailboxDMA;
DAC960_V2_CommandMailbox_T *FirstCommandMailbox;
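The DAC960 hunks above are part of the tree-wide removal of the pci_pool API; the replacement is the generic DMA pool API, which takes a struct device rather than a struct pci_dev. A minimal hedged sketch of the new calling convention (names and sizes are illustrative, not from the driver):

#include <linux/dmapool.h>
#include <linux/pci.h>

static int example_pool_roundtrip(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* name, device, block size, alignment, boundary */
	pool = dma_pool_create("example_pool", &pdev->dev, 512, 4, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, buf, dma);
	dma_pool_destroy(pool);
	return 0;
}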
@@ -340,11 +340,17 @@ static int agp_nvidia_probe(struct pci_dev *pdev,
u8 cap_ptr;

nvidia_private.dev_1 =
pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1));
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
(unsigned int)pdev->bus->number,
PCI_DEVFN(0, 1));
nvidia_private.dev_2 =
pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2));
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
(unsigned int)pdev->bus->number,
PCI_DEVFN(0, 2));
nvidia_private.dev_3 =
pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0));
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
(unsigned int)pdev->bus->number,
PCI_DEVFN(30, 0));

if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
@@ -474,7 +474,8 @@ static int agp_serverworks_probe(struct pci_dev *pdev,
}

/* Everything is on func 1 here so we are hardcoding function one */
bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
(unsigned int)pdev->bus->number,
PCI_DEVFN(0, 1));
if (!bridge_dev) {
dev_info(&pdev->dev, "can't find secondary device\n");
@@ -669,10 +669,10 @@ edd_get_pci_dev(struct edd_device *edev)
struct edd_info *info = edd_dev_get_info(edev);

if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
return pci_get_bus_and_slot(info->params.interface_path.pci.bus,
PCI_DEVFN(info->params.interface_path.pci.slot,
info->params.interface_path.pci.
function));
return pci_get_domain_bus_and_slot(0,
info->params.interface_path.pci.bus,
PCI_DEVFN(info->params.interface_path.pci.slot,
info->params.interface_path.pci.function));
}
return NULL;
}
@@ -719,8 +719,9 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
* executes only devices which are in domain 0. Furthermore, the
* iBFT spec doesn't have a domain id field :-(
*/
pci_dev = pci_get_bus_and_slot((nic->pci_bdf & 0xff00) >> 8,
(nic->pci_bdf & 0xff));
pci_dev = pci_get_domain_bus_and_slot(0,
(nic->pci_bdf & 0xff00) >> 8,
(nic->pci_bdf & 0xff));
if (pci_dev) {
rc = sysfs_create_link(&boot_kobj->kobj,
&pci_dev->dev.kobj, "device");
@@ -185,21 +185,22 @@ static int cdv_backlight_init(struct drm_device *dev)
* for this and the MID devices.
*/

static inline u32 CDV_MSG_READ32(uint port, uint offset)
static inline u32 CDV_MSG_READ32(int domain, uint port, uint offset)
{
int mcr = (0x10<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}

static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
u32 value)
{
int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD4, value);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);

@@ -216,11 +217,12 @@ static void cdv_init_pm(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 pwr_cnt;
int domain = pci_domain_nr(dev->pdev->bus);
int i;

dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
PSB_APMBA) & 0xFFFF;
dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
dev_priv->ospm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
PSB_OSPMBA) & 0xFFFF;

/* Power status */

@@ -251,7 +253,7 @@ static void cdv_errata(struct drm_device *dev)
* Bonus Launch to work around the issue, by degrading
* performance.
*/
CDV_MSG_WRITE32(3, 0x30, 0x08027108);
CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108);
}

/**
@@ -19,7 +19,9 @@
void gma_get_core_freq(struct drm_device *dev)
{
uint32_t clock;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct pci_dev *pci_root =
pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
0, 0);
struct drm_psb_private *dev_priv = dev->dev_private;

/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
@@ -32,7 +32,9 @@
static void mid_get_fuse_settings(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct pci_dev *pci_root =
pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
0, 0);
uint32_t fuse_value = 0;
uint32_t fuse_value_tmp = 0;

@@ -104,7 +106,9 @@ static void mid_get_fuse_settings(struct drm_device *dev)
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
uint32_t platform_rev_id = 0;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
int domain = pci_domain_nr(dev_priv->dev->pdev->bus);
struct pci_dev *pci_gfx_root =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));

if (pci_gfx_root == NULL) {
WARN_ON(1);

@@ -281,7 +285,9 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
u32 addr;
u8 __iomem *vbt_virtual;
struct mid_vbt_header vbt_header;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
struct pci_dev *pci_gfx_root =
pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
0, PCI_DEVFN(2, 0));
int ret = -1;

/* Get the address of the platform config vbt */
@@ -248,7 +248,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
goto out_err;

if (IS_MRST(dev)) {
dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
int domain = pci_domain_nr(dev->pdev->bus);

dev_priv->aux_pdev =
pci_get_domain_bus_and_slot(domain, 0,
PCI_DEVFN(3, 0));

if (dev_priv->aux_pdev) {
resource_start = pci_resource_start(dev_priv->aux_pdev,

@@ -268,7 +272,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
}
dev_priv->gmbus_reg = dev_priv->aux_reg;

dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0));
dev_priv->lpc_pdev =
pci_get_domain_bus_and_slot(domain, 0,
PCI_DEVFN(31, 0));
if (dev_priv->lpc_pdev) {
pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
&dev_priv->lpc_gpio_base);
@@ -780,38 +780,40 @@ extern const struct psb_ops cdv_chip_ops;
extern int drm_idle_check_interval;

/* Utilities */
static inline u32 MRST_MSG_READ32(uint port, uint offset)
static inline u32 MRST_MSG_READ32(int domain, uint port, uint offset)
{
int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}
static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
u32 value)
{
int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD4, value);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
}
static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
static inline u32 MDFLD_MSG_READ32(int domain, uint port, uint offset)
{
int mcr = (0x10<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}
static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
static inline void MDFLD_MSG_WRITE32(int domain, uint port, uint offset,
u32 value)
{
int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD4, value);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
@@ -213,8 +213,10 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
(dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
uint32_t type;
int domain = pci_domain_nr(dev->pdev->bus);

pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1),
0x7c, &type);

sim_data.memory_type = (type >> 12) & 1;
sim_data.memory_width = 64;
@@ -216,12 +216,15 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
{
struct nvkm_pll_vals pllvals;
int ret;
int domain;

domain = pci_domain_nr(dev->pdev->bus);

if (plltype == PLL_MEMORY &&
(dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
uint32_t mpllP;

pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3),
0x6c, &mpllP);
mpllP = (mpllP >> 8) & 0xf;
if (!mpllP)
mpllP = 4;

@@ -232,7 +235,8 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
(dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
uint32_t clock;

pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5),
0x4c, &clock);
return clock / 1000;
}
@@ -524,7 +524,8 @@ nouveau_get_hdmi_dev(struct nouveau_drm *drm)
}

/* subfunction one is a hdmi audio device? */
drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
drm->hdmi_device = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
(unsigned int)pdev->bus->number,
PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));

if (!drm->hdmi_device) {
@@ -28,8 +28,16 @@ nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct pci_dev *bridge;
u32 mem, mib;
int domain = 0;
struct pci_dev *pdev = NULL;

bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
if (dev_is_pci(fb->subdev.device->dev))
pdev = to_pci_dev(fb->subdev.device->dev);

if (pdev)
domain = pci_domain_nr(pdev->bus);

bridge = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 1));
if (!bridge) {
nvkm_error(&fb->subdev, "no bridge device\n");
return -ENODEV;
@@ -239,8 +239,9 @@ static u8 sl82c105_bridge_revision(struct pci_dev *dev)
/*
* The bridge should be part of the same device, but function 0.
*/
bridge = pci_get_bus_and_slot(dev->bus->number,
PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
dev->bus->number,
PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
if (!bridge)
return -1;
@@ -430,59 +430,16 @@ static void qedr_remove_sysfiles(struct qedr_dev *dev)

static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
struct pci_dev *bridge;
u32 ctl2, cap2;
u16 flags;
int rc;
int rc = pci_enable_atomic_ops_to_root(pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP64);

bridge = pdev->bus->self;
if (!bridge)
goto disable;

/* Check atomic routing support all the way to root complex */
while (bridge->bus->parent) {
rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
goto disable;

rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
if (rc)
goto disable;

rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
if (rc)
goto disable;

if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
goto disable;
bridge = bridge->bus->parent->self;
if (rc) {
dev->atomic_cap = IB_ATOMIC_NONE;
DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
} else {
dev->atomic_cap = IB_ATOMIC_GLOB;
DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
}

rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
goto disable;

rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
goto disable;

/* Set atomic operations */
pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
PCI_EXP_DEVCTL2_ATOMIC_REQ);
dev->atomic_cap = IB_ATOMIC_GLOB;

DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");

return;

disable:
pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
PCI_EXP_DEVCTL2_ATOMIC_REQ);
dev->atomic_cap = IB_ATOMIC_NONE;

DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");

}

static const struct qed_rdma_ops *qed_ops;
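The qedr change above drops the open-coded capability walk in favour of pci_enable_atomic_ops_to_root(), the helper introduced by this series. A hedged sketch of how a requester might use it (the wrapper name is made up; error handling reduced to the essentials):

#include <linux/pci.h>

static bool example_enable_64bit_atomics(struct pci_dev *pdev)
{
	/* Returns 0 only when every bridge up to the Root Port routes
	 * AtomicOps and the Root Port advertises 64-bit AtomicOp completion,
	 * in which case AtomicOp Requester is enabled on pdev. */
	return pci_enable_atomic_ops_to_root(pdev,
				PCI_EXP_DEVCAP2_ATOMIC_COMP64) == 0;
}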
@@ -527,7 +527,8 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
struct iommu_dev_data *dev_data = NULL;
struct pci_dev *pdev;

pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff);
pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
dev_data = get_dev_data(&pdev->dev);
@@ -1697,8 +1697,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
u32 range, misc, low, high;
int ret;

iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
if (!iommu->dev)
return -ENODEV;

@@ -1764,8 +1764,9 @@ static int iommu_init_pci(struct amd_iommu *iommu)
if (is_rd890_iommu(iommu->dev)) {
int i, j;

iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
PCI_DEVFN(0, 0));
iommu->root_pdev =
pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
PCI_DEVFN(0, 0));

/*
* Some rd890 systems may not be fully reconfigured by the
@@ -565,7 +565,8 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
finish = (iommu_fault->tag >> 9) & 1;

devid = iommu_fault->device_id;
pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff);
pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
devid & 0xff);
if (!pdev)
return -ENODEV;
dev_data = get_dev_data(&pdev->dev);
@@ -289,14 +289,14 @@ static struct resource pic1_io_resource = {
.name = "pic1",
.start = PIC_MASTER_CMD,
.end = PIC_MASTER_IMR,
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
};

static struct resource pic2_io_resource = {
.name = "pic2",
.start = PIC_SLAVE_CMD,
.end = PIC_SLAVE_IMR,
.flags = IORESOURCE_BUSY
.flags = IORESOURCE_IO | IORESOURCE_BUSY
};

static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
@@ -1799,7 +1799,7 @@ static int powerbook_sleep_grackle(void)
struct adb_request req;
struct pci_dev *grackle;

grackle = pci_get_bus_and_slot(0, 0);
grackle = pci_get_domain_bus_and_slot(0, 0, 0);
if (!grackle)
return -ENODEV;
@@ -102,7 +102,6 @@ struct ttusb {
unsigned int isoc_in_pipe;

void *iso_buffer;
dma_addr_t iso_dma_handle;

struct urb *iso_urb[ISO_BUF_COUNT];

@@ -792,26 +791,17 @@ static void ttusb_free_iso_urbs(struct ttusb *ttusb)

for (i = 0; i < ISO_BUF_COUNT; i++)
usb_free_urb(ttusb->iso_urb[i]);

pci_free_consistent(NULL,
ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF *
ISO_BUF_COUNT, ttusb->iso_buffer,
ttusb->iso_dma_handle);
kfree(ttusb->iso_buffer);
}

static int ttusb_alloc_iso_urbs(struct ttusb *ttusb)
{
int i;

ttusb->iso_buffer = pci_zalloc_consistent(NULL,
ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
&ttusb->iso_dma_handle);

if (!ttusb->iso_buffer) {
dprintk("%s: pci_alloc_consistent - not enough memory\n",
__func__);
ttusb->iso_buffer = kcalloc(FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
ISO_FRAME_SIZE, GFP_KERNEL);
if (!ttusb->iso_buffer)
return -ENOMEM;
}

for (i = 0; i < ISO_BUF_COUNT; i++) {
struct urb *urb;
@@ -127,7 +127,6 @@ struct ttusb_dec {
struct urb *irq_urb;
dma_addr_t irq_dma_handle;
void *iso_buffer;
dma_addr_t iso_dma_handle;
struct urb *iso_urb[ISO_BUF_COUNT];
int iso_stream_count;
struct mutex iso_mutex;

@@ -1185,11 +1184,7 @@ static void ttusb_dec_free_iso_urbs(struct ttusb_dec *dec)

for (i = 0; i < ISO_BUF_COUNT; i++)
usb_free_urb(dec->iso_urb[i]);

pci_free_consistent(NULL,
ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF *
ISO_BUF_COUNT),
dec->iso_buffer, dec->iso_dma_handle);
kfree(dec->iso_buffer);
}

static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)

@@ -1198,15 +1193,10 @@ static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)

dprintk("%s\n", __func__);

dec->iso_buffer = pci_zalloc_consistent(NULL,
ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT),
&dec->iso_dma_handle);

if (!dec->iso_buffer) {
dprintk("%s: pci_alloc_consistent - not enough memory\n",
__func__);
dec->iso_buffer = kcalloc(FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
ISO_FRAME_SIZE, GFP_KERNEL);
if (!dec->iso_buffer)
return -ENOMEM;
}

for (i = 0; i < ISO_BUF_COUNT; i++) {
struct urb *urb;
@@ -812,7 +812,7 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
if (!vf)
return false;

dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
if (dev)
return bnx2x_is_pcie_pending(dev);
return false;

@@ -1041,6 +1041,13 @@ void bnx2x_iov_init_dmae(struct bnx2x *bp)
REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_domain(struct bnx2x *bp, int vfid)
{
struct pci_dev *dev = bp->pdev;

return pci_domain_nr(dev->bus);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
struct pci_dev *dev = bp->pdev;

@@ -1606,6 +1613,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
struct bnx2x_virtf *vf = BP_VF(bp, vfid);

/* fill in the BDF and bars */
vf->domain = bnx2x_vf_domain(bp, vfid);
vf->bus = bnx2x_vf_bus(bp, vfid);
vf->devfn = bnx2x_vf_devfn(bp, vfid);
bnx2x_vf_set_bars(bp, vf);

@@ -182,6 +182,7 @@ struct bnx2x_virtf {
u32 error; /* 0 means all's-well */

/* BDF */
unsigned int domain;
unsigned int bus;
unsigned int devfn;
@@ -143,7 +143,7 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
struct hinic_hwif *hwif = cmdqs->hwif;
struct pci_dev *pdev = hwif->pdev;

cmdq_buf->buf = pci_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
&cmdq_buf->dma_addr);
if (!cmdq_buf->buf) {
dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");

@@ -161,7 +161,7 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
struct hinic_cmdq_buf *cmdq_buf)
{
pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
}

static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)

@@ -875,7 +875,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
int err;

cmdqs->hwif = hwif;
cmdqs->cmdq_buf_pool = pci_pool_create("hinic_cmdq", pdev,
cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
HINIC_CMDQ_BUF_SIZE,
HINIC_CMDQ_BUF_SIZE, 0);
if (!cmdqs->cmdq_buf_pool)

@@ -916,7 +916,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
devm_kfree(&pdev->dev, cmdqs->saved_wqs);

err_saved_wqs:
pci_pool_destroy(cmdqs->cmdq_buf_pool);
dma_pool_destroy(cmdqs->cmdq_buf_pool);
return err;
}

@@ -942,5 +942,5 @@ void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)

devm_kfree(&pdev->dev, cmdqs->saved_wqs);

pci_pool_destroy(cmdqs->cmdq_buf_pool);
dma_pool_destroy(cmdqs->cmdq_buf_pool);
}

@@ -157,7 +157,7 @@ struct hinic_cmdq {
struct hinic_cmdqs {
struct hinic_hwif *hwif;

struct pci_pool *cmdq_buf_pool;
struct dma_pool *cmdq_buf_pool;

struct hinic_wq *saved_wqs;
@@ -607,7 +607,7 @@ struct nic {
struct mem *mem;
dma_addr_t dma_addr;

struct pci_pool *cbs_pool;
struct dma_pool *cbs_pool;
dma_addr_t cbs_dma_addr;
u8 adaptive_ifs;
u8 tx_threshold;

@@ -1892,7 +1892,7 @@ static void e100_clean_cbs(struct nic *nic)
nic->cb_to_clean = nic->cb_to_clean->next;
nic->cbs_avail++;
}
pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
nic->cbs = NULL;
nic->cbs_avail = 0;
}

@@ -1910,7 +1910,7 @@ static int e100_alloc_cbs(struct nic *nic)
nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
nic->cbs_avail = 0;

nic->cbs = pci_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
&nic->cbs_dma_addr);
if (!nic->cbs)
return -ENOMEM;

@@ -2960,8 +2960,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
goto err_out_free;
}
nic->cbs_pool = pci_pool_create(netdev->name,
nic->pdev,
nic->cbs_pool = dma_pool_create(netdev->name,
&nic->pdev->dev,
nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);

@@ -3001,7 +3001,7 @@ static void e100_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
e100_free(nic);
pci_iounmap(pdev, nic->csr);
pci_pool_destroy(nic->cbs_pool);
dma_pool_destroy(nic->cbs_pool);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -2594,8 +2594,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
if (adapter->pdata && adapter->pdata->platform_init)
adapter->pdata->platform_init(pdev);

-adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
-PCI_DEVFN(12, 4));
+adapter->ptp_pdev =
+pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
+adapter->pdev->bus->number,
+PCI_DEVFN(12, 4));

netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
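pci_get_bus_and_slot() hard-coded PCI domain 0, which is why this series deprecates it; the replacement takes the domain explicitly, usually derived from a device the driver already holds. A hedged sketch of the lookup pattern used above (the 12/4 devfn is specific to pch_gbe; "peer" is an illustrative name):

	struct pci_dev *peer;

	peer = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					   pdev->bus->number,
					   PCI_DEVFN(12, 4));
	if (peer) {
		/* ... use the companion device ... */
		pci_dev_put(peer);	/* the lookup takes a reference */
	}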
@@ -152,6 +152,8 @@ struct tsi108_prv_data {
u32 msg_enable; /* debug message level */
struct mii_if_info mii_if;
unsigned int init_media;
+
+struct platform_device *pdev;
};

/* Structure for a device driver */

@@ -703,17 +705,18 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
data->txskbs[tx] = skb;

if (i == 0) {
-data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
-skb_headlen(skb), DMA_TO_DEVICE);
+data->txring[tx].buf0 = dma_map_single(&data->pdev->dev,
+skb->data, skb_headlen(skb),
+DMA_TO_DEVICE);
data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

-data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag,
-0,
-skb_frag_size(frag),
-DMA_TO_DEVICE);
+data->txring[tx].buf0 =
+skb_frag_dma_map(&data->pdev->dev, frag,
+0, skb_frag_size(frag),
+DMA_TO_DEVICE);
data->txring[tx].len = skb_frag_size(frag);
}

@@ -808,9 +811,9 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
if (!skb)
break;

-data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
-TSI108_RX_SKB_SIZE,
-DMA_FROM_DEVICE);
+data->rxring[rx].buf0 = dma_map_single(&data->pdev->dev,
+skb->data, TSI108_RX_SKB_SIZE,
+DMA_FROM_DEVICE);

/* Sometimes the hardware sets blen to zero after packet
 * reception, even though the manual says that it's only ever

@@ -1308,15 +1311,15 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}

-data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma,
-GFP_KERNEL);
+data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size,
+&data->rxdma, GFP_KERNEL);
if (!data->rxring)
return -ENOMEM;

-data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
-GFP_KERNEL);
+data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size,
+&data->txdma, GFP_KERNEL);
if (!data->txring) {
-pci_free_consistent(NULL, rxring_size, data->rxring,
+dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
data->rxdma);
return -ENOMEM;
}

@@ -1428,10 +1431,10 @@ static int tsi108_close(struct net_device *dev)
dev_kfree_skb(skb);
}

-dma_free_coherent(0,
+dma_free_coherent(&data->pdev->dev,
TSI108_RXRING_LEN * sizeof(rx_desc),
data->rxring, data->rxdma);
-dma_free_coherent(0,
+dma_free_coherent(&data->pdev->dev,
TSI108_TXRING_LEN * sizeof(tx_desc),
data->txring, data->txdma);

@@ -1576,6 +1579,7 @@ tsi108_init_one(struct platform_device *pdev)
printk("tsi108_eth%d: probe...\n", pdev->id);
data = netdev_priv(dev);
data->dev = dev;
+data->pdev = pdev;

pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
pdev->id, einfo->regs, einfo->phyregs,
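The tsi108 hunks all replace a NULL device argument with &data->pdev->dev: the DMA API needs a real struct device to select the correct DMA ops and mask, so the driver now stashes its platform_device at probe time and passes it everywhere. A minimal sketch of the mapping pattern, where "pdev" stands for whichever device the driver owns and the error path is illustrative only:

	dma_addr_t handle;

	handle = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;		/* illustrative error handling */

	/* ... hardware consumes the buffer ... */

	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);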
|
@ -64,10 +64,6 @@ config OF_DYNAMIC
|
|||
config OF_ADDRESS
|
||||
def_bool y
|
||||
depends on !SPARC && (HAS_IOMEM || UML)
|
||||
select OF_ADDRESS_PCI if PCI
|
||||
|
||||
config OF_ADDRESS_PCI
|
||||
bool
|
||||
|
||||
config OF_IRQ
|
||||
def_bool y
|
||||
|
@ -84,18 +80,6 @@ config OF_MDIO
|
|||
help
|
||||
OpenFirmware MDIO bus (Ethernet PHY) accessors
|
||||
|
||||
config OF_PCI
|
||||
def_tristate PCI
|
||||
depends on PCI
|
||||
help
|
||||
OpenFirmware PCI bus accessors
|
||||
|
||||
config OF_PCI_IRQ
|
||||
def_tristate PCI
|
||||
depends on OF_PCI && OF_IRQ
|
||||
help
|
||||
OpenFirmware PCI IRQ routing helpers
|
||||
|
||||
config OF_RESERVED_MEM
|
||||
depends on OF_EARLY_FLATTREE
|
||||
bool
|
||||
|
|
|
@@ -10,8 +10,6 @@ obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_UNITTEST) += unittest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
-obj-$(CONFIG_OF_PCI) += of_pci.o
-obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
@@ -96,7 +96,7 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
return IORESOURCE_MEM;
}

-#ifdef CONFIG_OF_ADDRESS_PCI
+#ifdef CONFIG_PCI
/*
 * PCI bus specific translator
 */

@@ -171,9 +171,7 @@ static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
return of_bus_default_translate(addr + 1, offset, na - 1);
}
-#endif /* CONFIG_OF_ADDRESS_PCI */

-#ifdef CONFIG_PCI
const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
unsigned int *flags)
{

@@ -361,6 +359,7 @@ int of_pci_range_to_resource(struct of_pci_range *range,
res->end = (resource_size_t)OF_BAD_ADDR;
return err;
}
EXPORT_SYMBOL(of_pci_range_to_resource);
+#endif /* CONFIG_PCI */

/*

@@ -426,7 +425,7 @@ static unsigned int of_bus_isa_get_flags(const __be32 *addr)
 */

static struct of_bus of_busses[] = {
-#ifdef CONFIG_OF_ADDRESS_PCI
+#ifdef CONFIG_PCI
/* PCI */
{
.name = "pci",

@@ -437,7 +436,7 @@ static struct of_bus of_busses[] = {
.translate = of_bus_pci_translate,
.get_flags = of_bus_pci_get_flags,
},
-#endif /* CONFIG_OF_ADDRESS_PCI */
+#endif /* CONFIG_PCI */
/* ISA */
{
.name = "isa",
@@ -1,384 +0,0 @@
|
|||
#define pr_fmt(fmt) "OF: PCI: " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_pci.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
static inline int __of_pci_pci_compare(struct device_node *node,
|
||||
unsigned int data)
|
||||
{
|
||||
int devfn;
|
||||
|
||||
devfn = of_pci_get_devfn(node);
|
||||
if (devfn < 0)
|
||||
return 0;
|
||||
|
||||
return devfn == data;
|
||||
}
|
||||
|
||||
struct device_node *of_pci_find_child_device(struct device_node *parent,
|
||||
unsigned int devfn)
|
||||
{
|
||||
struct device_node *node, *node2;
|
||||
|
||||
for_each_child_of_node(parent, node) {
|
||||
if (__of_pci_pci_compare(node, devfn))
|
||||
return node;
|
||||
/*
|
||||
* Some OFs create a parent node "multifunc-device" as
|
||||
* a fake root for all functions of a multi-function
|
||||
* device we go down them as well.
|
||||
*/
|
||||
if (!strcmp(node->name, "multifunc-device")) {
|
||||
for_each_child_of_node(node, node2) {
|
||||
if (__of_pci_pci_compare(node2, devfn)) {
|
||||
of_node_put(node);
|
||||
return node2;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_pci_find_child_device);
|
||||
|
||||
/**
|
||||
* of_pci_get_devfn() - Get device and function numbers for a device node
|
||||
* @np: device node
|
||||
*
|
||||
* Parses a standard 5-cell PCI resource and returns an 8-bit value that can
|
||||
* be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
|
||||
* and function numbers respectively. On error a negative error code is
|
||||
* returned.
|
||||
*/
|
||||
int of_pci_get_devfn(struct device_node *np)
|
||||
{
|
||||
u32 reg[5];
|
||||
int error;
|
||||
|
||||
error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
return (reg[0] >> 8) & 0xff;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_pci_get_devfn);
|
||||
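As the kernel-doc above says, of_pci_get_devfn() packs the device and function numbers into one byte; callers split the value with the standard macros. A small usage sketch, assuming "np" is a device node with a valid 5-cell "reg" property:

	int devfn = of_pci_get_devfn(np);

	if (devfn >= 0)
		pr_info("%pOF -> device %02x, function %u\n",
			np, PCI_SLOT(devfn), PCI_FUNC(devfn));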
|
||||
/**
|
||||
* of_pci_parse_bus_range() - parse the bus-range property of a PCI device
|
||||
* @node: device node
|
||||
* @res: address to a struct resource to return the bus-range
|
||||
*
|
||||
* Returns 0 on success or a negative error-code on failure.
|
||||
*/
|
||||
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
|
||||
{
|
||||
u32 bus_range[2];
|
||||
int error;
|
||||
|
||||
error = of_property_read_u32_array(node, "bus-range", bus_range,
|
||||
ARRAY_SIZE(bus_range));
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
res->name = node->name;
|
||||
res->start = bus_range[0];
|
||||
res->end = bus_range[1];
|
||||
res->flags = IORESOURCE_BUS;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
|
||||
|
||||
/**
|
||||
* This function will try to obtain the host bridge domain number by
|
||||
* finding a property called "linux,pci-domain" of the given device node.
|
||||
*
|
||||
* @node: device tree node with the domain information
|
||||
*
|
||||
* Returns the associated domain number from DT in the range [0-0xffff], or
|
||||
* a negative value if the required property is not found.
|
||||
*/
|
||||
int of_get_pci_domain_nr(struct device_node *node)
|
||||
{
|
||||
u32 domain;
|
||||
int error;
|
||||
|
||||
error = of_property_read_u32(node, "linux,pci-domain", &domain);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
return (u16)domain;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
|
||||
|
||||
/**
|
||||
* This function will try to find the limitation of link speed by finding
|
||||
* a property called "max-link-speed" of the given device node.
|
||||
*
|
||||
* @node: device tree node with the max link speed information
|
||||
*
|
||||
* Returns the associated max link speed from DT, or a negative value if the
|
||||
* required property is not found or is invalid.
|
||||
*/
|
||||
int of_pci_get_max_link_speed(struct device_node *node)
|
||||
{
|
||||
u32 max_link_speed;
|
||||
|
||||
if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
|
||||
max_link_speed > 4)
|
||||
return -EINVAL;
|
||||
|
||||
return max_link_speed;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
|
||||
|
||||
/**
|
||||
* of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
|
||||
* is present and valid
|
||||
*/
|
||||
void of_pci_check_probe_only(void)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
|
||||
if (ret) {
|
||||
if (ret == -ENODATA || ret == -EOVERFLOW)
|
||||
pr_warn("linux,pci-probe-only without valid value, ignoring\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (val)
|
||||
pci_add_flags(PCI_PROBE_ONLY);
|
||||
else
|
||||
pci_clear_flags(PCI_PROBE_ONLY);
|
||||
|
||||
pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
|
||||
|
||||
#if defined(CONFIG_OF_ADDRESS)
|
||||
/**
|
||||
* of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
|
||||
* @dev: device node of the host bridge having the range property
|
||||
* @busno: bus number associated with the bridge root bus
|
||||
* @bus_max: maximum number of buses for this bridge
|
||||
* @resources: list where the range of resources will be added after DT parsing
|
||||
* @io_base: pointer to a variable that will contain on return the physical
|
||||
* address for the start of the I/O range. Can be NULL if the caller doesn't
|
||||
* expect IO ranges to be present in the device tree.
|
||||
*
|
||||
* It is the caller's job to free the @resources list.
|
||||
*
|
||||
* This function will parse the "ranges" property of a PCI host bridge device
|
||||
* node and setup the resource mapping based on its content. It is expected
|
||||
* that the property conforms with the Power ePAPR document.
|
||||
*
|
||||
* It returns zero if the range parsing has been successful or a standard error
|
||||
* value if it failed.
|
||||
*/
|
||||
int of_pci_get_host_bridge_resources(struct device_node *dev,
|
||||
unsigned char busno, unsigned char bus_max,
|
||||
struct list_head *resources, resource_size_t *io_base)
|
||||
{
|
||||
struct resource_entry *window;
|
||||
struct resource *res;
|
||||
struct resource *bus_range;
|
||||
struct of_pci_range range;
|
||||
struct of_pci_range_parser parser;
|
||||
char range_type[4];
|
||||
int err;
|
||||
|
||||
if (io_base)
|
||||
*io_base = (resource_size_t)OF_BAD_ADDR;
|
||||
|
||||
bus_range = kzalloc(sizeof(*bus_range), GFP_KERNEL);
|
||||
if (!bus_range)
|
||||
return -ENOMEM;
|
||||
|
||||
pr_info("host bridge %pOF ranges:\n", dev);
|
||||
|
||||
err = of_pci_parse_bus_range(dev, bus_range);
|
||||
if (err) {
|
||||
bus_range->start = busno;
|
||||
bus_range->end = bus_max;
|
||||
bus_range->flags = IORESOURCE_BUS;
|
||||
pr_info(" No bus range found for %pOF, using %pR\n",
|
||||
dev, bus_range);
|
||||
} else {
|
||||
if (bus_range->end > bus_range->start + bus_max)
|
||||
bus_range->end = bus_range->start + bus_max;
|
||||
}
|
||||
pci_add_resource(resources, bus_range);
|
||||
|
||||
/* Check for ranges property */
|
||||
err = of_pci_range_parser_init(&parser, dev);
|
||||
if (err)
|
||||
goto parse_failed;
|
||||
|
||||
pr_debug("Parsing ranges property...\n");
|
||||
for_each_of_pci_range(&parser, &range) {
|
||||
/* Read next ranges element */
|
||||
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
|
||||
snprintf(range_type, 4, " IO");
|
||||
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
|
||||
snprintf(range_type, 4, "MEM");
|
||||
else
|
||||
snprintf(range_type, 4, "err");
|
||||
pr_info(" %s %#010llx..%#010llx -> %#010llx\n", range_type,
|
||||
range.cpu_addr, range.cpu_addr + range.size - 1,
|
||||
range.pci_addr);
|
||||
|
||||
/*
|
||||
* If we failed translation or got a zero-sized region
|
||||
* then skip this range
|
||||
*/
|
||||
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
|
||||
continue;
|
||||
|
||||
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
|
||||
if (!res) {
|
||||
err = -ENOMEM;
|
||||
goto parse_failed;
|
||||
}
|
||||
|
||||
err = of_pci_range_to_resource(&range, dev, res);
|
||||
if (err) {
|
||||
kfree(res);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (resource_type(res) == IORESOURCE_IO) {
|
||||
if (!io_base) {
|
||||
pr_err("I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
|
||||
dev);
|
||||
err = -EINVAL;
|
||||
goto conversion_failed;
|
||||
}
|
||||
if (*io_base != (resource_size_t)OF_BAD_ADDR)
|
||||
pr_warn("More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
|
||||
dev);
|
||||
*io_base = range.cpu_addr;
|
||||
}
|
||||
|
||||
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
conversion_failed:
|
||||
kfree(res);
|
||||
parse_failed:
|
||||
resource_list_for_each_entry(window, resources)
|
||||
kfree(window->res);
|
||||
pci_free_resource_list(resources);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
|
||||
#endif /* CONFIG_OF_ADDRESS */
|
||||
|
||||
/**
|
||||
* of_pci_map_rid - Translate a requester ID through a downstream mapping.
|
||||
* @np: root complex device node.
|
||||
* @rid: PCI requester ID to map.
|
||||
* @map_name: property name of the map to use.
|
||||
* @map_mask_name: optional property name of the mask to use.
|
||||
* @target: optional pointer to a target device node.
|
||||
* @id_out: optional pointer to receive the translated ID.
|
||||
*
|
||||
* Given a PCI requester ID, look up the appropriate implementation-defined
|
||||
* platform ID and/or the target device which receives transactions on that
|
||||
* ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
|
||||
* @id_out may be NULL if only the other is required. If @target points to
|
||||
* a non-NULL device node pointer, only entries targeting that node will be
|
||||
* matched; if it points to a NULL value, it will receive the device node of
|
||||
* the first matching target phandle, with a reference held.
|
||||
*
|
||||
* Return: 0 on success or a standard error code on failure.
|
||||
*/
|
||||
int of_pci_map_rid(struct device_node *np, u32 rid,
|
||||
const char *map_name, const char *map_mask_name,
|
||||
struct device_node **target, u32 *id_out)
|
||||
{
|
||||
u32 map_mask, masked_rid;
|
||||
int map_len;
|
||||
const __be32 *map = NULL;
|
||||
|
||||
if (!np || !map_name || (!target && !id_out))
|
||||
return -EINVAL;
|
||||
|
||||
map = of_get_property(np, map_name, &map_len);
|
||||
if (!map) {
|
||||
if (target)
|
||||
return -ENODEV;
|
||||
/* Otherwise, no map implies no translation */
|
||||
*id_out = rid;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!map_len || map_len % (4 * sizeof(*map))) {
|
||||
pr_err("%pOF: Error: Bad %s length: %d\n", np,
|
||||
map_name, map_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* The default is to select all bits. */
|
||||
map_mask = 0xffffffff;
|
||||
|
||||
/*
|
||||
* Can be overridden by "{iommu,msi}-map-mask" property.
|
||||
* If of_property_read_u32() fails, the default is used.
|
||||
*/
|
||||
if (map_mask_name)
|
||||
of_property_read_u32(np, map_mask_name, &map_mask);
|
||||
|
||||
masked_rid = map_mask & rid;
|
||||
for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
|
||||
struct device_node *phandle_node;
|
||||
u32 rid_base = be32_to_cpup(map + 0);
|
||||
u32 phandle = be32_to_cpup(map + 1);
|
||||
u32 out_base = be32_to_cpup(map + 2);
|
||||
u32 rid_len = be32_to_cpup(map + 3);
|
||||
|
||||
if (rid_base & ~map_mask) {
|
||||
pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
|
||||
np, map_name, map_name,
|
||||
map_mask, rid_base);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
|
||||
continue;
|
||||
|
||||
phandle_node = of_find_node_by_phandle(phandle);
|
||||
if (!phandle_node)
|
||||
return -ENODEV;
|
||||
|
||||
if (target) {
|
||||
if (*target)
|
||||
of_node_put(phandle_node);
|
||||
else
|
||||
*target = phandle_node;
|
||||
|
||||
if (*target != phandle_node)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (id_out)
|
||||
*id_out = masked_rid - rid_base + out_base;
|
||||
|
||||
pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
|
||||
np, map_name, map_mask, rid_base, out_base,
|
||||
rid_len, rid, masked_rid - rid_base + out_base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
|
||||
np, map_name, rid, target && *target ? *target : NULL);
|
||||
return -EFAULT;
|
||||
}
|
|
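A worked trace may help with the arithmetic in of_pci_map_rid() above. Suppose a hypothetical msi-map entry <rid_base phandle out_base length> of <0x0100 &its 0x10000 0x100> with msi-map-mask 0xffff, and an incoming requester ID of 0x0123:

	/* masked_rid = 0xffff & 0x0123            = 0x0123 */
	/* in range:    0x0100 <= 0x0123 < 0x0200           */
	/* *id_out    = 0x0123 - 0x0100 + 0x10000  = 0x10023 */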
@@ -1,131 +0,0 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/of_pci.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
/**
|
||||
* of_irq_parse_pci - Resolve the interrupt for a PCI device
|
||||
* @pdev: the device whose interrupt is to be resolved
|
||||
* @out_irq: structure of_irq filled by this function
|
||||
*
|
||||
* This function resolves the PCI interrupt for a given PCI device. If a
|
||||
* device-node exists for a given pci_dev, it will use normal OF tree
|
||||
* walking. If not, it will implement standard swizzling and walk up the
|
||||
* PCI tree until an device-node is found, at which point it will finish
|
||||
* resolving using the OF tree walking.
|
||||
*/
|
||||
int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
|
||||
{
|
||||
struct device_node *dn, *ppnode;
|
||||
struct pci_dev *ppdev;
|
||||
__be32 laddr[3];
|
||||
u8 pin;
|
||||
int rc;
|
||||
|
||||
/* Check if we have a device node, if yes, fallback to standard
|
||||
* device tree parsing
|
||||
*/
|
||||
dn = pci_device_to_OF_node(pdev);
|
||||
if (dn) {
|
||||
rc = of_irq_parse_one(dn, 0, out_irq);
|
||||
if (!rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Ok, we don't, time to have fun. Let's start by building up an
|
||||
* interrupt spec. we assume #interrupt-cells is 1, which is standard
|
||||
* for PCI. If you do different, then don't use that routine.
|
||||
*/
|
||||
rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
|
||||
if (rc != 0)
|
||||
goto err;
|
||||
/* No pin, exit with no error message. */
|
||||
if (pin == 0)
|
||||
return -ENODEV;
|
||||
|
||||
/* Now we walk up the PCI tree */
|
||||
for (;;) {
|
||||
/* Get the pci_dev of our parent */
|
||||
ppdev = pdev->bus->self;
|
||||
|
||||
/* Ouch, it's a host bridge... */
|
||||
if (ppdev == NULL) {
|
||||
ppnode = pci_bus_to_OF_node(pdev->bus);
|
||||
|
||||
/* No node for host bridge ? give up */
|
||||
if (ppnode == NULL) {
|
||||
rc = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
} else {
|
||||
/* We found a P2P bridge, check if it has a node */
|
||||
ppnode = pci_device_to_OF_node(ppdev);
|
||||
}
|
||||
|
||||
/* Ok, we have found a parent with a device-node, hand over to
|
||||
* the OF parsing code.
|
||||
* We build a unit address from the linux device to be used for
|
||||
* resolution. Note that we use the linux bus number which may
|
||||
* not match your firmware bus numbering.
|
||||
* Fortunately, in most cases, interrupt-map-mask doesn't
|
||||
* include the bus number as part of the matching.
|
||||
* You should still be careful about that though if you intend
|
||||
* to rely on this function (you ship a firmware that doesn't
|
||||
* create device nodes for all PCI devices).
|
||||
*/
|
||||
if (ppnode)
|
||||
break;
|
||||
|
||||
/* We can only get here if we hit a P2P bridge with no node,
|
||||
* let's do standard swizzling and try again
|
||||
*/
|
||||
pin = pci_swizzle_interrupt_pin(pdev, pin);
|
||||
pdev = ppdev;
|
||||
}
|
||||
|
||||
out_irq->np = ppnode;
|
||||
out_irq->args_count = 1;
|
||||
out_irq->args[0] = pin;
|
||||
laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
|
||||
laddr[1] = laddr[2] = cpu_to_be32(0);
|
||||
rc = of_irq_parse_raw(laddr, out_irq);
|
||||
if (rc)
|
||||
goto err;
|
||||
return 0;
|
||||
err:
|
||||
if (rc == -ENOENT) {
|
||||
dev_warn(&pdev->dev,
|
||||
"%s: no interrupt-map found, INTx interrupts not available\n",
|
||||
__func__);
|
||||
pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
|
||||
__func__);
|
||||
} else {
|
||||
dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_irq_parse_pci);
|
||||
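The swizzle step in the loop above is the conventional INTx rotation for devices behind a bridge that has no interrupt-map of its own; pci_swizzle_interrupt_pin() encapsulates it. A sketch of the equivalent computation, where pins are the 1-based INTA..INTD values:

	/* equivalent of pci_swizzle_interrupt_pin(pdev, pin) */
	u8 new_pin = (((pin - 1) + PCI_SLOT(pdev->devfn)) % 4) + 1;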
|
||||
/**
|
||||
* of_irq_parse_and_map_pci() - Decode a PCI irq from the device tree and map to a virq
|
||||
* @dev: The pci device needing an irq
|
||||
* @slot: PCI slot number; passed when used as map_irq callback. Unused
|
||||
* @pin: PCI irq pin number; passed when used as map_irq callback. Unused
|
||||
*
|
||||
* @slot and @pin are unused, but included in the function so that this
|
||||
* function can be used directly as the map_irq callback to
|
||||
* pci_assign_irq() and struct pci_host_bridge.map_irq pointer
|
||||
*/
|
||||
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
|
||||
{
|
||||
struct of_phandle_args oirq;
|
||||
int ret;
|
||||
|
||||
ret = of_irq_parse_pci(dev, &oirq);
|
||||
if (ret)
|
||||
return 0; /* Proper return code 0 == NO_IRQ */
|
||||
|
||||
return irq_create_of_mapping(&oirq);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
|
||||
|
|
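of_irq_parse_and_map_pci() is mostly consumed through struct pci_host_bridge, as the Cadence host driver added later in this series does. A minimal hook-up sketch (bridge setup abbreviated):

	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;
	/* pci_assign_irq() then invokes map_irq for each device at probe time */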
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# PCI configuration
#

@@ -125,6 +126,7 @@ config PCI_PASID

config PCI_LABEL
def_bool y if (DMI || ACPI)
+depends on PCI
select NLS

config PCI_HYPERV

@@ -135,6 +137,7 @@ config PCI_HYPERV
PCI devices from a PCI backend to support PCI driver domains.

source "drivers/pci/hotplug/Kconfig"
+source "drivers/pci/cadence/Kconfig"
source "drivers/pci/dwc/Kconfig"
source "drivers/pci/host/Kconfig"
source "drivers/pci/endpoint/Kconfig"
@@ -3,12 +3,15 @@
# Makefile for the PCI bus specific drivers.
#

-obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
+obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o

+ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_OF) += of.o
+endif

obj-$(CONFIG_PCI_QUIRKS) += quirks.o

@@ -44,10 +47,15 @@ obj-$(CONFIG_PCI_ECAM) += ecam.o

obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o

-obj-$(CONFIG_OF) += of.o
-
ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG

# PCI host controller drivers
obj-y += host/
obj-y += switch/

+obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
+
+# Endpoint library must be initialized before its users
+obj-$(CONFIG_PCIE_CADENCE) += cadence/
+# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
+obj-y += dwc/
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>

@@ -333,8 +334,7 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
(tag == PCI_VPD_LTIN_RW_DATA)) {
if (pci_read_vpd(dev, off+1, 2,
&header[1]) != 2) {
-dev_warn(&dev->dev,
-"invalid large VPD tag %02x size at offset %zu",
+pci_warn(dev, "invalid large VPD tag %02x size at offset %zu",
tag, off + 1);
return 0;
}

@@ -354,8 +354,7 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
if ((tag != PCI_VPD_LTIN_ID_STRING) &&
(tag != PCI_VPD_LTIN_RO_DATA) &&
(tag != PCI_VPD_LTIN_RW_DATA)) {
-dev_warn(&dev->dev,
-"invalid %s VPD tag %02x at offset %zu",
+pci_warn(dev, "invalid %s VPD tag %02x at offset %zu",
(header[0] & PCI_VPD_LRDT) ? "large" : "short",
tag, off);
return 0;

@@ -402,7 +401,7 @@ static int pci_vpd_wait(struct pci_dev *dev)
max_sleep *= 2;
}

-dev_warn(&dev->dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
+pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
return -ETIMEDOUT;
}
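The dev_warn() to pci_warn() conversions above use the new PCI-specific logging wrappers mentioned in the merge summary; conceptually they just forward to the dev_* helpers through &pdev->dev. A simplified sketch of the idea (the in-tree definitions live in include/linux/pci.h and may differ in detail):

#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)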
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/pci/ats.c
 *
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/pci/bus.c
 *

@@ -289,7 +290,7 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
res->end = end;
res->flags &= ~IORESOURCE_UNSET;
orig_res.flags &= ~IORESOURCE_UNSET;
-dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+pci_printk(KERN_DEBUG, dev, "%pR clipped to %pR\n",
&orig_res, res);

return true;

@@ -325,7 +326,7 @@ void pci_bus_add_device(struct pci_dev *dev)
dev->match_driver = true;
retval = device_attach(&dev->dev);
if (retval < 0 && retval != -EPROBE_DEFER) {
-dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
+pci_warn(dev, "device attach failed (%d)\n", retval);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
return;
@@ -0,0 +1,27 @@
|
|||
menu "Cadence PCIe controllers support"
|
||||
|
||||
config PCIE_CADENCE
|
||||
bool
|
||||
|
||||
config PCIE_CADENCE_HOST
|
||||
bool "Cadence PCIe host controller"
|
||||
depends on OF
|
||||
depends on PCI
|
||||
select IRQ_DOMAIN
|
||||
select PCIE_CADENCE
|
||||
help
|
||||
Say Y here if you want to support the Cadence PCIe controller in host
|
||||
mode. This PCIe controller may be embedded into many different vendors
|
||||
SoCs.
|
||||
|
||||
config PCIE_CADENCE_EP
|
||||
bool "Cadence PCIe endpoint controller"
|
||||
depends on OF
|
||||
depends on PCI_ENDPOINT
|
||||
select PCIE_CADENCE
|
||||
help
|
||||
Say Y here if you want to support the Cadence PCIe controller in
|
||||
endpoint mode. This PCIe controller may be embedded into many
|
||||
different vendors SoCs.
|
||||
|
||||
endmenu
|
|
@@ -0,0 +1,4 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
|
||||
obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
|
||||
obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
|
|
@@ -0,0 +1,542 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2017 Cadence
|
||||
// Cadence PCIe endpoint controller driver.
|
||||
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/pci-epc.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/sizes.h>
|
||||
|
||||
#include "pcie-cadence.h"
|
||||
|
||||
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
|
||||
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
|
||||
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
|
||||
|
||||
/**
|
||||
* struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
|
||||
* @pcie: Cadence PCIe controller
|
||||
* @max_regions: maximum number of regions supported by hardware
|
||||
* @ob_region_map: bitmask of mapped outbound regions
|
||||
* @ob_addr: base addresses in the AXI bus where the outbound regions start
|
||||
* @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
|
||||
* dedicated outbound regions is mapped.
|
||||
* @irq_cpu_addr: base address in the CPU space where a write access triggers
|
||||
* the sending of a memory write (MSI) / normal message (legacy
|
||||
* IRQ) TLP through the PCIe bus.
|
||||
* @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
|
||||
* dedicated outbound region.
|
||||
* @irq_pci_fn: the latest PCI function that has updated the mapping of
|
||||
* the MSI/legacy IRQ dedicated outbound region.
|
||||
* @irq_pending: bitmask of asserted legacy IRQs.
|
||||
*/
|
||||
struct cdns_pcie_ep {
|
||||
struct cdns_pcie pcie;
|
||||
u32 max_regions;
|
||||
unsigned long ob_region_map;
|
||||
phys_addr_t *ob_addr;
|
||||
phys_addr_t irq_phys_addr;
|
||||
void __iomem *irq_cpu_addr;
|
||||
u64 irq_pci_addr;
|
||||
u8 irq_pci_fn;
|
||||
u8 irq_pending;
|
||||
};
|
||||
|
||||
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
|
||||
struct pci_epf_header *hdr)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
|
||||
cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
|
||||
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
|
||||
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
|
||||
cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
|
||||
hdr->subclass_code | hdr->baseclass_code << 8);
|
||||
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
|
||||
hdr->cache_line_size);
|
||||
cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
|
||||
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);
|
||||
|
||||
/*
|
||||
* Vendor ID can only be modified from function 0, all other functions
|
||||
* use the same vendor ID as function 0.
|
||||
*/
|
||||
if (fn == 0) {
|
||||
/* Update the vendor IDs. */
|
||||
u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
|
||||
CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
|
||||
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, enum pci_barno bar,
|
||||
dma_addr_t bar_phys, size_t size, int flags)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
|
||||
u64 sz;
|
||||
|
||||
/* BAR size is 2^(aperture + 7) */
|
||||
sz = max_t(size_t, size, CDNS_PCIE_EP_MIN_APERTURE);
|
||||
/*
|
||||
* roundup_pow_of_two() returns an unsigned long, which is not suited
|
||||
* for 64bit values.
|
||||
*/
|
||||
sz = 1ULL << fls64(sz - 1);
|
||||
aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
|
||||
|
||||
if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
|
||||
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
|
||||
} else {
|
||||
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
|
||||
bool is_64bits = sz > SZ_2G;
|
||||
|
||||
if (is_64bits && (bar & 1))
|
||||
return -EINVAL;
|
||||
|
||||
if (is_64bits && is_prefetch)
|
||||
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
|
||||
else if (is_prefetch)
|
||||
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
|
||||
else if (is_64bits)
|
||||
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
|
||||
else
|
||||
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
|
||||
}
|
||||
|
||||
addr0 = lower_32_bits(bar_phys);
|
||||
addr1 = upper_32_bits(bar_phys);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
|
||||
addr0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
|
||||
addr1);
|
||||
|
||||
if (bar < BAR_4) {
|
||||
reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
|
||||
b = bar;
|
||||
} else {
|
||||
reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
|
||||
b = bar - BAR_4;
|
||||
}
|
||||
|
||||
cfg = cdns_pcie_readl(pcie, reg);
|
||||
cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
|
||||
CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
|
||||
cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
|
||||
CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
|
||||
cdns_pcie_writel(pcie, reg, cfg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
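The aperture encoding in cdns_pcie_ep_set_bar() above is log2(size) - 7, with the size rounded up to a power of two and clamped to the 128-byte minimum. A worked example for a hypothetical 3 MiB BAR request, shown only to illustrate the rounding:

	/* size = 3 MiB (hypothetical request) */
	u64 sz = 1ULL << fls64(0x300000 - 1);	/* rounds up to 4 MiB */
	u32 aperture = ilog2(sz) - 7;		/* 22 - 7 = 15 */
	/* the controller then decodes 2^(15 + 7) = 4 MiB for this BAR */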
|
||||
static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
|
||||
enum pci_barno bar)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
u32 reg, cfg, b, ctrl;
|
||||
|
||||
if (bar < BAR_4) {
|
||||
reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
|
||||
b = bar;
|
||||
} else {
|
||||
reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
|
||||
b = bar - BAR_4;
|
||||
}
|
||||
|
||||
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
|
||||
cfg = cdns_pcie_readl(pcie, reg);
|
||||
cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
|
||||
CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
|
||||
cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
|
||||
cdns_pcie_writel(pcie, reg, cfg);
|
||||
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
|
||||
}
|
||||
|
||||
static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
|
||||
u64 pci_addr, size_t size)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
u32 r;
|
||||
|
||||
r = find_first_zero_bit(&ep->ob_region_map,
|
||||
sizeof(ep->ob_region_map) * BITS_PER_LONG);
|
||||
if (r >= ep->max_regions - 1) {
|
||||
dev_err(&epc->dev, "no free outbound region\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);
|
||||
|
||||
set_bit(r, &ep->ob_region_map);
|
||||
ep->ob_addr[r] = addr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
|
||||
phys_addr_t addr)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
u32 r;
|
||||
|
||||
for (r = 0; r < ep->max_regions - 1; r++)
|
||||
if (ep->ob_addr[r] == addr)
|
||||
break;
|
||||
|
||||
if (r == ep->max_regions - 1)
|
||||
return;
|
||||
|
||||
cdns_pcie_reset_outbound_region(pcie, r);
|
||||
|
||||
ep->ob_addr[r] = 0;
|
||||
clear_bit(r, &ep->ob_region_map);
|
||||
}
|
||||
|
||||
static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
|
||||
u16 flags;
|
||||
|
||||
/*
|
||||
* Set the Multiple Message Capable bitfield into the Message Control
|
||||
* register.
|
||||
*/
|
||||
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
|
||||
flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
|
||||
flags |= PCI_MSI_FLAGS_64BIT;
|
||||
flags &= ~PCI_MSI_FLAGS_MASKBIT;
|
||||
cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
|
||||
u16 flags, mmc, mme;
|
||||
|
||||
/* Validate that the MSI feature is actually enabled. */
|
||||
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
|
||||
if (!(flags & PCI_MSI_FLAGS_ENABLE))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Get the Multiple Message Enable bitfield from the Message Control
|
||||
* register.
|
||||
*/
|
||||
mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
|
||||
mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
|
||||
|
||||
return mme;
|
||||
}
|
||||
|
||||
static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
|
||||
u8 intx, bool is_asserted)
|
||||
{
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
u32 r = ep->max_regions - 1;
|
||||
u32 offset;
|
||||
u16 status;
|
||||
u8 msg_code;
|
||||
|
||||
intx &= 3;
|
||||
|
||||
/* Set the outbound region if needed. */
|
||||
if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
|
||||
ep->irq_pci_fn != fn)) {
|
||||
/* Last region was reserved for IRQ writes. */
|
||||
cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
|
||||
ep->irq_phys_addr);
|
||||
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
|
||||
ep->irq_pci_fn = fn;
|
||||
}
|
||||
|
||||
if (is_asserted) {
|
||||
ep->irq_pending |= BIT(intx);
|
||||
msg_code = MSG_CODE_ASSERT_INTA + intx;
|
||||
} else {
|
||||
ep->irq_pending &= ~BIT(intx);
|
||||
msg_code = MSG_CODE_DEASSERT_INTA + intx;
|
||||
}
|
||||
|
||||
status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
|
||||
if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
|
||||
status ^= PCI_STATUS_INTERRUPT;
|
||||
cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
|
||||
}
|
||||
|
||||
offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
|
||||
CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
|
||||
CDNS_PCIE_MSG_NO_DATA;
|
||||
writel(0, ep->irq_cpu_addr + offset);
|
||||
}
|
||||
|
||||
static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
|
||||
{
|
||||
u16 cmd;
|
||||
|
||||
cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
|
||||
if (cmd & PCI_COMMAND_INTX_DISABLE)
|
||||
return -EINVAL;
|
||||
|
||||
cdns_pcie_ep_assert_intx(ep, fn, intx, true);
|
||||
/*
|
||||
* The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
|
||||
* from drivers/pci/dwc/pci-dra7xx.c
|
||||
*/
|
||||
mdelay(1);
|
||||
cdns_pcie_ep_assert_intx(ep, fn, intx, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
|
||||
u8 interrupt_num)
|
||||
{
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
|
||||
u16 flags, mme, data, data_mask;
|
||||
u8 msi_count;
|
||||
u64 pci_addr, pci_addr_mask = 0xff;
|
||||
|
||||
/* Check whether the MSI feature has been enabled by the PCI host. */
|
||||
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
|
||||
if (!(flags & PCI_MSI_FLAGS_ENABLE))
|
||||
return -EINVAL;
|
||||
|
||||
/* Get the number of enabled MSIs */
|
||||
mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
|
||||
msi_count = 1 << mme;
|
||||
if (!interrupt_num || interrupt_num > msi_count)
|
||||
return -EINVAL;
|
||||
|
||||
/* Compute the data value to be written. */
|
||||
data_mask = msi_count - 1;
|
||||
data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
|
||||
data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
|
||||
|
||||
/* Get the PCI address where to write the data into. */
|
||||
pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
|
||||
pci_addr <<= 32;
|
||||
pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
|
||||
pci_addr &= GENMASK_ULL(63, 2);
|
||||
|
||||
/* Set the outbound region if needed. */
|
||||
if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
|
||||
ep->irq_pci_fn != fn)) {
|
||||
/* Last region was reserved for IRQ writes. */
|
||||
cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
|
||||
false,
|
||||
ep->irq_phys_addr,
|
||||
pci_addr & ~pci_addr_mask,
|
||||
pci_addr_mask + 1);
|
||||
ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
|
||||
ep->irq_pci_fn = fn;
|
||||
}
|
||||
writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
|
||||
|
||||
return 0;
|
||||
}
|
||||
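A short worked trace of the MSI math in cdns_pcie_ep_send_msi_irq() above, assuming the host enabled four vectors (MME = 2) and the function is asked to raise interrupt_num = 3; both values are hypothetical:

	/* mme = 2           -> msi_count = 1 << 2 = 4, data_mask = 3     */
	/* interrupt_num = 3 -> data = (data & ~3) | ((3 - 1) & 3)        */
	/*                      so the low bits of Message Data become 2  */
	/* the writew() then lands at the host-programmed Message Address; */
	/* only the low 8 bits travel as an offset inside the dedicated    */
	/* outbound window (pci_addr_mask = 0xff)                          */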
|
||||
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
|
||||
enum pci_epc_irq_type type, u8 interrupt_num)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
|
||||
switch (type) {
|
||||
case PCI_EPC_IRQ_LEGACY:
|
||||
return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
|
||||
|
||||
case PCI_EPC_IRQ_MSI:
|
||||
return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int cdns_pcie_ep_start(struct pci_epc *epc)
|
||||
{
|
||||
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
|
||||
struct cdns_pcie *pcie = &ep->pcie;
|
||||
struct pci_epf *epf;
|
||||
u32 cfg;
|
||||
|
||||
/*
|
||||
* BIT(0) is hardwired to 1, hence function 0 is always enabled
|
||||
* and can't be disabled anyway.
|
||||
*/
|
||||
cfg = BIT(0);
|
||||
list_for_each_entry(epf, &epc->pci_epf, list)
|
||||
cfg |= BIT(epf->func_no);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
|
||||
|
||||
/*
|
||||
* The PCIe links are automatically established by the controller
|
||||
* once for all at powerup: the software can neither start nor stop
|
||||
* those links later at runtime.
|
||||
*
|
||||
* Then we only have to notify the EP core that our links are already
|
||||
* established. However we don't call directly pci_epc_linkup() because
|
||||
* we've already locked the epc->lock.
|
||||
*/
|
||||
list_for_each_entry(epf, &epc->pci_epf, list)
|
||||
pci_epf_linkup(epf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct pci_epc_ops cdns_pcie_epc_ops = {
|
||||
.write_header = cdns_pcie_ep_write_header,
|
||||
.set_bar = cdns_pcie_ep_set_bar,
|
||||
.clear_bar = cdns_pcie_ep_clear_bar,
|
||||
.map_addr = cdns_pcie_ep_map_addr,
|
||||
.unmap_addr = cdns_pcie_ep_unmap_addr,
|
||||
.set_msi = cdns_pcie_ep_set_msi,
|
||||
.get_msi = cdns_pcie_ep_get_msi,
|
||||
.raise_irq = cdns_pcie_ep_raise_irq,
|
||||
.start = cdns_pcie_ep_start,
|
||||
};
|
||||
|
||||
static const struct of_device_id cdns_pcie_ep_of_match[] = {
|
||||
{ .compatible = "cdns,cdns-pcie-ep" },
|
||||
|
||||
{ },
|
||||
};
|
||||
|
||||
static int cdns_pcie_ep_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
struct cdns_pcie_ep *ep;
|
||||
struct cdns_pcie *pcie;
|
||||
struct pci_epc *epc;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
|
||||
if (!ep)
|
||||
return -ENOMEM;
|
||||
|
||||
pcie = &ep->pcie;
|
||||
pcie->is_rc = false;
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
|
||||
pcie->reg_base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(pcie->reg_base)) {
|
||||
dev_err(dev, "missing \"reg\"\n");
|
||||
return PTR_ERR(pcie->reg_base);
|
||||
}
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
|
||||
if (!res) {
|
||||
dev_err(dev, "missing \"mem\"\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcie->mem_res = res;
|
||||
|
||||
ret = of_property_read_u32(np, "cdns,max-outbound-regions",
|
||||
&ep->max_regions);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
|
||||
return ret;
|
||||
}
|
||||
ep->ob_addr = devm_kzalloc(dev, ep->max_regions * sizeof(*ep->ob_addr),
|
||||
GFP_KERNEL);
|
||||
if (!ep->ob_addr)
|
||||
return -ENOMEM;
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "pm_runtime_get_sync() failed\n");
|
||||
goto err_get_sync;
|
||||
}
|
||||
|
||||
/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
|
||||
|
||||
epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
|
||||
if (IS_ERR(epc)) {
|
||||
dev_err(dev, "failed to create epc device\n");
|
||||
ret = PTR_ERR(epc);
|
||||
goto err_init;
|
||||
}
|
||||
|
||||
epc_set_drvdata(epc, ep);
|
||||
|
||||
if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
|
||||
epc->max_functions = 1;
|
||||
|
||||
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
|
||||
resource_size(pcie->mem_res));
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to initialize the memory space\n");
|
||||
goto err_init;
|
||||
}
|
||||
|
||||
ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
|
||||
SZ_128K);
|
||||
if (!ep->irq_cpu_addr) {
|
||||
dev_err(dev, "failed to reserve memory space for MSI\n");
|
||||
ret = -ENOMEM;
|
||||
goto free_epc_mem;
|
||||
}
|
||||
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
|
||||
|
||||
return 0;
|
||||
|
||||
free_epc_mem:
|
||||
pci_epc_mem_exit(epc);
|
||||
|
||||
err_init:
|
||||
pm_runtime_put_sync(dev);
|
||||
|
||||
err_get_sync:
|
||||
pm_runtime_disable(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
int ret;
|
||||
|
||||
ret = pm_runtime_put_sync(dev);
|
||||
if (ret < 0)
|
||||
dev_dbg(dev, "pm_runtime_put_sync failed\n");
|
||||
|
||||
pm_runtime_disable(dev);
|
||||
|
||||
/* The PCIe controller can't be disabled. */
|
||||
}
|
||||
|
||||
static struct platform_driver cdns_pcie_ep_driver = {
|
||||
.driver = {
|
||||
.name = "cdns-pcie-ep",
|
||||
.of_match_table = cdns_pcie_ep_of_match,
|
||||
},
|
||||
.probe = cdns_pcie_ep_probe,
|
||||
.shutdown = cdns_pcie_ep_shutdown,
|
||||
};
|
||||
builtin_platform_driver(cdns_pcie_ep_driver);
|
|
@@ -0,0 +1,336 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2017 Cadence
|
||||
// Cadence PCIe host controller driver.
|
||||
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_pci.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include "pcie-cadence.h"
|
||||
|
||||
/**
|
||||
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
|
||||
* @pcie: Cadence PCIe controller
|
||||
* @dev: pointer to PCIe device
|
||||
* @cfg_res: start/end offsets in the physical system memory to map PCI
|
||||
* configuration space accesses
|
||||
* @bus_range: first/last buses behind the PCIe host controller
|
||||
* @cfg_base: IO mapped window to access the PCI configuration space of a
|
||||
* single function at a time
|
||||
* @max_regions: maximum number of regions supported by the hardware
|
||||
* @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
|
||||
* translation (nbits sets into the "no BAR match" register)
|
||||
* @vendor_id: PCI vendor ID
|
||||
* @device_id: PCI device ID
|
||||
*/
|
||||
struct cdns_pcie_rc {
|
||||
struct cdns_pcie pcie;
|
||||
struct device *dev;
|
||||
struct resource *cfg_res;
|
||||
struct resource *bus_range;
|
||||
void __iomem *cfg_base;
|
||||
u32 max_regions;
|
||||
u32 no_bar_nbits;
|
||||
u16 vendor_id;
|
||||
u16 device_id;
|
||||
};
|
||||
|
||||
static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
|
||||
int where)
|
||||
{
|
||||
struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
|
||||
struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
|
||||
struct cdns_pcie *pcie = &rc->pcie;
|
||||
unsigned int busn = bus->number;
|
||||
u32 addr0, desc0;
|
||||
|
||||
if (busn == rc->bus_range->start) {
|
||||
/*
|
||||
* Only the root port (devfn == 0) is connected to this bus.
|
||||
* All other PCI devices are behind some bridge hence on another
|
||||
* bus.
|
||||
*/
|
||||
if (devfn)
|
||||
return NULL;
|
||||
|
||||
return pcie->reg_base + (where & 0xfff);
|
||||
}
|
||||
|
||||
/* Update Output registers for AXI region 0. */
|
||||
addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
|
||||
CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
|
||||
CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);
|
||||
|
||||
/* Configuration Type 0 or Type 1 access. */
|
||||
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
|
||||
CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
|
||||
/*
|
||||
* The bus number was already set once for all in desc1 by
|
||||
* cdns_pcie_host_init_address_translation().
|
||||
*/
|
||||
if (busn == rc->bus_range->start + 1)
|
||||
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
|
||||
else
|
||||
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);
|
||||
|
||||
return rc->cfg_base + (where & 0xfff);
|
||||
}
|
||||
|
||||
static struct pci_ops cdns_pcie_host_ops = {
|
||||
.map_bus = cdns_pci_map_bus,
|
||||
.read = pci_generic_config_read,
|
||||
.write = pci_generic_config_write,
|
||||
};
|
||||
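The host driver leans on the generic configuration accessors: pci_generic_config_read()/write() call ->map_bus() to turn (bus, devfn, where) into a virtual address and then perform a plain MMIO access of the requested width. A simplified sketch of the read side under that assumption, not the exact in-tree implementation:

static int sketch_config_read(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr = bus->ops->map_bus(bus, devfn, where);

	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}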
|
||||
static const struct of_device_id cdns_pcie_host_of_match[] = {
|
||||
{ .compatible = "cdns,cdns-pcie-host" },
|
||||
|
||||
{ },
|
||||
};
|
||||
|
||||
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
|
||||
{
|
||||
struct cdns_pcie *pcie = &rc->pcie;
|
||||
u32 value, ctrl;
|
||||
|
||||
/*
|
||||
* Set the root complex BAR configuration register:
|
||||
* - disable both BAR0 and BAR1.
|
||||
* - enable Prefetchable Memory Base and Limit registers in type 1
|
||||
* config space (64 bits).
|
||||
* - enable IO Base and Limit registers in type 1 config
|
||||
* space (32 bits).
|
||||
*/
|
||||
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
|
||||
value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
|
||||
CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
|
||||
CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
|
||||
CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
|
||||
CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
|
||||
CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
|
||||
|
||||
/* Set root port configuration space */
|
||||
if (rc->vendor_id != 0xffff)
|
||||
cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
|
||||
if (rc->device_id != 0xffff)
|
||||
cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
|
||||
|
||||
cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
|
||||
cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
|
||||
cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
|
||||
{
|
||||
struct cdns_pcie *pcie = &rc->pcie;
|
||||
struct resource *cfg_res = rc->cfg_res;
|
||||
struct resource *mem_res = pcie->mem_res;
|
||||
struct resource *bus_range = rc->bus_range;
|
||||
struct device *dev = rc->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
struct of_pci_range_parser parser;
|
||||
struct of_pci_range range;
|
||||
u32 addr0, addr1, desc1;
|
||||
u64 cpu_addr;
|
||||
int r, err;
|
||||
|
||||
/*
|
||||
* Reserve region 0 for PCI configure space accesses:
|
||||
* OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
|
||||
* cdns_pci_map_bus(), other region registers are set here once for all.
|
||||
*/
|
||||
addr1 = 0; /* Should be programmed to zero. */
|
||||
desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
|
||||
|
||||
cpu_addr = cfg_res->start - mem_res->start;
|
||||
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
|
||||
(lower_32_bits(cpu_addr) & GENMASK(31, 8));
|
||||
addr1 = upper_32_bits(cpu_addr);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);
|
||||
|
||||
err = of_pci_range_parser_init(&parser, np);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
r = 1;
|
||||
for_each_of_pci_range(&parser, &range) {
|
||||
bool is_io;
|
||||
|
||||
if (r >= rc->max_regions)
|
||||
break;
|
||||
|
||||
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
|
||||
is_io = false;
|
||||
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
|
||||
is_io = true;
|
||||
else
|
||||
continue;
|
||||
|
||||
cdns_pcie_set_outbound_region(pcie, 0, r, is_io,
|
||||
range.cpu_addr,
|
||||
range.pci_addr,
|
||||
range.size);
|
||||
r++;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set Root Port no BAR match Inbound Translation registers:
|
||||
* needed for MSI and DMA.
|
||||
* Root Port BAR0 and BAR1 are disabled, hence no need to set their
|
||||
* inbound translation registers.
|
||||
*/
|
||||
addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
|
||||
addr1 = 0;
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cdns_pcie_host_init(struct device *dev,
|
||||
struct list_head *resources,
|
||||
struct cdns_pcie_rc *rc)
|
||||
{
|
||||
struct resource *bus_range = NULL;
|
||||
int err;
|
||||
|
||||
/* Parse our PCI ranges and request their resources */
|
||||
err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
rc->bus_range = bus_range;
|
||||
rc->pcie.bus = bus_range->start;
|
||||
|
||||
err = cdns_pcie_host_init_root_port(rc);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
err = cdns_pcie_host_init_address_translation(rc);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
pci_free_resource_list(resources);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int cdns_pcie_host_probe(struct platform_device *pdev)
|
||||
{
|
||||
const char *type;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
struct pci_host_bridge *bridge;
|
||||
struct list_head resources;
|
||||
struct cdns_pcie_rc *rc;
|
||||
struct cdns_pcie *pcie;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
|
||||
if (!bridge)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = pci_host_bridge_priv(bridge);
|
||||
rc->dev = dev;
|
||||
|
||||
pcie = &rc->pcie;
|
||||
pcie->is_rc = true;
|
||||
|
||||
rc->max_regions = 32;
|
||||
of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);
|
||||
|
||||
rc->no_bar_nbits = 32;
|
||||
of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);
|
||||
|
||||
rc->vendor_id = 0xffff;
|
||||
of_property_read_u16(np, "vendor-id", &rc->vendor_id);
|
||||
|
||||
rc->device_id = 0xffff;
|
||||
of_property_read_u16(np, "device-id", &rc->device_id);
|
||||
|
||||
type = of_get_property(np, "device_type", NULL);
|
||||
if (!type || strcmp(type, "pci")) {
|
||||
dev_err(dev, "invalid \"device_type\" %s\n", type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
|
||||
pcie->reg_base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(pcie->reg_base)) {
|
||||
dev_err(dev, "missing \"reg\"\n");
|
||||
return PTR_ERR(pcie->reg_base);
|
||||
}
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
|
||||
rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
|
||||
if (IS_ERR(rc->cfg_base)) {
|
||||
dev_err(dev, "missing \"cfg\"\n");
|
||||
return PTR_ERR(rc->cfg_base);
|
||||
}
|
||||
rc->cfg_res = res;
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
|
||||
if (!res) {
|
||||
dev_err(dev, "missing \"mem\"\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcie->mem_res = res;
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "pm_runtime_get_sync() failed\n");
|
||||
goto err_get_sync;
|
||||
}
|
||||
|
||||
ret = cdns_pcie_host_init(dev, &resources, rc);
|
||||
if (ret)
|
||||
goto err_init;
|
||||
|
||||
list_splice_init(&resources, &bridge->windows);
|
||||
bridge->dev.parent = dev;
|
||||
bridge->busnr = pcie->bus;
|
||||
bridge->ops = &cdns_pcie_host_ops;
|
||||
bridge->map_irq = of_irq_parse_and_map_pci;
|
||||
bridge->swizzle_irq = pci_common_swizzle;
|
||||
|
||||
ret = pci_host_probe(bridge);
|
||||
if (ret < 0)
|
||||
goto err_host_probe;
|
||||
|
||||
return 0;
|
||||
|
||||
err_host_probe:
|
||||
pci_free_resource_list(&resources);
|
||||
|
||||
err_init:
|
||||
pm_runtime_put_sync(dev);
|
||||
|
||||
err_get_sync:
|
||||
pm_runtime_disable(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct platform_driver cdns_pcie_host_driver = {
|
||||
.driver = {
|
||||
.name = "cdns-pcie-host",
|
||||
.of_match_table = cdns_pcie_host_of_match,
|
||||
},
|
||||
.probe = cdns_pcie_host_probe,
|
||||
};
|
||||
builtin_platform_driver(cdns_pcie_host_driver);
@ -0,0 +1,126 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2017 Cadence
|
||||
// Cadence PCIe controller driver.
|
||||
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
|
||||
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include "pcie-cadence.h"
|
||||
|
||||
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
|
||||
u32 r, bool is_io,
|
||||
u64 cpu_addr, u64 pci_addr, size_t size)
|
||||
{
|
||||
/*
|
||||
* roundup_pow_of_two() returns an unsigned long, which is not suited
|
||||
* for 64-bit values.
|
||||
*/
|
||||
u64 sz = 1ULL << fls64(size - 1);
|
||||
int nbits = ilog2(sz);
|
||||
u32 addr0, addr1, desc0, desc1;
|
||||
|
||||
if (nbits < 8)
|
||||
nbits = 8;
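/*
 * For example, size = 0x100000 (1 MB) gives sz = 0x100000 and nbits = 20;
 * any size smaller than 256 bytes is clamped to nbits = 8.
 */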
|
||||
|
||||
/* Set the PCI address */
|
||||
addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
|
||||
(lower_32_bits(pci_addr) & GENMASK(31, 8));
|
||||
addr1 = upper_32_bits(pci_addr);
|
||||
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);
|
||||
|
||||
/* Set the PCIe header descriptor */
|
||||
if (is_io)
|
||||
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
|
||||
else
|
||||
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
|
||||
desc1 = 0;
|
||||
|
||||
/*
* Whether or not Bit [23] of DESC0 is set, the PCI function number must
* always be programmed into Bits [26:24] of DESC0.
*
* In Root Complex mode the function number is always 0, but in Endpoint
* mode the controller may support more than one function, so the proper
* function number must be set in the outbound descriptor.
*
* Moreover, setting Bit [23] is mandatory in Root Complex mode: the driver
* must then provide the bus number in Bits [7:0] of DESC1 and the device
* number in Bits [31:27] of DESC0. Like the function number, the device
* number is always 0 in Root Complex mode.
*
* In Endpoint mode, however, Bit [23] can be cleared so that the controller
* uses the captured values for the bus and device numbers.
*/
|
||||
if (pcie->is_rc) {
|
||||
/* The device and function numbers are always 0. */
|
||||
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
|
||||
CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
|
||||
desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
|
||||
} else {
|
||||
/*
|
||||
* Use captured values for bus and device numbers but still
|
||||
* need to set the function number.
|
||||
*/
|
||||
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
|
||||
}
|
||||
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
|
||||
|
||||
/* Set the CPU address */
|
||||
cpu_addr -= pcie->mem_res->start;
|
||||
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
|
||||
(lower_32_bits(cpu_addr) & GENMASK(31, 8));
|
||||
addr1 = upper_32_bits(cpu_addr);
|
||||
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
|
||||
}
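For reference, the host code earlier in this series drives this helper from the DT "ranges" entries; a minimal sketch of the call pattern, with hypothetical region indices and addresses (not real platform data):

/*
 * Sketch only: region indices and addresses below are made-up examples;
 * the real values come from the DT "ranges" parsing in the host driver.
 */
static void cdns_example_map_regions(struct cdns_pcie *pcie)
{
	/* Region 1: 1 MB of memory space for function 0. */
	cdns_pcie_set_outbound_region(pcie, 0, 1, false,
				      0x40000000, 0x40000000, 0x100000);

	/* Region 2: 64 KB of I/O space for function 0. */
	cdns_pcie_set_outbound_region(pcie, 0, 2, true,
				      0x40100000, 0x0, 0x10000);
}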
|
||||
|
||||
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
|
||||
u32 r, u64 cpu_addr)
|
||||
{
|
||||
u32 addr0, addr1, desc0, desc1;
|
||||
|
||||
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
|
||||
desc1 = 0;
|
||||
|
||||
/* See cdns_pcie_set_outbound_region() comments above. */
|
||||
if (pcie->is_rc) {
|
||||
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
|
||||
CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
|
||||
desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
|
||||
} else {
|
||||
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
|
||||
}
|
||||
|
||||
/* Set the CPU address */
|
||||
cpu_addr -= pcie->mem_res->start;
|
||||
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
|
||||
(lower_32_bits(cpu_addr) & GENMASK(31, 8));
|
||||
addr1 = upper_32_bits(cpu_addr);
|
||||
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
|
||||
}
|
||||
|
||||
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
|
||||
{
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
|
||||
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
|
||||
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
|
||||
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
|
||||
}
@ -0,0 +1,311 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2017 Cadence
|
||||
// Cadence PCIe controller driver.
|
||||
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
|
||||
|
||||
#ifndef _PCIE_CADENCE_H
|
||||
#define _PCIE_CADENCE_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
/*
|
||||
* Local Management Registers
|
||||
*/
|
||||
#define CDNS_PCIE_LM_BASE 0x00100000
|
||||
|
||||
/* Vendor ID Register */
|
||||
#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
|
||||
#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
|
||||
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
|
||||
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
|
||||
(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
|
||||
#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
|
||||
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
|
||||
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
|
||||
(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
|
||||
|
||||
/* Root Port Requestor ID Register */
|
||||
#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
|
||||
#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
|
||||
#define CDNS_PCIE_LM_RP_RID_SHIFT 0
|
||||
#define CDNS_PCIE_LM_RP_RID_(rid) \
|
||||
(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
|
||||
|
||||
/* Endpoint Bus and Device Number Register */
|
||||
#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
|
||||
#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
|
||||
#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
|
||||
#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
|
||||
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
|
||||
|
||||
/* Endpoint Function f BAR b Configuration Registers */
|
||||
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
|
||||
(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
|
||||
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
|
||||
(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
|
||||
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
|
||||
(GENMASK(4, 0) << ((b) * 8))
|
||||
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
|
||||
(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
|
||||
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
|
||||
(GENMASK(7, 5) << ((b) * 8))
|
||||
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
|
||||
(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
|
||||
|
||||
/* Endpoint Function Configuration Register */
|
||||
#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)
|
||||
|
||||
/* Root Complex BAR Configuration Register */
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
|
||||
(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
|
||||
(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
|
||||
(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
|
||||
(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
|
||||
#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
|
||||
|
||||
/* BAR control values applicable to both Endpoint Function and Root Complex */
|
||||
#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
|
||||
#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
|
||||
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
|
||||
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
|
||||
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
|
||||
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
|
||||
|
||||
|
||||
/*
|
||||
* Endpoint Function Registers (PCI configuration space for endpoint functions)
|
||||
*/
|
||||
#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
|
||||
|
||||
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
|
||||
|
||||
/*
|
||||
* Root Port Registers (PCI configuration space for the root port function)
|
||||
*/
|
||||
#define CDNS_PCIE_RP_BASE 0x00200000
|
||||
|
||||
|
||||
/*
|
||||
* Address Translation Registers
|
||||
*/
|
||||
#define CDNS_PCIE_AT_BASE 0x00400000
|
||||
|
||||
/* Region r Outbound AXI to PCIe Address Translation Register 0 */
|
||||
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
|
||||
(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
|
||||
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
|
||||
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
|
||||
(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
|
||||
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
|
||||
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
|
||||
(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
|
||||
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
|
||||
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
|
||||
(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
|
||||
|
||||
/* Region r Outbound AXI to PCIe Address Translation Register 1 */
|
||||
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
|
||||
(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
|
||||
|
||||
/* Region r Outbound PCIe Descriptor Register 0 */
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
|
||||
(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
|
||||
/* Bit 23 MUST be set in RC mode. */
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
|
||||
(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
|
||||
|
||||
/* Region r Outbound PCIe Descriptor Register 1 */
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
|
||||
(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
|
||||
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
|
||||
((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
|
||||
|
||||
/* Region r AXI Region Base Address Register 0 */
|
||||
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
|
||||
(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
|
||||
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
|
||||
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
|
||||
(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
|
||||
|
||||
/* Region r AXI Region Base Address Register 1 */
|
||||
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
|
||||
(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
|
||||
|
||||
/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
|
||||
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
|
||||
(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
|
||||
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
|
||||
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
|
||||
(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
|
||||
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
|
||||
(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
|
||||
|
||||
enum cdns_pcie_rp_bar {
|
||||
RP_BAR0,
|
||||
RP_BAR1,
|
||||
RP_NO_BAR
|
||||
};
|
||||
|
||||
/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
|
||||
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
|
||||
(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
|
||||
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
|
||||
(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
|
||||
|
||||
/* Normal/Vendor specific message access: offset inside some outbound region */
|
||||
#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
|
||||
#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
|
||||
(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
|
||||
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
|
||||
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
|
||||
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
|
||||
#define CDNS_PCIE_MSG_NO_DATA BIT(16)
|
||||
|
||||
enum cdns_pcie_msg_code {
|
||||
MSG_CODE_ASSERT_INTA = 0x20,
|
||||
MSG_CODE_ASSERT_INTB = 0x21,
|
||||
MSG_CODE_ASSERT_INTC = 0x22,
|
||||
MSG_CODE_ASSERT_INTD = 0x23,
|
||||
MSG_CODE_DEASSERT_INTA = 0x24,
|
||||
MSG_CODE_DEASSERT_INTB = 0x25,
|
||||
MSG_CODE_DEASSERT_INTC = 0x26,
|
||||
MSG_CODE_DEASSERT_INTD = 0x27,
|
||||
};
|
||||
|
||||
enum cdns_pcie_msg_routing {
|
||||
/* Route to Root Complex */
|
||||
MSG_ROUTING_TO_RC,
|
||||
|
||||
/* Use Address Routing */
|
||||
MSG_ROUTING_BY_ADDR,
|
||||
|
||||
/* Use ID Routing */
|
||||
MSG_ROUTING_BY_ID,
|
||||
|
||||
/* Route as Broadcast Message from Root Complex */
|
||||
MSG_ROUTING_BCAST,
|
||||
|
||||
/* Local message; terminate at receiver (INTx messages) */
|
||||
MSG_ROUTING_LOCAL,
|
||||
|
||||
/* Gather & route to Root Complex (PME_TO_Ack message) */
|
||||
MSG_ROUTING_GATHER,
|
||||
};
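The message code and routing values above are meant to be combined through the CDNS_PCIE_NORMAL_MSG_* macros into an offset inside an outbound region of type NORMAL_MSG; a hedged sketch, assuming the caller has already ioremapped a window configured with cdns_pcie_set_outbound_region_for_normal_msg():

/* Sketch only: "msg_base" is an assumed ioremapped NORMAL_MSG window. */
static void cdns_example_assert_inta(void __iomem *msg_base)
{
	u32 offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		     CDNS_PCIE_NORMAL_MSG_CODE(MSG_CODE_ASSERT_INTA) |
		     CDNS_PCIE_MSG_NO_DATA;

	writel(0, msg_base + offset);
}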
|
||||
|
||||
/**
|
||||
* struct cdns_pcie - private data for Cadence PCIe controller drivers
|
||||
* @reg_base: IO mapped register base
|
||||
* @mem_res: CPU physical address range (start/end) mapped to outbound PCI accesses
* @is_rc: tells whether the PCIe controller mode is Root Complex or Endpoint
|
||||
* @bus: In Root Complex mode, the bus number
|
||||
*/
|
||||
struct cdns_pcie {
|
||||
void __iomem *reg_base;
|
||||
struct resource *mem_res;
|
||||
bool is_rc;
|
||||
u8 bus;
|
||||
};
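For contrast with the Root Complex probe shown earlier (which sets is_rc = true and fills reg_base/mem_res from platform resources), an endpoint-mode user of this struct would be expected to do the equivalent with is_rc = false; a hypothetical sketch, not the actual endpoint driver:

/* Hypothetical endpoint-mode setup; only fields of struct cdns_pcie are used. */
static void cdns_example_init_ep(struct cdns_pcie *pcie,
				 void __iomem *reg_base,
				 struct resource *mem_res)
{
	pcie->reg_base = reg_base;	/* LM/RP/AT register window */
	pcie->mem_res = mem_res;	/* CPU window used for outbound regions */
	pcie->is_rc = false;		/* Endpoint mode: bus/device numbers are
					 * captured by the controller, so @bus
					 * is left unused here.
					 */
}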
|
||||
|
||||
/* Register access */
|
||||
static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
|
||||
{
|
||||
writeb(value, pcie->reg_base + reg);
|
||||
}
|
||||
|
||||
static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value)
|
||||
{
|
||||
writew(value, pcie->reg_base + reg);
|
||||
}
|
||||
|
||||
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
|
||||
{
|
||||
writel(value, pcie->reg_base + reg);
|
||||
}
|
||||
|
||||
static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
|
||||
{
|
||||
return readl(pcie->reg_base + reg);
|
||||
}
|
||||
|
||||
/* Root Port register access */
|
||||
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
|
||||
u32 reg, u8 value)
|
||||
{
|
||||
writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
|
||||
}
|
||||
|
||||
static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
|
||||
u32 reg, u16 value)
|
||||
{
|
||||
writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
|
||||
}
|
||||
|
||||
/* Endpoint Function register access */
|
||||
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
|
||||
u32 reg, u8 value)
|
||||
{
|
||||
writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
|
||||
}
|
||||
|
||||
static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
|
||||
u32 reg, u16 value)
|
||||
{
|
||||
writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
|
||||
}
|
||||
|
||||
static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
|
||||
u32 reg, u32 value)
|
||||
{
|
||||
writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
|
||||
}
|
||||
|
||||
static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg)
|
||||
{
|
||||
return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
|
||||
}
|
||||
|
||||
static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
|
||||
{
|
||||
return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
|
||||
}
|
||||
|
||||
static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
|
||||
{
|
||||
return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
|
||||
}
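The plain cdns_pcie_writel()/readl() helpers above pair with the Local Management macros earlier in this header; for example, programming the vendor and subsystem vendor IDs (which the host probe reads from the optional "vendor-id" DT property) could be sketched as follows, assuming both IDs take the same value:

/* Sketch: set vendor and subsystem vendor IDs through the LM_ID register. */
static void cdns_example_set_vendor_id(struct cdns_pcie *pcie, u16 vid)
{
	u32 id = CDNS_PCIE_LM_ID_VENDOR(vid) | CDNS_PCIE_LM_ID_SUBSYS(vid);

	cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
}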
|
||||
|
||||
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
|
||||
u32 r, bool is_io,
|
||||
u64 cpu_addr, u64 pci_addr, size_t size);
|
||||
|
||||
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
|
||||
u32 r, u64 cpu_addr);
|
||||
|
||||
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
|
||||
|
||||
#endif /* _PCIE_CADENCE_H */
@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
menu "DesignWare PCI Core Support"
|
||||
|
||||
config PCIE_DW
|
||||
|
@ -15,39 +17,38 @@ config PCIE_DW_EP
|
|||
select PCIE_DW
|
||||
|
||||
config PCI_DRA7XX
|
||||
bool "TI DRA7xx PCIe controller"
|
||||
depends on SOC_DRA7XX || COMPILE_TEST
|
||||
depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
|
||||
depends on OF && HAS_IOMEM && TI_PIPE3
|
||||
help
|
||||
Enables support for the PCIe controller in the DRA7xx SoC. There
|
||||
are two instances of PCIe controller in DRA7xx. This controller can
|
||||
work either as EP or RC. In order to enable host-specific features
|
||||
PCI_DRA7XX_HOST must be selected and in order to enable device-
|
||||
specific features PCI_DRA7XX_EP must be selected. This uses
|
||||
the DesignWare core.
|
||||
|
||||
if PCI_DRA7XX
|
||||
bool
|
||||
|
||||
config PCI_DRA7XX_HOST
|
||||
bool "PCI DRA7xx Host Mode"
|
||||
depends on PCI
|
||||
depends on PCI_MSI_IRQ_DOMAIN
|
||||
bool "TI DRA7xx PCIe controller Host Mode"
|
||||
depends on SOC_DRA7XX || COMPILE_TEST
|
||||
depends on PCI && PCI_MSI_IRQ_DOMAIN
|
||||
depends on OF && HAS_IOMEM && TI_PIPE3
|
||||
select PCIE_DW_HOST
|
||||
select PCI_DRA7XX
|
||||
default y
|
||||
help
|
||||
Enables support for the PCIe controller in the DRA7xx SoC to work in
|
||||
host mode.
|
||||
Enables support for the PCIe controller in the DRA7xx SoC to work in
|
||||
host mode. There are two instances of PCIe controller in DRA7xx.
|
||||
This controller can work either as EP or RC. In order to enable
|
||||
host-specific features PCI_DRA7XX_HOST must be selected and in order
|
||||
to enable device-specific features PCI_DRA7XX_EP must be selected.
|
||||
This uses the DesignWare core.
|
||||
|
||||
config PCI_DRA7XX_EP
|
||||
bool "PCI DRA7xx Endpoint Mode"
|
||||
bool "TI DRA7xx PCIe controller Endpoint Mode"
|
||||
depends on SOC_DRA7XX || COMPILE_TEST
|
||||
depends on PCI_ENDPOINT
|
||||
depends on OF && HAS_IOMEM && TI_PIPE3
|
||||
select PCIE_DW_EP
|
||||
select PCI_DRA7XX
|
||||
help
|
||||
Enables support for the PCIe controller in the DRA7xx SoC to work in
|
||||
endpoint mode.
|
||||
|
||||
endif
|
||||
Enables support for the PCIe controller in the DRA7xx SoC to work in
|
||||
endpoint mode. There are two instances of PCIe controller in DRA7xx.
|
||||
This controller can work either as EP or RC. In order to enable
|
||||
host-specific features PCI_DRA7XX_HOST must be selected and in order
|
||||
to enable device-specific features PCI_DRA7XX_EP must be selected.
|
||||
This uses the DesignWare core.
|
||||
|
||||
config PCIE_DW_PLAT
|
||||
bool "Platform bus based DesignWare PCIe Controller"
|
||||
|
@ -149,15 +150,28 @@ config PCIE_ARMADA_8K
|
|||
DesignWare core functions to implement the driver.
|
||||
|
||||
config PCIE_ARTPEC6
|
||||
bool "Axis ARTPEC-6 PCIe controller"
|
||||
depends on PCI
|
||||
bool
|
||||
|
||||
config PCIE_ARTPEC6_HOST
|
||||
bool "Axis ARTPEC-6 PCIe controller Host Mode"
|
||||
depends on MACH_ARTPEC6
|
||||
depends on PCI_MSI_IRQ_DOMAIN
|
||||
depends on PCI && PCI_MSI_IRQ_DOMAIN
|
||||
select PCIEPORTBUS
|
||||
select PCIE_DW_HOST
|
||||
select PCIE_ARTPEC6
|
||||
help
|
||||
Say Y here to enable PCIe controller support on Axis ARTPEC-6
|
||||
SoCs. This PCIe controller uses the DesignWare core.
|
||||
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
|
||||
host mode. This uses the DesignWare core.
|
||||
|
||||
config PCIE_ARTPEC6_EP
|
||||
bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
|
||||
depends on MACH_ARTPEC6
|
||||
depends on PCI_ENDPOINT
|
||||
select PCIE_DW_EP
|
||||
select PCIE_ARTPEC6
|
||||
help
|
||||
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
|
||||
endpoint mode. This uses the DesignWare core.
|
||||
|
||||
config PCIE_KIRIN
|
||||
depends on OF && ARM64
|
||||
|
|
|
@ -3,9 +3,7 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o
|
|||
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
|
||||
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
|
||||
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
|
||||
ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),)
|
||||
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
|
||||
endif
|
||||
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
|
||||
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
|
||||
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
|
||||
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
|
||||
|
@ -27,4 +25,6 @@ obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
|
|||
# ARM64 and use internal ifdefs to only build the pieces we need
|
||||
# depending on whether ACPI, the DT driver, or both are enabled.
|
||||
|
||||
ifdef CONFIG_PCI
|
||||
obj-$(CONFIG_ARM64) += pcie-hisi.o
|
||||
endif
|
||||
|
|
|
@ -1,13 +1,10 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
|
||||
*
|
||||
* Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
|
||||
*
|
||||
* Authors: Kishon Vijay Abraham I <kishon@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
|
@ -110,7 +107,7 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
|
|||
writel(value, pcie->base + offset);
|
||||
}
|
||||
|
||||
static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
|
||||
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
|
||||
{
|
||||
return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
|
||||
}
|
||||
|
@ -226,6 +223,7 @@ static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
|
|||
|
||||
static const struct irq_domain_ops intx_domain_ops = {
|
||||
.map = dra7xx_pcie_intx_map,
|
||||
.xlate = pci_irqd_intx_xlate,
|
||||
};
|
||||
|
||||
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
|
||||
|
@ -256,7 +254,8 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
|
|||
struct dra7xx_pcie *dra7xx = arg;
|
||||
struct dw_pcie *pci = dra7xx->pci;
|
||||
struct pcie_port *pp = &pci->pp;
|
||||
u32 reg;
|
||||
unsigned long reg;
|
||||
u32 virq, bit;
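/* for_each_set_bit() below walks an unsigned long bitmap, hence the
 * switch from u32 for the IRQ status value.
 */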
|
||||
|
||||
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
|
||||
|
||||
|
@ -268,8 +267,11 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
|
|||
case INTB:
|
||||
case INTC:
|
||||
case INTD:
|
||||
generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
|
||||
ffs(reg)));
|
||||
for_each_set_bit(bit, ®, PCI_NUM_INTX) {
|
||||
virq = irq_find_mapping(dra7xx->irq_domain, bit);
|
||||
if (virq)
|
||||
generic_handle_irq(virq);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -337,15 +339,6 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = PCI_BASE_ADDRESS_0 + (4 * bar);
|
||||
dw_pcie_writel_dbi2(pci, reg, 0x0);
|
||||
dw_pcie_writel_dbi(pci, reg, 0x0);
|
||||
}
|
||||
|
||||
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
|
||||
{
|
||||
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
|
||||
|
@ -375,7 +368,7 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
|
|||
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
|
||||
}
|
||||
|
||||
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
|
||||
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
|
||||
enum pci_epc_irq_type type, u8 interrupt_num)
|
||||
{
|
||||
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
|
||||
|
@ -470,6 +463,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
|
|||
if (!pci->dbi_base)
|
||||
return -ENOMEM;
|
||||
|
||||
pp->ops = &dra7xx_pcie_host_ops;
|
||||
|
||||
ret = dw_pcie_host_init(pp);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to initialize host\n");
|
||||
|
@ -599,7 +594,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
|
|||
void __iomem *base;
|
||||
struct resource *res;
|
||||
struct dw_pcie *pci;
|
||||
struct pcie_port *pp;
|
||||
struct dra7xx_pcie *dra7xx;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
|
@ -627,9 +621,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
|
|||
pci->dev = dev;
|
||||
pci->ops = &dw_pcie_ops;
|
||||
|
||||
pp = &pci->pp;
|
||||
pp->ops = &dra7xx_pcie_host_ops;
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(dev, "missing IRQ resource: %d\n", irq);
|
||||
|
@ -705,6 +696,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
|
|||
|
||||
switch (mode) {
|
||||
case DW_PCIE_RC_TYPE:
|
||||
if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
|
||||
ret = -ENODEV;
|
||||
goto err_gpio;
|
||||
}
|
||||
|
||||
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
|
||||
DEVICE_TYPE_RC);
|
||||
ret = dra7xx_add_pcie_port(dra7xx, pdev);
|
||||
|
@ -712,6 +708,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
|
|||
goto err_gpio;
|
||||
break;
|
||||
case DW_PCIE_EP_TYPE:
|
||||
if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
|
||||
ret = -ENODEV;
|
||||
goto err_gpio;
|
||||
}
|
||||
|
||||
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
|
||||
DEVICE_TYPE_EP);
|
||||
|
||||
|
@ -810,7 +811,7 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
|
|||
}
|
||||
#endif
|
||||
|
||||
void dra7xx_pcie_shutdown(struct platform_device *pdev)
|
||||
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* PCIe host controller driver for Samsung EXYNOS SoCs
|
||||
*
|
||||
|
@ -5,10 +6,6 @@
|
|||
* http://www.samsung.com
|
||||
*
|
||||
* Author: Jingoo Han <jg1.han@samsung.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
|
@ -55,49 +52,8 @@
|
|||
#define PCIE_ELBI_SLV_ARMISC 0x120
|
||||
#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
|
||||
|
||||
/* PCIe Purple registers */
|
||||
#define PCIE_PHY_GLOBAL_RESET 0x000
|
||||
#define PCIE_PHY_COMMON_RESET 0x004
|
||||
#define PCIE_PHY_CMN_REG 0x008
|
||||
#define PCIE_PHY_MAC_RESET 0x00c
|
||||
#define PCIE_PHY_PLL_LOCKED 0x010
|
||||
#define PCIE_PHY_TRSVREG_RESET 0x020
|
||||
#define PCIE_PHY_TRSV_RESET 0x024
|
||||
|
||||
/* PCIe PHY registers */
|
||||
#define PCIE_PHY_IMPEDANCE 0x004
|
||||
#define PCIE_PHY_PLL_DIV_0 0x008
|
||||
#define PCIE_PHY_PLL_BIAS 0x00c
|
||||
#define PCIE_PHY_DCC_FEEDBACK 0x014
|
||||
#define PCIE_PHY_PLL_DIV_1 0x05c
|
||||
#define PCIE_PHY_COMMON_POWER 0x064
|
||||
#define PCIE_PHY_COMMON_PD_CMN BIT(3)
|
||||
#define PCIE_PHY_TRSV0_EMP_LVL 0x084
|
||||
#define PCIE_PHY_TRSV0_DRV_LVL 0x088
|
||||
#define PCIE_PHY_TRSV0_RXCDR 0x0ac
|
||||
#define PCIE_PHY_TRSV0_POWER 0x0c4
|
||||
#define PCIE_PHY_TRSV0_PD_TSV BIT(7)
|
||||
#define PCIE_PHY_TRSV0_LVCC 0x0dc
|
||||
#define PCIE_PHY_TRSV1_EMP_LVL 0x144
|
||||
#define PCIE_PHY_TRSV1_RXCDR 0x16c
|
||||
#define PCIE_PHY_TRSV1_POWER 0x184
|
||||
#define PCIE_PHY_TRSV1_PD_TSV BIT(7)
|
||||
#define PCIE_PHY_TRSV1_LVCC 0x19c
|
||||
#define PCIE_PHY_TRSV2_EMP_LVL 0x204
|
||||
#define PCIE_PHY_TRSV2_RXCDR 0x22c
|
||||
#define PCIE_PHY_TRSV2_POWER 0x244
|
||||
#define PCIE_PHY_TRSV2_PD_TSV BIT(7)
|
||||
#define PCIE_PHY_TRSV2_LVCC 0x25c
|
||||
#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
|
||||
#define PCIE_PHY_TRSV3_RXCDR 0x2ec
|
||||
#define PCIE_PHY_TRSV3_POWER 0x304
|
||||
#define PCIE_PHY_TRSV3_PD_TSV BIT(7)
|
||||
#define PCIE_PHY_TRSV3_LVCC 0x31c
|
||||
|
||||
struct exynos_pcie_mem_res {
|
||||
void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */
|
||||
void __iomem *phy_base; /* DT 1st resource: PHY CTRL */
|
||||
void __iomem *block_base; /* DT 2nd resource: PHY ADDITIONAL CTRL */
|
||||
};
|
||||
|
||||
struct exynos_pcie_clk_res {
|
||||
|
@ -112,8 +68,6 @@ struct exynos_pcie {
|
|||
const struct exynos_pcie_ops *ops;
|
||||
int reset_gpio;
|
||||
|
||||
/* For Generic PHY Framework */
|
||||
bool using_phy;
|
||||
struct phy *phy;
|
||||
};
|
||||
|
||||
|
@ -141,20 +95,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
|
|||
if (IS_ERR(ep->mem_res->elbi_base))
|
||||
return PTR_ERR(ep->mem_res->elbi_base);
|
||||
|
||||
/* If using the PHY framework, doesn't need to get other resource */
|
||||
if (ep->using_phy)
|
||||
return 0;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(ep->mem_res->phy_base))
|
||||
return PTR_ERR(ep->mem_res->phy_base);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
|
||||
ep->mem_res->block_base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(ep->mem_res->block_base))
|
||||
return PTR_ERR(ep->mem_res->block_base);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -279,111 +219,6 @@ static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
|
|||
exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_MAC_RESET);
|
||||
}
|
||||
|
||||
static void exynos_pcie_assert_phy_reset(struct exynos_pcie *ep)
|
||||
{
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_MAC_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_GLOBAL_RESET);
|
||||
}
|
||||
|
||||
static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *ep)
|
||||
{
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_GLOBAL_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_PWR_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_COMMON_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_CMN_REG);
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_TRSVREG_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_TRSV_RESET);
|
||||
}
|
||||
|
||||
static void exynos_pcie_power_on_phy(struct exynos_pcie *ep)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_COMMON_POWER);
|
||||
val &= ~PCIE_PHY_COMMON_PD_CMN;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_COMMON_POWER);
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV0_POWER);
|
||||
val &= ~PCIE_PHY_TRSV0_PD_TSV;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV0_POWER);
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV1_POWER);
|
||||
val &= ~PCIE_PHY_TRSV1_PD_TSV;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV1_POWER);
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV2_POWER);
|
||||
val &= ~PCIE_PHY_TRSV2_PD_TSV;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV2_POWER);
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV3_POWER);
|
||||
val &= ~PCIE_PHY_TRSV3_PD_TSV;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV3_POWER);
|
||||
}
|
||||
|
||||
static void exynos_pcie_power_off_phy(struct exynos_pcie *ep)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_COMMON_POWER);
|
||||
val |= PCIE_PHY_COMMON_PD_CMN;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_COMMON_POWER);
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV0_POWER);
|
||||
val |= PCIE_PHY_TRSV0_PD_TSV;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV0_POWER);
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV1_POWER);
|
||||
val |= PCIE_PHY_TRSV1_PD_TSV;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV1_POWER);
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV2_POWER);
|
||||
val |= PCIE_PHY_TRSV2_PD_TSV;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV2_POWER);
|
||||
|
||||
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV3_POWER);
|
||||
val |= PCIE_PHY_TRSV3_PD_TSV;
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV3_POWER);
|
||||
}
|
||||
|
||||
static void exynos_pcie_init_phy(struct exynos_pcie *ep)
|
||||
{
|
||||
/* DCC feedback control off */
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x29, PCIE_PHY_DCC_FEEDBACK);
|
||||
|
||||
/* set TX/RX impedance */
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0xd5, PCIE_PHY_IMPEDANCE);
|
||||
|
||||
/* set 50Mhz PHY clock */
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x14, PCIE_PHY_PLL_DIV_0);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x12, PCIE_PHY_PLL_DIV_1);
|
||||
|
||||
/* set TX Differential output for lane 0 */
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
|
||||
|
||||
/* set TX Pre-emphasis Level Control for lane 0 to minimum */
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
|
||||
|
||||
/* set RX clock and data recovery bandwidth */
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0xe7, PCIE_PHY_PLL_BIAS);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV0_RXCDR);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV1_RXCDR);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV2_RXCDR);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV3_RXCDR);
|
||||
|
||||
/* change TX Pre-emphasis Level Control for lanes */
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
|
||||
|
||||
/* set LVCC */
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0x20, PCIE_PHY_TRSV0_LVCC);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV1_LVCC);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV2_LVCC);
|
||||
exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV3_LVCC);
|
||||
}
|
||||
|
||||
static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
|
||||
|
@ -401,7 +236,6 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
|
|||
struct dw_pcie *pci = ep->pci;
|
||||
struct pcie_port *pp = &pci->pp;
|
||||
struct device *dev = pci->dev;
|
||||
u32 val;
|
||||
|
||||
if (dw_pcie_link_up(pci)) {
|
||||
dev_err(dev, "Link already up\n");
|
||||
|
@ -410,32 +244,13 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
|
|||
|
||||
exynos_pcie_assert_core_reset(ep);
|
||||
|
||||
if (ep->using_phy) {
|
||||
phy_reset(ep->phy);
|
||||
phy_reset(ep->phy);
|
||||
|
||||
exynos_pcie_writel(ep->mem_res->elbi_base, 1,
|
||||
PCIE_PWR_RESET);
|
||||
exynos_pcie_writel(ep->mem_res->elbi_base, 1,
|
||||
PCIE_PWR_RESET);
|
||||
|
||||
phy_power_on(ep->phy);
|
||||
phy_init(ep->phy);
|
||||
} else {
|
||||
exynos_pcie_assert_phy_reset(ep);
|
||||
exynos_pcie_deassert_phy_reset(ep);
|
||||
exynos_pcie_power_on_phy(ep);
|
||||
exynos_pcie_init_phy(ep);
|
||||
|
||||
/* pulse for common reset */
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 1,
|
||||
PCIE_PHY_COMMON_RESET);
|
||||
udelay(500);
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 0,
|
||||
PCIE_PHY_COMMON_RESET);
|
||||
}
|
||||
|
||||
/* pulse for common reset */
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_COMMON_RESET);
|
||||
udelay(500);
|
||||
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_COMMON_RESET);
|
||||
phy_power_on(ep->phy);
|
||||
phy_init(ep->phy);
|
||||
|
||||
exynos_pcie_deassert_core_reset(ep);
|
||||
dw_pcie_setup_rc(pp);
|
||||
|
@ -449,18 +264,7 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
|
|||
if (!dw_pcie_wait_for_link(pci))
|
||||
return 0;
|
||||
|
||||
if (ep->using_phy) {
|
||||
phy_power_off(ep->phy);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
while (exynos_pcie_readl(ep->mem_res->phy_base,
|
||||
PCIE_PHY_PLL_LOCKED) == 0) {
|
||||
val = exynos_pcie_readl(ep->mem_res->block_base,
|
||||
PCIE_PHY_PLL_LOCKED);
|
||||
dev_info(dev, "PLL Locked: 0x%x\n", val);
|
||||
}
|
||||
exynos_pcie_power_off_phy(ep);
|
||||
phy_power_off(ep->phy);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
|
@ -678,16 +482,13 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
|
|||
|
||||
ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
|
||||
|
||||
/* Assume that controller doesn't use the PHY framework */
|
||||
ep->using_phy = false;
|
||||
|
||||
ep->phy = devm_of_phy_get(dev, np, NULL);
|
||||
if (IS_ERR(ep->phy)) {
|
||||
if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
|
||||
return PTR_ERR(ep->phy);
|
||||
dev_warn(dev, "Use the 'phy' property; the current pci-exynos DT binding is deprecated\n");
|
||||
} else
|
||||
ep->using_phy = true;
|
||||
|
||||
ep->phy = NULL;
|
||||
}
|
||||
|
||||
if (ep->ops && ep->ops->get_mem_resources) {
|
||||
ret = ep->ops->get_mem_resources(pdev, ep);
|
||||
|
@ -695,7 +496,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
if (ep->ops && ep->ops->get_clk_resources) {
|
||||
if (ep->ops && ep->ops->get_clk_resources &&
|
||||
ep->ops->init_clk_resources) {
|
||||
ret = ep->ops->get_clk_resources(ep);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -713,8 +515,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
|
|||
return 0;
|
||||
|
||||
fail_probe:
|
||||
if (ep->using_phy)
|
||||
phy_exit(ep->phy);
|
||||
phy_exit(ep->phy);
|
||||
|
||||
if (ep->ops && ep->ops->deinit_clk_resources)
|
||||
ep->ops->deinit_clk_resources(ep);
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* PCIe host controller driver for Freescale i.MX6 SoCs
|
||||
*
|
||||
|
@ -5,10 +6,6 @@
|
|||
* http://www.kosagi.com
|
||||
*
|
||||
* Author: Sean Cross <xobs@kosagi.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* DesignWare application register space functions for Keystone PCI controller
|
||||
*
|
||||
|
@ -5,11 +6,6 @@
|
|||
* http://www.ti.com
|
||||
*
|
||||
* Author: Murali Karicheri <m-karicheri2@ti.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* PCIe host controller driver for Texas Instruments Keystone SoCs
|
||||
*
|
||||
|
@ -6,10 +7,6 @@
|
|||
*
|
||||
* Author: Murali Karicheri <m-karicheri2@ti.com>
|
||||
* Implementation based on pci-exynos.c and pcie-designware.c
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
|
@ -178,7 +175,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
|
|||
}
|
||||
|
||||
/* interrupt controller is in a child node */
|
||||
*np_temp = of_find_node_by_name(np_pcie, controller);
|
||||
*np_temp = of_get_child_by_name(np_pcie, controller);
|
||||
if (!(*np_temp)) {
|
||||
dev_err(dev, "Node for %s is absent\n", controller);
|
||||
return -EINVAL;
|
||||
|
@ -187,6 +184,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
|
|||
temp = of_irq_count(*np_temp);
|
||||
if (!temp) {
|
||||
dev_err(dev, "No IRQ entries in %s\n", controller);
|
||||
of_node_put(*np_temp);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -204,6 +202,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
|
|||
break;
|
||||
}
|
||||
|
||||
of_node_put(*np_temp);
|
||||
|
||||
if (temp) {
|
||||
*num_irqs = temp;
|
||||
return 0;
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Keystone PCI Controller's common includes
|
||||
*
|
||||
|
@ -5,11 +6,6 @@
|
|||
* http://www.ti.com
|
||||
*
|
||||
* Author: Murali Karicheri <m-karicheri2@ti.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define MAX_MSI_HOST_IRQS 8
|
||||
|
|
|
@ -1,13 +1,10 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* PCIe host controller driver for Freescale Layerscape SoCs
|
||||
*
|
||||
* Copyright (C) 2014 Freescale Semiconductor.
|
||||
*
|
||||
* Author: Minghuan Lian <Minghuan.Lian@freescale.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* PCIe host controller driver for Marvell Armada-8K SoCs
|
||||
*
|
||||
|
@ -7,10 +8,6 @@
|
|||
*
|
||||
* Author: Yehuda Yitshak <yehuday@marvell.com>
|
||||
* Author: Shadi Ammouri <shadi@marvell.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
|
|
|
@ -1,18 +1,16 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* PCIe host controller driver for Axis ARTPEC-6 SoC
|
||||
*
|
||||
* Author: Niklas Cassel <niklas.cassel@axis.com>
|
||||
*
|
||||
* Based on work done by Phil Edworthy <phil@edworthys.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/resource.h>
|
||||
|
@ -26,44 +24,72 @@
|
|||
|
||||
#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev)
|
||||
|
||||
enum artpec_pcie_variants {
|
||||
ARTPEC6,
|
||||
ARTPEC7,
|
||||
};
|
||||
|
||||
struct artpec6_pcie {
|
||||
struct dw_pcie *pci;
|
||||
struct regmap *regmap; /* DT axis,syscon-pcie */
|
||||
void __iomem *phy_base; /* DT phy */
|
||||
enum artpec_pcie_variants variant;
|
||||
enum dw_pcie_device_mode mode;
|
||||
};
|
||||
|
||||
struct artpec_pcie_of_data {
|
||||
enum artpec_pcie_variants variant;
|
||||
enum dw_pcie_device_mode mode;
|
||||
};
|
||||
|
||||
static const struct of_device_id artpec6_pcie_of_match[];
|
||||
|
||||
/* PCIe Port Logic registers (memory-mapped) */
|
||||
#define PL_OFFSET 0x700
|
||||
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
|
||||
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
|
||||
|
||||
#define MISC_CONTROL_1_OFF (PL_OFFSET + 0x1bc)
|
||||
#define DBI_RO_WR_EN 1
|
||||
#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc)
|
||||
#define ACK_N_FTS_MASK GENMASK(15, 8)
|
||||
#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK)
|
||||
|
||||
#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0)
|
||||
#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK)
|
||||
|
||||
/* ARTPEC-6 specific registers */
|
||||
#define PCIECFG 0x18
|
||||
#define PCIECFG_DBG_OEN (1 << 24)
|
||||
#define PCIECFG_CORE_RESET_REQ (1 << 21)
|
||||
#define PCIECFG_LTSSM_ENABLE (1 << 20)
|
||||
#define PCIECFG_CLKREQ_B (1 << 11)
|
||||
#define PCIECFG_REFCLK_ENABLE (1 << 10)
|
||||
#define PCIECFG_PLL_ENABLE (1 << 9)
|
||||
#define PCIECFG_PCLK_ENABLE (1 << 8)
|
||||
#define PCIECFG_RISRCREN (1 << 4)
|
||||
#define PCIECFG_MODE_TX_DRV_EN (1 << 3)
|
||||
#define PCIECFG_CISRREN (1 << 2)
|
||||
#define PCIECFG_MACRO_ENABLE (1 << 0)
|
||||
#define PCIECFG_DBG_OEN BIT(24)
|
||||
#define PCIECFG_CORE_RESET_REQ BIT(21)
|
||||
#define PCIECFG_LTSSM_ENABLE BIT(20)
|
||||
#define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16)
|
||||
#define PCIECFG_CLKREQ_B BIT(11)
|
||||
#define PCIECFG_REFCLK_ENABLE BIT(10)
|
||||
#define PCIECFG_PLL_ENABLE BIT(9)
|
||||
#define PCIECFG_PCLK_ENABLE BIT(8)
|
||||
#define PCIECFG_RISRCREN BIT(4)
|
||||
#define PCIECFG_MODE_TX_DRV_EN BIT(3)
|
||||
#define PCIECFG_CISRREN BIT(2)
|
||||
#define PCIECFG_MACRO_ENABLE BIT(0)
|
||||
/* ARTPEC-7 specific fields */
|
||||
#define PCIECFG_REFCLKSEL BIT(23)
|
||||
#define PCIECFG_NOC_RESET BIT(3)
|
||||
|
||||
#define PCIESTAT 0x1c
|
||||
/* ARTPEC-7 specific fields */
|
||||
#define PCIESTAT_EXTREFCLK BIT(3)
|
||||
|
||||
#define NOCCFG 0x40
|
||||
#define NOCCFG_ENABLE_CLK_PCIE (1 << 4)
|
||||
#define NOCCFG_POWER_PCIE_IDLEACK (1 << 3)
|
||||
#define NOCCFG_POWER_PCIE_IDLE (1 << 2)
|
||||
#define NOCCFG_POWER_PCIE_IDLEREQ (1 << 1)
|
||||
#define NOCCFG_ENABLE_CLK_PCIE BIT(4)
|
||||
#define NOCCFG_POWER_PCIE_IDLEACK BIT(3)
|
||||
#define NOCCFG_POWER_PCIE_IDLE BIT(2)
|
||||
#define NOCCFG_POWER_PCIE_IDLEREQ BIT(1)
|
||||
|
||||
#define PHY_STATUS 0x118
|
||||
#define PHY_COSPLLLOCK (1 << 0)
|
||||
#define PHY_COSPLLLOCK BIT(0)
|
||||
|
||||
#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff
|
||||
#define PHY_TX_ASIC_OUT 0x4040
|
||||
#define PHY_TX_ASIC_OUT_TX_ACK BIT(0)
|
||||
|
||||
#define PHY_RX_ASIC_OUT 0x405c
|
||||
#define PHY_RX_ASIC_OUT_ACK BIT(0)
|
||||
|
||||
static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
|
||||
{
|
||||
|
@ -78,22 +104,123 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
|
|||
regmap_write(artpec6_pcie->regmap, offset, val);
|
||||
}
|
||||
|
||||
static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr)
|
||||
static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
|
||||
{
|
||||
return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR;
|
||||
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
|
||||
struct pcie_port *pp = &pci->pp;
|
||||
struct dw_pcie_ep *ep = &pci->ep;
|
||||
|
||||
switch (artpec6_pcie->mode) {
|
||||
case DW_PCIE_RC_TYPE:
|
||||
return pci_addr - pp->cfg0_base;
|
||||
case DW_PCIE_EP_TYPE:
|
||||
return pci_addr - ep->phys_base;
|
||||
default:
|
||||
dev_err(pci->dev, "UNKNOWN device type\n");
|
||||
}
|
||||
return pci_addr;
|
||||
}
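/*
 * Note: the DesignWare core is expected to call ->cpu_addr_fixup() above when
 * it programs the outbound ATU, converting CPU-side addresses into the
 * controller-local view for both RC and EP modes.
 */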
|
||||
|
||||
static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
|
||||
static int artpec6_pcie_establish_link(struct dw_pcie *pci)
|
||||
{
|
||||
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
|
||||
u32 val;
|
||||
|
||||
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
|
||||
val |= PCIECFG_LTSSM_ENABLE;
|
||||
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void artpec6_pcie_stop_link(struct dw_pcie *pci)
|
||||
{
|
||||
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
|
||||
u32 val;
|
||||
|
||||
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
|
||||
val &= ~PCIECFG_LTSSM_ENABLE;
|
||||
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
|
||||
}
|
||||
|
||||
static const struct dw_pcie_ops dw_pcie_ops = {
|
||||
.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
|
||||
.start_link = artpec6_pcie_establish_link,
|
||||
.stop_link = artpec6_pcie_stop_link,
|
||||
};
|
||||
|
||||
static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
|
||||
{
|
||||
struct dw_pcie *pci = artpec6_pcie->pci;
|
||||
struct pcie_port *pp = &pci->pp;
|
||||
struct device *dev = pci->dev;
|
||||
u32 val;
|
||||
unsigned int retries;
|
||||
|
||||
/* Hold DW core in reset */
|
||||
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
|
||||
val |= PCIECFG_CORE_RESET_REQ;
|
||||
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
|
||||
retries = 50;
|
||||
do {
|
||||
usleep_range(1000, 2000);
|
||||
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
|
||||
retries--;
|
||||
} while (retries &&
|
||||
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
|
||||
if (!retries)
|
||||
dev_err(dev, "PCIe clock manager did not leave idle state\n");
|
||||
|
||||
retries = 50;
|
||||
do {
|
||||
usleep_range(1000, 2000);
|
||||
val = readl(artpec6_pcie->phy_base + PHY_STATUS);
|
||||
retries--;
|
||||
} while (retries && !(val & PHY_COSPLLLOCK));
|
||||
if (!retries)
|
||||
dev_err(dev, "PHY PLL did not lock\n");
|
||||
}
|
||||
|
||||
static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie)
|
||||
{
|
||||
struct dw_pcie *pci = artpec6_pcie->pci;
|
||||
struct device *dev = pci->dev;
|
||||
u32 val;
|
||||
u16 phy_status_tx, phy_status_rx;
|
||||
unsigned int retries;
|
||||
|
||||
retries = 50;
|
||||
do {
|
||||
usleep_range(1000, 2000);
|
||||
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
|
||||
retries--;
|
||||
} while (retries &&
|
||||
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
|
||||
if (!retries)
|
||||
dev_err(dev, "PCIe clock manager did not leave idle state\n");
|
||||
|
||||
retries = 50;
|
||||
do {
|
||||
usleep_range(1000, 2000);
|
||||
phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT);
|
||||
phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT);
|
||||
retries--;
|
||||
} while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) ||
|
||||
(phy_status_rx & PHY_RX_ASIC_OUT_ACK)));
|
||||
if (!retries)
|
||||
dev_err(dev, "PHY did not enter Pn state\n");
|
||||
}
|
||||
|
||||
static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie)
|
||||
{
|
||||
switch (artpec6_pcie->variant) {
|
||||
case ARTPEC6:
|
||||
artpec6_pcie_wait_for_phy_a6(artpec6_pcie);
|
||||
break;
|
||||
case ARTPEC7:
|
||||
artpec6_pcie_wait_for_phy_a7(artpec6_pcie);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
|
||||
val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
|
||||
|
@@ -119,45 +246,110 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
}

	retries = 50;
	do {
		usleep_range(1000, 2000);
		val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
		retries--;
	} while (retries &&
		(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	u32 val;
	bool extrefclk;

	retries = 50;
	do {
		usleep_range(1000, 2000);
		val = readl(artpec6_pcie->phy_base + PHY_STATUS);
		retries--;
	} while (retries && !(val & PHY_COSPLLLOCK));
	/* Check if external reference clock is connected */
	val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT);
	extrefclk = !!(val & PCIESTAT_EXTREFCLK);
	dev_dbg(pci->dev, "Using reference clock: %s\n",
		extrefclk ? "external" : "internal");

	/* Take DW core out of reset */
	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	val &= ~PCIECFG_CORE_RESET_REQ;
	val |= PCIECFG_RISRCREN |	/* Receiver term. 50 Ohm */
	       PCIECFG_PCLK_ENABLE;
	if (extrefclk)
		val |= PCIECFG_REFCLKSEL;
	else
		val &= ~PCIECFG_REFCLKSEL;
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
	usleep_range(10, 20);

	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
	val |= NOCCFG_ENABLE_CLK_PCIE;
	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
	usleep_range(20, 30);

	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
}

static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
{
	switch (artpec6_pcie->variant) {
	case ARTPEC6:
		artpec6_pcie_init_phy_a6(artpec6_pcie);
		break;
	case ARTPEC7:
		artpec6_pcie_init_phy_a7(artpec6_pcie);
		break;
	}
}

static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
{
	struct dw_pcie *pci = artpec6_pcie->pci;
	u32 val;

	if (artpec6_pcie->variant != ARTPEC7)
		return;

	/*
	 * Increase the N_FTS (Number of Fast Training Sequences)
	 * to be transmitted when transitioning from L0s to L0.
	 */
	val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
	val &= ~ACK_N_FTS_MASK;
	val |= ACK_N_FTS(180);
	dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);

	/*
	 * Set the Number of Fast Training Sequences that the core
	 * advertises as its N_FTS during Gen2 or Gen3 link training.
	 */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~FAST_TRAINING_SEQ_MASK;
	val |= FAST_TRAINING_SEQ(180);
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}

static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
	u32 val;

	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	switch (artpec6_pcie->variant) {
	case ARTPEC6:
		val |= PCIECFG_CORE_RESET_REQ;
		break;
	case ARTPEC7:
		val &= ~PCIECFG_NOC_RESET;
		break;
	}
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
}

static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
	u32 val;

	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	switch (artpec6_pcie->variant) {
	case ARTPEC6:
		val &= ~PCIECFG_CORE_RESET_REQ;
		break;
	case ARTPEC7:
		val |= PCIECFG_NOC_RESET;
		break;
	}
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
	usleep_range(100, 200);

	/* setup root complex */
	dw_pcie_setup_rc(pp);

	/* assert LTSSM enable */
	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
	val |= PCIECFG_LTSSM_ENABLE;
	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_dbg(pci->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));

	return -ETIMEDOUT;
}

static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
@@ -174,7 +366,14 @@ static int artpec6_pcie_host_init(struct pcie_port *pp)
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);

	artpec6_pcie_establish_link(artpec6_pcie);
	artpec6_pcie_assert_core_reset(artpec6_pcie);
	artpec6_pcie_init_phy(artpec6_pcie);
	artpec6_pcie_deassert_core_reset(artpec6_pcie);
	artpec6_pcie_wait_for_phy(artpec6_pcie);
	artpec6_pcie_set_nfts(artpec6_pcie);
	dw_pcie_setup_rc(pp);
	artpec6_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	artpec6_pcie_enable_interrupts(artpec6_pcie);

	return 0;
@@ -230,10 +429,78 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
	enum pci_barno bar;

	artpec6_pcie_assert_core_reset(artpec6_pcie);
	artpec6_pcie_init_phy(artpec6_pcie);
	artpec6_pcie_deassert_core_reset(artpec6_pcie);
	artpec6_pcie_wait_for_phy(artpec6_pcie);
	artpec6_pcie_set_nfts(artpec6_pcie);

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);
}

static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				  enum pci_epc_irq_type type, u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
		return -EINVAL;
	case PCI_EPC_IRQ_MSI:
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = artpec6_pcie_ep_init,
	.raise_irq = artpec6_pcie_raise_irq,
};

static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
			       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = artpec6_pcie->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
	pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base2)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

static int artpec6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
@@ -242,6 +509,18 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
	struct resource *dbi_base;
	struct resource *phy_base;
	int ret;
	const struct of_device_id *match;
	const struct artpec_pcie_of_data *data;
	enum artpec_pcie_variants variant;
	enum dw_pcie_device_mode mode;

	match = of_match_device(artpec6_pcie_of_match, dev);
	if (!match)
		return -EINVAL;

	data = (struct artpec_pcie_of_data *)match->data;
	variant = (enum artpec_pcie_variants)data->variant;
	mode = (enum dw_pcie_device_mode)data->mode;

	artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
	if (!artpec6_pcie)
@@ -255,6 +534,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
	pci->ops = &dw_pcie_ops;

	artpec6_pcie->pci = pci;
	artpec6_pcie->variant = variant;
	artpec6_pcie->mode = mode;

	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
@@ -274,15 +555,73 @@ static int artpec6_pcie_probe(struct platform_device *pdev)

	platform_set_drvdata(pdev, artpec6_pcie);

	ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
	if (ret < 0)
		return ret;
	switch (artpec6_pcie->mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
			return -ENODEV;

		ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
		if (ret < 0)
			return ret;
		break;
	case DW_PCIE_EP_TYPE: {
		u32 val;

		if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
			return -ENODEV;

		val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
		val &= ~PCIECFG_DEVICE_TYPE_MASK;
		artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
		ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
		if (ret < 0)
			return ret;
		break;
	}
	default:
		dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
	}

	return 0;
}

static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = {
	.variant = ARTPEC6,
	.mode = DW_PCIE_RC_TYPE,
};

static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = {
	.variant = ARTPEC6,
	.mode = DW_PCIE_EP_TYPE,
};

static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = {
	.variant = ARTPEC7,
	.mode = DW_PCIE_RC_TYPE,
};

static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = {
	.variant = ARTPEC7,
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id artpec6_pcie_of_match[] = {
	{ .compatible = "axis,artpec6-pcie", },
	{
		.compatible = "axis,artpec6-pcie",
		.data = &artpec6_pcie_rc_of_data,
	},
	{
		.compatible = "axis,artpec6-pcie-ep",
		.data = &artpec6_pcie_ep_of_data,
	},
	{
		.compatible = "axis,artpec7-pcie",
		.data = &artpec7_pcie_rc_of_data,
	},
	{
		.compatible = "axis,artpec7-pcie-ep",
		.data = &artpec7_pcie_ep_of_data,
	},
	{},
};

@@ -1,20 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/**
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/of.h>
@@ -30,21 +19,24 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
	pci_epc_linkup(epc);
}

static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writel_dbi2(pci, reg, 0x0);
	dw_pcie_writel_dbi(pci, reg, 0x0);
	dw_pcie_dbi_ro_wr_dis(pci);
}

static int dw_pcie_ep_write_header(struct pci_epc *epc,
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
@@ -58,6 +50,7 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc,
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
			   hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
@@ -70,8 +63,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	free_win = find_first_zero_bit(&ep->ib_window_map,
				       sizeof(ep->ib_window_map));
	free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
	if (free_win >= ep->num_ib_windows) {
		dev_err(pci->dev, "no free inbound window\n");
		return -EINVAL;
@@ -85,7 +77,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
	}

	ep->bar_to_atu[bar] = free_win;
	set_bit(free_win, &ep->ib_window_map);
	set_bit(free_win, ep->ib_window_map);

	return 0;
}
@@ -96,8 +88,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	free_win = find_first_zero_bit(&ep->ob_window_map,
				       sizeof(ep->ob_window_map));
	free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
	if (free_win >= ep->num_ob_windows) {
		dev_err(pci->dev, "no free outbound window\n");
		return -EINVAL;
@@ -106,13 +97,14 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
	dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
				  phys_addr, pci_addr, size);

	set_bit(free_win, &ep->ob_window_map);
	set_bit(free_win, ep->ob_window_map);
	ep->outbound_addr[free_win] = phys_addr;

	return 0;
}

static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
				 enum pci_barno bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -121,10 +113,11 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
	dw_pcie_ep_reset_bar(pci, bar);

	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
	clear_bit(atu_index, &ep->ib_window_map);
	clear_bit(atu_index, ep->ib_window_map);
}

static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
			      enum pci_barno bar,
			      dma_addr_t bar_phys, size_t size, int flags)
{
	int ret;
@@ -142,8 +135,10 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writel_dbi2(pci, reg, size - 1);
	dw_pcie_writel_dbi(pci, reg, flags);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
@@ -163,7 +158,8 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
	return -EINVAL;
}

static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
				  phys_addr_t addr)
{
	int ret;
	u32 atu_index;
@@ -175,10 +171,11 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
		return;

	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
	clear_bit(atu_index, &ep->ob_window_map);
	clear_bit(atu_index, ep->ob_window_map);
}

static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
			       phys_addr_t addr,
			       u64 pci_addr, size_t size)
{
	int ret;
@@ -194,39 +191,37 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
	return 0;
}

static int dw_pcie_ep_get_msi(struct pci_epc *epc)
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
{
	int val;
	u32 lower_addr;
	u32 upper_addr;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL);
	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;

	lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
	upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);

	if (!(lower_addr || upper_addr))
	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	if (!(val & MSI_CAP_MSI_EN_MASK))
		return -EINVAL;

	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
	return val;
}

static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
{
	int val;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	val = (encode_int << MSI_CAP_MMC_SHIFT);
	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	val &= ~MSI_CAP_MMC_MASK;
	val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
				enum pci_epc_irq_type type, u8 interrupt_num)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -234,7 +229,7 @@ static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
	if (!ep->ops->raise_irq)
		return -EINVAL;

	return ep->ops->raise_irq(ep, type, interrupt_num);
	return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
}

static void dw_pcie_ep_stop(struct pci_epc *epc)
@@ -272,10 +267,48 @@ static const struct pci_epc_ops epc_ops = {
	.stop = dw_pcie_ep_stop,
};

int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;
	u16 msg_ctrl, msg_data;
	u32 msg_addr_lower, msg_addr_upper;
	u64 msg_addr;
	bool has_upper;
	int ret;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
	if (has_upper) {
		msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
	} else {
		msg_addr_upper = 0;
		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
	}
	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
				  epc->mem->page_size);
	if (ret)
		return ret;

	writel(msg_data | (interrupt_num - 1), ep->msi_mem);

	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);

	return 0;
}

void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
			      epc->mem->page_size);

	pci_epc_mem_exit(epc);
}

@@ -298,12 +331,32 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
		dev_err(dev, "unable to read *num-ib-windows* property\n");
		return ret;
	}
	if (ep->num_ib_windows > MAX_IATU_IN) {
		dev_err(dev, "invalid *num-ib-windows*\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
	if (ret < 0) {
		dev_err(dev, "unable to read *num-ob-windows* property\n");
		return ret;
	}
	if (ep->num_ob_windows > MAX_IATU_OUT) {
		dev_err(dev, "invalid *num-ob-windows*\n");
		return -EINVAL;
	}

	ep->ib_window_map = devm_kzalloc(dev, sizeof(long) *
					 BITS_TO_LONGS(ep->num_ib_windows),
					 GFP_KERNEL);
	if (!ep->ib_window_map)
		return -ENOMEM;

	ep->ob_window_map = devm_kzalloc(dev, sizeof(long) *
					 BITS_TO_LONGS(ep->num_ob_windows),
					 GFP_KERNEL);
	if (!ep->ob_window_map)
		return -ENOMEM;

	addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
			    GFP_KERNEL);
@@ -331,6 +384,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
		return ret;
	}

	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
					     epc->mem->page_size);
	if (!ep->msi_mem) {
		dev_err(dev, "Failed to reserve memory for MSI\n");
		return -ENOMEM;
	}

	ep->epc = epc;
	epc_set_drvdata(epc, ep);
	dw_pcie_setup(pci);

@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
@@ -5,10 +6,6 @@
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irqdomain.h>
@@ -83,10 +80,19 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)

void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct page *page;
	u64 msi_target;

	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
	msi_target = virt_to_phys((void *)pp->msi_data);
	page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "failed to map MSI data\n");
		__free_page(page);
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
@@ -187,7 +193,7 @@ static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);
		msi_target = (u64)pp->msi_data;

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
@@ -1,13 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe RC driver for Synopsys DesignWare Core
 *
 * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
 *
 * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
@@ -5,10 +6,6 @@
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
@@ -149,7 +146,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
@@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Synopsys DesignWare PCIe host controller driver
 *
@@ -5,15 +6,12 @@
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _PCIE_DESIGNWARE_H
#define _PCIE_DESIGNWARE_H

#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
@@ -100,10 +98,14 @@

#define MSI_MESSAGE_CONTROL		0x52
#define MSI_CAP_MMC_SHIFT		1
#define MSI_CAP_MMC_MASK		(7 << MSI_CAP_MMC_SHIFT)
#define MSI_CAP_MME_SHIFT		4
#define MSI_CAP_MSI_EN_MASK		0x1
#define MSI_CAP_MME_MASK		(7 << MSI_CAP_MME_SHIFT)
#define MSI_MESSAGE_ADDR_L32		0x54
#define MSI_MESSAGE_ADDR_U32		0x58
#define MSI_MESSAGE_DATA_32		0x58
#define MSI_MESSAGE_DATA_64		0x5C

/*
 * Maximum number of MSI IRQs can be 256 per controller. But keep
@@ -113,6 +115,10 @@
#define MAX_MSI_IRQS			32
#define MAX_MSI_CTRLS			(MAX_MSI_IRQS / 32)

/* Maximum number of inbound/outbound iATUs */
#define MAX_IATU_IN			256
#define MAX_IATU_OUT			256

struct pcie_port;
struct dw_pcie;
struct dw_pcie_ep;
@@ -168,7 +174,7 @@ struct pcie_port {
	const struct dw_pcie_host_ops *ops;
	int			msi_irq;
	struct irq_domain	*irq_domain;
	unsigned long		msi_data;
	dma_addr_t		msi_data;
	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};

@@ -180,8 +186,8 @@ enum dw_pcie_as_type {

struct dw_pcie_ep_ops {
	void	(*ep_init)(struct dw_pcie_ep *ep);
	int	(*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
			     u8 interrupt_num);
	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
			     enum pci_epc_irq_type type, u8 interrupt_num);
};

struct dw_pcie_ep {
@@ -192,14 +198,16 @@ struct dw_pcie_ep {
	size_t			page_size;
	u8			bar_to_atu[6];
	phys_addr_t		*outbound_addr;
	unsigned long		ib_window_map;
	unsigned long		ob_window_map;
	unsigned long		*ib_window_map;
	unsigned long		*ob_window_map;
	u32			num_ib_windows;
	u32			num_ob_windows;
	void __iomem		*msi_mem;
	phys_addr_t		msi_mem_phys;
};

struct dw_pcie_ops {
	u64	(*cpu_addr_fixup)(u64 cpu_addr);
	u64	(*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
	u32	(*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
			    size_t size);
	void	(*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
@@ -334,6 +342,9 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
@@ -347,5 +358,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}

static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
					   u8 interrupt_num)
{
	return 0;
}

static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
#endif
#endif /* _PCIE_DESIGNWARE_H */
@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for HiSilicon SoCs
 *
@@ -6,10 +7,6 @@
 * Authors: Zhou Wang <wangzhou1@hisilicon.com>
 *          Dacai Zhu <zhudacai@hisilicon.com>
 *          Gabriele Paoloni <gabriele.paoloni@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for HiSilicon STB SoCs
 *
@@ -5,10 +6,6 @@
 *
 * Authors: Ruqiang Ju <juruqiang@hisilicon.com>
 *          Jianguo Sun <sunjianguo1@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Kirin Phone SoCs
 *
@@ -5,10 +6,6 @@
 *		http://www.huawei.com
 *
 * Author: Xiaowei Song <songxiaowei@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/compiler.h>
@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
@@ -5,15 +6,6 @@
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
@@ -171,7 +163,7 @@ struct qcom_pcie {
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct qcom_pcie_ops *ops;
	const struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)	dev_get_drvdata((x)->dev)
@@ -1234,7 +1226,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)

	pcie->pci = pci;

	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
	pcie->ops = of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
	if (IS_ERR(pcie->reset))
@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
 *
@@ -6,10 +7,6 @@
 * Copyright (C) 2010-2014 ST Microelectronics
 * Pratyush Anand <pratyush.anand@gmail.com>
 * Mohit Kumar <mohit.kumar.dhaka@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
@@ -1,17 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */

#include <linux/device.h>
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
# PCI Endpoint Support
#
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for PCI Endpoint Support
#
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
# PCI Endpoint Functions
#
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for PCI Endpoint Functions
#
@@ -1,20 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/**
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crc32.h>
@@ -104,7 +93,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
		goto err;
	}

	ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
@@ -119,7 +109,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
@@ -128,13 +119,13 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)

	memcpy(dst_addr, src_addr, reg->size);

	pci_epc_unmap_addr(epc, dst_phys_addr);
	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, src_phys_addr);
	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
@@ -164,7 +155,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
		goto err;
	}

	ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
@@ -186,7 +178,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, phys_addr);
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
@@ -215,7 +207,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
		goto err;
	}

	ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
@@ -242,7 +235,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, phys_addr);
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
@@ -260,11 +253,11 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	reg->status |= STATUS_IRQ_RAISED;
	msi_count = pci_epc_get_msi(epc);
	msi_count = pci_epc_get_msi(epc, epf->func_no);
	if (irq > msi_count || msi_count <= 0)
		pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
	else
		pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
}

static void pci_epf_test_cmd_handler(struct work_struct *work)
@@ -291,7 +284,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

@@ -326,11 +319,11 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		msi_count = pci_epc_get_msi(epc);
		msi_count = pci_epc_get_msi(epc, epf->func_no);
		if (irq > msi_count || msi_count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
		goto reset_handler;
	}

@@ -358,7 +351,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (epf_test->reg[bar]) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
			pci_epc_clear_bar(epc, bar);
			pci_epc_clear_bar(epc, epf->func_no, bar);
		}
	}
}
@@ -380,7 +373,8 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)

	for (bar = BAR_0; bar <= BAR_5; bar++) {
		epf_bar = &epf->bar[bar];
		ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
		ret = pci_epc_set_bar(epc, epf->func_no, bar,
				      epf_bar->phys_addr,
				      epf_bar->size, flags);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
@@ -433,7 +427,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	ret = pci_epc_write_header(epc, header);
	ret = pci_epc_write_header(epc, epf->func_no, header);
	if (ret) {
		dev_err(dev, "configuration header write failed\n");
		return ret;
@@ -447,7 +441,7 @@ static int pci_epf_test_bind(struct pci_epf *epf)
	if (ret)
		return ret;

	ret = pci_epc_set_msi(epc, epf->msi_interrupts);
	ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
	if (ret)
		return ret;

@@ -1,35 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
/**
 * configfs to configure the PCI endpoint
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static DEFINE_IDR(functions_idr);
static DEFINE_MUTEX(functions_mutex);
static struct config_group *functions_group;
static struct config_group *controllers_group;

struct pci_epf_group {
	struct config_group group;
	struct pci_epf *epf;
	int index;
};

struct pci_epc_group {
@@ -97,22 +90,23 @@ static int pci_epc_epf_link(struct config_item *epc_item,
{
	int ret;
	u32 func_no = 0;
	struct pci_epc *epc;
	struct pci_epf *epf;
	struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
	struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
	struct pci_epc *epc = epc_group->epc;
	struct pci_epf *epf = epf_group->epf;

	func_no = find_first_zero_bit(&epc_group->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG)
		return -EINVAL;

	set_bit(func_no, &epc_group->function_num_map);
	epf->func_no = func_no;

	epc = epc_group->epc;
	epf = epf_group->epf;
	ret = pci_epc_add_epf(epc, epf);
	if (ret)
		goto err_add_epf;

	func_no = find_first_zero_bit(&epc_group->function_num_map,
				      sizeof(epc_group->function_num_map));
	set_bit(func_no, &epc_group->function_num_map);
	epf->func_no = func_no;

	ret = pci_epf_bind(epf);
	if (ret)
		goto err_epf_bind;
@@ -353,6 +347,9 @@ static void pci_epf_release(struct config_item *item)
{
	struct pci_epf_group *epf_group = to_pci_epf_group(item);

	mutex_lock(&functions_mutex);
	idr_remove(&functions_idr, epf_group->index);
	mutex_unlock(&functions_mutex);
	pci_epf_destroy(epf_group->epf);
	kfree(epf_group);
}
@@ -372,22 +369,57 @@ static struct config_group *pci_epf_make(struct config_group *group,
{
	struct pci_epf_group *epf_group;
	struct pci_epf *epf;
	char *epf_name;
	int index, err;

	epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
	if (!epf_group)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&functions_mutex);
	index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
	mutex_unlock(&functions_mutex);
	if (index < 0) {
		err = index;
		goto free_group;
	}

	epf_group->index = index;

	config_group_init_type_name(&epf_group->group, name, &pci_epf_type);

	epf = pci_epf_create(group->cg_item.ci_name);
	epf_name = kasprintf(GFP_KERNEL, "%s.%d",
			     group->cg_item.ci_name, epf_group->index);
	if (!epf_name) {
		err = -ENOMEM;
		goto remove_idr;
	}

	epf = pci_epf_create(epf_name);
	if (IS_ERR(epf)) {
		pr_err("failed to create endpoint function device\n");
		return ERR_PTR(-EINVAL);
		err = -EINVAL;
		goto free_name;
	}

	epf_group->epf = epf;

	kfree(epf_name);

	return &epf_group->group;

free_name:
	kfree(epf_name);

remove_idr:
	mutex_lock(&functions_mutex);
	idr_remove(&functions_idr, epf_group->index);
	mutex_unlock(&functions_mutex);

free_group:
	kfree(epf_group);

	return ERR_PTR(err);
}

static void pci_epf_drop(struct config_group *group, struct config_item *item)