commit f263fbb8d6

Merge tag 'pci-v4.13-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

Pull PCI updates from Bjorn Helgaas:

 - add sysfs max_link_speed/width, current_link_speed/width (Wong Vee Khee)
 - make host bridge IRQ mapping much more generic (Matthew Minter, Lorenzo Pieralisi)
 - convert most drivers to pci_scan_root_bus_bridge() (Lorenzo Pieralisi)
 - mutex sriov_configure() (Jakub Kicinski)
 - mutex pci_error_handlers callbacks (Christoph Hellwig)
 - split ->reset_notify() into ->reset_prepare()/reset_done() (Christoph Hellwig)
 - support multiple PCIe portdrv interrupts for MSI as well as MSI-X (Gabriele Paoloni)
 - allocate MSI/MSI-X vector for Downstream Port Containment (Gabriele Paoloni)
 - fix MSI IRQ affinity pre/post/min_vecs issue (Michael Hernandez)
 - test INTx masking during enumeration, not at run-time (Piotr Gregor)
 - avoid using device_may_wakeup() for runtime PM (Rafael J. Wysocki)
 - restore the status of PCI devices across hibernation (Chen Yu)
 - keep parent resources that start at 0x0 (Ard Biesheuvel)
 - enable ECRC only if device supports it (Bjorn Helgaas)
 - restore PRI and PASID state after Function-Level Reset (CQ Tang)
 - skip DPC event if device is not present (Keith Busch)
 - check domain when matching SMBIOS info (Sujith Pandel)
 - mark Intel XXV710 NIC INTx masking as broken (Alex Williamson)
 - avoid AMD SB7xx EHCI USB wakeup defect (Kai-Heng Feng)
 - work around long-standing Macbook Pro poweroff issue (Bjorn Helgaas)
 - add Switchtec "running" status flag (Logan Gunthorpe)
 - fix dra7xx incorrect RW1C IRQ register usage (Arvind Yadav)
 - modify xilinx-nwl IRQ chip for legacy interrupts (Bharat Kumar Gogada)
 - move VMD SRCU cleanup after bus, child device removal (Jon Derrick)
 - add Faraday clock handling (Linus Walleij)
 - configure Rockchip MPS and reorganize (Shawn Lin)
 - limit Qualcomm TLP size to 2K (hardware issue) (Srinivas Kandagatla)
 - support Tegra MSI 64-bit addressing (Thierry Reding)
 - use Rockchip normal (not privileged) register bank (Shawn Lin)
 - add HiSilicon Kirin SoC PCIe controller driver (Xiaowei Song)
 - add Sigma Designs Tango SMP8759 PCIe controller driver (Marc Gonzalez)
 - add MediaTek PCIe host controller support (Ryder Lee)
 - add Qualcomm IPQ4019 support (John Crispin)
 - add HyperV vPCI protocol v1.2 support (Jork Loeser)
 - add i.MX6 regulator support (Quentin Schulz)

* tag 'pci-v4.13-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (113 commits)
  PCI: tango: Add Sigma Designs Tango SMP8759 PCIe host bridge support
  PCI: Add DT binding for Sigma Designs Tango PCIe controller
  PCI: rockchip: Use normal register bank for config accessors
  dt-bindings: PCI: Add documentation for MediaTek PCIe
  PCI: Remove __pci_dev_reset() and pci_dev_reset()
  PCI: Split ->reset_notify() method into ->reset_prepare() and ->reset_done()
  PCI: xilinx: Make of_device_ids const
  PCI: xilinx-nwl: Modify IRQ chip for legacy interrupts
  PCI: vmd: Move SRCU cleanup after bus, child device removal
  PCI: vmd: Correct comment: VMD domains start at 0x10000, not 0x1000
  PCI: versatile: Add local struct device pointers
  PCI: tegra: Do not allocate MSI target memory
  PCI: tegra: Support MSI 64-bit addressing
  PCI: rockchip: Use local struct device pointer consistently
  PCI: rockchip: Check for clk_prepare_enable() errors during resume
  MAINTAINERS: Remove Wenrui Li as Rockchip PCIe driver maintainer
  PCI: rockchip: Configure RC's MPS setting
  PCI: rockchip: Reconfigure configuration space header type
  PCI: rockchip: Split out rockchip_pcie_cfg_configuration_accesses()
  PCI: rockchip: Move configuration accesses into rockchip_pcie_cfg_atu()
  ...
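The two largest themes above are the generic host bridge IRQ mapping and the conversion of host drivers to pci_scan_root_bus_bridge(). As orientation before the diffs that follow, here is a minimal sketch of the bridge-based scan path. It is not code from this series; the function name, the ops/resource/sysdata parameters, and the error handling are placeholder assumptions, only the pci_* calls are the ones the series converts drivers to.

#include <linux/pci.h>

/* Hypothetical host driver: ops, resources and sysdata are assumed to exist. */
static int example_scan_root_bus(struct device *parent, struct pci_ops *ops,
                                 struct list_head *resources, int busnr,
                                 void *sysdata)
{
        struct pci_host_bridge *bridge;
        int ret;

        bridge = pci_alloc_host_bridge(0);
        if (!bridge)
                return -ENOMEM;

        /* The windows list replaces the old &sys->resources argument. */
        list_splice_init(resources, &bridge->windows);
        bridge->dev.parent = parent;
        bridge->sysdata = sysdata;
        bridge->busnr = busnr;
        bridge->ops = ops;

        ret = pci_scan_root_bus_bridge(bridge);
        if (ret < 0) {
                pci_free_host_bridge(bridge);
                return ret;
        }

        pci_bus_add_devices(bridge->bus);
        return 0;
}

The dove, iop13xx, mv78xx0 and orion5x conversions below follow essentially this shape.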
@@ -30,6 +30,13 @@ Mandatory properties:
   128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as
   pre-fetchable.
 
+Optional properties:
+- clocks: when present, this should contain the peripheral clock (PCLK) and the
+  PCI clock (PCICLK). If these are not present, they are assumed to be
+  hard-wired enabled and always on. The PCI clock will be 33 or 66 MHz.
+- clock-names: when present, this should contain "PCLK" for the peripheral
+  clock and "PCICLK" for the PCI-side clock.
+
 Mandatory subnodes:
 - For "faraday,ftpci100" a node representing the interrupt-controller inside the
   host bridge is mandatory. It has the following mandatory properties:
@@ -33,6 +33,10 @@ Optional properties:
 - reset-gpio-active-high: If present then the reset sequence using the GPIO
   specified in the "reset-gpio" property is reversed (H=reset state,
   L=operation state).
+- vpcie-supply: Should specify the regulator in charge of PCIe port power.
+  The regulator will be enabled when initializing the PCIe host and
+  disabled either as part of the init process or when shutting down the
+  host.
 
 Additional required properties for imx6sx-pcie:
 - clock names: Must include the following additional entries:
@@ -0,0 +1,130 @@
+MediaTek Gen2 PCIe controller which is available on MT7623 series SoCs
+
+PCIe subsys supports single root complex (RC) with 3 Root Ports. Each root
+port supports a Gen2 1-lane Link and has PIPE interface to PHY.
+
+Required properties:
+- compatible: Should contain "mediatek,mt7623-pcie".
+- device_type: Must be "pci"
+- reg: Base addresses and lengths of the PCIe controller.
+- #address-cells: Address representation for root ports (must be 3)
+- #size-cells: Size representation for root ports (must be 2)
+- #interrupt-cells: Size representation for interrupts (must be 1)
+- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
+  Please refer to the standard PCI bus binding document for a more detailed
+  explanation.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - free_ck :for reference clock of PCIe subsys
+  - sys_ck0 :for clock of Port0
+  - sys_ck1 :for clock of Port1
+  - sys_ck2 :for clock of Port2
+- resets: Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+  - pcie-rst0 :port0 reset
+  - pcie-rst1 :port1 reset
+  - pcie-rst2 :port2 reset
+- phys: List of PHY specifiers (used by generic PHY framework).
+- phy-names : Must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
+  number of PHYs as specified in *phys* property.
+- power-domains: A phandle and power domain specifier pair to the power domain
+  which is responsible for collapsing and restoring power to the peripheral.
+- bus-range: Range of bus numbers associated with this controller.
+- ranges: Ranges for the PCI memory and I/O regions.
+
+In addition, the device tree node must have sub-nodes describing each
+PCIe port interface, having the following mandatory properties:
+
+Required properties:
+- device_type: Must be "pci"
+- reg: Only the first four bytes are used to refer to the correct bus number
+  and device number.
+- #address-cells: Must be 3
+- #size-cells: Must be 2
+- #interrupt-cells: Must be 1
+- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
+  Please refer to the standard PCI bus binding document for a more detailed
+  explanation.
+- ranges: Sub-ranges distributed from the PCIe controller node. An empty
+  property is sufficient.
+- num-lanes: Number of lanes to use for this port.
+
+Examples:
+
+        hifsys: syscon@1a000000 {
+                compatible = "mediatek,mt7623-hifsys",
+                             "mediatek,mt2701-hifsys",
+                             "syscon";
+                reg = <0 0x1a000000 0 0x1000>;
+                #clock-cells = <1>;
+                #reset-cells = <1>;
+        };
+
+        pcie: pcie-controller@1a140000 {
+                compatible = "mediatek,mt7623-pcie";
+                device_type = "pci";
+                reg = <0 0x1a140000 0 0x1000>, /* PCIe shared registers */
+                      <0 0x1a142000 0 0x1000>, /* Port0 registers */
+                      <0 0x1a143000 0 0x1000>, /* Port1 registers */
+                      <0 0x1a144000 0 0x1000>; /* Port2 registers */
+                #address-cells = <3>;
+                #size-cells = <2>;
+                #interrupt-cells = <1>;
+                interrupt-map-mask = <0xf800 0 0 0>;
+                interrupt-map = <0x0000 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>,
+                                <0x0800 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>,
+                                <0x1000 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
+                clocks = <&topckgen CLK_TOP_ETHIF_SEL>,
+                         <&hifsys CLK_HIFSYS_PCIE0>,
+                         <&hifsys CLK_HIFSYS_PCIE1>,
+                         <&hifsys CLK_HIFSYS_PCIE2>;
+                clock-names = "free_ck", "sys_ck0", "sys_ck1", "sys_ck2";
+                resets = <&hifsys MT2701_HIFSYS_PCIE0_RST>,
+                         <&hifsys MT2701_HIFSYS_PCIE1_RST>,
+                         <&hifsys MT2701_HIFSYS_PCIE2_RST>;
+                reset-names = "pcie-rst0", "pcie-rst1", "pcie-rst2";
+                phys = <&pcie0_phy>, <&pcie1_phy>, <&pcie2_phy>;
+                phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
+                power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>;
+                bus-range = <0x00 0xff>;
+                ranges = <0x81000000 0 0x1a160000 0 0x1a160000 0 0x00010000   /* I/O space */
+                          0x83000000 0 0x60000000 0 0x60000000 0 0x10000000>; /* memory space */
+
+                pcie@0,0 {
+                        device_type = "pci";
+                        reg = <0x0000 0 0 0 0>;
+                        #address-cells = <3>;
+                        #size-cells = <2>;
+                        #interrupt-cells = <1>;
+                        interrupt-map-mask = <0 0 0 0>;
+                        interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
+                        ranges;
+                        num-lanes = <1>;
+                };
+
+                pcie@1,0 {
+                        device_type = "pci";
+                        reg = <0x0800 0 0 0 0>;
+                        #address-cells = <3>;
+                        #size-cells = <2>;
+                        #interrupt-cells = <1>;
+                        interrupt-map-mask = <0 0 0 0>;
+                        interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
+                        ranges;
+                        num-lanes = <1>;
+                };
+
+                pcie@2,0 {
+                        device_type = "pci";
+                        reg = <0x1000 0 0 0 0>;
+                        #address-cells = <3>;
+                        #size-cells = <2>;
+                        #interrupt-cells = <1>;
+                        interrupt-map-mask = <0 0 0 0>;
+                        interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
+                        ranges;
+                        num-lanes = <1>;
+                };
+        };
@@ -8,6 +8,7 @@
 			- "qcom,pcie-apq8064" for apq8064
 			- "qcom,pcie-apq8084" for apq8084
 			- "qcom,pcie-msm8996" for msm8996 or apq8096
+			- "qcom,pcie-ipq4019" for ipq4019
 
 - reg:
 	Usage: required
@@ -87,7 +88,7 @@
 			- "core"	Clocks the pcie hw block
 			- "phy"		Clocks the pcie PHY block
 - clock-names:
-	Usage: required for apq8084
+	Usage: required for apq8084/ipq4019
 	Value type: <stringlist>
 	Definition: Should contain the following entries
 			- "aux"		Auxiliary (AUX) clock
@@ -126,6 +127,23 @@
 	Definition: Should contain the following entries
 			- "core" Core reset
 
+- reset-names:
+	Usage: required for ipq/apq8064
+	Value type: <stringlist>
+	Definition: Should contain the following entries
+			- "axi_m"		AXI master reset
+			- "axi_s"		AXI slave reset
+			- "pipe"		PIPE reset
+			- "axi_m_vmid"		VMID reset
+			- "axi_s_xpu"		XPU reset
+			- "parf"		PARF reset
+			- "phy"			PHY reset
+			- "axi_m_sticky"	AXI sticky reset
+			- "pipe_sticky"		PIPE sticky reset
+			- "pwr"			PWR reset
+			- "ahb"			AHB reset
+			- "phy_ahb"		PHY AHB reset
+
 - power-domains:
 	Usage: required for apq8084 and msm8996/apq8096
 	Value type: <prop-encoded-array>
@@ -1,4 +1,4 @@
-* Renesas RCar PCIe interface
+* Renesas R-Car PCIe interface
 
 Required properties:
 compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
@@ -0,0 +1,29 @@
+Sigma Designs Tango PCIe controller
+
+Required properties:
+
+- compatible: "sigma,smp8759-pcie"
+- reg: address/size of PCI configuration space, address/size of register area
+- bus-range: defined by size of PCI configuration space
+- device_type: "pci"
+- #size-cells: <2>
+- #address-cells: <3>
+- msi-controller
+- ranges: translation from system to bus addresses
+- interrupts: spec for misc interrupts, spec for MSI
+
+Example:
+
+        pcie@2e000 {
+                compatible = "sigma,smp8759-pcie";
+                reg = <0x50000000 0x400000>, <0x2e000 0x100>;
+                bus-range = <0 3>;
+                device_type = "pci";
+                #size-cells = <2>;
+                #address-cells = <3>;
+                msi-controller;
+                ranges = <0x02000000 0x0 0x00400000 0x50400000 0x0 0x3c00000>;
+                interrupts =
+                        <54 IRQ_TYPE_LEVEL_HIGH>, /* misc interrupts */
+                        <55 IRQ_TYPE_LEVEL_HIGH>; /* MSI */
+        };
@@ -348,6 +348,7 @@ PER-CPU MEM
   devm_free_percpu()
 
 PCI
+  devm_pci_alloc_host_bridge()	: managed PCI host bridge allocation
   devm_pci_remap_cfgspace()	: ioremap PCI configuration space
   devm_pci_remap_cfg_resource()	: ioremap PCI configuration space resource
   pcim_enable_device()		: after success, all PCI ops become managed
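devm_pci_alloc_host_bridge() is the new entry in the list above. A hedged sketch of how a platform host driver might use it; the driver, its private struct and the probe body are hypothetical, only the managed allocation call itself is the documented interface.

#include <linux/pci.h>
#include <linux/platform_device.h>

struct example_pcie {                   /* hypothetical per-controller state */
        void __iomem *base;
};

static int example_pcie_probe(struct platform_device *pdev)
{
        struct pci_host_bridge *bridge;
        struct example_pcie *pcie;

        /* Managed allocation: released automatically on probe failure or
         * driver unbind, so no explicit pci_free_host_bridge() is needed. */
        bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
        if (!bridge)
                return -ENOMEM;

        pcie = pci_host_bridge_priv(bridge);
        /* ... map registers into pcie->base, fill bridge->windows and
         * bridge->ops, then scan ... */

        return pci_scan_root_bus_bridge(bridge);
}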
MAINTAINERS

@@ -10160,9 +10160,16 @@ S:	Maintained
 F:	Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
 F:	drivers/pci/dwc/pcie-hisi.c
 
+PCIE DRIVER FOR HISILICON KIRIN
+M:	Xiaowei Song <songxiaowei@hisilicon.com>
+M:	Binghui Wang <wangbinghui@hisilicon.com>
+L:	linux-pci@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/pci/pcie-kirin.txt
+F:	drivers/pci/dwc/pcie-kirin.c
+
 PCIE DRIVER FOR ROCKCHIP
 M:	Shawn Lin <shawn.lin@rock-chips.com>
-M:	Wenrui Li <wenrui.li@rock-chips.com>
 L:	linux-pci@vger.kernel.org
 L:	linux-rockchip@lists.infradead.org
 S:	Maintained
@@ -10184,6 +10191,14 @@ S:	Supported
 F:	Documentation/devicetree/bindings/pci/pci-thunder-*
 F:	drivers/pci/host/pci-thunder-*
 
+PCIE DRIVER FOR MEDIATEK
+M:	Ryder Lee <ryder.lee@mediatek.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-mediatek@lists.infradead.org
+S:	Supported
+F:	Documentation/devicetree/bindings/pci/mediatek*
+F:	drivers/pci/host/*mediatek*
+
 PCMCIA SUBSYSTEM
 P:	Linux PCMCIA Team
 L:	linux-pcmcia@lists.infradead.org
@@ -16,6 +16,7 @@
 struct pci_sys_data;
 struct pci_ops;
 struct pci_bus;
+struct pci_host_bridge;
 struct device;
 
 struct hw_pci {
@@ -25,7 +26,7 @@ struct hw_pci {
 	unsigned int	io_optional:1;
 	void		**private_data;
 	int		(*setup)(int nr, struct pci_sys_data *);
-	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
+	int		(*scan)(int nr, struct pci_host_bridge *);
 	void		(*preinit)(void);
 	void		(*postinit)(void);
 	u8		(*swizzle)(struct pci_dev *dev, u8 *pin);
@@ -458,10 +458,14 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 	int nr, busnr;
 
 	for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
-		sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
-		if (WARN(!sys, "PCI: unable to allocate sys data!"))
+		struct pci_host_bridge *bridge;
+
+		bridge = pci_alloc_host_bridge(sizeof(struct pci_sys_data));
+		if (WARN(!bridge, "PCI: unable to allocate bridge!"))
 			break;
 
+		sys = pci_host_bridge_priv(bridge);
+
 		sys->busnr   = busnr;
 		sys->swizzle = hw->swizzle;
 		sys->map_irq = hw->map_irq;
@@ -473,7 +477,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {
-			struct pci_host_bridge *host_bridge;
 
 			ret = pcibios_init_resource(nr, sys, hw->io_optional);
 			if (ret) {
@@ -481,26 +484,37 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 				break;
 			}
 
-			if (hw->scan)
-				sys->bus = hw->scan(nr, sys);
-			else
-				sys->bus = pci_scan_root_bus_msi(parent,
-					sys->busnr, hw->ops, sys,
-					&sys->resources, hw->msi_ctrl);
+			bridge->map_irq = pcibios_map_irq;
+			bridge->swizzle_irq = pcibios_swizzle;
 
-			if (WARN(!sys->bus, "PCI: unable to scan bus!")) {
-				kfree(sys);
+			if (hw->scan)
+				ret = hw->scan(nr, bridge);
+			else {
+				list_splice_init(&sys->resources,
+						 &bridge->windows);
+				bridge->dev.parent = parent;
+				bridge->sysdata = sys;
+				bridge->busnr = sys->busnr;
+				bridge->ops = hw->ops;
+				bridge->msi = hw->msi_ctrl;
+				bridge->align_resource =
+						hw->align_resource;
+
+				ret = pci_scan_root_bus_bridge(bridge);
+			}
+
+			if (WARN(ret < 0, "PCI: unable to scan bus!")) {
+				pci_free_host_bridge(bridge);
 				break;
 			}
 
+			sys->bus = bridge->bus;
+
 			busnr = sys->bus->busn_res.end + 1;
 
 			list_add(&sys->node, head);
 
-			host_bridge = pci_find_host_bridge(sys->bus);
-			host_bridge->align_resource = hw->align_resource;
 		} else {
-			kfree(sys);
+			pci_free_host_bridge(bridge);
 			if (ret < 0)
 				break;
 		}
@@ -519,8 +533,6 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
 	if (hw->postinit)
 		hw->postinit();
 
-	pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
-
 	list_for_each_entry(sys, &head, node) {
 		struct pci_bus *bus = sys->bus;
@@ -152,16 +152,23 @@ static void rc_pci_fixup(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);
 
-static struct pci_bus __init *
-dove_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static int __init
+dove_pcie_scan_bus(int nr, struct pci_host_bridge *bridge)
 {
+	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
+
 	if (nr >= num_pcie_ports) {
 		BUG();
-		return NULL;
+		return -EINVAL;
 	}
 
-	return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
-				 &sys->resources);
+	list_splice_init(&sys->resources, &bridge->windows);
+	bridge->dev.parent = NULL;
+	bridge->sysdata = sys;
+	bridge->busnr = sys->busnr;
+	bridge->ops = &pcie_ops;
+
+	return pci_scan_root_bus_bridge(bridge);
 }
 
 static int __init dove_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -504,10 +504,10 @@ iop13xx_pci_abort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 /* Scan an IOP13XX PCI bus.  nr selects which ATU we use.
  */
-struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *sys)
+int iop13xx_scan_bus(int nr, struct pci_host_bridge *bridge)
 {
-	int which_atu;
-	struct pci_bus *bus = NULL;
+	int which_atu, ret;
+	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
 
 	switch (init_atu) {
 	case IOP13XX_INIT_ATU_ATUX:
@@ -525,9 +525,14 @@ struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *sys)
 
 	if (!which_atu) {
 		BUG();
-		return NULL;
+		return -ENODEV;
 	}
 
+	list_splice_init(&sys->resources, &bridge->windows);
+	bridge->dev.parent = NULL;
+	bridge->sysdata = sys;
+	bridge->busnr = sys->busnr;
+
 	switch (which_atu) {
 	case IOP13XX_INIT_ATU_ATUX:
 		if (time_after_eq(jiffies + msecs_to_jiffies(1000),
@@ -535,18 +540,22 @@ struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *sys)
 			while(time_before(jiffies, atux_trhfa_timeout))
 				udelay(100);
 
-		bus = pci_bus_atux = pci_scan_root_bus(NULL, sys->busnr,
-						       &iop13xx_atux_ops,
-						       sys, &sys->resources);
+		bridge->ops = &iop13xx_atux_ops;
+		ret = pci_scan_root_bus_bridge(bridge);
+		if (!ret)
+			pci_bus_atux = bridge->bus;
 		break;
 	case IOP13XX_INIT_ATU_ATUE:
-		bus = pci_bus_atue = pci_scan_root_bus(NULL, sys->busnr,
-						       &iop13xx_atue_ops,
-						       sys, &sys->resources);
+		bridge->ops = &iop13xx_atue_ops;
+		ret = pci_scan_root_bus_bridge(bridge);
+		if (!ret)
+			pci_bus_atue = bridge->bus;
 		break;
+	default:
+		ret = -EINVAL;
 	}
 
-	return bus;
+	return ret;
 }
 
 /* This function is called from iop13xx_pci_init() after assigning valid
@@ -11,9 +11,10 @@ extern size_t iop13xx_atue_mem_size;
 extern size_t iop13xx_atux_mem_size;
 
 struct pci_sys_data;
+struct pci_host_bridge;
 struct hw_pci;
 int iop13xx_pci_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *);
+int iop13xx_scan_bus(int nr, struct pci_host_bridge *bridge);
 void iop13xx_atu_select(struct hw_pci *plat_pci);
 void iop13xx_pci_init(void);
 void iop13xx_map_pci_memory(void);
@@ -194,16 +194,22 @@ static void rc_pci_fixup(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);
 
-static struct pci_bus __init *
-mv78xx0_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static int __init mv78xx0_pcie_scan_bus(int nr, struct pci_host_bridge *bridge)
 {
+	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
+
 	if (nr >= num_pcie_ports) {
 		BUG();
-		return NULL;
+		return -EINVAL;
 	}
 
-	return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
-				 &sys->resources);
+	list_splice_init(&sys->resources, &bridge->windows);
+	bridge->dev.parent = NULL;
+	bridge->sysdata = sys;
+	bridge->busnr = sys->busnr;
+	bridge->ops = &pcie_ops;
+
+	return pci_scan_root_bus_bridge(bridge);
 }
 
 static int __init mv78xx0_pcie_map_irq(const struct pci_dev *dev, u8 slot,
@@ -54,6 +54,7 @@ void orion5x_restart(enum reboot_mode, const char *);
  * PCIe/PCI functions.
  */
 struct pci_bus;
+struct pci_host_bridge;
 struct pci_sys_data;
 struct pci_dev;
 
@@ -61,7 +62,7 @@ void orion5x_pcie_id(u32 *dev, u32 *rev);
 void orion5x_pci_disable(void);
 void orion5x_pci_set_cardbus_mode(void);
 int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys);
+int orion5x_pci_sys_scan_bus(int nr, struct pci_host_bridge *bridge);
 int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
 
 struct tag;
@@ -555,18 +555,27 @@ int __init orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys)
 	return 0;
 }
 
-struct pci_bus __init *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys)
+int __init orion5x_pci_sys_scan_bus(int nr, struct pci_host_bridge *bridge)
 {
-	if (nr == 0)
-		return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
-					 &sys->resources);
-
-	if (nr == 1 && !orion5x_pci_disabled)
-		return pci_scan_root_bus(NULL, sys->busnr, &pci_ops, sys,
-					 &sys->resources);
+	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
+
+	list_splice_init(&sys->resources, &bridge->windows);
+	bridge->dev.parent = NULL;
+	bridge->sysdata = sys;
+	bridge->busnr = sys->busnr;
+
+	if (nr == 0) {
+		bridge->ops = &pcie_ops;
+		return pci_scan_root_bus_bridge(bridge);
+	}
+
+	if (nr == 1 && !orion5x_pci_disabled) {
+		bridge->ops = &pci_ops;
+		return pci_scan_root_bus_bridge(bridge);
+	}
 
 	BUG();
-	return NULL;
+	return -ENODEV;
 }
 
 int __init orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -39,20 +39,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 	return res->start;
 }
 
+#ifdef CONFIG_ACPI
 /*
  * Try to assign the IRQ number when probing a new device
  */
 int pcibios_alloc_irq(struct pci_dev *dev)
 {
-	if (acpi_disabled)
-		dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
-#ifdef CONFIG_ACPI
-	else
-		return acpi_pci_irq_enable(dev);
-#endif
+	if (!acpi_disabled)
+		acpi_pci_irq_enable(dev);
 
 	return 0;
 }
+#endif
 
 /*
  * raw_pci_read/write - Platform-specific PCI config space access.
@@ -80,7 +80,6 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
 #define PCI_BAR3_REG	0x1c
 #define PCI_BAR4_REG	0x20
 #define PCI_BAR5_REG	0x24
-#define PCI_BAR_COUNT	6
 #define PCI_BAR_RANGE_MASK	0xFFFFFFFF
 
 /* CARDBUS CIS POINTER */
@@ -39,7 +39,6 @@ struct pci_controller {
 	unsigned long io_offset;
 	unsigned long io_map_base;
 	struct resource *busn_resource;
-	unsigned long busn_offset;
 
 #ifndef CONFIG_PCI_DOMAINS_GENERIC
 	unsigned int index;
@@ -86,8 +86,7 @@ static void pcibios_scanbus(struct pci_controller *hose)
 				hose->mem_resource, hose->mem_offset);
 	pci_add_resource_offset(&resources,
 				hose->io_resource, hose->io_offset);
-	pci_add_resource_offset(&resources,
-				hose->busn_resource, hose->busn_offset);
+	pci_add_resource(&resources, hose->busn_resource);
 	bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
 				&resources);
 	hose->bus = bus;
@@ -149,6 +149,12 @@
  */
 #define HV_X64_DEPRECATING_AEOI_RECOMMENDED	(1 << 9)
 
+/*
+ * HV_VP_SET available
+ */
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED	(1 << 11)
+
+
 /*
  * Crash notification flag.
  */
@@ -24,7 +24,6 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
 
 unsigned int pci_early_dump_regs;
 static int pci_bf_sort;
-static int smbios_type_b1_flag;
 int pci_routeirq;
 int noioapicquirk;
 #ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -197,34 +196,18 @@ static int __init set_bf_sort(const struct dmi_system_id *d)
 static void __init read_dmi_type_b1(const struct dmi_header *dm,
 				    void *private_data)
 {
-	u8 *d = (u8 *)dm + 4;
+	u8 *data = (u8 *)dm + 4;
 
 	if (dm->type != 0xB1)
 		return;
-	switch (((*(u32 *)d) >> 9) & 0x03) {
-	case 0x00:
-		printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n");
-		break;
-	case 0x01: /* set pci=bfsort */
-		smbios_type_b1_flag = 1;
-		break;
-	case 0x02: /* do not set pci=bfsort */
-		smbios_type_b1_flag = 2;
-		break;
-	default:
-		break;
-	}
+	if ((((*(u32 *)data) >> 9) & 0x03) == 0x01)
+		set_bf_sort((const struct dmi_system_id *)private_data);
 }
 
 static int __init find_sort_method(const struct dmi_system_id *d)
 {
-	dmi_walk(read_dmi_type_b1, NULL);
-
-	if (smbios_type_b1_flag == 1) {
-		set_bf_sort(d);
-		return 0;
-	}
-	return -1;
+	dmi_walk(read_dmi_type_b1, (void *)d);
+	return 0;
 }
 
 /*
@@ -571,3 +571,50 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+
+/*
+ * Device [1022:7808]
+ * 23. USB Wake on Connect/Disconnect with Low Speed Devices
+ * https://support.amd.com/TechDocs/46837.pdf
+ * Appendix A2
+ * https://support.amd.com/TechDocs/42413.pdf
+ */
+static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
+{
+	dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
+	dev->pme_support &= ~((PCI_PM_CAP_PME_D3 | PCI_PM_CAP_PME_D3cold)
+		>> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
+
+/*
+ * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
+ *
+ * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
+ * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
+ * for soft poweroff and suspend-to-RAM.
+ *
+ * As far as we know, this is related to the address space, not to the Root
+ * Port itself.  Attaching the quirk to the Root Port is a convenience, but
+ * it could probably also be a standalone DMI quirk.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=103211
+ */
+static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+
+	if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
+	     !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
+	    pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
+		return;
+
+	res = request_mem_region(0x7fa00000, 0x200000,
+				 "MacBook Pro poweroff workaround");
+	if (res)
+		dev_info(dev, "claimed %s %pR\n", res->name, res);
+	else
+		dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
@@ -46,7 +46,7 @@ static inline void set_bios_x(void)
 	pcibios_enabled = 1;
 	set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
 	if (__supported_pte_mask & _PAGE_NX)
-		printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
+		printk(KERN_INFO "PCI: PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
 }
 
 /*
@@ -1152,16 +1152,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		return;
 
 	if (state == VGA_SWITCHEROO_ON) {
-		unsigned d3_delay = dev->pdev->d3_delay;
-
 		pr_info("amdgpu: switched on\n");
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
 		amdgpu_device_resume(dev, true, true);
 
-		dev->pdev->d3_delay = d3_delay;
-
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 		drm_kms_helper_poll_enable(dev);
 	} else {
@@ -113,7 +113,6 @@ static inline bool radeon_is_atpx_hybrid(void) { return false; }
 #endif
 
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
-#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
 struct radeon_px_quirk {
 	u32 chip_vendor;
@@ -140,8 +139,6 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
 	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
 	 */
 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
-	/* macbook pro 8.2 */
-	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
 	{ 0, 0, 0, 0, 0 },
 };
 
@@ -1245,25 +1242,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct radeon_device *rdev = dev->dev_private;
 
 	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
 		return;
 
 	if (state == VGA_SWITCHEROO_ON) {
-		unsigned d3_delay = dev->pdev->d3_delay;
-
 		pr_info("radeon: switched on\n");
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
-			dev->pdev->d3_delay = 20;
-
 		radeon_resume_kms(dev, true, true);
 
-		dev->pdev->d3_delay = d3_delay;
-
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 		drm_kms_helper_poll_enable(dev);
 	} else {
@@ -2348,30 +2348,19 @@ static void fm10k_io_resume(struct pci_dev *pdev)
 		netif_device_attach(netdev);
 }
 
-/**
- * fm10k_io_reset_notify - called when PCI function is reset
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the PCI function is reset such as from
- * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it
- * means we should prepare for a function reset. If prepare is false, it means
- * the function reset just occurred.
- */
-static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare)
+static void fm10k_io_reset_prepare(struct pci_dev *pdev)
+{
+	/* warn incase we have any active VF devices */
+	if (pci_num_vf(pdev))
+		dev_warn(&pdev->dev,
+			 "PCIe FLR may cause issues for any active VF devices\n");
+	fm10k_prepare_suspend(pci_get_drvdata(pdev));
+}
+
+static void fm10k_io_reset_done(struct pci_dev *pdev)
 {
 	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
-	int err = 0;
-
-	if (prepare) {
-		/* warn incase we have any active VF devices */
-		if (pci_num_vf(pdev))
-			dev_warn(&pdev->dev,
-				 "PCIe FLR may cause issues for any active VF devices\n");
-
-		fm10k_prepare_suspend(interface);
-	} else {
-		err = fm10k_handle_resume(interface);
-	}
+	int err = fm10k_handle_resume(interface);
 
 	if (err) {
 		dev_warn(&pdev->dev,
@@ -2384,7 +2373,8 @@ static const struct pci_error_handlers fm10k_err_handler = {
 	.error_detected = fm10k_io_error_detected,
 	.slot_reset = fm10k_io_slot_reset,
 	.resume = fm10k_io_resume,
-	.reset_notify = fm10k_io_reset_notify,
+	.reset_prepare = fm10k_io_reset_prepare,
+	.reset_done = fm10k_io_reset_done,
 };
 
 static struct pci_driver fm10k_driver = {
@@ -346,7 +346,36 @@ static const struct pci_device_id mwifiex_ids[] = {
 
 MODULE_DEVICE_TABLE(pci, mwifiex_ids);
 
-static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
+/*
+ * Cleanup all software without cleaning anything related to PCIe and HW.
+ */
+static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
+{
+	struct pcie_service_card *card = pci_get_drvdata(pdev);
+	struct mwifiex_adapter *adapter = card->adapter;
+
+	if (!adapter) {
+		dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
+			__func__);
+		return;
+	}
+
+	mwifiex_dbg(adapter, INFO,
+		    "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Pre-FLR\n",
+		    __func__, pdev->vendor, pdev->device, pdev->revision);
+
+	mwifiex_shutdown_sw(adapter);
+	clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+	clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
+	mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+}
+
+/*
+ * Kernel stores and restores PCIe function context before and after performing
+ * FLR respectively. Reconfigure the software and firmware including firmware
+ * redownload.
+ */
+static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
 {
 	struct pcie_service_card *card = pci_get_drvdata(pdev);
 	struct mwifiex_adapter *adapter = card->adapter;
@@ -359,35 +388,19 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
 	}
 
 	mwifiex_dbg(adapter, INFO,
-		    "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n",
-		    __func__, pdev->vendor, pdev->device,
-		    pdev->revision,
-		    prepare ? "Pre-FLR" : "Post-FLR");
-
-	if (prepare) {
-		/* Kernel would be performing FLR after this notification.
-		 * Cleanup all software without cleaning anything related to
-		 * PCIe and HW.
-		 */
-		mwifiex_shutdown_sw(adapter);
-		clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
-		clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
-	} else {
-		/* Kernel stores and restores PCIe function context before and
-		 * after performing FLR respectively. Reconfigure the software
-		 * and firmware including firmware redownload
-		 */
-		ret = mwifiex_reinit_sw(adapter);
-		if (ret) {
-			dev_err(&pdev->dev, "reinit failed: %d\n", ret);
-			return;
-		}
-	}
-	mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+		    "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Post-FLR\n",
+		    __func__, pdev->vendor, pdev->device, pdev->revision);
+
+	ret = mwifiex_reinit_sw(adapter);
+	if (ret)
+		dev_err(&pdev->dev, "reinit failed: %d\n", ret);
+	else
+		mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
 }
 
-static const struct pci_error_handlers mwifiex_pcie_err_handler[] = {
-		{ .reset_notify = mwifiex_pcie_reset_notify, },
+static const struct pci_error_handlers mwifiex_pcie_err_handler = {
+	.reset_prepare		= mwifiex_pcie_reset_prepare,
+	.reset_done		= mwifiex_pcie_reset_done,
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -408,7 +421,7 @@ static struct pci_driver __refdata mwifiex_pcie = {
 	},
 #endif
 	.shutdown = mwifiex_pcie_shutdown,
-	.err_handler = mwifiex_pcie_err_handler,
+	.err_handler = &mwifiex_pcie_err_handler,
 };
 
 /*
@@ -2303,14 +2303,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return result;
 }
 
-static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
+static void nvme_reset_prepare(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
+	nvme_dev_disable(dev, false);
+}
 
-	if (prepare)
-		nvme_dev_disable(dev, false);
-	else
-		nvme_reset_ctrl(&dev->ctrl);
+static void nvme_reset_done(struct pci_dev *pdev)
+{
+	struct nvme_dev *dev = pci_get_drvdata(pdev);
+	nvme_reset_ctrl(&dev->ctrl);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2434,7 +2436,8 @@ static const struct pci_error_handlers nvme_err_handler = {
 	.error_detected	= nvme_error_detected,
 	.slot_reset	= nvme_slot_reset,
 	.resume		= nvme_error_resume,
-	.reset_notify	= nvme_reset_notify,
+	.reset_prepare	= nvme_reset_prepare,
+	.reset_done	= nvme_reset_done,
 };
 
 static const struct pci_device_id nvme_id_table[] = {
@@ -113,7 +113,8 @@ EXPORT_SYMBOL_GPL(of_irq_parse_pci);
  * @pin: PCI irq pin number; passed when used as map_irq callback. Unused
  *
  * @slot and @pin are unused, but included in the function so that this
- * function can be used directly as the map_irq callback to pci_fixup_irqs().
+ * function can be used directly as the map_irq callback to
+ * pci_assign_irq() and struct pci_host_bridge.map_irq pointer
 */
 int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
 {
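For context, a minimal sketch of the bridge-based IRQ wiring the reworded comment above points at. This is an assumed fragment of a DT host driver, not code from this commit; only the two helpers and the bridge hooks are the documented interfaces.

#include <linux/of_pci.h>
#include <linux/pci.h>

/* Hypothetical helper: legacy INTx mapping is handed to the bridge hooks
 * before scanning, instead of a post-scan pci_fixup_irqs() pass. */
static int example_scan_with_dt_irqs(struct pci_host_bridge *bridge)
{
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;

        /* pci_assign_irq() consults map_irq/swizzle_irq during enumeration */
        return pci_scan_root_bus_bridge(bridge);
}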
@@ -4,7 +4,8 @@
 
 obj-y		+= access.o bus.o probe.o host-bridge.o remove.o pci.o \
 			pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
-			irq.o vpd.o setup-bus.o vc.o mmap.o
+			irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o
 
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSFS) += slot.o
@@ -28,20 +29,6 @@ obj-$(CONFIG_HT_IRQ) += htirq.o
 obj-$(CONFIG_PCI_ATS) += ats.o
 obj-$(CONFIG_PCI_IOV) += iov.o
 
-#
-# Some architectures use the generic PCI setup functions
-#
-obj-$(CONFIG_ALPHA) += setup-irq.o
-obj-$(CONFIG_ARC) += setup-irq.o
-obj-$(CONFIG_ARM) += setup-irq.o
-obj-$(CONFIG_ARM64) += setup-irq.o
-obj-$(CONFIG_UNICORE32) += setup-irq.o
-obj-$(CONFIG_SUPERH) += setup-irq.o
-obj-$(CONFIG_MIPS) += setup-irq.o
-obj-$(CONFIG_TILE) += setup-irq.o
-obj-$(CONFIG_SPARC_LEON) += setup-irq.o
-obj-$(CONFIG_M68K) += setup-irq.o
-
 #
 # ACPI Related PCI FW Functions
 # ACPI _DSM provided firmware instance and string name
@@ -153,23 +153,27 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
 	u32 max_requests;
 	int pos;
 
+	if (WARN_ON(pdev->pri_enabled))
+		return -EBUSY;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
 	if (!pos)
 		return -EINVAL;
 
-	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
 	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-	if ((control & PCI_PRI_CTRL_ENABLE) ||
-	    !(status & PCI_PRI_STATUS_STOPPED))
+	if (!(status & PCI_PRI_STATUS_STOPPED))
 		return -EBUSY;
 
 	pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
 	reqs = min(max_requests, reqs);
+	pdev->pri_reqs_alloc = reqs;
 	pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
 
-	control |= PCI_PRI_CTRL_ENABLE;
+	control = PCI_PRI_CTRL_ENABLE;
 	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
 
+	pdev->pri_enabled = 1;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(pci_enable_pri);
@@ -185,6 +189,9 @@ void pci_disable_pri(struct pci_dev *pdev)
 	u16 control;
 	int pos;
 
+	if (WARN_ON(!pdev->pri_enabled))
+		return;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
 	if (!pos)
 		return;
@@ -192,9 +199,33 @@ void pci_disable_pri(struct pci_dev *pdev)
 	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
 	control &= ~PCI_PRI_CTRL_ENABLE;
 	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+	pdev->pri_enabled = 0;
 }
 EXPORT_SYMBOL_GPL(pci_disable_pri);
 
+/**
+ * pci_restore_pri_state - Restore PRI
+ * @pdev: PCI device structure
+ */
+void pci_restore_pri_state(struct pci_dev *pdev)
+{
+	u16 control = PCI_PRI_CTRL_ENABLE;
+	u32 reqs = pdev->pri_reqs_alloc;
+	int pos;
+
+	if (!pdev->pri_enabled)
+		return;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+	if (!pos)
+		return;
+
+	pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_restore_pri_state);
+
 /**
  * pci_reset_pri - Resets device's PRI state
  * @pdev: PCI device structure
@@ -207,16 +238,14 @@ int pci_reset_pri(struct pci_dev *pdev)
 	u16 control;
 	int pos;
 
+	if (WARN_ON(pdev->pri_enabled))
+		return -EBUSY;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
 	if (!pos)
 		return -EINVAL;
 
-	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
-	if (control & PCI_PRI_CTRL_ENABLE)
-		return -EBUSY;
-
-	control |= PCI_PRI_CTRL_RESET;
-
+	control = PCI_PRI_CTRL_RESET;
 	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
 
 	return 0;
@@ -239,16 +268,14 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
 	u16 control, supported;
 	int pos;
 
+	if (WARN_ON(pdev->pasid_enabled))
+		return -EBUSY;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
 	if (!pos)
 		return -EINVAL;
 
-	pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
 	pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
-
-	if (control & PCI_PASID_CTRL_ENABLE)
-		return -EINVAL;
-
 	supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
 
 	/* User wants to enable anything unsupported? */
@@ -256,9 +283,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
 		return -EINVAL;
 
 	control = PCI_PASID_CTRL_ENABLE | features;
+	pdev->pasid_features = features;
 
 	pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
 
+	pdev->pasid_enabled = 1;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(pci_enable_pasid);
@@ -266,21 +296,46 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid);
 /**
  * pci_disable_pasid - Disable the PASID capability
  * @pdev: PCI device structure
- *
  */
 void pci_disable_pasid(struct pci_dev *pdev)
 {
 	u16 control = 0;
 	int pos;
 
+	if (WARN_ON(!pdev->pasid_enabled))
+		return;
+
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
 	if (!pos)
 		return;
 
 	pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+
+	pdev->pasid_enabled = 0;
 }
 EXPORT_SYMBOL_GPL(pci_disable_pasid);
 
+/**
+ * pci_restore_pasid_state - Restore PASID capabilities
+ * @pdev: PCI device structure
+ */
+void pci_restore_pasid_state(struct pci_dev *pdev)
+{
+	u16 control;
+	int pos;
+
+	if (!pdev->pasid_enabled)
+		return;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+	if (!pos)
+		return;
+
+	control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features;
|
||||||
|
pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* pci_pasid_features - Check which PASID features are supported
|
* pci_pasid_features - Check which PASID features are supported
|
||||||
* @pdev: PCI device structure
|
* @pdev: PCI device structure
|
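
The ats.c hunks above cache the PRI request allocation (pri_reqs_alloc) and the PASID feature bits (pasid_features) in struct pci_dev so that pci_restore_pri_state() and pci_restore_pasid_state() can re-program the capabilities after a Function-Level Reset. As a rough illustration of how a consumer (typically an IOMMU driver) might drive this API -- the function name and the request count below are invented for this sketch, not taken from the patch:

    #include <linux/pci.h>
    #include <linux/pci-ats.h>

    /* Hypothetical caller; error handling trimmed to the essentials. */
    static int example_enable_ats_features(struct pci_dev *pdev)
    {
        int ret;

        /* pci_reset_pri() now insists on PRI being disabled first */
        ret = pci_reset_pri(pdev);
        if (ret)
            return ret;

        /*
         * 32 outstanding page requests is an arbitrary choice here; the
         * core clamps it to the device capacity read from PCI_PRI_MAX_REQ
         * and caches the result in pdev->pri_reqs_alloc for later restore.
         */
        ret = pci_enable_pri(pdev, 32);
        if (ret)
            return ret;

        ret = pci_enable_pasid(pdev, 0);   /* no EXEC/PRIV features in this sketch */
        if (ret) {
            pci_disable_pri(pdev);
            return ret;
        }

        return 0;
    }

After an FLR, calling pci_restore_pri_state(pdev) and pci_restore_pasid_state(pdev) re-applies exactly the cached allocation and feature bits, which is what the new pri_enabled/pasid_enabled bookkeeping exists for.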

@@ -16,6 +16,7 @@ config PCIE_DW_EP

config PCI_DRA7XX
    bool "TI DRA7xx PCIe controller"
+    depends on SOC_DRA7XX || COMPILE_TEST
    depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
    depends on OF && HAS_IOMEM && TI_PIPE3
    help

@@ -158,4 +159,14 @@ config PCIE_ARTPEC6
      Say Y here to enable PCIe controller support on Axis ARTPEC-6
      SoCs. This PCIe controller uses the DesignWare core.

+config PCIE_KIRIN
+    depends on OF && ARM64
+    bool "HiSilicon Kirin series SoCs PCIe controllers"
+    depends on PCI
+    select PCIEPORTBUS
+    select PCIE_DW_HOST
+    help
+      Say Y here if you want PCIe controller support
+      on HiSilicon Kirin series SoCs.
+
endmenu

@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o

# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.

@@ -174,7 +174,7 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
    dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
-               ~LEG_EP_INTERRUPTS & ~MSI);
+               LEG_EP_INTERRUPTS | MSI);

    dra7xx_pcie_writel(dra7xx,
               PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,

@@ -184,7 +184,7 @@ static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
    dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
-               ~INTERRUPTS);
+               INTERRUPTS);
    dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
               INTERRUPTS);
}

@@ -208,7 +208,7 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
    dra7xx_pcie_enable_interrupts(dra7xx);
}

-static struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
    .host_init = dra7xx_pcie_host_init,
};

@@ -590,7 +590,7 @@ static void exynos_pcie_host_init(struct pcie_port *pp)
    exynos_pcie_enable_interrupts(ep);
}

-static struct dw_pcie_host_ops exynos_pcie_host_ops = {
+static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
    .rd_own_conf = exynos_pcie_rd_own_conf,
    .wr_own_conf = exynos_pcie_wr_own_conf,
    .host_init = exynos_pcie_host_init,

@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>

@@ -59,6 +60,7 @@ struct imx6_pcie {
    u32 tx_swing_full;
    u32 tx_swing_low;
    int link_gen;
+    struct regulator *vpcie;
};

/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */

@@ -284,6 +286,8 @@ static int imx6q_pcie_abort_handler(unsigned long addr,

static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
+    struct device *dev = imx6_pcie->pci->dev;
+
    switch (imx6_pcie->variant) {
    case IMX7D:
        reset_control_assert(imx6_pcie->pciephy_reset);

@@ -310,6 +314,14 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
                   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
        break;
    }

+    if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+        int ret = regulator_disable(imx6_pcie->vpcie);
+
+        if (ret)
+            dev_err(dev, "failed to disable vpcie regulator: %d\n",
+                ret);
+    }
}

static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)

@@ -376,10 +388,19 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
    struct device *dev = pci->dev;
    int ret;

+    if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
+        ret = regulator_enable(imx6_pcie->vpcie);
+        if (ret) {
+            dev_err(dev, "failed to enable vpcie regulator: %d\n",
+                ret);
+            return;
+        }
+    }
+
    ret = clk_prepare_enable(imx6_pcie->pcie_phy);
    if (ret) {
        dev_err(dev, "unable to enable pcie_phy clock\n");
-        return;
+        goto err_pcie_phy;
    }

    ret = clk_prepare_enable(imx6_pcie->pcie_bus);

@@ -439,6 +460,13 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
    clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
    clk_disable_unprepare(imx6_pcie->pcie_phy);
+err_pcie_phy:
+    if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+        ret = regulator_disable(imx6_pcie->vpcie);
+        if (ret)
+            dev_err(dev, "failed to disable vpcie regulator: %d\n",
+                ret);
+    }
}

static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)

@@ -629,7 +657,7 @@ static int imx6_pcie_link_up(struct dw_pcie *pci)
           PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}

-static struct dw_pcie_host_ops imx6_pcie_host_ops = {
+static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
    .host_init = imx6_pcie_host_init,
};

@@ -802,6 +830,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
    if (ret)
        imx6_pcie->link_gen = 1;

+    imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+    if (IS_ERR(imx6_pcie->vpcie)) {
+        if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
+            return -EPROBE_DEFER;
+        imx6_pcie->vpcie = NULL;
+    }
+
    platform_set_drvdata(pdev, imx6_pcie);

    ret = imx6_add_pcie_port(imx6_pcie, pdev);
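
The i.MX6 hunks above wire up an optional "vpcie" supply: probe treats a missing regulator as simply absent (NULL) while still honouring -EPROBE_DEFER, and the core-reset paths only touch the supply when it exists and is actually enabled. A condensed sketch of that optional-regulator idiom -- the example_* helpers are hypothetical; only devm_regulator_get_optional(), regulator_is_enabled() and regulator_enable() come from the regulator API:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /* Probe-time lookup: a supply that is not described is not an error. */
    static struct regulator *example_get_optional_supply(struct device *dev)
    {
        struct regulator *reg = devm_regulator_get_optional(dev, "vpcie");

        if (IS_ERR(reg)) {
            if (PTR_ERR(reg) == -EPROBE_DEFER)
                return reg;     /* let the caller defer probing */
            reg = NULL;         /* not wired up: silently skip it */
        }
        return reg;
    }

    /* Power up only when the supply exists and is not already on. */
    static int example_power_up(struct regulator *reg)
    {
        if (reg && !regulator_is_enabled(reg))
            return regulator_enable(reg);
        return 0;
    }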

@@ -291,7 +291,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
             "Asynchronous external abort");
}

-static struct dw_pcie_host_ops keystone_pcie_host_ops = {
+static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
    .rd_other_conf = ks_dw_pcie_rd_other_conf,
    .wr_other_conf = ks_dw_pcie_wr_other_conf,
    .host_init = ks_pcie_host_init,

@@ -39,7 +39,7 @@ struct ls_pcie_drvdata {
    u32 lut_offset;
    u32 ltssm_shift;
    u32 lut_dbg;
-    struct dw_pcie_host_ops *ops;
+    const struct dw_pcie_host_ops *ops;
    const struct dw_pcie_ops *dw_pcie_ops;
};

@@ -185,12 +185,12 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
    return 0;
}

-static struct dw_pcie_host_ops ls1021_pcie_host_ops = {
+static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
    .host_init = ls1021_pcie_host_init,
    .msi_host_init = ls_pcie_msi_host_init,
};

-static struct dw_pcie_host_ops ls_pcie_host_ops = {
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
    .host_init = ls_pcie_host_init,
    .msi_host_init = ls_pcie_msi_host_init,
};

@@ -160,7 +160,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
    return IRQ_HANDLED;
}

-static struct dw_pcie_host_ops armada8k_pcie_host_ops = {
+static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
    .host_init = armada8k_pcie_host_init,
};

@@ -184,7 +184,7 @@ static void artpec6_pcie_host_init(struct pcie_port *pp)
    artpec6_pcie_enable_interrupts(artpec6_pcie);
}

-static struct dw_pcie_host_ops artpec6_pcie_host_ops = {
+static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
    .host_init = artpec6_pcie_host_init,
};

@@ -280,9 +280,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
    struct device_node *np = dev->of_node;
    struct platform_device *pdev = to_platform_device(dev);
    struct pci_bus *bus, *child;
+    struct pci_host_bridge *bridge;
    struct resource *cfg_res;
    int i, ret;
-    LIST_HEAD(res);
    struct resource_entry *win, *tmp;

    cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");

@@ -295,16 +295,21 @@ int dw_pcie_host_init(struct pcie_port *pp)
        dev_err(dev, "missing *config* reg space\n");
    }

-    ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
+    bridge = pci_alloc_host_bridge(0);
+    if (!bridge)
+        return -ENOMEM;
+
+    ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
+                    &bridge->windows, &pp->io_base);
    if (ret)
        return ret;

-    ret = devm_request_pci_bus_resources(dev, &res);
+    ret = devm_request_pci_bus_resources(dev, &bridge->windows);
    if (ret)
        goto error;

    /* Get the I/O and memory ranges from DT */
-    resource_list_for_each_entry_safe(win, tmp, &res) {
+    resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
        switch (resource_type(win->res)) {
        case IORESOURCE_IO:
            ret = pci_remap_iospace(win->res, pp->io_base);

@@ -400,27 +405,27 @@ int dw_pcie_host_init(struct pcie_port *pp)
    pp->ops->host_init(pp);

    pp->root_bus_nr = pp->busn->start;

+    bridge->dev.parent = dev;
+    bridge->sysdata = pp;
+    bridge->busnr = pp->root_bus_nr;
+    bridge->ops = &dw_pcie_ops;
+    bridge->map_irq = of_irq_parse_and_map_pci;
+    bridge->swizzle_irq = pci_common_swizzle;
    if (IS_ENABLED(CONFIG_PCI_MSI)) {
-        bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr,
-                        &dw_pcie_ops, pp, &res,
-                        &dw_pcie_msi_chip);
+        bridge->msi = &dw_pcie_msi_chip;
        dw_pcie_msi_chip.dev = dev;
-    } else
-        bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops,
-                    pp, &res);
-    if (!bus) {
-        ret = -ENOMEM;
-        goto error;
    }

+    ret = pci_scan_root_bus_bridge(bridge);
+    if (ret)
+        goto error;
+
+    bus = bridge->bus;
+
    if (pp->ops->scan_bus)
        pp->ops->scan_bus(pp);

-#ifdef CONFIG_ARM
-    /* support old dtbs that incorrectly describe IRQs */
-    pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
-
    pci_bus_size_bridges(bus);
    pci_bus_assign_resources(bus);

@@ -431,7 +436,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
    return 0;

error:
-    pci_free_resource_list(&res);
+    pci_free_host_bridge(bridge);
    return ret;
}
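
The pcie-designware-host.c change above drops pci_scan_root_bus_msi()/pci_scan_root_bus() in favour of the generic struct pci_host_bridge flow. A condensed sketch of that registration shape, with the DT window parsing and the DesignWare specifics elided (the example_register_host() wrapper and its parameters are hypothetical):

    #include <linux/pci.h>
    #include <linux/of_pci.h>

    static int example_register_host(struct device *dev, struct pci_ops *ops,
                     void *sysdata, int busnr)
    {
        struct pci_host_bridge *bridge;
        int ret;

        bridge = pci_alloc_host_bridge(0);      /* no driver-private data */
        if (!bridge)
            return -ENOMEM;

        /* bridge->windows would be populated from DT and requested here */
        bridge->dev.parent = dev;
        bridge->sysdata = sysdata;
        bridge->busnr = busnr;
        bridge->ops = ops;
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;

        ret = pci_scan_root_bus_bridge(bridge); /* creates bridge->bus */
        if (ret) {
            pci_free_host_bridge(bridge);
            return ret;
        }

        pci_bus_size_bridges(bridge->bus);
        pci_bus_assign_resources(bridge->bus);
        return 0;
    }

Routing interrupts through bridge->map_irq/swizzle_irq is what lets the hunk drop the old pci_fixup_irqs() pass.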

@@ -46,7 +46,7 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp)
    dw_pcie_msi_init(pp);
}

-static struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
+static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
    .host_init = dw_plat_pcie_host_init,
};

@@ -67,7 +67,8 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,

    ret = devm_request_irq(dev, pp->msi_irq,
                dw_plat_pcie_msi_irq_handler,
-                IRQF_SHARED, "dw-plat-pcie-msi", pp);
+                IRQF_SHARED | IRQF_NO_THREAD,
+                "dw-plat-pcie-msi", pp);
    if (ret) {
        dev_err(dev, "failed to request MSI IRQ\n");
        return ret;

@@ -162,7 +162,7 @@ struct pcie_port {
    struct resource *mem;
    struct resource *busn;
    int irq;
-    struct dw_pcie_host_ops *ops;
+    const struct dw_pcie_host_ops *ops;
    int msi_irq;
    struct irq_domain *irq_domain;
    unsigned long msi_data;

@@ -0,0 +1,517 @@
/*
 * PCIe host controller driver for Kirin Phone SoCs
 *
 * Copyright (C) 2017 Hilisicon Electronics Co., Ltd.
 * http://www.huawei.com
 *
 * Author: Xiaowei Song <songxiaowei@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/compiler.h>
#include <linux/compiler.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"

#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)

#define REF_CLK_FREQ 100000000

/* PCIe ELBI registers */
#define SOC_PCIECTRL_CTRL0_ADDR 0x000
#define SOC_PCIECTRL_CTRL1_ADDR 0x004
#define SOC_PCIEPHY_CTRL2_ADDR 0x008
#define SOC_PCIEPHY_CTRL3_ADDR 0x00c
#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)

/* info located in APB */
#define PCIE_APP_LTSSM_ENABLE 0x01c
#define PCIE_APB_PHY_CTRL0 0x0
#define PCIE_APB_PHY_CTRL1 0x4
#define PCIE_APB_PHY_STATUS0 0x400
#define PCIE_LINKUP_ENABLE (0x8020)
#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
#define PIPE_CLK_STABLE (0x1 << 19)
#define PHY_REF_PAD_BIT (0x1 << 8)
#define PHY_PWR_DOWN_BIT (0x1 << 22)
#define PHY_RST_ACK_BIT (0x1 << 16)

/* info located in sysctrl */
#define SCTRL_PCIE_CMOS_OFFSET 0x60
#define SCTRL_PCIE_CMOS_BIT 0x10
#define SCTRL_PCIE_ISO_OFFSET 0x44
#define SCTRL_PCIE_ISO_BIT 0x30
#define SCTRL_PCIE_HPCLK_OFFSET 0x190
#define SCTRL_PCIE_HPCLK_BIT 0x184000
#define SCTRL_PCIE_OE_OFFSET 0x14a
#define PCIE_DEBOUNCE_PARAM 0xF0F400
#define PCIE_OE_BYPASS (0x3 << 28)

/* peri_crg ctrl */
#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000

/* Time for delay */
#define REF_2_PERST_MIN 20000
#define REF_2_PERST_MAX 25000
#define PERST_2_ACCESS_MIN 10000
#define PERST_2_ACCESS_MAX 12000
#define LINK_WAIT_MIN 900
#define LINK_WAIT_MAX 1000
#define PIPE_CLK_WAIT_MIN 550
#define PIPE_CLK_WAIT_MAX 600
#define TIME_CMOS_MIN 100
#define TIME_CMOS_MAX 105
#define TIME_PHY_PD_MIN 10
#define TIME_PHY_PD_MAX 11

struct kirin_pcie {
    struct dw_pcie *pci;
    void __iomem *apb_base;
    void __iomem *phy_base;
    struct regmap *crgctrl;
    struct regmap *sysctrl;
    struct clk *apb_sys_clk;
    struct clk *apb_phy_clk;
    struct clk *phy_ref_clk;
    struct clk *pcie_aclk;
    struct clk *pcie_aux_clk;
    int gpio_id_reset;
};

/* Registers in PCIeCTRL */
static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
                     u32 val, u32 reg)
{
    writel(val, kirin_pcie->apb_base + reg);
}

static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
{
    return readl(kirin_pcie->apb_base + reg);
}

/* Registers in PCIePHY */
static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
                    u32 val, u32 reg)
{
    writel(val, kirin_pcie->phy_base + reg);
}

static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
{
    return readl(kirin_pcie->phy_base + reg);
}

static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
                   struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;

    kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
    if (IS_ERR(kirin_pcie->phy_ref_clk))
        return PTR_ERR(kirin_pcie->phy_ref_clk);

    kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
    if (IS_ERR(kirin_pcie->pcie_aux_clk))
        return PTR_ERR(kirin_pcie->pcie_aux_clk);

    kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
    if (IS_ERR(kirin_pcie->apb_phy_clk))
        return PTR_ERR(kirin_pcie->apb_phy_clk);

    kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
    if (IS_ERR(kirin_pcie->apb_sys_clk))
        return PTR_ERR(kirin_pcie->apb_sys_clk);

    kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
    if (IS_ERR(kirin_pcie->pcie_aclk))
        return PTR_ERR(kirin_pcie->pcie_aclk);

    return 0;
}

static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
                    struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct resource *apb;
    struct resource *phy;
    struct resource *dbi;

    apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
    kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
    if (IS_ERR(kirin_pcie->apb_base))
        return PTR_ERR(kirin_pcie->apb_base);

    phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
    kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
    if (IS_ERR(kirin_pcie->phy_base))
        return PTR_ERR(kirin_pcie->phy_base);

    dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
    kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
    if (IS_ERR(kirin_pcie->pci->dbi_base))
        return PTR_ERR(kirin_pcie->pci->dbi_base);

    kirin_pcie->crgctrl =
        syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
    if (IS_ERR(kirin_pcie->crgctrl))
        return PTR_ERR(kirin_pcie->crgctrl);

    kirin_pcie->sysctrl =
        syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
    if (IS_ERR(kirin_pcie->sysctrl))
        return PTR_ERR(kirin_pcie->sysctrl);

    return 0;
}

static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
{
    struct device *dev = kirin_pcie->pci->dev;
    u32 reg_val;

    reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
    reg_val &= ~PHY_REF_PAD_BIT;
    kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);

    reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
    reg_val &= ~PHY_PWR_DOWN_BIT;
    kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
    usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);

    reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
    reg_val &= ~PHY_RST_ACK_BIT;
    kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);

    usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
    reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
    if (reg_val & PIPE_CLK_STABLE) {
        dev_err(dev, "PIPE clk is not stable\n");
        return -EINVAL;
    }

    return 0;
}

static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
{
    u32 val;

    regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
    val |= PCIE_DEBOUNCE_PARAM;
    val &= ~PCIE_OE_BYPASS;
    regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
}

static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
{
    int ret = 0;

    if (!enable)
        goto close_clk;

    ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
    if (ret)
        return ret;

    ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
    if (ret)
        return ret;

    ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
    if (ret)
        goto apb_sys_fail;

    ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
    if (ret)
        goto apb_phy_fail;

    ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
    if (ret)
        goto aclk_fail;

    ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
    if (ret)
        goto aux_clk_fail;

    return 0;

close_clk:
    clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
aux_clk_fail:
    clk_disable_unprepare(kirin_pcie->pcie_aclk);
aclk_fail:
    clk_disable_unprepare(kirin_pcie->apb_phy_clk);
apb_phy_fail:
    clk_disable_unprepare(kirin_pcie->apb_sys_clk);
apb_sys_fail:
    clk_disable_unprepare(kirin_pcie->phy_ref_clk);

    return ret;
}

static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
{
    int ret;

    /* Power supply for Host */
    regmap_write(kirin_pcie->sysctrl,
             SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
    usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
    kirin_pcie_oe_enable(kirin_pcie);

    ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
    if (ret)
        return ret;

    /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
    regmap_write(kirin_pcie->sysctrl,
             SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
    regmap_write(kirin_pcie->crgctrl,
             CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
    regmap_write(kirin_pcie->sysctrl,
             SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);

    ret = kirin_pcie_phy_init(kirin_pcie);
    if (ret)
        goto close_clk;

    /* perst assert Endpoint */
    if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
        usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
        ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
        if (ret)
            goto close_clk;
        usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);

        return 0;
    }

close_clk:
    kirin_pcie_clk_ctrl(kirin_pcie, false);
    return ret;
}

static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
                       bool on)
{
    u32 val;

    val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
    if (on)
        val = val | PCIE_ELBI_SLV_DBI_ENABLE;
    else
        val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;

    kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
}

static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
                       bool on)
{
    u32 val;

    val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
    if (on)
        val = val | PCIE_ELBI_SLV_DBI_ENABLE;
    else
        val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;

    kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
}

static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
                  int where, int size, u32 *val)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
    int ret;

    kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
    ret = dw_pcie_read(pci->dbi_base + where, size, val);
    kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);

    return ret;
}

static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
                  int where, int size, u32 val)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
    int ret;

    kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
    ret = dw_pcie_write(pci->dbi_base + where, size, val);
    kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);

    return ret;
}

static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
                   u32 reg, size_t size)
{
    struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
    u32 ret;

    kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
    dw_pcie_read(base + reg, size, &ret);
    kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);

    return ret;
}

static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
                 u32 reg, size_t size, u32 val)
{
    struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);

    kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
    dw_pcie_write(base + reg, size, val);
    kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}

static int kirin_pcie_link_up(struct dw_pcie *pci)
{
    struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
    u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);

    if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
        return 1;

    return 0;
}

static int kirin_pcie_establish_link(struct pcie_port *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
    struct device *dev = kirin_pcie->pci->dev;
    int count = 0;

    if (kirin_pcie_link_up(pci))
        return 0;

    dw_pcie_setup_rc(pp);

    /* assert LTSSM enable */
    kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
                  PCIE_APP_LTSSM_ENABLE);

    /* check if the link is up or not */
    while (!kirin_pcie_link_up(pci)) {
        usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
        count++;
        if (count == 1000) {
            dev_err(dev, "Link Fail\n");
            return -EINVAL;
        }
    }

    return 0;
}

static void kirin_pcie_host_init(struct pcie_port *pp)
{
    kirin_pcie_establish_link(pp);
}

static struct dw_pcie_ops kirin_dw_pcie_ops = {
    .read_dbi = kirin_pcie_read_dbi,
    .write_dbi = kirin_pcie_write_dbi,
    .link_up = kirin_pcie_link_up,
};

static struct dw_pcie_host_ops kirin_pcie_host_ops = {
    .rd_own_conf = kirin_pcie_rd_own_conf,
    .wr_own_conf = kirin_pcie_wr_own_conf,
    .host_init = kirin_pcie_host_init,
};

static int __init kirin_add_pcie_port(struct dw_pcie *pci,
                      struct platform_device *pdev)
{
    pci->pp.ops = &kirin_pcie_host_ops;

    return dw_pcie_host_init(&pci->pp);
}

static int kirin_pcie_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct kirin_pcie *kirin_pcie;
    struct dw_pcie *pci;
    int ret;

    if (!dev->of_node) {
        dev_err(dev, "NULL node\n");
        return -EINVAL;
    }

    kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
    if (!kirin_pcie)
        return -ENOMEM;

    pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
    if (!pci)
        return -ENOMEM;

    pci->dev = dev;
    pci->ops = &kirin_dw_pcie_ops;
    kirin_pcie->pci = pci;

    ret = kirin_pcie_get_clk(kirin_pcie, pdev);
    if (ret)
        return ret;

    ret = kirin_pcie_get_resource(kirin_pcie, pdev);
    if (ret)
        return ret;

    kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
                              "reset-gpio", 0);
    if (kirin_pcie->gpio_id_reset < 0)
        return -ENODEV;

    ret = kirin_pcie_power_on(kirin_pcie);
    if (ret)
        return ret;

    platform_set_drvdata(pdev, kirin_pcie);

    return kirin_add_pcie_port(pci, pdev);
}

static const struct of_device_id kirin_pcie_match[] = {
    { .compatible = "hisilicon,kirin960-pcie" },
    {},
};

struct platform_driver kirin_pcie_driver = {
    .probe = kirin_pcie_probe,
    .driver = {
        .name = "kirin-pcie",
        .of_match_table = kirin_pcie_match,
        .suppress_bind_attrs = true,
    },
};
builtin_platform_driver(kirin_pcie_driver);

@@ -51,6 +51,12 @@
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)

+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+#define CFG_BRIDGE_SB_INIT BIT(0)
+
#define PCIE20_CAP 0x70

#define PERST_DELAY_US 1000

@@ -86,10 +92,29 @@ struct qcom_pcie_resources_v2 {
    struct clk *pipe_clk;
};

+struct qcom_pcie_resources_v3 {
+    struct clk *aux_clk;
+    struct clk *master_clk;
+    struct clk *slave_clk;
+    struct reset_control *axi_m_reset;
+    struct reset_control *axi_s_reset;
+    struct reset_control *pipe_reset;
+    struct reset_control *axi_m_vmid_reset;
+    struct reset_control *axi_s_xpu_reset;
+    struct reset_control *parf_reset;
+    struct reset_control *phy_reset;
+    struct reset_control *axi_m_sticky_reset;
+    struct reset_control *pipe_sticky_reset;
+    struct reset_control *pwr_reset;
+    struct reset_control *ahb_reset;
+    struct reset_control *phy_ahb_reset;
+};
+
union qcom_pcie_resources {
    struct qcom_pcie_resources_v0 v0;
    struct qcom_pcie_resources_v1 v1;
    struct qcom_pcie_resources_v2 v2;
+    struct qcom_pcie_resources_v3 v3;
};

struct qcom_pcie;

@@ -133,26 +158,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
    return dw_handle_msi_irq(pp);
}

-static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
-{
-    u32 val;
-
-    /* enable link training */
-    val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
-    val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
-    writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
-}
-
-static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
-{
-    u32 val;
-
-    /* enable link training */
-    val = readl(pcie->parf + PCIE20_PARF_LTSSM);
-    val |= BIT(8);
-    writel(val, pcie->parf + PCIE20_PARF_LTSSM);
-}
-
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
    struct dw_pcie *pci = pcie->pci;

@@ -167,6 +172,16 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
    return dw_pcie_wait_for_link(pci);
}

+static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
+{
+    u32 val;
+
+    /* enable link training */
+    val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+    val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+    writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
    struct qcom_pcie_resources_v0 *res = &pcie->res.v0;

@@ -217,36 +232,6 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
    return PTR_ERR_OR_ZERO(res->phy_reset);
}

-static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
-{
-    struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
-    struct dw_pcie *pci = pcie->pci;
-    struct device *dev = pci->dev;
-
-    res->vdda = devm_regulator_get(dev, "vdda");
-    if (IS_ERR(res->vdda))
-        return PTR_ERR(res->vdda);
-
-    res->iface = devm_clk_get(dev, "iface");
-    if (IS_ERR(res->iface))
-        return PTR_ERR(res->iface);
-
-    res->aux = devm_clk_get(dev, "aux");
-    if (IS_ERR(res->aux))
-        return PTR_ERR(res->aux);
-
-    res->master_bus = devm_clk_get(dev, "master_bus");
-    if (IS_ERR(res->master_bus))
-        return PTR_ERR(res->master_bus);
-
-    res->slave_bus = devm_clk_get(dev, "slave_bus");
-    if (IS_ERR(res->slave_bus))
-        return PTR_ERR(res->slave_bus);
-
-    res->core = devm_reset_control_get(dev, "core");
-    return PTR_ERR_OR_ZERO(res->core);
-}
-
static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
{
    struct qcom_pcie_resources_v0 *res = &pcie->res.v0;

@@ -357,6 +342,13 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
    /* wait for clock acquisition */
    usleep_range(1000, 1500);

+    /* Set the Max TLP size to 2K, instead of using default of 4K */
+    writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+           pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+    writel(CFG_BRIDGE_SB_INIT,
+           pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
    return 0;

err_deassert_ahb:

@@ -375,6 +367,36 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
    return ret;
}

+static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
+{
+    struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+    struct dw_pcie *pci = pcie->pci;
+    struct device *dev = pci->dev;
+
+    res->vdda = devm_regulator_get(dev, "vdda");
+    if (IS_ERR(res->vdda))
+        return PTR_ERR(res->vdda);
+
+    res->iface = devm_clk_get(dev, "iface");
+    if (IS_ERR(res->iface))
+        return PTR_ERR(res->iface);
+
+    res->aux = devm_clk_get(dev, "aux");
+    if (IS_ERR(res->aux))
+        return PTR_ERR(res->aux);
+
+    res->master_bus = devm_clk_get(dev, "master_bus");
+    if (IS_ERR(res->master_bus))
+        return PTR_ERR(res->master_bus);
+
+    res->slave_bus = devm_clk_get(dev, "slave_bus");
+    if (IS_ERR(res->slave_bus))
+        return PTR_ERR(res->slave_bus);
+
+    res->core = devm_reset_control_get(dev, "core");
+    return PTR_ERR_OR_ZERO(res->core);
+}
+
static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
    struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

@@ -455,6 +477,16 @@ static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
    return ret;
}

+static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
+{
+    u32 val;
+
+    /* enable link training */
+    val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+    val |= BIT(8);
+    writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
    struct qcom_pcie_resources_v2 *res = &pcie->res.v2;

@@ -481,6 +513,17 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
    return PTR_ERR_OR_ZERO(res->pipe_clk);
}

+static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+{
+    struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+
+    clk_disable_unprepare(res->pipe_clk);
+    clk_disable_unprepare(res->slave_clk);
+    clk_disable_unprepare(res->master_clk);
+    clk_disable_unprepare(res->cfg_clk);
+    clk_disable_unprepare(res->aux_clk);
+}
+
static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
{
    struct qcom_pcie_resources_v2 *res = &pcie->res.v2;

@@ -562,6 +605,285 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
    return 0;
}

+static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
+{
+    struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+    struct dw_pcie *pci = pcie->pci;
+    struct device *dev = pci->dev;
+
+    res->aux_clk = devm_clk_get(dev, "aux");
+    if (IS_ERR(res->aux_clk))
+        return PTR_ERR(res->aux_clk);
+
+    res->master_clk = devm_clk_get(dev, "master_bus");
+    if (IS_ERR(res->master_clk))
+        return PTR_ERR(res->master_clk);
+
+    res->slave_clk = devm_clk_get(dev, "slave_bus");
+    if (IS_ERR(res->slave_clk))
+        return PTR_ERR(res->slave_clk);
+
+    res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
+    if (IS_ERR(res->axi_m_reset))
+        return PTR_ERR(res->axi_m_reset);
+
+    res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
+    if (IS_ERR(res->axi_s_reset))
+        return PTR_ERR(res->axi_s_reset);
+
+    res->pipe_reset = devm_reset_control_get(dev, "pipe");
+    if (IS_ERR(res->pipe_reset))
+        return PTR_ERR(res->pipe_reset);
+
+    res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
+    if (IS_ERR(res->axi_m_vmid_reset))
+        return PTR_ERR(res->axi_m_vmid_reset);
+
+    res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
+    if (IS_ERR(res->axi_s_xpu_reset))
+        return PTR_ERR(res->axi_s_xpu_reset);
+
+    res->parf_reset = devm_reset_control_get(dev, "parf");
+    if (IS_ERR(res->parf_reset))
+        return PTR_ERR(res->parf_reset);
+
+    res->phy_reset = devm_reset_control_get(dev, "phy");
+    if (IS_ERR(res->phy_reset))
+        return PTR_ERR(res->phy_reset);
+
+    res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
+    if (IS_ERR(res->axi_m_sticky_reset))
+        return PTR_ERR(res->axi_m_sticky_reset);
+
+    res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
+    if (IS_ERR(res->pipe_sticky_reset))
+        return PTR_ERR(res->pipe_sticky_reset);
+
+    res->pwr_reset = devm_reset_control_get(dev, "pwr");
+    if (IS_ERR(res->pwr_reset))
+        return PTR_ERR(res->pwr_reset);
+
+    res->ahb_reset = devm_reset_control_get(dev, "ahb");
+    if (IS_ERR(res->ahb_reset))
+        return PTR_ERR(res->ahb_reset);
+
+    res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
+    if (IS_ERR(res->phy_ahb_reset))
+        return PTR_ERR(res->phy_ahb_reset);
+
+    return 0;
+}
+
+static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
+{
+    struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+
+    reset_control_assert(res->axi_m_reset);
+    reset_control_assert(res->axi_s_reset);
+    reset_control_assert(res->pipe_reset);
+    reset_control_assert(res->pipe_sticky_reset);
+    reset_control_assert(res->phy_reset);
+    reset_control_assert(res->phy_ahb_reset);
+    reset_control_assert(res->axi_m_sticky_reset);
+    reset_control_assert(res->pwr_reset);
+    reset_control_assert(res->ahb_reset);
+    clk_disable_unprepare(res->aux_clk);
+    clk_disable_unprepare(res->master_clk);
+    clk_disable_unprepare(res->slave_clk);
+}
+
+static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
+{
+    struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+    struct dw_pcie *pci = pcie->pci;
+    struct device *dev = pci->dev;
+    u32 val;
+    int ret;
+
+    ret = reset_control_assert(res->axi_m_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert axi master reset\n");
+        return ret;
+    }
+
+    ret = reset_control_assert(res->axi_s_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert axi slave reset\n");
+        return ret;
+    }
+
+    usleep_range(10000, 12000);
+
+    ret = reset_control_assert(res->pipe_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert pipe reset\n");
+        return ret;
+    }
+
+    ret = reset_control_assert(res->pipe_sticky_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert pipe sticky reset\n");
+        return ret;
+    }
+
+    ret = reset_control_assert(res->phy_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert phy reset\n");
+        return ret;
+    }
+
+    ret = reset_control_assert(res->phy_ahb_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert phy ahb reset\n");
+        return ret;
+    }
+
+    usleep_range(10000, 12000);
+
+    ret = reset_control_assert(res->axi_m_sticky_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert axi master sticky reset\n");
+        return ret;
+    }
+
+    ret = reset_control_assert(res->pwr_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert power reset\n");
+        return ret;
+    }
+
+    ret = reset_control_assert(res->ahb_reset);
+    if (ret) {
+        dev_err(dev, "cannot assert ahb reset\n");
+        return ret;
+    }
+
+    usleep_range(10000, 12000);
+
+    ret = reset_control_deassert(res->phy_ahb_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert phy ahb reset\n");
+        return ret;
+    }
+
+    ret = reset_control_deassert(res->phy_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert phy reset\n");
+        goto err_rst_phy;
+    }
+
+    ret = reset_control_deassert(res->pipe_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert pipe reset\n");
+        goto err_rst_pipe;
+    }
+
+    ret = reset_control_deassert(res->pipe_sticky_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert pipe sticky reset\n");
+        goto err_rst_pipe_sticky;
+    }
+
+    usleep_range(10000, 12000);
+
+    ret = reset_control_deassert(res->axi_m_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert axi master reset\n");
+        goto err_rst_axi_m;
+    }
+
+    ret = reset_control_deassert(res->axi_m_sticky_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert axi master sticky reset\n");
+        goto err_rst_axi_m_sticky;
+    }
+
+    ret = reset_control_deassert(res->axi_s_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert axi slave reset\n");
+        goto err_rst_axi_s;
+    }
+
+    ret = reset_control_deassert(res->pwr_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert power reset\n");
+        goto err_rst_pwr;
+    }
+
+    ret = reset_control_deassert(res->ahb_reset);
+    if (ret) {
+        dev_err(dev, "cannot deassert ahb reset\n");
+        goto err_rst_ahb;
+    }
+
+    usleep_range(10000, 12000);
+
+    ret = clk_prepare_enable(res->aux_clk);
+    if (ret) {
+        dev_err(dev, "cannot prepare/enable iface clock\n");
+        goto err_clk_aux;
+    }
+
+    ret = clk_prepare_enable(res->master_clk);
+    if (ret) {
+        dev_err(dev, "cannot prepare/enable core clock\n");
+        goto err_clk_axi_m;
+    }
+
+    ret = clk_prepare_enable(res->slave_clk);
+    if (ret) {
+        dev_err(dev, "cannot prepare/enable phy clock\n");
+        goto err_clk_axi_s;
+    }
+
+    /* enable PCIe clocks and resets */
+    val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+    val &= !BIT(0);
+    writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+    /* change DBI base address */
+    writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+    /* MAC PHY_POWERDOWN MUX DISABLE */
+    val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+    val &= ~BIT(29);
+    writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+    val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+    val |= BIT(4);
+    writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+    val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+    val |= BIT(31);
+    writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+    return 0;
+
+err_clk_axi_s:
+    clk_disable_unprepare(res->master_clk);
+err_clk_axi_m:
+    clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+    reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+    reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+    reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+    reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+    reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+    reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+    reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+    reset_control_assert(res->phy_reset);
+err_rst_phy:
+    reset_control_assert(res->phy_ahb_reset);
+    return ret;
+}
+
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
    u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

@@ -569,17 +891,6 @@ static int qcom_pcie_link_up(struct dw_pcie *pci)
    return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

-static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
-{
-    struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
-
-    clk_disable_unprepare(res->pipe_clk);
-    clk_disable_unprepare(res->slave_clk);
-    clk_disable_unprepare(res->master_clk);
-    clk_disable_unprepare(res->cfg_clk);
-    clk_disable_unprepare(res->aux_clk);
-}
-
static void qcom_pcie_host_init(struct pcie_port *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

@@ -634,7 +945,7 @@ static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
    return dw_pcie_read(pci->dbi_base + where, size, val);
}

-static struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
    .host_init = qcom_pcie_host_init,
    .rd_own_conf = qcom_pcie_rd_own_conf,
};

@@ -665,6 +976,13 @@ static const struct dw_pcie_ops dw_pcie_ops = {
    .link_up = qcom_pcie_link_up,
|
.link_up = qcom_pcie_link_up,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const struct qcom_pcie_ops ops_v3 = {
|
||||||
|
.get_resources = qcom_pcie_get_resources_v3,
|
||||||
|
.init = qcom_pcie_init_v3,
|
||||||
|
.deinit = qcom_pcie_deinit_v3,
|
||||||
|
.ltssm_enable = qcom_pcie_v2_ltssm_enable,
|
||||||
|
};
|
||||||
|
|
||||||
static int qcom_pcie_probe(struct platform_device *pdev)
|
static int qcom_pcie_probe(struct platform_device *pdev)
|
||||||
{
|
{
|
||||||
struct device *dev = &pdev->dev;
|
struct device *dev = &pdev->dev;
|
||||||
|
@ -727,7 +1045,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
|
||||||
|
|
||||||
ret = devm_request_irq(dev, pp->msi_irq,
|
ret = devm_request_irq(dev, pp->msi_irq,
|
||||||
qcom_pcie_msi_irq_handler,
|
qcom_pcie_msi_irq_handler,
|
||||||
IRQF_SHARED, "qcom-pcie-msi", pp);
|
IRQF_SHARED | IRQF_NO_THREAD,
|
||||||
|
"qcom-pcie-msi", pp);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
dev_err(dev, "cannot request msi irq\n");
|
dev_err(dev, "cannot request msi irq\n");
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -754,6 +1073,7 @@ static const struct of_device_id qcom_pcie_match[] = {
|
||||||
{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
|
{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
|
||||||
{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
|
{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
|
||||||
{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
|
{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
|
||||||
|
{ .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },
|
||||||
{ }
|
{ }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
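The new "qcom,pcie-ipq4019" entry binds the IPQ4019 device-tree node to ops_v3 through the of_device_id .data pointer. A minimal sketch of how such match data is typically resolved at probe time is below; the struct and function names are placeholders for illustration, not taken from this driver.

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Placeholder hook table standing in for the driver's per-SoC ops. */
struct example_pcie_ops {
	int (*init)(void *pcie);
};

static int example_pcie_probe(struct platform_device *pdev)
{
	const struct example_pcie_ops *ops;

	/* Returns the .data pointer of the matching compatible entry. */
	ops = of_device_get_match_data(&pdev->dev);
	if (!ops)
		return -EINVAL;

	return 0;
}
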
@@ -186,7 +186,7 @@ static void spear13xx_pcie_host_init(struct pcie_port *pp)
 	spear13xx_pcie_enable_interrupts(spear13xx_pcie);
 }

-static struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
+static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
 	.host_init = spear13xx_pcie_host_init,
 };

@@ -180,6 +180,31 @@ config PCIE_ROCKCHIP
 	  There is 1 internal PCIe port available to support GEN2 with
 	  4 slots.

+config PCIE_MEDIATEK
+	bool "MediaTek PCIe controller"
+	depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST)
+	depends on OF
+	depends on PCI
+	select PCIEPORTBUS
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  MT7623 series SoCs. There is one single root complex with 3 root
+	  ports available. Each port supports Gen2 lane x1.
+
+config PCIE_TANGO_SMP8759
+	bool "Tango SMP8759 PCIe controller (DANGEROUS)"
+	depends on ARCH_TANGO && PCI_MSI && OF
+	depends on BROKEN
+	select PCI_HOST_COMMON
+	help
+	  Say Y here to enable PCIe controller support for Sigma Designs
+	  Tango SMP8759-based systems.
+
+	  Note: The SMP8759 controller multiplexes PCI config and MMIO
+	  accesses, and Linux doesn't provide a way to serialize them.
+	  This can lead to data corruption if drivers perform concurrent
+	  config and MMIO accesses.
+
 config VMD
 	depends on PCI_MSI && X86_64 && SRCU
 	tristate "Intel Volume Management Device Driver"

@@ -18,6 +18,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
 obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
 obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
 obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
 obj-$(CONFIG_VMD) += vmd.o

 # The following drivers are for devices that use the generic ACPI

@@ -886,12 +886,14 @@ static int advk_pcie_probe(struct platform_device *pdev)
 	struct advk_pcie *pcie;
 	struct resource *res;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	int ret, irq;

-	pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
-	if (!pcie)
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
+	if (!bridge)
 		return -ENOMEM;

+	pcie = pci_host_bridge_priv(bridge);
 	pcie->pdev = pdev;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

@@ -929,14 +931,21 @@ static int advk_pcie_probe(struct platform_device *pdev)
 		return ret;
 	}

-	bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops,
-				pcie, &pcie->resources);
-	if (!bus) {
+	list_splice_init(&pcie->resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = 0;
+	bridge->ops = &advk_pcie_ops;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0) {
 		advk_pcie_remove_msi_irq_domain(pcie);
 		advk_pcie_remove_irq_domain(pcie);
-		return -ENOMEM;
+		return ret;
 	}

+	bus = bridge->bus;
+
 	pci_bus_assign_resources(bus);

 	list_for_each_entry(child, &bus->children, node)

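The aardvark hunk above, and the Faraday and generic-host hunks below, all follow the same conversion from pci_scan_root_bus() to the pci_host_bridge flow. A condensed sketch of that registration pattern under the 4.13-era API, with my_pcie and my_pci_ops as placeholder names rather than anything defined in these drivers:

#include <linux/device.h>
#include <linux/list.h>
#include <linux/pci.h>

struct my_pcie;				/* driver-private state, placeholder */
static struct pci_ops my_pci_ops;	/* assumed to be filled in elsewhere */

static int my_pcie_register(struct device *dev, struct my_pcie *pcie,
			    struct list_head *resources)
{
	struct pci_host_bridge *bridge;
	struct pci_bus *bus, *child;
	int ret;

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	list_splice_init(resources, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = 0;
	bridge->ops = &my_pci_ops;

	/* Replaces the old pci_scan_root_bus() call. */
	ret = pci_scan_root_bus_bridge(bridge);
	if (ret < 0)
		return ret;

	bus = bridge->bus;
	pci_bus_assign_resources(bus);
	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);
	pci_bus_add_devices(bus);

	return 0;
}
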
@@ -25,6 +25,7 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/bitops.h>
 #include <linux/irq.h>
+#include <linux/clk.h>

 /*
  * Special configuration registers directly in the first few words

@@ -37,6 +38,7 @@
 #define PCI_CONFIG		0x28 /* PCI configuration command register */
 #define PCI_DATA		0x2C

+#define FARADAY_PCI_STATUS_CMD		0x04 /* Status and command */
 #define FARADAY_PCI_PMC			0x40 /* Power management control */
 #define FARADAY_PCI_PMCSR		0x44 /* Power management status */
 #define FARADAY_PCI_CTRL1		0x48 /* Control register 1 */

@@ -45,6 +47,8 @@
 #define FARADAY_PCI_MEM2_BASE_SIZE	0x54 /* Memory base and size #2 */
 #define FARADAY_PCI_MEM3_BASE_SIZE	0x58 /* Memory base and size #3 */

+#define PCI_STATUS_66MHZ_CAPABLE	BIT(21)
+
 /* Bits 31..28 gives INTD..INTA status */
 #define PCI_CTRL2_INTSTS_SHIFT		28
 #define PCI_CTRL2_INTMASK_CMDERR	BIT(27)

@@ -117,6 +121,7 @@ struct faraday_pci {
 	void __iomem *base;
 	struct irq_domain *irqdomain;
 	struct pci_bus *bus;
+	struct clk *bus_clk;
 };

 static int faraday_res_to_memcfg(resource_size_t mem_base,

@@ -178,12 +183,11 @@ static int faraday_res_to_memcfg(resource_size_t mem_base,
 	return 0;
 }

-static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
-				   int config, int size, u32 *value)
+static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
+				       unsigned int fn, int config, int size,
+				       u32 *value)
 {
-	struct faraday_pci *p = bus->sysdata;
-
-	writel(PCI_CONF_BUS(bus->number) |
+	writel(PCI_CONF_BUS(bus_number) |
 	       PCI_CONF_DEVICE(PCI_SLOT(fn)) |
 	       PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
 	       PCI_CONF_WHERE(config) |

@@ -197,24 +201,28 @@ static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
 	else if (size == 2)
 		*value = (*value >> (8 * (config & 3))) & 0xFFFF;

+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
+				   int config, int size, u32 *value)
+{
+	struct faraday_pci *p = bus->sysdata;
+
 	dev_dbg(&bus->dev,
 		"[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
 		PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);

-	return PCIBIOS_SUCCESSFUL;
+	return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value);
 }

-static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
-				    int config, int size, u32 value)
+static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
+					unsigned int fn, int config, int size,
+					u32 value)
 {
-	struct faraday_pci *p = bus->sysdata;
 	int ret = PCIBIOS_SUCCESSFUL;

-	dev_dbg(&bus->dev,
-		"[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
-		PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
-
-	writel(PCI_CONF_BUS(bus->number) |
+	writel(PCI_CONF_BUS(bus_number) |
 	       PCI_CONF_DEVICE(PCI_SLOT(fn)) |
 	       PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
 	       PCI_CONF_WHERE(config) |

@@ -238,6 +246,19 @@ static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
 	return ret;
 }

+static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
+				    int config, int size, u32 value)
+{
+	struct faraday_pci *p = bus->sysdata;
+
+	dev_dbg(&bus->dev,
+		"[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+
+	return faraday_raw_pci_write_config(p, bus->number, fn, config, size,
+					    value);
+}
+
 static struct pci_ops faraday_pci_ops = {
 	.read	= faraday_pci_read_config,
 	.write	= faraday_pci_write_config,

@@ -248,10 +269,10 @@ static void faraday_pci_ack_irq(struct irq_data *d)
 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
 	unsigned int reg;

-	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
 	reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
 	reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT);
-	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
 }

 static void faraday_pci_mask_irq(struct irq_data *d)

@@ -259,10 +280,10 @@ static void faraday_pci_mask_irq(struct irq_data *d)
 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
 	unsigned int reg;

-	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
 	reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT)
 		 | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT));
-	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
 }

 static void faraday_pci_unmask_irq(struct irq_data *d)

@@ -270,10 +291,10 @@ static void faraday_pci_unmask_irq(struct irq_data *d)
 	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
 	unsigned int reg;

-	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
 	reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
 	reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT);
-	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
 }

 static void faraday_pci_irq_handler(struct irq_desc *desc)

@@ -282,7 +303,7 @@ static void faraday_pci_irq_handler(struct irq_desc *desc)
 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 	unsigned int irq_stat, reg, i;

-	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
 	irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT;

 	chained_irq_enter(irqchip, desc);

@@ -403,8 +424,8 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
 		dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
 			 i + 1, range.pci_addr, end, val);
 		if (i <= 2) {
-			faraday_pci_write_config(p->bus, 0, confreg[i],
+			faraday_raw_pci_write_config(p, 0, 0, confreg[i],
 						 4, val);
 		} else {
 			dev_err(dev, "ignore extraneous dma-range %d\n", i);
 			break;

@@ -428,11 +449,14 @@ static int faraday_pci_probe(struct platform_device *pdev)
 	struct resource *mem;
 	struct resource *io;
 	struct pci_host_bridge *host;
+	struct clk *clk;
+	unsigned char max_bus_speed = PCI_SPEED_33MHz;
+	unsigned char cur_bus_speed = PCI_SPEED_33MHz;
 	int ret;
 	u32 val;
 	LIST_HEAD(res);

-	host = pci_alloc_host_bridge(sizeof(*p));
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
 	if (!host)
 		return -ENOMEM;

@@ -440,10 +464,30 @@ static int faraday_pci_probe(struct platform_device *pdev)
 	host->ops = &faraday_pci_ops;
 	host->busnr = 0;
 	host->msi = NULL;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
 	p = pci_host_bridge_priv(host);
 	host->sysdata = p;
 	p->dev = dev;

+	/* Retrieve and enable optional clocks */
+	clk = devm_clk_get(dev, "PCLK");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(dev, "could not prepare PCLK\n");
+		return ret;
+	}
+	p->bus_clk = devm_clk_get(dev, "PCICLK");
+	if (IS_ERR(p->bus_clk))
+		return PTR_ERR(clk);
+	ret = clk_prepare_enable(p->bus_clk);
+	if (ret) {
+		dev_err(dev, "could not prepare PCICLK\n");
+		return ret;
+	}
+
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	p->base = devm_ioremap_resource(dev, regs);
 	if (IS_ERR(p->base))

@@ -496,17 +540,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
 	val |= PCI_COMMAND_MEMORY;
 	val |= PCI_COMMAND_MASTER;
 	writel(val, p->base + PCI_CTRL);

-	list_splice_init(&res, &host->windows);
-	ret = pci_register_host_bridge(host);
-	if (ret) {
-		dev_err(dev, "failed to register host: %d\n", ret);
-		return ret;
-	}
-	p->bus = host->bus;
-
 	/* Mask and clear all interrupts */
-	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
 	if (variant->cascaded_irq) {
 		ret = faraday_pci_setup_cascaded_irq(p);
 		if (ret) {

@@ -515,12 +550,48 @@ static int faraday_pci_probe(struct platform_device *pdev)
 		}
 	}

+	/* Check bus clock if we can gear up to 66 MHz */
+	if (!IS_ERR(p->bus_clk)) {
+		unsigned long rate;
+		u32 val;
+
+		faraday_raw_pci_read_config(p, 0, 0,
+					    FARADAY_PCI_STATUS_CMD, 4, &val);
+		rate = clk_get_rate(p->bus_clk);
+
+		if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) {
+			dev_info(dev, "33MHz bus is 66MHz capable\n");
+			max_bus_speed = PCI_SPEED_66MHz;
+			ret = clk_set_rate(p->bus_clk, 66000000);
+			if (ret)
+				dev_err(dev, "failed to set bus clock\n");
+		} else {
+			dev_info(dev, "33MHz only bus\n");
+			max_bus_speed = PCI_SPEED_33MHz;
+		}
+
+		/* Bumping the clock may fail so read back the rate */
+		rate = clk_get_rate(p->bus_clk);
+		if (rate == 33000000)
+			cur_bus_speed = PCI_SPEED_33MHz;
+		if (rate == 66000000)
+			cur_bus_speed = PCI_SPEED_66MHz;
+	}
+
 	ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
 	if (ret)
 		return ret;

-	pci_scan_child_bus(p->bus);
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+	list_splice_init(&res, &host->windows);
+	ret = pci_scan_root_bus_bridge(host);
+	if (ret) {
+		dev_err(dev, "failed to scan host: %d\n", ret);
+		return ret;
+	}
+	p->bus = host->bus;
+	p->bus->max_bus_speed = max_bus_speed;
+	p->bus->cur_bus_speed = cur_bus_speed;
+
 	pci_bus_assign_resources(p->bus);
 	pci_bus_add_devices(p->bus);
 	pci_free_resource_list(&res);

@@ -117,8 +117,14 @@ int pci_host_common_probe(struct platform_device *pdev,
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	struct pci_config_window *cfg;
 	struct list_head resources;
+	int ret;
+
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
+	if (!bridge)
+		return -ENOMEM;

 	type = of_get_property(np, "device_type", NULL);
 	if (!type || strcmp(type, "pci")) {

@@ -138,16 +144,21 @@ int pci_host_common_probe(struct platform_device *pdev,
 	if (!pci_has_flag(PCI_PROBE_ONLY))
 		pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);

-	bus = pci_scan_root_bus(dev, cfg->busr.start, &ops->pci_ops, cfg,
-				&resources);
-	if (!bus) {
-		dev_err(dev, "Scanning rootbus failed");
-		return -ENODEV;
+	list_splice_init(&resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = cfg;
+	bridge->busnr = cfg->busr.start;
+	bridge->ops = &ops->pci_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0) {
+		dev_err(dev, "Scanning root bridge failed");
+		return ret;
 	}

-#ifdef CONFIG_ARM
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
+	bus = bridge->bus;

 	/*
 	 * We insert PCI resources into the iomem_resource and

@@ -64,22 +64,39 @@
  * major version.
  */

-#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (major)))
+#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
 #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
 #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)

-enum {
-	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),
-	PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
+enum pci_protocol_version_t {
+	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
+	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
 };

 #define CPU_AFFINITY_ALL	-1ULL

+/*
+ * Supported protocol versions in the order of probing - highest go
+ * first.
+ */
+static enum pci_protocol_version_t pci_protocol_versions[] = {
+	PCI_PROTOCOL_VERSION_1_2,
+	PCI_PROTOCOL_VERSION_1_1,
+};
+
+/*
+ * Protocol version negotiated by hv_pci_protocol_negotiation().
+ */
+static enum pci_protocol_version_t pci_protocol_version;
+
 #define PCI_CONFIG_MMIO_LENGTH	0x2000
 #define CFG_PAGE_OFFSET 0x1000
 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

 #define MAX_SUPPORTED_MSI_MESSAGES 0x400

+#define STATUS_REVISION_MISMATCH 0xC0000059
+
 /*
  * Message Types
  */

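The macro fix above matters for the new 1.2 entry: the old definition or-ed major into the low half as well, so PCI_MAKE_VERSION(1, 2) and (1, 1) would have produced the same value. A small standalone check of the corrected encoding (plain user-space C, not driver code):

#include <assert.h>
#include <stdint.h>

/* Same shape as the corrected driver macro: major in bits 31:16, minor low. */
#define MAKE_VERSION(major, minor) ((uint32_t)(((major) << 16) | (minor)))

int main(void)
{
	assert(MAKE_VERSION(1, 1) == 0x00010001);
	assert(MAKE_VERSION(1, 2) == 0x00010002);	/* now distinct from 1.1 */
	return 0;
}
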
@@ -109,6 +126,9 @@ enum pci_message_type {
 	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
 	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
 	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
+	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
+	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
+	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
 	PCI_MESSAGE_MAXIMUM
 };

@@ -178,6 +198,30 @@ struct hv_msi_desc {
 	u64	cpu_mask;
 } __packed;

+/**
+ * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
+ * @vector:		IDT entry
+ * @delivery_mode:	As defined in Intel's Programmer's
+ *			Reference Manual, Volume 3, Chapter 8.
+ * @vector_count:	Number of contiguous entries in the
+ *			Interrupt Descriptor Table that are
+ *			occupied by this Message-Signaled
+ *			Interrupt. For "MSI", as first defined
+ *			in PCI 2.2, this can be between 1 and
+ *			32. For "MSI-X," as first defined in PCI
+ *			3.0, this must be 1, as each MSI-X table
+ *			entry would have its own descriptor.
+ * @processor_count:	number of bits enabled in array.
+ * @processor_array:	All the target virtual processors.
+ */
+struct hv_msi_desc2 {
+	u8	vector;
+	u8	delivery_mode;
+	u16	vector_count;
+	u16	processor_count;
+	u16	processor_array[32];
+} __packed;
+
 /**
  * struct tran_int_desc
  * @reserved:	unused, padding

@@ -245,7 +289,7 @@ struct pci_packet {

 struct pci_version_request {
 	struct pci_message message_type;
-	enum pci_message_type protocol_version;
+	u32 protocol_version;
 } __packed;

 /*

@@ -294,6 +338,14 @@ struct pci_resources_assigned {
 	u32 reserved[4];
 } __packed;

+struct pci_resources_assigned2 {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	u8 memory_range[0x14][6];	/* not used here */
+	u32 msi_descriptor_count;
+	u8 reserved[70];
+} __packed;
+
 struct pci_create_interrupt {
 	struct pci_message message_type;
 	union win_slot_encoding wslot;

@@ -306,6 +358,12 @@ struct pci_create_int_response {
 	struct tran_int_desc int_desc;
 } __packed;

+struct pci_create_interrupt2 {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	struct hv_msi_desc2 int_desc;
+} __packed;
+
 struct pci_delete_interrupt {
 	struct pci_message message_type;
 	union win_slot_encoding wslot;

@@ -331,17 +389,42 @@ static int pci_ring_size = (4 * PAGE_SIZE);
 #define HV_PARTITION_ID_SELF		((u64)-1)
 #define HVCALL_RETARGET_INTERRUPT	0x7e

-struct retarget_msi_interrupt {
-	u64	partition_id;		/* use "self" */
-	u64	device_id;
+struct hv_interrupt_entry {
 	u32	source;			/* 1 for MSI(-X) */
 	u32	reserved1;
 	u32	address;
 	u32	data;
-	u64	reserved2;
+};
+
+#define HV_VP_SET_BANK_COUNT_MAX	5 /* current implementation limit */
+
+struct hv_vp_set {
+	u64	format;			/* 0 (HvGenericSetSparse4k) */
+	u64	valid_banks;
+	u64	masks[HV_VP_SET_BANK_COUNT_MAX];
+};
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST		1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET	2
+
+struct hv_device_interrupt_target {
 	u32	vector;
 	u32	flags;
-	u64	vp_mask;
+	union {
+		u64		 vp_mask;
+		struct hv_vp_set vp_set;
+	};
+};
+
+struct retarget_msi_interrupt {
+	u64	partition_id;		/* use "self" */
+	u64	device_id;
+	struct hv_interrupt_entry int_entry;
+	u64	reserved2;
+	struct hv_device_interrupt_target int_target;
 } __packed;

 /*

@@ -382,7 +465,10 @@ struct hv_pcibus_device {
 	struct msi_domain_info msi_info;
 	struct msi_controller msi_chip;
 	struct irq_domain *irq_domain;
+
+	/* hypercall arg, must not cross page boundary */
 	struct retarget_msi_interrupt retarget_msi_interrupt_params;
+
 	spinlock_t retarget_msi_interrupt_lock;
 };

@@ -476,6 +562,52 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
 static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
 static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);

+
+/*
+ * Temporary CPU to vCPU mapping to address transitioning
+ * vmbus_cpu_number_to_vp_number() being migrated to
+ * hv_cpu_number_to_vp_number() in a separate patch. Once that patch
+ * has been picked up in the main line, remove this code here and use
+ * the official code.
+ */
+static struct hv_tmpcpumap
+{
+	bool initialized;
+	u32 vp_index[NR_CPUS];
+} hv_tmpcpumap;
+
+static void hv_tmpcpumap_init_cpu(void *_unused)
+{
+	int cpu = smp_processor_id();
+	u64 vp_index;
+
+	hv_get_vp_index(vp_index);
+
+	hv_tmpcpumap.vp_index[cpu] = vp_index;
+}
+
+static void hv_tmpcpumap_init(void)
+{
+	if (hv_tmpcpumap.initialized)
+		return;
+
+	memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index));
+	on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true);
+	hv_tmpcpumap.initialized = true;
+}
+
+/**
+ * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr
+ *
+ * Remove once vmbus_cpu_number_to_vp_number() has been converted to
+ * hv_cpu_number_to_vp_number() and replace callers appropriately.
+ */
+static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu)
+{
+	return hv_tmpcpumap.vp_index[cpu];
+}
+
+
 /**
  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
  * @devfn: The Linux representation of PCI slot

@@ -786,8 +918,11 @@ static void hv_irq_unmask(struct irq_data *data)
 	struct cpumask *dest;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
-	int cpu;
 	unsigned long flags;
+	u32 var_size = 0;
+	int cpu_vmbus;
+	int cpu;
+	u64 res;

 	dest = irq_data_get_affinity_mask(data);
 	pdev = msi_desc_to_pci_dev(msi_desc);

@@ -799,23 +934,74 @@ static void hv_irq_unmask(struct irq_data *data)
 	params = &hbus->retarget_msi_interrupt_params;
 	memset(params, 0, sizeof(*params));
 	params->partition_id = HV_PARTITION_ID_SELF;
-	params->source = 1; /* MSI(-X) */
-	params->address = msi_desc->msg.address_lo;
-	params->data = msi_desc->msg.data;
+	params->int_entry.source = 1; /* MSI(-X) */
+	params->int_entry.address = msi_desc->msg.address_lo;
+	params->int_entry.data = msi_desc->msg.data;
 	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
 			   (hbus->hdev->dev_instance.b[4] << 16) |
 			   (hbus->hdev->dev_instance.b[7] << 8) |
 			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
 			   PCI_FUNC(pdev->devfn);
-	params->vector = cfg->vector;
+	params->int_target.vector = cfg->vector;

-	for_each_cpu_and(cpu, dest, cpu_online_mask)
-		params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+	/*
+	 * Honoring apic->irq_delivery_mode set to dest_Fixed by
+	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+	 * spurious interrupt storm. Not doing so does not seem to have a
+	 * negative effect (yet?).
+	 */

-	hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL);
+	if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+		/*
+		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+		 * with >64 VP support.
+		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+		 * is not sufficient for this hypercall.
+		 */
+		params->int_target.flags |=
+			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+		params->int_target.vp_set.valid_banks =
+			(1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
+
+		/*
+		 * var-sized hypercall, var-size starts after vp_mask (thus
+		 * vp_set.format does not count, but vp_set.valid_banks does).
+		 */
+		var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
+
+		for_each_cpu_and(cpu, dest, cpu_online_mask) {
+			cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu);
+
+			if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
+				dev_err(&hbus->hdev->device,
+					"too high CPU %d", cpu_vmbus);
+				res = 1;
+				goto exit_unlock;
+			}
+
+			params->int_target.vp_set.masks[cpu_vmbus / 64] |=
+				(1ULL << (cpu_vmbus & 63));
+		}
+	} else {
+		for_each_cpu_and(cpu, dest, cpu_online_mask) {
+			params->int_target.vp_mask |=
+				(1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
+		}
+	}
+
+	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+			      params, NULL);
+
+exit_unlock:
 	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);

+	if (res) {
+		dev_err(&hbus->hdev->device,
+			"%s() failed: %#llx", __func__, res);
+		return;
+	}
+
 	pci_msi_unmask_irq(data);
 }

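For readers following the VP_SET path in hv_irq_unmask() above, the sparse set is just a 64-bit-per-bank bitmap. The same masks[]/valid_banks bookkeeping, shown standalone in plain user-space C with an assumed example vCPU number:

#include <stdint.h>
#include <stdio.h>

#define BANK_COUNT_MAX 5	/* mirrors HV_VP_SET_BANK_COUNT_MAX above */

int main(void)
{
	uint64_t masks[BANK_COUNT_MAX] = { 0 };
	uint64_t valid_banks = (1ull << BANK_COUNT_MAX) - 1;
	int vp = 70;		/* hypothetical vCPU number */

	/* Same indexing as vp_set.masks[cpu_vmbus / 64] in the driver. */
	masks[vp / 64] |= 1ull << (vp & 63);

	printf("valid_banks=%#llx bank1=%#llx\n",
	       (unsigned long long)valid_banks,
	       (unsigned long long)masks[1]);
	return 0;
}
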
@@ -836,6 +1022,53 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
 	complete(&comp_pkt->comp_pkt.host_event);
 }

+static u32 hv_compose_msi_req_v1(
+	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
+	u32 slot, u8 vector)
+{
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
+	int_pkt->wslot.slot = slot;
+	int_pkt->int_desc.vector = vector;
+	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.delivery_mode =
+		(apic->irq_delivery_mode == dest_LowestPrio) ?
+			dest_LowestPrio : dest_Fixed;
+
+	/*
+	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
+	 * hv_irq_unmask().
+	 */
+	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+
+	return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v2(
+	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
+	u32 slot, u8 vector)
+{
+	int cpu;
+
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
+	int_pkt->wslot.slot = slot;
+	int_pkt->int_desc.vector = vector;
+	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.delivery_mode =
+		(apic->irq_delivery_mode == dest_LowestPrio) ?
+			dest_LowestPrio : dest_Fixed;
+
+	/*
+	 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+	 * by subsequent retarget in hv_irq_unmask().
+	 */
+	cpu = cpumask_first_and(affinity, cpu_online_mask);
+	int_pkt->int_desc.processor_array[0] =
+		hv_tmp_cpu_nr_to_vp_nr(cpu);
+	int_pkt->int_desc.processor_count = 1;
+
+	return sizeof(*int_pkt);
+}
+
 /**
  * hv_compose_msi_msg() - Supplies a valid MSI address/data
  * @data: Everything about this MSI

@@ -854,15 +1087,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct hv_pci_dev *hpdev;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
-	struct pci_create_interrupt *int_pkt;
 	struct compose_comp_ctxt comp;
 	struct tran_int_desc *int_desc;
-	struct cpumask *affinity;
 	struct {
-		struct pci_packet pkt;
-		u8 buffer[sizeof(struct pci_create_interrupt)];
-	} ctxt;
-	int cpu;
+		struct pci_packet pci_pkt;
+		union {
+			struct pci_create_interrupt v1;
+			struct pci_create_interrupt2 v2;
+		} int_pkts;
+	} __packed ctxt;
+
+	u32 size;
 	int ret;

 	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));

@@ -885,36 +1120,44 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)

 	memset(&ctxt, 0, sizeof(ctxt));
 	init_completion(&comp.comp_pkt.host_event);
-	ctxt.pkt.completion_func = hv_pci_compose_compl;
-	ctxt.pkt.compl_ctxt = &comp;
-	int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
-	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
-	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
-	int_pkt->int_desc.vector = cfg->vector;
-	int_pkt->int_desc.vector_count = 1;
-	int_pkt->int_desc.delivery_mode =
-		(apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0;
+	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
+	ctxt.pci_pkt.compl_ctxt = &comp;

-	/*
-	 * This bit doesn't have to work on machines with more than 64
-	 * processors because Hyper-V only supports 64 in a guest.
-	 */
-	affinity = irq_data_get_affinity_mask(data);
-	if (cpumask_weight(affinity) >= 32) {
-		int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
-	} else {
-		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
-			int_pkt->int_desc.cpu_mask |=
-				(1ULL << vmbus_cpu_number_to_vp_number(cpu));
-		}
+	switch (pci_protocol_version) {
+	case PCI_PROTOCOL_VERSION_1_1:
+		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
+					irq_data_get_affinity_mask(data),
+					hpdev->desc.win_slot.slot,
+					cfg->vector);
+		break;
+
+	case PCI_PROTOCOL_VERSION_1_2:
+		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
+					irq_data_get_affinity_mask(data),
+					hpdev->desc.win_slot.slot,
+					cfg->vector);
+		break;
+
+	default:
+		/* As we only negotiate protocol versions known to this driver,
+		 * this path should never hit. However, this is it not a hot
+		 * path so we print a message to aid future updates.
+		 */
+		dev_err(&hbus->hdev->device,
+			"Unexpected vPCI protocol, update driver.");
+		goto free_int_desc;
 	}

-	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
-			       sizeof(*int_pkt), (unsigned long)&ctxt.pkt,
+	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+			       size, (unsigned long)&ctxt.pci_pkt,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (ret)
+	if (ret) {
+		dev_err(&hbus->hdev->device,
+			"Sending request for interrupt failed: 0x%x",
+			comp.comp_pkt.completion_status);
 		goto free_int_desc;
+	}

 	wait_for_completion(&comp.comp_pkt.host_event);

@@ -1513,12 +1756,12 @@ static void pci_devices_present_work(struct work_struct *work)
 		put_pcichild(hpdev, hv_pcidev_ref_initial);
 	}

-	switch(hbus->state) {
+	switch (hbus->state) {
 	case hv_pcibus_installed:
 		/*
 		 * Tell the core to rescan bus
 		 * because there may have been changes.
 		 */
 		pci_lock_rescan_remove();
 		pci_scan_child_bus(hbus->pci_bus);
 		pci_unlock_rescan_remove();

@@ -1800,6 +2043,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
 	struct hv_pci_compl comp_pkt;
 	struct pci_packet *pkt;
 	int ret;
+	int i;

 	/*
 	 * Initiate the handshake with the host and negotiate

@@ -1816,26 +2060,44 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
 	pkt->compl_ctxt = &comp_pkt;
 	version_req = (struct pci_version_request *)&pkt->message;
 	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
-	version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;

-	ret = vmbus_sendpacket(hdev->channel, version_req,
-			       sizeof(struct pci_version_request),
-			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
-			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (ret)
-		goto exit;
+	for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
+		version_req->protocol_version = pci_protocol_versions[i];
+		ret = vmbus_sendpacket(hdev->channel, version_req,
+				sizeof(struct pci_version_request),
+				(unsigned long)pkt, VM_PKT_DATA_INBAND,
+				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		if (ret) {
+			dev_err(&hdev->device,
+				"PCI Pass-through VSP failed sending version reqquest: %#x",
+				ret);
+			goto exit;
+		}

 	wait_for_completion(&comp_pkt.host_event);

-	if (comp_pkt.completion_status < 0) {
-		dev_err(&hdev->device,
-			"PCI Pass-through VSP failed version request %x\n",
-			comp_pkt.completion_status);
-		ret = -EPROTO;
+		if (comp_pkt.completion_status >= 0) {
+			pci_protocol_version = pci_protocol_versions[i];
+			dev_info(&hdev->device,
+				"PCI VMBus probing: Using version %#x\n",
+				pci_protocol_version);
 			goto exit;
+		}
+
+		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
+			dev_err(&hdev->device,
+				"PCI Pass-through VSP failed version request: %#x",
+				comp_pkt.completion_status);
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		reinit_completion(&comp_pkt.host_event);
 	}

-	ret = 0;
+	dev_err(&hdev->device,
+		"PCI pass-through VSP failed to find supported version");
+	ret = -EPROTO;

 exit:
 	kfree(pkt);

@@ -2094,13 +2356,18 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
 {
 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
 	struct pci_resources_assigned *res_assigned;
+	struct pci_resources_assigned2 *res_assigned2;
 	struct hv_pci_compl comp_pkt;
 	struct hv_pci_dev *hpdev;
 	struct pci_packet *pkt;
+	size_t size_res;
 	u32 wslot;
 	int ret;

-	pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL);
+	size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+			? sizeof(*res_assigned) : sizeof(*res_assigned2);
+
+	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
 	if (!pkt)
 		return -ENOMEM;

@@ -2111,22 +2378,30 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
 		if (!hpdev)
 			continue;

-		memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned));
+		memset(pkt, 0, sizeof(*pkt) + size_res);
 		init_completion(&comp_pkt.host_event);
 		pkt->completion_func = hv_pci_generic_compl;
 		pkt->compl_ctxt = &comp_pkt;
-		res_assigned = (struct pci_resources_assigned *)&pkt->message;
-		res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
-		res_assigned->wslot.slot = hpdev->desc.win_slot.slot;

+		if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+			res_assigned =
+				(struct pci_resources_assigned *)&pkt->message;
+			res_assigned->message_type.type =
+				PCI_RESOURCES_ASSIGNED;
+			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+		} else {
+			res_assigned2 =
+				(struct pci_resources_assigned2 *)&pkt->message;
+			res_assigned2->message_type.type =
+				PCI_RESOURCES_ASSIGNED2;
+			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
+		}
 		put_pcichild(hpdev, hv_pcidev_ref_by_slot);

-		ret = vmbus_sendpacket(
-			hdev->channel, &pkt->message,
-			sizeof(*res_assigned),
-			(unsigned long)pkt,
-			VM_PKT_DATA_INBAND,
-			VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+				size_res, (unsigned long)pkt,
+				VM_PKT_DATA_INBAND,
+				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 		if (ret)
 			break;

@@ -2204,11 +2479,19 @@ static int hv_pci_probe(struct hv_device *hdev,
 	struct hv_pcibus_device *hbus;
 	int ret;

-	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
+	/*
+	 * hv_pcibus_device contains the hypercall arguments for retargeting in
+	 * hv_irq_unmask(). Those must not cross a page boundary.
+	 */
+	BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+
+	hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
 	if (!hbus)
 		return -ENOMEM;
 	hbus->state = hv_pcibus_init;

+	hv_tmpcpumap_init();
+
 	/*
 	 * The PCI bus "domain" is what is called "segment" in ACPI and
 	 * other specs. Pull it from the instance ID, to get something

@@ -2308,7 +2591,7 @@ static int hv_pci_probe(struct hv_device *hdev,
 close:
 	vmbus_close(hdev->channel);
 free_bus:
-	kfree(hbus);
+	free_page((unsigned long)hbus);
 	return ret;
 }

@@ -2386,7 +2669,7 @@ static int hv_pci_remove(struct hv_device *hdev)
 	irq_domain_free_fwnode(hbus->sysdata.fwnode);
 	put_hvpcibus(hbus);
 	wait_for_completion(&hbus->remove_event);
-	kfree(hbus);
+	free_page((unsigned long)hbus);
 	return 0;
 }

@@ -429,7 +429,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
 	return 0;
 }

-static struct of_device_id rcar_pci_of_match[] = {
+static const struct of_device_id rcar_pci_of_match[] = {
 	{ .compatible = "renesas,pci-r8a7790", },
 	{ .compatible = "renesas,pci-r8a7791", },
 	{ .compatible = "renesas,pci-r8a7794", },

@@ -233,8 +233,8 @@ struct tegra_msi {
 	struct msi_controller chip;
 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
 	struct irq_domain *domain;
-	unsigned long pages;
 	struct mutex lock;
+	u64 phys;
 	int irq;
 };

@@ -1448,9 +1448,8 @@ static int tegra_msi_setup_irq(struct msi_controller *chip,

 	irq_set_msi_desc(irq, desc);

-	msg.address_lo = virt_to_phys((void *)msi->pages);
-	/* 32 bit address only */
-	msg.address_hi = 0;
+	msg.address_lo = lower_32_bits(msi->phys);
+	msg.address_hi = upper_32_bits(msi->phys);
 	msg.data = hwirq;

 	pci_write_msi_msg(irq, &msg);

@@ -1499,7 +1498,6 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_msi *msi = &pcie->msi;
 	struct device *dev = pcie->dev;
-	unsigned long base;
 	int err;
 	u32 reg;

@@ -1531,12 +1529,25 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 		goto err;
 	}

-	/* setup AFI/FPCI range */
-	msi->pages = __get_free_pages(GFP_KERNEL, 0);
-	base = virt_to_phys((void *)msi->pages);
+	/*
+	 * The PCI host bridge on Tegra contains some logic that intercepts
+	 * MSI writes, which means that the MSI target address doesn't have
+	 * to point to actual physical memory. Rather than allocating one 4
+	 * KiB page of system memory that's never used, we can simply pick
+	 * an arbitrary address within an area reserved for system memory
+	 * in the FPCI address map.
+	 *
+	 * However, in order to avoid confusion, we pick an address that
+	 * doesn't map to physical memory. The FPCI address map reserves a
+	 * 1012 GiB region for system memory and memory-mapped I/O. Since
+	 * none of the Tegra SoCs that contain this PCI host bridge can
+	 * address more than 16 GiB of system memory, the last 4 KiB of
+	 * these 1012 GiB is a good candidate.
+	 */
+	msi->phys = 0xfcfffff000;

-	afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
-	afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
+	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
+	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
 	/* this register is in 4K increments */
 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

@@ -1585,8 +1596,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
 	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
 	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

-	free_pages(msi->pages, 0);
-
 	if (msi->irq > 0)
 		free_irq(msi->irq, pcie);

@@ -2238,7 +2247,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
 	struct pci_bus *child;
 	int err;

-	host = pci_alloc_host_bridge(sizeof(*pcie));
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
 	if (!host)
 		return -ENOMEM;

@@ -2284,16 +2293,15 @@ static int tegra_pcie_probe(struct platform_device *pdev)
 	host->busnr = pcie->busn.start;
 	host->dev.parent = &pdev->dev;
 	host->ops = &tegra_pcie_ops;
host->ops = &tegra_pcie_ops;
|
||||||
|
host->map_irq = tegra_pcie_map_irq;
|
||||||
|
host->swizzle_irq = pci_common_swizzle;
|
||||||
|
|
||||||
err = pci_register_host_bridge(host);
|
err = pci_scan_root_bus_bridge(host);
|
||||||
if (err < 0) {
|
if (err < 0) {
|
||||||
dev_err(dev, "failed to register host: %d\n", err);
|
dev_err(dev, "failed to register host: %d\n", err);
|
||||||
goto disable_msi;
|
goto disable_msi;
|
||||||
}
|
}
|
||||||
|
|
||||||
pci_scan_child_bus(host->bus);
|
|
||||||
|
|
||||||
pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
|
|
||||||
pci_bus_size_bridges(host->bus);
|
pci_bus_size_bridges(host->bus);
|
||||||
pci_bus_assign_resources(host->bus);
|
pci_bus_assign_resources(host->bus);
|
||||||
|
|
||||||
|
|
|
@@ -120,30 +120,35 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
 
 static int versatile_pci_probe(struct platform_device *pdev)
 {
+    struct device *dev = &pdev->dev;
     struct resource *res;
     int ret, i, myslot = -1;
     u32 val;
     void __iomem *local_pci_cfg_base;
     struct pci_bus *bus, *child;
+    struct pci_host_bridge *bridge;
     LIST_HEAD(pci_res);
 
+    bridge = devm_pci_alloc_host_bridge(dev, 0);
+    if (!bridge)
+        return -ENOMEM;
+
     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-    versatile_pci_base = devm_ioremap_resource(&pdev->dev, res);
+    versatile_pci_base = devm_ioremap_resource(dev, res);
     if (IS_ERR(versatile_pci_base))
         return PTR_ERR(versatile_pci_base);
 
     res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-    versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res);
+    versatile_cfg_base[0] = devm_ioremap_resource(dev, res);
     if (IS_ERR(versatile_cfg_base[0]))
         return PTR_ERR(versatile_cfg_base[0]);
 
     res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-    versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev,
-                                                        res);
+    versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res);
     if (IS_ERR(versatile_cfg_base[1]))
         return PTR_ERR(versatile_cfg_base[1]);
 
-    ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res);
+    ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
     if (ret)
         return ret;
 
@@ -159,7 +164,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
         }
     }
     if (myslot == -1) {
-        dev_err(&pdev->dev, "Cannot find PCI core!\n");
+        dev_err(dev, "Cannot find PCI core!\n");
         return -EIO;
     }
     /*
@@ -167,7 +172,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
      */
     pci_slot_ignore |= (1 << myslot);
 
-    dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot);
+    dev_info(dev, "PCI core found (slot %d)\n", myslot);
 
     writel(myslot, PCI_SELFID);
     local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
@@ -199,11 +204,20 @@ static int versatile_pci_probe(struct platform_device *pdev)
     pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
     pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
 
-    bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, NULL, &pci_res);
-    if (!bus)
-        return -ENOMEM;
+    list_splice_init(&pci_res, &bridge->windows);
+    bridge->dev.parent = dev;
+    bridge->sysdata = NULL;
+    bridge->busnr = 0;
+    bridge->ops = &pci_versatile_ops;
+    bridge->map_irq = of_irq_parse_and_map_pci;
+    bridge->swizzle_irq = pci_common_swizzle;
+
+    ret = pci_scan_root_bus_bridge(bridge);
+    if (ret < 0)
+        return ret;
+
+    bus = bridge->bus;
 
-    pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
     pci_assign_unassigned_bus_resources(bus);
     list_for_each_entry(child, &bus->children, node)
         pcie_bus_configure_settings(child);
@@ -636,13 +636,16 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
     struct xgene_pcie_port *port;
     resource_size_t iobase = 0;
     struct pci_bus *bus, *child;
+    struct pci_host_bridge *bridge;
     int ret;
     LIST_HEAD(res);
 
-    port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
-    if (!port)
+    bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+    if (!bridge)
         return -ENOMEM;
 
+    port = pci_host_bridge_priv(bridge);
+
     port->node = of_node_get(dn);
     port->dev = dev;
 
@@ -670,11 +673,19 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
     if (ret)
         goto error;
 
-    bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
-    if (!bus) {
-        ret = -ENOMEM;
+    list_splice_init(&res, &bridge->windows);
+    bridge->dev.parent = dev;
+    bridge->sysdata = port;
+    bridge->busnr = 0;
+    bridge->ops = &xgene_pcie_ops;
+    bridge->map_irq = of_irq_parse_and_map_pci;
+    bridge->swizzle_irq = pci_common_swizzle;
+
+    ret = pci_scan_root_bus_bridge(bridge);
+    if (ret < 0)
         goto error;
-    }
+
+    bus = bridge->bus;
 
     pci_scan_child_bus(bus);
     pci_assign_unassigned_bus_resources(bus);
@@ -579,12 +579,14 @@ static int altera_pcie_probe(struct platform_device *pdev)
     struct altera_pcie *pcie;
     struct pci_bus *bus;
     struct pci_bus *child;
+    struct pci_host_bridge *bridge;
     int ret;
 
-    pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-    if (!pcie)
+    bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+    if (!bridge)
         return -ENOMEM;
 
+    pcie = pci_host_bridge_priv(bridge);
     pcie->pdev = pdev;
 
     ret = altera_pcie_parse_dt(pcie);
@@ -613,12 +615,20 @@ static int altera_pcie_probe(struct platform_device *pdev)
     cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
     altera_pcie_host_init(pcie);
 
-    bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
-                pcie, &pcie->resources);
-    if (!bus)
-        return -ENOMEM;
+    list_splice_init(&pcie->resources, &bridge->windows);
+    bridge->dev.parent = dev;
+    bridge->sysdata = pcie;
+    bridge->busnr = pcie->root_bus_nr;
+    bridge->ops = &altera_pcie_ops;
+    bridge->map_irq = of_irq_parse_and_map_pci;
+    bridge->swizzle_irq = pci_common_swizzle;
+
+    ret = pci_scan_root_bus_bridge(bridge);
+    if (ret < 0)
+        return ret;
+
+    bus = bridge->bus;
 
-    pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
     pci_assign_unassigned_bus_resources(bus);
 
     /* Configure PCI Express setting. */
@@ -45,12 +45,15 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
     struct device *dev = &bdev->dev;
     struct iproc_pcie *pcie;
     LIST_HEAD(resources);
+    struct pci_host_bridge *bridge;
     int ret;
 
-    pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-    if (!pcie)
+    bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+    if (!bridge)
         return -ENOMEM;
 
+    pcie = pci_host_bridge_priv(bridge);
+
     pcie->dev = dev;
 
     pcie->type = IPROC_PCIE_PAXB_BCMA;
@@ -52,12 +52,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
     struct resource reg;
     resource_size_t iobase = 0;
     LIST_HEAD(resources);
+    struct pci_host_bridge *bridge;
     int ret;
 
-    pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-    if (!pcie)
+    bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+    if (!bridge)
         return -ENOMEM;
 
+    pcie = pci_host_bridge_priv(bridge);
+
     pcie->dev = dev;
     pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
 
@@ -452,14 +452,13 @@ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
  * Note access to the configuration registers are protected at the higher layer
  * by 'pci_lock' in drivers/pci/access.c
  */
-static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
+static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
+                                            int busno,
                                             unsigned int devfn,
                                             int where)
 {
-    struct iproc_pcie *pcie = iproc_data(bus);
     unsigned slot = PCI_SLOT(devfn);
     unsigned fn = PCI_FUNC(devfn);
-    unsigned busno = bus->number;
     u32 val;
     u16 offset;
 
@@ -499,6 +498,58 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
     return (pcie->base + offset);
 }
 
+static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
+                                                unsigned int devfn,
+                                                int where)
+{
+    return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
+                                  where);
+}
+
+static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
+                                       unsigned int devfn, int where,
+                                       int size, u32 *val)
+{
+    void __iomem *addr;
+
+    addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+    if (!addr) {
+        *val = ~0;
+        return PCIBIOS_DEVICE_NOT_FOUND;
+    }
+
+    *val = readl(addr);
+
+    if (size <= 2)
+        *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+    return PCIBIOS_SUCCESSFUL;
+}
+
+static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
+                                        unsigned int devfn, int where,
+                                        int size, u32 val)
+{
+    void __iomem *addr;
+    u32 mask, tmp;
+
+    addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+    if (!addr)
+        return PCIBIOS_DEVICE_NOT_FOUND;
+
+    if (size == 4) {
+        writel(val, addr);
+        return PCIBIOS_SUCCESSFUL;
+    }
+
+    mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+    tmp = readl(addr) & mask;
+    tmp |= val << ((where & 0x3) * 8);
+    writel(tmp, addr);
+
+    return PCIBIOS_SUCCESSFUL;
+}
+
 static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
                                     int where, int size, u32 *val)
 {
@@ -524,7 +575,7 @@ static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops iproc_pcie_ops = {
-    .map_bus = iproc_pcie_map_cfg_bus,
+    .map_bus = iproc_pcie_bus_map_cfg_bus,
     .read = iproc_pcie_config_read32,
     .write = iproc_pcie_config_write32,
 };
@@ -556,12 +607,11 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
     msleep(100);
 }
 
-static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
+static int iproc_pcie_check_link(struct iproc_pcie *pcie)
 {
     struct device *dev = pcie->dev;
-    u8 hdr_type;
-    u32 link_ctrl, class, val;
-    u16 pos = PCI_EXP_CAP, link_status;
+    u32 hdr_type, link_ctrl, link_status, class, val;
+    u16 pos = PCI_EXP_CAP;
     bool link_is_active = false;
 
     /*
@@ -578,7 +628,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
     }
 
     /* make sure we are not in EP mode */
-    pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
+    iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
     if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
         dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
         return -EFAULT;
@@ -588,13 +638,16 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
 #define PCI_CLASS_BRIDGE_MASK      0xffff00
 #define PCI_CLASS_BRIDGE_SHIFT     8
-    pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class);
+    iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+                                4, &class);
     class &= ~PCI_CLASS_BRIDGE_MASK;
     class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
-    pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
+    iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+                                 4, class);
 
     /* check link status to see if link is active */
-    pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
+    iproc_pci_raw_config_read32(pcie, 0, pos + PCI_EXP_LNKSTA,
+                                2, &link_status);
     if (link_status & PCI_EXP_LNKSTA_NLW)
         link_is_active = true;
 
@@ -603,20 +656,21 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 #define PCI_TARGET_LINK_SPEED_MASK 0xf
 #define PCI_TARGET_LINK_SPEED_GEN2 0x2
 #define PCI_TARGET_LINK_SPEED_GEN1 0x1
-    pci_bus_read_config_dword(bus, 0,
-                              pos + PCI_EXP_LNKCTL2,
-                              &link_ctrl);
+    iproc_pci_raw_config_read32(pcie, 0,
+                                pos + PCI_EXP_LNKCTL2, 4,
+                                &link_ctrl);
     if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
         PCI_TARGET_LINK_SPEED_GEN2) {
         link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
         link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
-        pci_bus_write_config_dword(bus, 0,
-                                   pos + PCI_EXP_LNKCTL2,
-                                   link_ctrl);
+        iproc_pci_raw_config_write32(pcie, 0,
+                                     pos + PCI_EXP_LNKCTL2,
+                                     4, link_ctrl);
         msleep(100);
 
-        pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
-                                 &link_status);
+        iproc_pci_raw_config_read32(pcie, 0,
+                                    pos + PCI_EXP_LNKSTA,
+                                    2, &link_status);
         if (link_status & PCI_EXP_LNKSTA_NLW)
             link_is_active = true;
     }
@@ -1205,7 +1259,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
     struct device *dev;
     int ret;
     void *sysdata;
-    struct pci_bus *bus, *child;
+    struct pci_bus *child;
+    struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
 
     dev = pcie->dev;
 
@@ -1252,18 +1307,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
     sysdata = pcie;
 #endif
 
-    bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
-    if (!bus) {
-        dev_err(dev, "unable to create PCI root bus\n");
-        ret = -ENOMEM;
-        goto err_power_off_phy;
-    }
-    pcie->root_bus = bus;
-
-    ret = iproc_pcie_check_link(pcie, bus);
+    ret = iproc_pcie_check_link(pcie);
     if (ret) {
         dev_err(dev, "no PCIe EP device detected\n");
-        goto err_rm_root_bus;
+        goto err_power_off_phy;
     }
 
     iproc_pcie_enable(pcie);
@@ -1272,23 +1319,31 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
     if (iproc_pcie_msi_enable(pcie))
         dev_info(dev, "not using iProc MSI\n");
 
-    pci_scan_child_bus(bus);
-    pci_assign_unassigned_bus_resources(bus);
+    list_splice_init(res, &host->windows);
+    host->busnr = 0;
+    host->dev.parent = dev;
+    host->ops = &iproc_pcie_ops;
+    host->sysdata = sysdata;
+    host->map_irq = pcie->map_irq;
+    host->swizzle_irq = pci_common_swizzle;
 
-    if (pcie->map_irq)
-        pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+    ret = pci_scan_root_bus_bridge(host);
+    if (ret < 0) {
+        dev_err(dev, "failed to scan host: %d\n", ret);
+        goto err_power_off_phy;
+    }
+
+    pci_assign_unassigned_bus_resources(host->bus);
+
+    pcie->root_bus = host->bus;
 
-    list_for_each_entry(child, &bus->children, node)
+    list_for_each_entry(child, &host->bus->children, node)
         pcie_bus_configure_settings(child);
 
-    pci_bus_add_devices(bus);
+    pci_bus_add_devices(host->bus);
 
     return 0;
 
-err_rm_root_bus:
-    pci_stop_root_bus(bus);
-    pci_remove_root_bus(bus);
-
 err_power_off_phy:
     phy_power_off(pcie->phy);
 err_exit_phy:
@@ -0,0 +1,554 @@
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+/* PCIe shared registers */
+#define PCIE_SYS_CFG        0x00
+#define PCIE_INT_ENABLE     0x0c
+#define PCIE_CFG_ADDR       0x20
+#define PCIE_CFG_DATA       0x24
+
+/* PCIe per port registers */
+#define PCIE_BAR0_SETUP     0x10
+#define PCIE_CLASS          0x34
+#define PCIE_LINK_STATUS    0x50
+
+#define PCIE_PORT_INT_EN(x) BIT(20 + (x))
+#define PCIE_PORT_PERST(x)  BIT(1 + (x))
+#define PCIE_PORT_LINKUP    BIT(0)
+#define PCIE_BAR_MAP_MAX    GENMASK(31, 16)
+
+#define PCIE_BAR_ENABLE     BIT(0)
+#define PCIE_REVISION_ID    BIT(0)
+#define PCIE_CLASS_CODE     (0x60400 << 8)
+#define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \
+                            ((((regn) >> 8) & GENMASK(3, 0)) << 24))
+#define PCIE_CONF_FUN(fun)  (((fun) << 8) & GENMASK(10, 8))
+#define PCIE_CONF_DEV(dev)  (((dev) << 11) & GENMASK(15, 11))
+#define PCIE_CONF_BUS(bus)  (((bus) << 16) & GENMASK(23, 16))
+#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
+    (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
+     PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
+
+/* MediaTek specific configuration registers */
+#define PCIE_FTS_NUM        0x70c
+#define PCIE_FTS_NUM_MASK   GENMASK(15, 8)
+#define PCIE_FTS_NUM_L0(x)  ((x) & 0xff << 8)
+
+#define PCIE_FC_CREDIT      0x73c
+#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16))
+#define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
+
+/**
+ * struct mtk_pcie_port - PCIe port information
+ * @base: IO mapped register base
+ * @list: port list
+ * @pcie: pointer to PCIe host info
+ * @reset: pointer to port reset control
+ * @sys_ck: pointer to bus clock
+ * @phy: pointer to phy control block
+ * @lane: lane count
+ * @index: port index
+ */
+struct mtk_pcie_port {
+    void __iomem *base;
+    struct list_head list;
+    struct mtk_pcie *pcie;
+    struct reset_control *reset;
+    struct clk *sys_ck;
+    struct phy *phy;
+    u32 lane;
+    u32 index;
+};
+
+/**
+ * struct mtk_pcie - PCIe host information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
+ * @free_ck: free-run reference clock
+ * @io: IO resource
+ * @pio: PIO resource
+ * @mem: non-prefetchable memory resource
+ * @busn: bus range
+ * @offset: IO / Memory offset
+ * @ports: pointer to PCIe port information
+ */
+struct mtk_pcie {
+    struct device *dev;
+    void __iomem *base;
+    struct clk *free_ck;
+
+    struct resource io;
+    struct resource pio;
+    struct resource mem;
+    struct resource busn;
+    struct {
+        resource_size_t mem;
+        resource_size_t io;
+    } offset;
+    struct list_head ports;
+};
+
+static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port)
+{
+    return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP);
+}
+
+static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
+{
+    struct device *dev = pcie->dev;
+
+    clk_disable_unprepare(pcie->free_ck);
+
+    if (dev->pm_domain) {
+        pm_runtime_put_sync(dev);
+        pm_runtime_disable(dev);
+    }
+}
+
+static void mtk_pcie_port_free(struct mtk_pcie_port *port)
+{
+    struct mtk_pcie *pcie = port->pcie;
+    struct device *dev = pcie->dev;
+
+    devm_iounmap(dev, port->base);
+    list_del(&port->list);
+    devm_kfree(dev, port);
+}
+
+static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
+{
+    struct mtk_pcie_port *port, *tmp;
+
+    list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+        phy_power_off(port->phy);
+        clk_disable_unprepare(port->sys_ck);
+        mtk_pcie_port_free(port);
+    }
+
+    mtk_pcie_subsys_powerdown(pcie);
+}
+
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
+                                      unsigned int devfn, int where)
+{
+    struct pci_host_bridge *host = pci_find_host_bridge(bus);
+    struct mtk_pcie *pcie = pci_host_bridge_priv(host);
+
+    writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
+                          bus->number), pcie->base + PCIE_CFG_ADDR);
+
+    return pcie->base + PCIE_CFG_DATA + (where & 3);
+}
+
+static struct pci_ops mtk_pcie_ops = {
+    .map_bus = mtk_pcie_map_bus,
+    .read = pci_generic_config_read,
+    .write = pci_generic_config_write,
+};
+
+static void mtk_pcie_configure_rc(struct mtk_pcie_port *port)
+{
+    struct mtk_pcie *pcie = port->pcie;
+    u32 func = PCI_FUNC(port->index << 3);
+    u32 slot = PCI_SLOT(port->index << 3);
+    u32 val;
+
+    /* enable interrupt */
+    val = readl(pcie->base + PCIE_INT_ENABLE);
+    val |= PCIE_PORT_INT_EN(port->index);
+    writel(val, pcie->base + PCIE_INT_ENABLE);
+
+    /* map to all DDR region. We need to set it before cfg operation. */
+    writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
+           port->base + PCIE_BAR0_SETUP);
+
+    /* configure class code and revision ID */
+    writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);
+
+    /* configure FC credit */
+    writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+           pcie->base + PCIE_CFG_ADDR);
+    val = readl(pcie->base + PCIE_CFG_DATA);
+    val &= ~PCIE_FC_CREDIT_MASK;
+    val |= PCIE_FC_CREDIT_VAL(0x806c);
+    writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+           pcie->base + PCIE_CFG_ADDR);
+    writel(val, pcie->base + PCIE_CFG_DATA);
+
+    /* configure RC FTS number to 250 when it leaves L0s */
+    writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+           pcie->base + PCIE_CFG_ADDR);
+    val = readl(pcie->base + PCIE_CFG_DATA);
+    val &= ~PCIE_FTS_NUM_MASK;
+    val |= PCIE_FTS_NUM_L0(0x50);
+    writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+           pcie->base + PCIE_CFG_ADDR);
+    writel(val, pcie->base + PCIE_CFG_DATA);
+}
+
+static void mtk_pcie_assert_ports(struct mtk_pcie_port *port)
+{
+    struct mtk_pcie *pcie = port->pcie;
+    u32 val;
+
+    /* assert port PERST_N */
+    val = readl(pcie->base + PCIE_SYS_CFG);
+    val |= PCIE_PORT_PERST(port->index);
+    writel(val, pcie->base + PCIE_SYS_CFG);
+
+    /* de-assert port PERST_N */
+    val = readl(pcie->base + PCIE_SYS_CFG);
+    val &= ~PCIE_PORT_PERST(port->index);
+    writel(val, pcie->base + PCIE_SYS_CFG);
+
+    /* PCIe v2.0 need at least 100ms delay to train from Gen1 to Gen2 */
+    msleep(100);
+}
+
+static void mtk_pcie_enable_ports(struct mtk_pcie_port *port)
+{
+    struct device *dev = port->pcie->dev;
+    int err;
+
+    err = clk_prepare_enable(port->sys_ck);
+    if (err) {
+        dev_err(dev, "failed to enable port%d clock\n", port->index);
+        goto err_sys_clk;
+    }
+
+    reset_control_assert(port->reset);
+    reset_control_deassert(port->reset);
+
+    err = phy_power_on(port->phy);
+    if (err) {
+        dev_err(dev, "failed to power on port%d phy\n", port->index);
+        goto err_phy_on;
+    }
+
+    mtk_pcie_assert_ports(port);
+
+    /* if link up, then setup root port configuration space */
+    if (mtk_pcie_link_up(port)) {
+        mtk_pcie_configure_rc(port);
+        return;
+    }
+
+    dev_info(dev, "Port%d link down\n", port->index);
+
+    phy_power_off(port->phy);
+err_phy_on:
+    clk_disable_unprepare(port->sys_ck);
+err_sys_clk:
+    mtk_pcie_port_free(port);
+}
+
+static int mtk_pcie_parse_ports(struct mtk_pcie *pcie,
+                                struct device_node *node,
+                                int index)
+{
+    struct mtk_pcie_port *port;
+    struct resource *regs;
+    struct device *dev = pcie->dev;
+    struct platform_device *pdev = to_platform_device(dev);
+    char name[10];
+    int err;
+
+    port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+    if (!port)
+        return -ENOMEM;
+
+    err = of_property_read_u32(node, "num-lanes", &port->lane);
+    if (err) {
+        dev_err(dev, "missing num-lanes property\n");
+        return err;
+    }
+
+    regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
+    port->base = devm_ioremap_resource(dev, regs);
+    if (IS_ERR(port->base)) {
+        dev_err(dev, "failed to map port%d base\n", index);
+        return PTR_ERR(port->base);
+    }
+
+    snprintf(name, sizeof(name), "sys_ck%d", index);
+    port->sys_ck = devm_clk_get(dev, name);
+    if (IS_ERR(port->sys_ck)) {
+        dev_err(dev, "failed to get port%d clock\n", index);
+        return PTR_ERR(port->sys_ck);
+    }
+
+    snprintf(name, sizeof(name), "pcie-rst%d", index);
+    port->reset = devm_reset_control_get_optional(dev, name);
+    if (PTR_ERR(port->reset) == -EPROBE_DEFER)
+        return PTR_ERR(port->reset);
+
+    /* some platforms may use default PHY setting */
+    snprintf(name, sizeof(name), "pcie-phy%d", index);
+    port->phy = devm_phy_optional_get(dev, name);
+    if (IS_ERR(port->phy))
+        return PTR_ERR(port->phy);
+
+    port->index = index;
+    port->pcie = pcie;
+
+    INIT_LIST_HEAD(&port->list);
+    list_add_tail(&port->list, &pcie->ports);
+
+    return 0;
+}
+
+static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
+{
+    struct device *dev = pcie->dev;
+    struct platform_device *pdev = to_platform_device(dev);
+    struct resource *regs;
+    int err;
+
+    /* get shared registers */
+    regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    pcie->base = devm_ioremap_resource(dev, regs);
+    if (IS_ERR(pcie->base)) {
+        dev_err(dev, "failed to map shared register\n");
+        return PTR_ERR(pcie->base);
+    }
+
+    pcie->free_ck = devm_clk_get(dev, "free_ck");
+    if (IS_ERR(pcie->free_ck)) {
+        if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
+            return -EPROBE_DEFER;
+
+        pcie->free_ck = NULL;
+    }
+
+    if (dev->pm_domain) {
+        pm_runtime_enable(dev);
+        pm_runtime_get_sync(dev);
+    }
+
+    /* enable top level clock */
+    err = clk_prepare_enable(pcie->free_ck);
+    if (err) {
+        dev_err(dev, "failed to enable free_ck\n");
+        goto err_free_ck;
+    }
+
+    return 0;
+
+err_free_ck:
+    if (dev->pm_domain) {
+        pm_runtime_put_sync(dev);
+        pm_runtime_disable(dev);
+    }
+
+    return err;
+}
+
+static int mtk_pcie_setup(struct mtk_pcie *pcie)
+{
+    struct device *dev = pcie->dev;
+    struct device_node *node = dev->of_node, *child;
+    struct of_pci_range_parser parser;
+    struct of_pci_range range;
+    struct resource res;
+    struct mtk_pcie_port *port, *tmp;
+    int err;
+
+    if (of_pci_range_parser_init(&parser, node)) {
+        dev_err(dev, "missing \"ranges\" property\n");
+        return -EINVAL;
+    }
+
+    for_each_of_pci_range(&parser, &range) {
+        err = of_pci_range_to_resource(&range, node, &res);
+        if (err < 0)
+            return err;
+
+        switch (res.flags & IORESOURCE_TYPE_BITS) {
+        case IORESOURCE_IO:
+            pcie->offset.io = res.start - range.pci_addr;
+
+            memcpy(&pcie->pio, &res, sizeof(res));
+            pcie->pio.name = node->full_name;
+
+            pcie->io.start = range.cpu_addr;
+            pcie->io.end = range.cpu_addr + range.size - 1;
+            pcie->io.flags = IORESOURCE_MEM;
+            pcie->io.name = "I/O";
+
+            memcpy(&res, &pcie->io, sizeof(res));
+            break;
+
+        case IORESOURCE_MEM:
+            pcie->offset.mem = res.start - range.pci_addr;
+
+            memcpy(&pcie->mem, &res, sizeof(res));
+            pcie->mem.name = "non-prefetchable";
+            break;
+        }
+    }
+
+    err = of_pci_parse_bus_range(node, &pcie->busn);
+    if (err < 0) {
+        dev_err(dev, "failed to parse bus ranges property: %d\n", err);
+        pcie->busn.name = node->name;
+        pcie->busn.start = 0;
+        pcie->busn.end = 0xff;
+        pcie->busn.flags = IORESOURCE_BUS;
+    }
+
+    for_each_available_child_of_node(node, child) {
+        int index;
+
+        err = of_pci_get_devfn(child);
+        if (err < 0) {
+            dev_err(dev, "failed to parse devfn: %d\n", err);
+            return err;
+        }
+
+        index = PCI_SLOT(err);
+
+        err = mtk_pcie_parse_ports(pcie, child, index);
+        if (err)
+            return err;
+    }
+
+    err = mtk_pcie_subsys_powerup(pcie);
+    if (err)
+        return err;
+
+    /* enable each port, and then check link status */
+    list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+        mtk_pcie_enable_ports(port);
+
+    /* power down PCIe subsys if slots are all empty (link down) */
+    if (list_empty(&pcie->ports))
+        mtk_pcie_subsys_powerdown(pcie);
+
+    return 0;
+}
+
+static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
+{
+    struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+    struct list_head *windows = &host->windows;
+    struct device *dev = pcie->dev;
+    int err;
+
+    pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
+    pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
+    pci_add_resource(windows, &pcie->busn);
+
+    err = devm_request_pci_bus_resources(dev, windows);
+    if (err < 0)
+        return err;
+
+    pci_remap_iospace(&pcie->pio, pcie->io.start);
+
+    return 0;
+}
+
+static int mtk_pcie_register_host(struct pci_host_bridge *host)
+{
+    struct mtk_pcie *pcie = pci_host_bridge_priv(host);
+    struct pci_bus *child;
+    int err;
+
+    host->busnr = pcie->busn.start;
+    host->dev.parent = pcie->dev;
+    host->ops = &mtk_pcie_ops;
+    host->map_irq = of_irq_parse_and_map_pci;
+    host->swizzle_irq = pci_common_swizzle;
+
+    err = pci_scan_root_bus_bridge(host);
+    if (err < 0)
+        return err;
+
+    pci_bus_size_bridges(host->bus);
+    pci_bus_assign_resources(host->bus);
+
+    list_for_each_entry(child, &host->bus->children, node)
+        pcie_bus_configure_settings(child);
+
+    pci_bus_add_devices(host->bus);
+
+    return 0;
+}
+
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+    struct device *dev = &pdev->dev;
+    struct mtk_pcie *pcie;
+    struct pci_host_bridge *host;
+    int err;
+
+    host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+    if (!host)
+        return -ENOMEM;
+
+    pcie = pci_host_bridge_priv(host);
+
+    pcie->dev = dev;
+    platform_set_drvdata(pdev, pcie);
+    INIT_LIST_HEAD(&pcie->ports);
+
+    err = mtk_pcie_setup(pcie);
+    if (err)
+        return err;
+
+    err = mtk_pcie_request_resources(pcie);
+    if (err)
+        goto put_resources;
+
+    err = mtk_pcie_register_host(host);
+    if (err)
+        goto put_resources;
+
+    return 0;
+
+put_resources:
+    if (!list_empty(&pcie->ports))
+        mtk_pcie_put_resources(pcie);
+
+    return err;
+}
+
+static const struct of_device_id mtk_pcie_ids[] = {
+    { .compatible = "mediatek,mt7623-pcie"},
+    { .compatible = "mediatek,mt2701-pcie"},
+    {},
+};
+
+static struct platform_driver mtk_pcie_driver = {
+    .probe = mtk_pcie_probe,
+    .driver = {
+        .name = "mtk-pcie",
+        .of_match_table = mtk_pcie_ids,
+        .suppress_bind_attrs = true,
+    },
+};
+builtin_platform_driver(mtk_pcie_driver);
@@ -450,29 +450,33 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
 static int rcar_pcie_enable(struct rcar_pcie *pcie)
 {
     struct device *dev = pcie->dev;
+    struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
     struct pci_bus *bus, *child;
-    LIST_HEAD(res);
+    int ret;
 
     /* Try setting 5 GT/s link speed */
     rcar_pcie_force_speedup(pcie);
 
-    rcar_pcie_setup(&res, pcie);
+    rcar_pcie_setup(&bridge->windows, pcie);
 
     pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
+    bridge->dev.parent = dev;
+    bridge->sysdata = pcie;
+    bridge->busnr = pcie->root_bus_nr;
+    bridge->ops = &rcar_pcie_ops;
+    bridge->map_irq = of_irq_parse_and_map_pci;
+    bridge->swizzle_irq = pci_common_swizzle;
     if (IS_ENABLED(CONFIG_PCI_MSI))
-        bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
-                &rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
-    else
-        bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
-                &rcar_pcie_ops, pcie, &res);
+        bridge->msi = &pcie->msi.chip;
 
-    if (!bus) {
-        dev_err(dev, "Scanning rootbus failed");
-        return -ENODEV;
+    ret = pci_scan_root_bus_bridge(bridge);
+    if (ret < 0) {
+        kfree(bridge);
+        return ret;
     }
 
-    pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+    bus = bridge->bus;
 
     pci_bus_size_bridges(bus);
     pci_bus_assign_resources(bus);
@@ -1127,11 +1131,14 @@ static int rcar_pcie_probe(struct platform_device *pdev)
     unsigned int data;
     int err;
     int (*hw_init_fn)(struct rcar_pcie *);
+    struct pci_host_bridge *bridge;
 
-    pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-    if (!pcie)
+    bridge = pci_alloc_host_bridge(sizeof(*pcie));
+    if (!bridge)
         return -ENOMEM;
 
+    pcie = pci_host_bridge_priv(bridge);
+
     pcie->dev = dev;
 
     INIT_LIST_HEAD(&pcie->resources);
@@ -1141,12 +1148,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
     err = rcar_pcie_get_resources(pcie);
     if (err < 0) {
         dev_err(dev, "failed to request resources: %d\n", err);
-        return err;
+        goto err_free_bridge;
     }
 
     err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
     if (err)
-        return err;
+        goto err_free_bridge;
 
     pm_runtime_enable(dev);
     err = pm_runtime_get_sync(dev);
@@ -1183,6 +1190,9 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 
     return 0;
 
+err_free_bridge:
+    pci_free_host_bridge(bridge);
+
 err_pm_put:
     pm_runtime_put(dev);
 
@ -139,6 +139,7 @@
|
||||||
PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
|
PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
|
||||||
PCIE_CORE_INT_MMVC)
|
PCIE_CORE_INT_MMVC)
|
||||||
|
|
||||||
|
#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
|
||||||
#define PCIE_RC_CONFIG_BASE 0xa00000
|
#define PCIE_RC_CONFIG_BASE 0xa00000
|
||||||
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
|
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
|
||||||
#define PCIE_RC_CONFIG_SCC_SHIFT 16
|
#define PCIE_RC_CONFIG_SCC_SHIFT 16
|
||||||
|
@ -146,6 +147,9 @@
|
||||||
#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
|
#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
|
||||||
#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
|
#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
|
||||||
#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
|
#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
|
||||||
|
#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
|
||||||
|
#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
|
||||||
|
#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
|
||||||
#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
|
#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
|
||||||
#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
|
#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
|
||||||
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
|
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
|
||||||
|
@ -175,6 +179,8 @@
|
||||||
#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
|
#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
|
||||||
#define AXI_WRAPPER_IO_WRITE 0x6
|
#define AXI_WRAPPER_IO_WRITE 0x6
|
||||||
#define AXI_WRAPPER_MEM_WRITE 0x2
|
#define AXI_WRAPPER_MEM_WRITE 0x2
|
||||||
|
#define AXI_WRAPPER_TYPE0_CFG 0xa
|
||||||
|
#define AXI_WRAPPER_TYPE1_CFG 0xb
|
||||||
#define AXI_WRAPPER_NOR_MSG 0xc
|
#define AXI_WRAPPER_NOR_MSG 0xc
|
||||||
|
|
||||||
#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
|
#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
|
||||||
|
@ -198,6 +204,7 @@
|
||||||
#define RC_REGION_0_ADDR_TRANS_H 0x00000000
|
#define RC_REGION_0_ADDR_TRANS_H 0x00000000
|
||||||
#define RC_REGION_0_ADDR_TRANS_L 0x00000000
|
#define RC_REGION_0_ADDR_TRANS_L 0x00000000
|
||||||
#define RC_REGION_0_PASS_BITS (25 - 1)
|
#define RC_REGION_0_PASS_BITS (25 - 1)
|
||||||
|
#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
|
||||||
#define MAX_AXI_WRAPPER_REGION_NUM 33
|
#define MAX_AXI_WRAPPER_REGION_NUM 33
|
||||||
|
|
||||||
struct rockchip_pcie {
|
struct rockchip_pcie {
|
||||||
|
@ -295,7 +302,9 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
|
||||||
static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
|
static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
|
||||||
int where, int size, u32 *val)
|
int where, int size, u32 *val)
|
||||||
{
|
{
|
||||||
void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where;
|
void __iomem *addr;
|
||||||
|
|
||||||
|
addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
|
||||||
|
|
||||||
if (!IS_ALIGNED((uintptr_t)addr, size)) {
|
if (!IS_ALIGNED((uintptr_t)addr, size)) {
|
||||||
*val = 0;
|
*val = 0;
|
||||||
|
@ -319,11 +328,13 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
|
||||||
int where, int size, u32 val)
|
int where, int size, u32 val)
|
||||||
{
|
{
|
||||||
u32 mask, tmp, offset;
|
u32 mask, tmp, offset;
|
||||||
|
void __iomem *addr;
|
||||||
|
|
||||||
offset = where & ~0x3;
|
offset = where & ~0x3;
|
||||||
|
addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
|
||||||
|
|
||||||
if (size == 4) {
|
if (size == 4) {
|
||||||
writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
|
writel(val, addr);
|
||||||
return PCIBIOS_SUCCESSFUL;
|
return PCIBIOS_SUCCESSFUL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -334,13 +345,33 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
|
||||||
* corrupt RW1C bits in adjacent registers. But the hardware
|
* corrupt RW1C bits in adjacent registers. But the hardware
|
||||||
* doesn't support smaller writes.
|
* doesn't support smaller writes.
|
||||||
*/
|
*/
|
||||||
tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask;
|
tmp = readl(addr) & mask;
|
||||||
tmp |= val << ((where & 0x3) * 8);
|
tmp |= val << ((where & 0x3) * 8);
|
||||||
writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
|
writel(tmp, addr);
|
||||||
|
|
||||||
return PCIBIOS_SUCCESSFUL;
|
return PCIBIOS_SUCCESSFUL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void rockchip_pcie_cfg_configuration_accesses(
|
||||||
|
struct rockchip_pcie *rockchip, u32 type)
|
||||||
|
{
|
||||||
|
u32 ob_desc_0;
|
||||||
|
|
||||||
|
/* Configuration Accesses for region 0 */
|
||||||
|
rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
|
||||||
|
|
||||||
|
rockchip_pcie_write(rockchip,
|
||||||
|
(RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
|
||||||
|
PCIE_CORE_OB_REGION_ADDR0);
|
||||||
|
rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
|
||||||
|
PCIE_CORE_OB_REGION_ADDR1);
|
||||||
|
ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
|
||||||
|
ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
|
||||||
|
ob_desc_0 |= (type | (0x1 << 23));
|
||||||
|
rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
|
||||||
|
rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
|
||||||
|
}
|
||||||
|
|
||||||
static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
|
static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
|
||||||
struct pci_bus *bus, u32 devfn,
|
struct pci_bus *bus, u32 devfn,
|
||||||
int where, int size, u32 *val)
|
int where, int size, u32 *val)
|
||||||
|
@@ -355,6 +386,13 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 	}
 
+	if (bus->parent->number == rockchip->root_bus_nr)
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+	else
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE1_CFG);
+
 	if (size == 4) {
 		*val = readl(rockchip->reg_base + busdev);
 	} else if (size == 2) {

@@ -379,6 +417,13 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
 	if (!IS_ALIGNED(busdev, size))
 		return PCIBIOS_BAD_REGISTER_NUMBER;
 
+	if (bus->parent->number == rockchip->root_bus_nr)
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+	else
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE1_CFG);
+
 	if (size == 4)
 		writel(val, rockchip->reg_base + busdev);
 	else if (size == 2)

@@ -664,15 +709,10 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
 	}
 
-	rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
-
-	rockchip_pcie_write(rockchip,
-			    (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
-			    PCIE_CORE_OB_REGION_ADDR0);
-	rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
-			    PCIE_CORE_OB_REGION_ADDR1);
-	rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0);
-	rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
 
 	return 0;
 }

@@ -1156,13 +1196,16 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
 	return 0;
 }
 
-static int rockchip_cfg_atu(struct rockchip_pcie *rockchip)
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
 {
 	struct device *dev = rockchip->dev;
 	int offset;
 	int err;
 	int reg_no;
 
+	rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+
 	for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
 		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
 						AXI_WRAPPER_MEM_WRITE,

@@ -1251,6 +1294,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
 	clk_disable_unprepare(rockchip->aclk_perf_pcie);
 	clk_disable_unprepare(rockchip->aclk_pcie);
 
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+
 	return ret;
 }
 

@@ -1259,24 +1305,54 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
 	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
 	int err;
 
-	clk_prepare_enable(rockchip->clk_pcie_pm);
-	clk_prepare_enable(rockchip->hclk_pcie);
-	clk_prepare_enable(rockchip->aclk_perf_pcie);
-	clk_prepare_enable(rockchip->aclk_pcie);
-
+	if (!IS_ERR(rockchip->vpcie0v9)) {
+		err = regulator_enable(rockchip->vpcie0v9);
+		if (err) {
+			dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+			return err;
+		}
+	}
+
+	err = clk_prepare_enable(rockchip->clk_pcie_pm);
+	if (err)
+		goto err_pcie_pm;
+
+	err = clk_prepare_enable(rockchip->hclk_pcie);
+	if (err)
+		goto err_hclk_pcie;
+
+	err = clk_prepare_enable(rockchip->aclk_perf_pcie);
+	if (err)
+		goto err_aclk_perf_pcie;
+
+	err = clk_prepare_enable(rockchip->aclk_pcie);
+	if (err)
+		goto err_aclk_pcie;
+
 	err = rockchip_pcie_init_port(rockchip);
 	if (err)
-		return err;
+		goto err_pcie_resume;
 
-	err = rockchip_cfg_atu(rockchip);
+	err = rockchip_pcie_cfg_atu(rockchip);
 	if (err)
-		return err;
+		goto err_pcie_resume;
 
 	/* Need this to enter L1 again */
 	rockchip_pcie_update_txcredit_mui(rockchip);
 	rockchip_pcie_enable_interrupts(rockchip);
 
 	return 0;
+
+err_pcie_resume:
+	clk_disable_unprepare(rockchip->aclk_pcie);
+err_aclk_pcie:
+	clk_disable_unprepare(rockchip->aclk_perf_pcie);
+err_aclk_perf_pcie:
+	clk_disable_unprepare(rockchip->hclk_pcie);
+err_hclk_pcie:
+	clk_disable_unprepare(rockchip->clk_pcie_pm);
+err_pcie_pm:
+	return err;
 }
 
 static int rockchip_pcie_probe(struct platform_device *pdev)

@@ -1284,6 +1360,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 	struct rockchip_pcie *rockchip;
 	struct device *dev = &pdev->dev;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	struct resource_entry *win;
 	resource_size_t io_base;
 	struct resource *mem;

@@ -1295,10 +1372,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 	if (!dev->of_node)
 		return -ENODEV;
 
-	rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
-	if (!rockchip)
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+	if (!bridge)
 		return -ENOMEM;
 
+	rockchip = pci_host_bridge_priv(bridge);
+
 	platform_set_drvdata(pdev, rockchip);
 	rockchip->dev = dev;
 

@@ -1385,22 +1464,30 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 		}
 	}
 
-	err = rockchip_cfg_atu(rockchip);
+	err = rockchip_pcie_cfg_atu(rockchip);
 	if (err)
 		goto err_free_res;
 
-	rockchip->msg_region = devm_ioremap(rockchip->dev,
-					    rockchip->msg_bus_addr, SZ_1M);
+	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
 	if (!rockchip->msg_region) {
 		err = -ENOMEM;
 		goto err_free_res;
 	}
 
-	bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
-	if (!bus) {
-		err = -ENOMEM;
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = rockchip;
+	bridge->busnr = 0;
+	bridge->ops = &rockchip_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err < 0)
 		goto err_free_res;
-	}
+
+	bus = bridge->bus;
+
 	rockchip->root_bus = bus;
 
 	pci_bus_size_bridges(bus);

@@ -0,0 +1,141 @@
+#include <linux/pci-ecam.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#define SMP8759_MUX		0x48
+#define SMP8759_TEST_OUT	0x74
+
+struct tango_pcie {
+	void __iomem *base;
+};
+
+static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
+			       int where, int size, u32 *val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
+	int ret;
+
+	/* Reads in configuration space outside devfn 0 return garbage */
+	if (devfn != 0)
+		return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+	/*
+	 * PCI config and MMIO accesses are muxed. Linux doesn't have a
+	 * mutual exclusion mechanism for config vs. MMIO accesses, so
+	 * concurrent accesses may cause corruption.
+	 */
+	writel_relaxed(1, pcie->base + SMP8759_MUX);
+	ret = pci_generic_config_read(bus, devfn, where, size, val);
+	writel_relaxed(0, pcie->base + SMP8759_MUX);
+
+	return ret;
+}
+
+static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
+	int ret;
+
+	writel_relaxed(1, pcie->base + SMP8759_MUX);
+	ret = pci_generic_config_write(bus, devfn, where, size, val);
+	writel_relaxed(0, pcie->base + SMP8759_MUX);
+
+	return ret;
+}
+
+static struct pci_ecam_ops smp8759_ecam_ops = {
+	.bus_shift	= 20,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= smp8759_config_read,
+		.write		= smp8759_config_write,
+	}
+};
+
+static int tango_pcie_link_up(struct tango_pcie *pcie)
+{
+	void __iomem *test_out = pcie->base + SMP8759_TEST_OUT;
+	int i;
+
+	writel_relaxed(16, test_out);
+	for (i = 0; i < 10; ++i) {
+		u32 ltssm_state = readl_relaxed(test_out) >> 8;
+		if ((ltssm_state & 0x1f) == 0xf) /* L0 */
+			return 1;
+		usleep_range(3000, 4000);
+	}
+
+	return 0;
+}
+
+static int tango_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tango_pcie *pcie;
+	struct resource *res;
+	int ret;
+
+	dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
+	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	pcie->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+	platform_set_drvdata(pdev, pcie);
+
+	if (!tango_pcie_link_up(pcie))
+		return -ENODEV;
+
+	return pci_host_common_probe(pdev, &smp8759_ecam_ops);
+}
+
+static const struct of_device_id tango_pcie_ids[] = {
+	{ .compatible = "sigma,smp8759-pcie" },
+	{ },
+};
+
+static struct platform_driver tango_pcie_driver = {
+	.probe	= tango_pcie_probe,
+	.driver	= {
+		.name = KBUILD_MODNAME,
+		.of_match_table = tango_pcie_ids,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(tango_pcie_driver);
+
+/*
+ * The root complex advertises the wrong device class.
+ * Header Type 1 is for PCI-to-PCI bridges.
+ */
+static void tango_fixup_class(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class);
+
+/*
+ * The root complex exposes a "fake" BAR, which is used to filter
+ * bus-to-system accesses. Only accesses within the range defined by this
+ * BAR are forwarded to the host, others are ignored.
+ *
+ * By default, the DMA framework expects an identity mapping, and DRAM0 is
+ * mapped at 0x80000000.
+ */
+static void tango_fixup_bar(struct pci_dev *dev)
+{
+	dev->non_compliant_bars = true;
+	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar);

@@ -172,6 +172,7 @@ struct nwl_pcie {
 	u8 root_busno;
 	struct nwl_msi msi;
 	struct irq_domain *legacy_irq_domain;
+	raw_spinlock_t leg_mask_lock;
 };
 
 static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)

@@ -383,11 +384,52 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
+static void nwl_mask_leg_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct nwl_pcie *pcie;
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << (data->hwirq - 1);
+	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
+	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static void nwl_unmask_leg_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct nwl_pcie *pcie;
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << (data->hwirq - 1);
+	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
+	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static struct irq_chip nwl_leg_irq_chip = {
+	.name = "nwl_pcie:legacy",
+	.irq_enable = nwl_unmask_leg_irq,
+	.irq_disable = nwl_mask_leg_irq,
+	.irq_mask = nwl_mask_leg_irq,
+	.irq_unmask = nwl_unmask_leg_irq,
+};
+
 static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
 			  irq_hw_number_t hwirq)
 {
-	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, domain->host_data);
+	irq_set_status_flags(irq, IRQ_LEVEL);
 
 	return 0;
 }

@@ -526,11 +568,12 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 		return -ENOMEM;
 	}
 
+	raw_spin_lock_init(&pcie->leg_mask_lock);
 	nwl_pcie_init_msi_irq_domain(pcie);
 	return 0;
 }
 
-static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
+static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
 {
 	struct device *dev = pcie->dev;
 	struct platform_device *pdev = to_platform_device(dev);

@@ -791,13 +834,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 	struct nwl_pcie *pcie;
 	struct pci_bus *bus;
 	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
 	int err;
 	resource_size_t iobase = 0;
 	LIST_HEAD(res);
 
-	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-	if (!pcie)
-		return -ENOMEM;
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENODEV;
 
+	pcie = pci_host_bridge_priv(bridge);
+
 	pcie->dev = dev;
 	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;

@@ -830,21 +876,28 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 		goto error;
 	}
 
-	bus = pci_create_root_bus(dev, pcie->root_busno,
-				  &nwl_pcie_ops, pcie, &res);
-	if (!bus) {
-		err = -ENOMEM;
-		goto error;
-	}
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_busno;
+	bridge->ops = &nwl_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		err = nwl_pcie_enable_msi(pcie, bus);
+		err = nwl_pcie_enable_msi(pcie);
 		if (err < 0) {
 			dev_err(dev, "failed to enable MSI support: %d\n", err);
 			goto error;
 		}
 	}
-	pci_scan_child_bus(bus);
+
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err)
+		goto error;
+
+	bus = bridge->bus;
+
 	pci_assign_unassigned_bus_resources(bus);
 	list_for_each_entry(child, &bus->children, node)
 		pcie_bus_configure_settings(child);

@@ -633,6 +633,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct xilinx_pcie_port *port;
 	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
 	int err;
 	resource_size_t iobase = 0;
 	LIST_HEAD(res);

@@ -640,9 +641,11 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	if (!dev->of_node)
 		return -ENODEV;
 
-	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
-	if (!port)
-		return -ENOMEM;
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+	if (!bridge)
+		return -ENODEV;
 
+	port = pci_host_bridge_priv(bridge);
+
 	port->dev = dev;
 

@@ -671,21 +674,26 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	if (err)
 		goto error;
 
-	bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
-	if (!bus) {
-		err = -ENOMEM;
-		goto error;
-	}
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = port;
+	bridge->busnr = 0;
+	bridge->ops = &xilinx_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
 
 #ifdef CONFIG_PCI_MSI
 	xilinx_pcie_msi_chip.dev = dev;
-	bus->msi = &xilinx_pcie_msi_chip;
+	bridge->msi = &xilinx_pcie_msi_chip;
 #endif
-	pci_scan_child_bus(bus);
+
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err < 0)
+		goto error;
+
+	bus = bridge->bus;
+
 	pci_assign_unassigned_bus_resources(bus);
-#ifndef CONFIG_MICROBLAZE
-	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
 	list_for_each_entry(child, &bus->children, node)
 		pcie_bus_configure_settings(child);
 	pci_bus_add_devices(bus);

@@ -696,7 +704,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
 	return err;
 }
 
-static struct of_device_id xilinx_pcie_of_match[] = {
+static const struct of_device_id xilinx_pcie_of_match[] = {
 	{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
 	{}
 };

@@ -539,7 +539,10 @@ static void vmd_detach_resources(struct vmd_dev *vmd)
 }
 
 /*
- * VMD domains start at 0x1000 to not clash with ACPI _SEG domains.
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
+ * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
+ * 16 bits are the PCI Segment Group (domain) number. Other bits are
+ * currently reserved.
  */
 static int vmd_find_free_domain(void)
 {

@@ -710,7 +713,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
 		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
 		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
-				       vmd_irq, 0, "vmd", &vmd->irqs[i]);
+				       vmd_irq, IRQF_NO_THREAD,
+				       "vmd", &vmd->irqs[i]);
 		if (err)
 			return err;
 	}

@@ -739,10 +743,10 @@ static void vmd_remove(struct pci_dev *dev)
 	struct vmd_dev *vmd = pci_get_drvdata(dev);
 
 	vmd_detach_resources(vmd);
-	vmd_cleanup_srcu(vmd);
 	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
+	vmd_cleanup_srcu(vmd);
 	vmd_teardown_dma_ops(vmd);
 	irq_domain_remove(vmd->irq_domain);
 }

@@ -461,8 +461,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
 	else
 		iov->dev = dev;
 
-	mutex_init(&iov->lock);
-
 	dev->sriov = iov;
 	dev->is_physfn = 1;
 	rc = compute_max_vf_buses(dev);

@@ -491,8 +489,6 @@ static void sriov_release(struct pci_dev *dev)
 	if (dev != dev->sriov->dev)
 		pci_dev_put(dev->sriov->dev);
 
-	mutex_destroy(&dev->sriov->lock);
-
 	kfree(dev->sriov);
 	dev->sriov = NULL;
 }

@@ -1058,7 +1058,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 
 	for (;;) {
 		if (affd) {
-			nvec = irq_calc_affinity_vectors(nvec, affd);
+			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}

@@ -1097,7 +1097,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
 
 	for (;;) {
 		if (affd) {
-			nvec = irq_calc_affinity_vectors(nvec, affd);
+			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
 			if (nvec < minvec)
 				return -ENOSPC;
 		}

@@ -1165,16 +1165,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 	if (flags & PCI_IRQ_AFFINITY) {
 		if (!affd)
 			affd = &msi_default_affd;
-
-		if (affd->pre_vectors + affd->post_vectors > min_vecs)
-			return -EINVAL;
-
-		/*
-		 * If there aren't any vectors left after applying the pre/post
-		 * vectors don't bother with assigning affinity.
-		 */
-		if (affd->pre_vectors + affd->post_vectors == min_vecs)
-			affd = NULL;
 	} else {
 		if (WARN_ON(affd))
 			affd = NULL;
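
For illustration, a minimal sketch of the caller side of the pre/post vector
reservations exercised by the hunks above; the driver name, helper and the
min/max vector counts are hypothetical, not part of this series:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical helper: keep vector 0 unmanaged, spread the rest. */
static int mydrv_setup_irqs(struct pci_dev *pdev)
{
	struct irq_affinity affd = {
		.pre_vectors = 1,	/* vector 0 reserved for admin/events */
	};
	int nvecs;

	/* 2 and 32 are made-up min/max values for this sketch. */
	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, 32,
					       PCI_IRQ_MSIX | PCI_IRQ_MSI |
					       PCI_IRQ_AFFINITY, &affd);
	if (nvecs < 0)
		return nvecs;

	/* request_irq(pci_irq_vector(pdev, i), ...) for each vector follows. */
	return nvecs;
}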

@@ -415,6 +415,8 @@ static int pci_device_probe(struct device *dev)
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct pci_driver *drv = to_pci_driver(dev->driver);
 
+	pci_assign_irq(pci_dev);
+
 	error = pcibios_alloc_irq(pci_dev);
 	if (error < 0)
 		return error;

@@ -967,6 +969,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
 		return pci_legacy_resume_early(dev);
 
 	pci_update_current_state(pci_dev, PCI_D0);
+	pci_restore_state(pci_dev);
 
 	if (drv && drv->pm && drv->pm->thaw_noirq)
 		error = drv->pm->thaw_noirq(dev);

@@ -43,9 +43,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
 {
 	const struct dmi_device *dmi;
 	struct dmi_dev_onboard *donboard;
+	int domain_nr;
 	int bus;
 	int devfn;
 
+	domain_nr = pci_domain_nr(pdev->bus);
 	bus = pdev->bus->number;
 	devfn = pdev->devfn;
 

@@ -53,8 +55,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
 	while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
 				      NULL, dmi)) != NULL) {
 		donboard = dmi->device_data;
-		if (donboard && donboard->bus == bus &&
-				donboard->devfn == devfn) {
+		if (donboard && donboard->segment == domain_nr &&
+				donboard->bus == bus &&
+				donboard->devfn == devfn) {
 			if (buf) {
 				if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
 					return scnprintf(buf, PAGE_SIZE,

@@ -154,6 +154,129 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(resource);
 
+static ssize_t max_link_speed_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u32 linkcap;
+	int err;
+	const char *speed;
+
+	err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
+	if (err)
+		return -EINVAL;
+
+	switch (linkcap & PCI_EXP_LNKCAP_SLS) {
+	case PCI_EXP_LNKCAP_SLS_8_0GB:
+		speed = "8 GT/s";
+		break;
+	case PCI_EXP_LNKCAP_SLS_5_0GB:
+		speed = "5 GT/s";
+		break;
+	case PCI_EXP_LNKCAP_SLS_2_5GB:
+		speed = "2.5 GT/s";
+		break;
+	default:
+		speed = "Unknown speed";
+	}
+
+	return sprintf(buf, "%s\n", speed);
+}
+static DEVICE_ATTR_RO(max_link_speed);
+
+static ssize_t max_link_width_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u32 linkcap;
+	int err;
+
+	err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
+	if (err)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4);
+}
+static DEVICE_ATTR_RO(max_link_width);
+
+static ssize_t current_link_speed_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u16 linkstat;
+	int err;
+	const char *speed;
+
+	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+	if (err)
+		return -EINVAL;
+
+	switch (linkstat & PCI_EXP_LNKSTA_CLS) {
+	case PCI_EXP_LNKSTA_CLS_8_0GB:
+		speed = "8 GT/s";
+		break;
+	case PCI_EXP_LNKSTA_CLS_5_0GB:
+		speed = "5 GT/s";
+		break;
+	case PCI_EXP_LNKSTA_CLS_2_5GB:
+		speed = "2.5 GT/s";
+		break;
+	default:
+		speed = "Unknown speed";
+	}
+
+	return sprintf(buf, "%s\n", speed);
+}
+static DEVICE_ATTR_RO(current_link_speed);
+
+static ssize_t current_link_width_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u16 linkstat;
+	int err;
+
+	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+	if (err)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n",
+		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
+}
+static DEVICE_ATTR_RO(current_link_width);
+
+static ssize_t secondary_bus_number_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u8 sec_bus;
+	int err;
+
+	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
+	if (err)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n", sec_bus);
+}
+static DEVICE_ATTR_RO(secondary_bus_number);
+
+static ssize_t subordinate_bus_number_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	u8 sub_bus;
+	int err;
+
+	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
+	if (err)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n", sub_bus);
+}
+static DEVICE_ATTR_RO(subordinate_bus_number);
+
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
 {

@@ -472,7 +595,6 @@ static ssize_t sriov_numvfs_store(struct device *dev,
 				  const char *buf, size_t count)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct pci_sriov *iov = pdev->sriov;
 	int ret;
 	u16 num_vfs;
 

@@ -483,7 +605,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
 	if (num_vfs > pci_sriov_get_totalvfs(pdev))
 		return -ERANGE;
 
-	mutex_lock(&iov->dev->sriov->lock);
+	device_lock(&pdev->dev);
 
 	if (num_vfs == pdev->sriov->num_VFs)
 		goto exit;

@@ -518,7 +640,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
 			num_vfs, ret);
 
 exit:
-	mutex_unlock(&iov->dev->sriov->lock);
+	device_unlock(&pdev->dev);
 
 	if (ret < 0)
 		return ret;
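
For context, a minimal sketch of the PF-driver callback that the
sriov_numvfs_store() path above ends up invoking, now serialized by the
device lock rather than the removed SR-IOV mutex; the driver name is
hypothetical and error handling is reduced to the essentials:

#include <linux/pci.h>

/* Hypothetical PF driver callback reached via sriov_configure(). */
static int mydrv_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (num_vfs == 0) {
		pci_disable_sriov(pdev);
		return 0;
	}

	ret = pci_enable_sriov(pdev, num_vfs);
	return ret ? ret : num_vfs;	/* convention: return the VF count */
}

static struct pci_driver mydrv_pf_driver = {
	.name            = "mydrv_pf",
	.sriov_configure = mydrv_sriov_configure,
	/* .id_table, .probe and .remove omitted in this sketch */
};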

@@ -629,12 +751,17 @@ static struct attribute *pci_dev_attrs[] = {
 	NULL,
 };
 
-static const struct attribute_group pci_dev_group = {
-	.attrs = pci_dev_attrs,
+static struct attribute *pci_bridge_attrs[] = {
+	&dev_attr_subordinate_bus_number.attr,
+	&dev_attr_secondary_bus_number.attr,
+	NULL,
 };
 
-const struct attribute_group *pci_dev_groups[] = {
-	&pci_dev_group,
+static struct attribute *pcie_dev_attrs[] = {
+	&dev_attr_current_link_speed.attr,
+	&dev_attr_current_link_width.attr,
+	&dev_attr_max_link_width.attr,
+	&dev_attr_max_link_speed.attr,
 	NULL,
 };
 

@@ -1557,6 +1684,57 @@ static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
 	return a->mode;
 }
 
+static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
+					    struct attribute *a, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (pci_is_bridge(pdev))
+		return a->mode;
+
+	return 0;
+}
+
+static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
+					  struct attribute *a, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (pci_is_pcie(pdev))
+		return a->mode;
+
+	return 0;
+}
+
+static const struct attribute_group pci_dev_group = {
+	.attrs = pci_dev_attrs,
+};
+
+const struct attribute_group *pci_dev_groups[] = {
+	&pci_dev_group,
+	NULL,
+};
+
+static const struct attribute_group pci_bridge_group = {
+	.attrs = pci_bridge_attrs,
+};
+
+const struct attribute_group *pci_bridge_groups[] = {
+	&pci_bridge_group,
+	NULL,
+};
+
+static const struct attribute_group pcie_dev_group = {
+	.attrs = pcie_dev_attrs,
+};
+
+const struct attribute_group *pcie_dev_groups[] = {
+	&pcie_dev_group,
+	NULL,
+};
+
 static struct attribute_group pci_dev_hp_attr_group = {
 	.attrs = pci_dev_hp_attrs,
 	.is_visible = pci_dev_hp_attrs_are_visible,

@@ -1592,12 +1770,24 @@ static struct attribute_group pci_dev_attr_group = {
 	.is_visible = pci_dev_attrs_are_visible,
 };
 
+static struct attribute_group pci_bridge_attr_group = {
+	.attrs = pci_bridge_attrs,
+	.is_visible = pci_bridge_attrs_are_visible,
+};
+
+static struct attribute_group pcie_dev_attr_group = {
+	.attrs = pcie_dev_attrs,
+	.is_visible = pcie_dev_attrs_are_visible,
+};
+
 static const struct attribute_group *pci_dev_attr_groups[] = {
 	&pci_dev_attr_group,
 	&pci_dev_hp_attr_group,
 #ifdef CONFIG_PCI_IOV
 	&sriov_dev_attr_group,
 #endif
+	&pci_bridge_attr_group,
+	&pcie_dev_attr_group,
 	NULL,
 };
 

@@ -28,6 +28,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/pci_hotplug.h>
 #include <linux/vmalloc.h>
+#include <linux/pci-ats.h>
 #include <asm/setup.h>
 #include <asm/dma.h>
 #include <linux/aer.h>

@@ -455,7 +456,7 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
 	pci_bus_for_each_resource(bus, r, i) {
 		if (!r)
 			continue;
-		if (res->start && resource_contains(r, res)) {
+		if (resource_contains(r, res)) {
 
 			/*
 			 * If the window is prefetchable but the BAR is

@@ -1166,6 +1167,8 @@ void pci_restore_state(struct pci_dev *dev)
 
 	/* PCI Express register must be restored first */
 	pci_restore_pcie_state(dev);
+	pci_restore_pasid_state(dev);
+	pci_restore_pri_state(dev);
 	pci_restore_ats_state(dev);
 	pci_restore_vc_state(dev);
 

@@ -1966,12 +1969,13 @@ EXPORT_SYMBOL(pci_wake_from_d3);
 /**
  * pci_target_state - find an appropriate low power state for a given PCI dev
  * @dev: PCI device
+ * @wakeup: Whether or not wakeup functionality will be enabled for the device.
  *
  * Use underlying platform code to find a supported low power state for @dev.
  * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
  */
-static pci_power_t pci_target_state(struct pci_dev *dev)
+static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
 {
 	pci_power_t target_state = PCI_D3hot;
 

@@ -2008,7 +2012,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
 	if (dev->current_state == PCI_D3cold)
 		target_state = PCI_D3cold;
 
-	if (device_may_wakeup(&dev->dev)) {
+	if (wakeup) {
 		/*
 		 * Find the deepest state from which the device can generate
 		 * wake-up events, make it the target state and enable device

@@ -2034,13 +2038,14 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
  */
 int pci_prepare_to_sleep(struct pci_dev *dev)
 {
-	pci_power_t target_state = pci_target_state(dev);
+	bool wakeup = device_may_wakeup(&dev->dev);
+	pci_power_t target_state = pci_target_state(dev, wakeup);
 	int error;
 
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
-	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
+	pci_enable_wake(dev, target_state, wakeup);
 
 	error = pci_set_power_state(dev, target_state);
 

@@ -2073,9 +2078,10 @@ EXPORT_SYMBOL(pci_back_from_sleep);
  */
 int pci_finish_runtime_suspend(struct pci_dev *dev)
 {
-	pci_power_t target_state = pci_target_state(dev);
+	pci_power_t target_state;
 	int error;
 
+	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 

@@ -2111,8 +2117,8 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 	if (!dev->pme_support)
 		return false;
 
-	/* PME-capable in principle, but not from the intended sleep state */
-	if (!pci_pme_capable(dev, pci_target_state(dev)))
+	/* PME-capable in principle, but not from the target power state */
+	if (!pci_pme_capable(dev, pci_target_state(dev, false)))
 		return false;
 
 	while (bus->parent) {

@@ -2147,9 +2153,10 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake);
 bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
 {
 	struct device *dev = &pci_dev->dev;
+	bool wakeup = device_may_wakeup(dev);
 
 	if (!pm_runtime_suspended(dev)
-	    || pci_target_state(pci_dev) != pci_dev->current_state
+	    || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
 	    || platform_pci_need_resume(pci_dev)
 	    || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
 		return false;

@@ -2167,7 +2174,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
 	spin_lock_irq(&dev->power.lock);
 
 	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
-	    !device_may_wakeup(dev))
+	    !wakeup)
 		__pci_pme_active(pci_dev, false);
 
 	spin_unlock_irq(&dev->power.lock);

@@ -3715,46 +3722,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
 }
 EXPORT_SYMBOL_GPL(pci_intx);
 
-/**
- * pci_intx_mask_supported - probe for INTx masking support
- * @dev: the PCI device to operate on
- *
- * Check if the device dev support INTx masking via the config space
- * command word.
- */
-bool pci_intx_mask_supported(struct pci_dev *dev)
-{
-	bool mask_supported = false;
-	u16 orig, new;
-
-	if (dev->broken_intx_masking)
-		return false;
-
-	pci_cfg_access_lock(dev);
-
-	pci_read_config_word(dev, PCI_COMMAND, &orig);
-	pci_write_config_word(dev, PCI_COMMAND,
-			      orig ^ PCI_COMMAND_INTX_DISABLE);
-	pci_read_config_word(dev, PCI_COMMAND, &new);
-
-	/*
-	 * There's no way to protect against hardware bugs or detect them
-	 * reliably, but as long as we know what the value should be, let's
-	 * go ahead and check it.
-	 */
-	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
-		dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
-			orig, new);
-	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
-		mask_supported = true;
-		pci_write_config_word(dev, PCI_COMMAND, orig);
-	}
-
-	pci_cfg_access_unlock(dev);
-	return mask_supported;
-}
-EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
-
 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
 {
 	struct pci_bus *bus = dev->bus;

@@ -3805,7 +3772,7 @@ static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
 * @dev: the PCI device to operate on
  *
  * Check if the device dev has its INTx line asserted, mask it and
- * return true in that case. False is returned if not interrupt was
+ * return true in that case. False is returned if no interrupt was
  * pending.
  */
 bool pci_check_and_mask_intx(struct pci_dev *dev)
|
@ -4075,40 +4042,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
|
||||||
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
|
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __pci_dev_reset(struct pci_dev *dev, int probe)
|
|
||||||
{
|
|
||||||
int rc;
|
|
||||||
|
|
||||||
might_sleep();
|
|
||||||
|
|
||||||
rc = pci_dev_specific_reset(dev, probe);
|
|
||||||
if (rc != -ENOTTY)
|
|
||||||
goto done;
|
|
||||||
|
|
||||||
if (pcie_has_flr(dev)) {
|
|
||||||
if (!probe)
|
|
||||||
pcie_flr(dev);
|
|
||||||
rc = 0;
|
|
||||||
goto done;
|
|
||||||
}
|
|
||||||
|
|
||||||
rc = pci_af_flr(dev, probe);
|
|
||||||
if (rc != -ENOTTY)
|
|
||||||
goto done;
|
|
||||||
|
|
||||||
rc = pci_pm_reset(dev, probe);
|
|
||||||
if (rc != -ENOTTY)
|
|
||||||
goto done;
|
|
||||||
|
|
||||||
rc = pci_dev_reset_slot_function(dev, probe);
|
|
||||||
if (rc != -ENOTTY)
|
|
||||||
goto done;
|
|
||||||
|
|
||||||
rc = pci_parent_bus_reset(dev, probe);
|
|
||||||
done:
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void pci_dev_lock(struct pci_dev *dev)
|
static void pci_dev_lock(struct pci_dev *dev)
|
||||||
{
|
{
|
||||||
pci_cfg_access_lock(dev);
|
pci_cfg_access_lock(dev);
|
||||||
|
@ -4134,26 +4067,18 @@ static void pci_dev_unlock(struct pci_dev *dev)
|
||||||
pci_cfg_access_unlock(dev);
|
pci_cfg_access_unlock(dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
static void pci_dev_save_and_disable(struct pci_dev *dev)
|
||||||
* pci_reset_notify - notify device driver of reset
|
|
||||||
* @dev: device to be notified of reset
|
|
||||||
* @prepare: 'true' if device is about to be reset; 'false' if reset attempt
|
|
||||||
* completed
|
|
||||||
*
|
|
||||||
* Must be called prior to device access being disabled and after device
|
|
||||||
* access is restored.
|
|
||||||
*/
|
|
||||||
static void pci_reset_notify(struct pci_dev *dev, bool prepare)
|
|
||||||
{
|
{
|
||||||
const struct pci_error_handlers *err_handler =
|
const struct pci_error_handlers *err_handler =
|
||||||
dev->driver ? dev->driver->err_handler : NULL;
|
dev->driver ? dev->driver->err_handler : NULL;
|
||||||
if (err_handler && err_handler->reset_notify)
|
|
||||||
err_handler->reset_notify(dev, prepare);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void pci_dev_save_and_disable(struct pci_dev *dev)
|
/*
|
||||||
{
|
* dev->driver->err_handler->reset_prepare() is protected against
|
||||||
pci_reset_notify(dev, true);
|
* races with ->remove() by the device lock, which must be held by
|
||||||
|
* the caller.
|
||||||
|
*/
|
||||||
|
if (err_handler && err_handler->reset_prepare)
|
||||||
|
err_handler->reset_prepare(dev);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Wake-up device prior to save. PM registers default to D0 after
|
* Wake-up device prior to save. PM registers default to D0 after
|
||||||
|
@ -4175,23 +4100,18 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
|
||||||
|
|
||||||
static void pci_dev_restore(struct pci_dev *dev)
|
static void pci_dev_restore(struct pci_dev *dev)
|
||||||
{
|
{
|
||||||
|
const struct pci_error_handlers *err_handler =
|
||||||
|
dev->driver ? dev->driver->err_handler : NULL;
|
||||||
|
|
||||||
pci_restore_state(dev);
|
pci_restore_state(dev);
|
||||||
pci_reset_notify(dev, false);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int pci_dev_reset(struct pci_dev *dev, int probe)
|
/*
|
||||||
{
|
* dev->driver->err_handler->reset_done() is protected against
|
||||||
int rc;
|
* races with ->remove() by the device lock, which must be held by
|
||||||
|
* the caller.
|
||||||
if (!probe)
|
*/
|
||||||
pci_dev_lock(dev);
|
if (err_handler && err_handler->reset_done)
|
||||||
|
err_handler->reset_done(dev);
|
||||||
rc = __pci_dev_reset(dev, probe);
|
|
||||||
|
|
||||||
if (!probe)
|
|
||||||
pci_dev_unlock(dev);
|
|
||||||
|
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
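
For context, a minimal sketch of what the new per-driver callbacks used above
look like on the driver side; the driver name and the quiescing steps are
placeholders, not part of this series:

#include <linux/pci.h>

/* Hypothetical driver: quiesce before a reset, re-initialize afterwards. */
static void mydrv_reset_prepare(struct pci_dev *pdev)
{
	/* stop DMA, disable interrupts, save any volatile device state */
}

static void mydrv_reset_done(struct pci_dev *pdev)
{
	/* reprogram the device and resume normal operation */
}

static const struct pci_error_handlers mydrv_err_handler = {
	.reset_prepare	= mydrv_reset_prepare,
	.reset_done	= mydrv_reset_done,
};

static struct pci_driver mydrv_driver = {
	.name		= "mydrv",
	.err_handler	= &mydrv_err_handler,
	/* .id_table, .probe and .remove omitted in this sketch */
};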

@@ -4213,7 +4133,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
  */
 int __pci_reset_function(struct pci_dev *dev)
 {
-	return pci_dev_reset(dev, 0);
+	int ret;
+
+	pci_dev_lock(dev);
+	ret = __pci_reset_function_locked(dev);
+	pci_dev_unlock(dev);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__pci_reset_function);

@@ -4238,7 +4164,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function);
  */
 int __pci_reset_function_locked(struct pci_dev *dev)
 {
-	return __pci_dev_reset(dev, 0);
+	int rc;
+
+	might_sleep();
+
+	rc = pci_dev_specific_reset(dev, 0);
+	if (rc != -ENOTTY)
+		return rc;
+	if (pcie_has_flr(dev)) {
+		pcie_flr(dev);
+		return 0;
+	}
+	rc = pci_af_flr(dev, 0);
+	if (rc != -ENOTTY)
+		return rc;
+	rc = pci_pm_reset(dev, 0);
+	if (rc != -ENOTTY)
+		return rc;
+	rc = pci_dev_reset_slot_function(dev, 0);
+	if (rc != -ENOTTY)
+		return rc;
+	return pci_parent_bus_reset(dev, 0);
 }
 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);

@@ -4255,7 +4201,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
  */
 int pci_probe_reset_function(struct pci_dev *dev)
 {
-	return pci_dev_reset(dev, 1);
+	int rc;
+
+	might_sleep();
+
+	rc = pci_dev_specific_reset(dev, 1);
+	if (rc != -ENOTTY)
+		return rc;
+	if (pcie_has_flr(dev))
+		return 0;
+	rc = pci_af_flr(dev, 1);
+	if (rc != -ENOTTY)
+		return rc;
+	rc = pci_pm_reset(dev, 1);
+	if (rc != -ENOTTY)
+		return rc;
+	rc = pci_dev_reset_slot_function(dev, 1);
+	if (rc != -ENOTTY)
+		return rc;
+
+	return pci_parent_bus_reset(dev, 1);
 }
 
 /**

@@ -4278,15 +4243,17 @@ int pci_reset_function(struct pci_dev *dev)
 {
 	int rc;
 
-	rc = pci_dev_reset(dev, 1);
+	rc = pci_probe_reset_function(dev);
 	if (rc)
 		return rc;
 
+	pci_dev_lock(dev);
 	pci_dev_save_and_disable(dev);
 
-	rc = pci_dev_reset(dev, 0);
+	rc = __pci_reset_function_locked(dev);
 
 	pci_dev_restore(dev);
+	pci_dev_unlock(dev);
 
 	return rc;
 }

@@ -4302,20 +4269,18 @@ int pci_try_reset_function(struct pci_dev *dev)
 {
 	int rc;
 
-	rc = pci_dev_reset(dev, 1);
+	rc = pci_probe_reset_function(dev);
 	if (rc)
 		return rc;
 
-	pci_dev_save_and_disable(dev);
+	if (!pci_dev_trylock(dev))
+		return -EAGAIN;
 
-	if (pci_dev_trylock(dev)) {
-		rc = __pci_dev_reset(dev, 0);
-		pci_dev_unlock(dev);
-	} else
-		rc = -EAGAIN;
+	pci_dev_save_and_disable(dev);
+	rc = __pci_reset_function_locked(dev);
+	pci_dev_unlock(dev);
 
 	pci_dev_restore(dev);
-
 	return rc;
 }
 EXPORT_SYMBOL_GPL(pci_try_reset_function);

@@ -4465,7 +4430,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus)
 	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pci_dev_lock(dev);
 		pci_dev_save_and_disable(dev);
+		pci_dev_unlock(dev);
 		if (dev->subordinate)
 			pci_bus_save_and_disable(dev->subordinate);
 	}

@@ -4480,7 +4447,9 @@ static void pci_bus_restore(struct pci_bus *bus)
 	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pci_dev_lock(dev);
 		pci_dev_restore(dev);
+		pci_dev_unlock(dev);
 		if (dev->subordinate)
 			pci_bus_restore(dev->subordinate);
 	}
@ -267,7 +267,6 @@ struct pci_sriov {
|
||||||
u16 driver_max_VFs; /* max num VFs driver supports */
|
u16 driver_max_VFs; /* max num VFs driver supports */
|
||||||
struct pci_dev *dev; /* lowest numbered PF */
|
struct pci_dev *dev; /* lowest numbered PF */
|
||||||
struct pci_dev *self; /* this PF */
|
struct pci_dev *self; /* this PF */
|
||||||
struct mutex lock; /* lock for setting sriov_numvfs in sysfs */
|
|
||||||
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
|
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
|
||||||
bool drivers_autoprobe; /* auto probing of VFs by driver */
|
bool drivers_autoprobe; /* auto probing of VFs by driver */
|
||||||
};
|
};
|
||||||
|
|
|
@ -92,7 +92,7 @@ static irqreturn_t dpc_irq(int irq, void *context)
|
||||||
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
|
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
|
||||||
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID,
|
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID,
|
||||||
&source);
|
&source);
|
||||||
if (!status)
|
if (!status || status == (u16)(~0))
|
||||||
return IRQ_NONE;
|
return IRQ_NONE;
|
||||||
|
|
||||||
dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n",
|
dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n",
|
||||||
|
@ -144,7 +144,7 @@ static int dpc_probe(struct pcie_device *dev)
|
||||||
|
|
||||||
dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT);
|
dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT);
|
||||||
|
|
||||||
ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
|
ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
|
||||||
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
|
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
|
||||||
|
|
||||||
dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
|
dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
|
||||||
|
|
|
@ -13,10 +13,11 @@
|
||||||
|
|
||||||
#define PCIE_PORT_DEVICE_MAXSERVICES 5
|
#define PCIE_PORT_DEVICE_MAXSERVICES 5
|
||||||
/*
|
/*
|
||||||
* According to the PCI Express Base Specification 2.0, the indices of
|
* The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must
|
||||||
* the MSI-X table entries used by port services must not exceed 31
|
* be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI
|
||||||
|
* supports a maximum of 32 vectors per function.
|
||||||
*/
|
*/
|
||||||
#define PCIE_PORT_MAX_MSIX_ENTRIES 32
|
#define PCIE_PORT_MAX_MSI_ENTRIES 32
|
||||||
|
|
||||||
#define get_descriptor_id(type, service) (((type - 4) << 8) | service)
|
#define get_descriptor_id(type, service) (((type - 4) << 8) | service)
|
||||||
|
|
||||||
|
|
|
@@ -44,14 +44,15 @@ static void release_pcie_device(struct device *dev)
 }
 
 /**
-* pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
+* pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
+* for given port
 * @dev: PCI Express port to handle
 * @irqs: Array of interrupt vectors to populate
 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
 *
 * Return value: 0 on success, error code on failure
 */
-static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
+static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
 {
 int nr_entries, entry, nvec = 0;
 
@@ -61,8 +62,8 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 * equal to the number of entries this port actually uses, we'll happily
 * go through without any tricks.
 */
-nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
-PCI_IRQ_MSIX);
+nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
+PCI_IRQ_MSIX | PCI_IRQ_MSI);
 if (nr_entries < 0)
 return nr_entries;
 
@@ -70,14 +71,19 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 u16 reg16;
 
 /*
-* The code below follows the PCI Express Base Specification 2.0
-* stating in Section 6.1.6 that "PME and Hot-Plug Event
-* interrupts (when both are implemented) always share the same
-* MSI or MSI-X vector, as indicated by the Interrupt Message
-* Number field in the PCI Express Capabilities register", where
-* according to Section 7.8.2 of the specification "For MSI-X,
-* the value in this field indicates which MSI-X Table entry is
-* used to generate the interrupt message."
+* Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event
+* interrupts (when both are implemented) always share the
+* same MSI or MSI-X vector, as indicated by the Interrupt
+* Message Number field in the PCI Express Capabilities
+* register".
+*
+* Per sec 7.8.2, "For MSI, the [Interrupt Message Number]
+* indicates the offset between the base Message Data and
+* the interrupt message that is generated."
+*
+* "For MSI-X, the [Interrupt Message Number] indicates
+* which MSI-X Table entry is used to generate the
+* interrupt message."
 */
 pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
 entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
@@ -94,13 +100,17 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 u32 reg32, pos;
 
 /*
-* The code below follows Section 7.10.10 of the PCI Express
-* Base Specification 2.0 stating that bits 31-27 of the Root
-* Error Status Register contain a value indicating which of the
-* MSI/MSI-X vectors assigned to the port is going to be used
-* for AER, where "For MSI-X, the value in this register
-* indicates which MSI-X Table entry is used to generate the
-* interrupt message."
+* Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt
+* Message Number in the Root Error Status register
+* indicates which MSI/MSI-X vector is used for AER.
+*
+* "For MSI, the [Advanced Error Interrupt Message Number]
+* indicates the offset between the base Message Data and
+* the interrupt message that is generated."
+*
+* "For MSI-X, the [Advanced Error Interrupt Message
+* Number] indicates which MSI-X Table entry is used to
+* generate the interrupt message."
 */
 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
@@ -113,6 +123,33 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 nvec = max(nvec, entry + 1);
 }
 
+if (mask & PCIE_PORT_SERVICE_DPC) {
+u16 reg16, pos;
+
+/*
+* Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt
+* Message Number in the DPC Capability register indicates
+* which MSI/MSI-X vector is used for DPC.
+*
+* "For MSI, the [DPC Interrupt Message Number] indicates
+* the offset between the base Message Data and the
+* interrupt message that is generated."
+*
+* "For MSI-X, the [DPC Interrupt Message Number] indicates
+* which MSI-X Table entry is used to generate the
+* interrupt message."
+*/
+pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
+pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, &reg16);
+entry = reg16 & 0x1f;
+if (entry >= nr_entries)
+goto out_free_irqs;
+
+irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry);
+
+nvec = max(nvec, entry + 1);
+}
+
 /*
 * If nvec is equal to the allocated number of entries, we can just use
 * what we have. Otherwise, the port has some extra entries not for the
@@ -124,7 +161,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 
 /* Now allocate the MSI-X vectors for real */
 nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
-PCI_IRQ_MSIX);
+PCI_IRQ_MSIX | PCI_IRQ_MSI);
 if (nr_entries < 0)
 return nr_entries;
 }
@@ -146,26 +183,29 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 */
 static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
 {
-unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
 int ret, i;
 
 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
 irqs[i] = -1;
 
 /*
-* If MSI cannot be used for PCIe PME or hotplug, we have to use
-* INTx or other interrupts, e.g. system shared interrupt.
+* If we support PME or hotplug, but we can't use MSI/MSI-X for
+* them, we have to fall back to INTx or other interrupts, e.g., a
+* system shared interrupt.
 */
-if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
-((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
-flags &= ~PCI_IRQ_MSI;
-} else {
-/* Try to use MSI-X if supported */
-if (!pcie_port_enable_msix(dev, irqs, mask))
-return 0;
-}
+if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
+goto legacy_irq;
 
-ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
+if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())
+goto legacy_irq;
+
+/* Try to use MSI-X or MSI if supported */
+if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
+return 0;
+
+legacy_irq:
+/* fall back to legacy IRQ */
+ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
 if (ret < 0)
 return -ENODEV;
 
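For readers following the portdrv change above, a minimal illustrative sketch (not part of this diff; the helper name is hypothetical) of how the PCIe Capability Interrupt Message Number is extracted, mirroring the pattern used by pcie_port_enable_irq_vec():

#include <linux/pci.h>
#include <uapi/linux/pci_regs.h>

/* Return which of the first 32 MSI/MSI-X vectors PME/hot-plug will use. */
static int port_pme_hp_vector(struct pci_dev *dev)
{
	u16 reg16;

	pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
	return (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;   /* Interrupt Message Number */
}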
@@ -510,16 +510,18 @@ static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
 return b;
 }
 
-static void pci_release_host_bridge_dev(struct device *dev)
+static void devm_pci_release_host_bridge_dev(struct device *dev)
 {
 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
 
 if (bridge->release_fn)
 bridge->release_fn(bridge);
+}
 
-pci_free_resource_list(&bridge->windows);
-
-kfree(bridge);
+static void pci_release_host_bridge_dev(struct device *dev)
+{
+devm_pci_release_host_bridge_dev(dev);
+pci_free_host_bridge(to_pci_host_bridge(dev));
 }
 
 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -531,11 +533,36 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
 return NULL;
 
 INIT_LIST_HEAD(&bridge->windows);
+bridge->dev.release = pci_release_host_bridge_dev;
 
 return bridge;
 }
 EXPORT_SYMBOL(pci_alloc_host_bridge);
+
+struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
+size_t priv)
+{
+struct pci_host_bridge *bridge;
+
+bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
+if (!bridge)
+return NULL;
+
+INIT_LIST_HEAD(&bridge->windows);
+bridge->dev.release = devm_pci_release_host_bridge_dev;
+
+return bridge;
+}
+EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
+
+void pci_free_host_bridge(struct pci_host_bridge *bridge)
+{
+pci_free_resource_list(&bridge->windows);
+
+kfree(bridge);
+}
+EXPORT_SYMBOL(pci_free_host_bridge);
 
 static const unsigned char pcix_bus_speed[] = {
 PCI_SPEED_UNKNOWN, /* 0 */
 PCI_SPEED_66MHz_PCIX, /* 1 */
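As a usage illustration (a sketch, not from this diff; the probe function and private struct are hypothetical), a host controller driver would typically pair devm_pci_alloc_host_bridge() with pci_host_bridge_priv() so the bridge and its private data share one devres-managed allocation:

#include <linux/pci.h>
#include <linux/platform_device.h>

struct my_pcie { void __iomem *base; };          /* hypothetical private state */

static int my_pcie_probe(struct platform_device *pdev)   /* hypothetical driver */
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct my_pcie *pcie;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);     /* private area follows the bridge */
	/* ... fill pcie, bridge->windows, bridge->ops, bridge->sysdata here ... */
	return 0;
}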
@@ -719,7 +746,7 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
 dev_set_msi_domain(&bus->dev, d);
 }
 
-int pci_register_host_bridge(struct pci_host_bridge *bridge)
+static int pci_register_host_bridge(struct pci_host_bridge *bridge)
 {
 struct device *parent = bridge->dev.parent;
 struct resource_entry *window, *n;
@@ -834,7 +861,6 @@ int pci_register_host_bridge(struct pci_host_bridge *bridge)
 kfree(bus);
 return err;
 }
-EXPORT_SYMBOL(pci_register_host_bridge);
 
 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 struct pci_dev *bridge, int busnr)
@@ -1329,6 +1355,34 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 }
 
+/**
+* pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
+* @dev: PCI device
+*
+* Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
+* at enumeration-time to avoid modifying PCI_COMMAND at run-time.
+*/
+static int pci_intx_mask_broken(struct pci_dev *dev)
+{
+u16 orig, toggle, new;
+
+pci_read_config_word(dev, PCI_COMMAND, &orig);
+toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
+pci_write_config_word(dev, PCI_COMMAND, toggle);
+pci_read_config_word(dev, PCI_COMMAND, &new);
+
+pci_write_config_word(dev, PCI_COMMAND, orig);
+
+/*
+* PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
+* r2.3, so strictly speaking, a device is not *broken* if it's not
+* writable. But we'll live with the misnomer for now.
+*/
+if (new != toggle)
+return 1;
+return 0;
+}
+
 /**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
@@ -1399,6 +1453,8 @@ int pci_setup_device(struct pci_dev *dev)
 }
 }
 
+dev->broken_intx_masking = pci_intx_mask_broken(dev);
+
 switch (dev->hdr_type) { /* header type */
 case PCI_HEADER_TYPE_NORMAL: /* standard header */
 if (class == PCI_CLASS_BRIDGE_PCI)
@@ -1674,6 +1730,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
 /* Initialize Advanced Error Capabilities and Control Register */
 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
+/* Don't enable ECRC generation or checking if unsupported */
+if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
+reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
+if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
+reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
 
 /*
@@ -2298,9 +2359,8 @@ void __weak pcibios_remove_bus(struct pci_bus *bus)
 {
 }
 
-static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
-int bus, struct pci_ops *ops, void *sysdata,
-struct list_head *resources, struct msi_controller *msi)
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+struct pci_ops *ops, void *sysdata, struct list_head *resources)
 {
 int error;
 struct pci_host_bridge *bridge;
@@ -2310,13 +2370,11 @@ static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
 return NULL;
 
 bridge->dev.parent = parent;
-bridge->dev.release = pci_release_host_bridge_dev;
 
 list_splice_init(resources, &bridge->windows);
 bridge->sysdata = sysdata;
 bridge->busnr = bus;
 bridge->ops = ops;
-bridge->msi = msi;
 
 error = pci_register_host_bridge(bridge);
 if (error < 0)
@@ -2328,13 +2386,6 @@ static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
 kfree(bridge);
 return NULL;
 }
-
-struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
-struct pci_ops *ops, void *sysdata, struct list_head *resources)
-{
-return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
-NULL);
-}
 EXPORT_SYMBOL_GPL(pci_create_root_bus);
 
 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
@@ -2400,9 +2451,47 @@ void pci_bus_release_busn_res(struct pci_bus *b)
 res, ret ? "can not be" : "is");
 }
 
-struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
-struct pci_ops *ops, void *sysdata,
-struct list_head *resources, struct msi_controller *msi)
+int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
+{
+struct resource_entry *window;
+bool found = false;
+struct pci_bus *b;
+int max, bus, ret;
+
+if (!bridge)
+return -EINVAL;
+
+resource_list_for_each_entry(window, &bridge->windows)
+if (window->res->flags & IORESOURCE_BUS) {
+found = true;
+break;
+}
+
+ret = pci_register_host_bridge(bridge);
+if (ret < 0)
+return ret;
+
+b = bridge->bus;
+bus = bridge->busnr;
+
+if (!found) {
+dev_info(&b->dev,
+"No busn resource found for root bus, will use [bus %02x-ff]\n",
+bus);
+pci_bus_insert_busn_res(b, bus, 255);
+}
+
+max = pci_scan_child_bus(b);
+
+if (!found)
+pci_bus_update_busn_res_end(b, max);
+
+return 0;
+}
+EXPORT_SYMBOL(pci_scan_root_bus_bridge);
+
+struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
+struct pci_ops *ops, void *sysdata, struct list_head *resources)
 {
 struct resource_entry *window;
 bool found = false;
@@ -2415,7 +2504,7 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
 break;
 }
 
-b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
+b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
 if (!b)
 return NULL;
 
@@ -2433,13 +2522,6 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
 
 return b;
 }
-
-struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
-struct pci_ops *ops, void *sysdata, struct list_head *resources)
-{
-return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
-NULL);
-}
 EXPORT_SYMBOL(pci_scan_root_bus);
 
 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
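For illustration, a sketch (not part of this diff; the function name and the resource/IRQ details are hypothetical and platform-dependent) of how a host controller driver uses the new pci_scan_root_bus_bridge() interface after filling in the bridge fields itself:

static int my_pcie_register(struct device *dev, struct list_head *res,
			    struct pci_ops *ops, void *sysdata)
{
	struct pci_host_bridge *bridge;
	int ret;

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	list_splice_init(res, &bridge->windows);  /* host bridge apertures */
	bridge->dev.parent = dev;
	bridge->sysdata = sysdata;
	bridge->busnr = 0;
	bridge->ops = ops;

	ret = pci_scan_root_bus_bridge(bridge);   /* registers bridge + scans bus */
	if (ret < 0)
		return ret;

	pci_bus_size_bridges(bridge->bus);
	pci_bus_assign_resources(bridge->bus);
	pci_bus_add_devices(bridge->bus);
	return 0;
}

The resource sizing/assignment tail varies per platform; the point of the new API is that the driver, not the core, owns the pci_host_bridge and its lifetime.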
@@ -304,7 +304,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
 {
 int i;
 
-for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
 struct resource *r = &dev->resource[i];
 
 if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
@@ -1684,6 +1684,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
 
+static void quirk_radeon_pm(struct pci_dev *dev)
+{
+if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+dev->subsystem_device == 0x00e2) {
+if (dev->d3_delay < 20) {
+dev->d3_delay = 20;
+dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n",
+dev->d3_delay);
+}
+}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
+
 #ifdef CONFIG_X86_IO_APIC
 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
 {
@@ -3236,6 +3249,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
 quirk_broken_intx_masking);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a,
+quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b,
+quirk_broken_intx_masking);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
 quirk_broken_intx_masking);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/cache.h>
+#include "pci.h"
 
 void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
 {
@@ -22,12 +23,17 @@ void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
 }
 
-static void pdev_fixup_irq(struct pci_dev *dev,
-u8 (*swizzle)(struct pci_dev *, u8 *),
-int (*map_irq)(const struct pci_dev *, u8, u8))
+void pci_assign_irq(struct pci_dev *dev)
 {
-u8 pin, slot;
+u8 pin;
+u8 slot = -1;
 int irq = 0;
+struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
+
+if (!(hbrg->map_irq)) {
+dev_dbg(&dev->dev, "runtime IRQ mapping not provided by arch\n");
+return;
+}
 
 /* If this device is not on the primary bus, we need to figure out
 which interrupt pin it will come in on. We know which slot it
@@ -40,17 +46,22 @@ static void pdev_fixup_irq(struct pci_dev *dev,
 if (pin > 4)
 pin = 1;
 
-if (pin != 0) {
+if (pin) {
 /* Follow the chain of bridges, swizzling as we go. */
-slot = (*swizzle)(dev, &pin);
+if (hbrg->swizzle_irq)
+slot = (*(hbrg->swizzle_irq))(dev, &pin);
 
-irq = (*map_irq)(dev, slot, pin);
+/*
+* If a swizzling function is not used map_irq must
+* ignore slot
+*/
+irq = (*(hbrg->map_irq))(dev, slot, pin);
 if (irq == -1)
 irq = 0;
 }
 dev->irq = irq;
 
-dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq);
+dev_dbg(&dev->dev, "assign IRQ: got %d\n", dev->irq);
 
 /* Always tell the device, so the driver knows what is
 the real IRQ to use; the device does not use it. */
 
@@ -60,9 +71,23 @@ static void pdev_fixup_irq(struct pci_dev *dev,
 void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
 int (*map_irq)(const struct pci_dev *, u8, u8))
 {
+/*
+* Implement pci_fixup_irqs() through pci_assign_irq().
+* This code should be remove eventually, it is a wrapper
+* around pci_assign_irq() interface to keep current
+* pci_fixup_irqs() behaviour unchanged on architecture
+* code still relying on its interface.
+*/
 struct pci_dev *dev = NULL;
+struct pci_host_bridge *hbrg = NULL;
 
-for_each_pci_dev(dev)
-pdev_fixup_irq(dev, swizzle, map_irq);
+for_each_pci_dev(dev) {
+hbrg = pci_find_host_bridge(dev->bus);
+hbrg->swizzle_irq = swizzle;
+hbrg->map_irq = map_irq;
+pci_assign_irq(dev);
+hbrg->swizzle_irq = NULL;
+hbrg->map_irq = NULL;
+}
 }
 EXPORT_SYMBOL_GPL(pci_fixup_irqs);
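With the generic host bridge IRQ mapping above, a host controller driver supplies map_irq/swizzle_irq on the bridge before registering it and the core calls pci_assign_irq() at enumeration time. A minimal sketch (not from this diff; the wrapper function is hypothetical, the two helpers are the usual choices for DT-based bridges):

#include <linux/pci.h>
#include <linux/of_pci.h>

/* Wire up generic IRQ mapping on a freshly allocated host bridge. */
static void my_pcie_setup_irq_mapping(struct pci_host_bridge *bridge)
{
	bridge->map_irq = of_irq_parse_and_map_pci;   /* DT "interrupt-map" lookup */
	bridge->swizzle_irq = pci_common_swizzle;     /* standard INTx swizzling */
}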
@@ -120,6 +120,13 @@ struct sw_event_regs {
 u32 reserved16[4];
 } __packed;
 
+enum {
+SWITCHTEC_CFG0_RUNNING = 0x04,
+SWITCHTEC_CFG1_RUNNING = 0x05,
+SWITCHTEC_IMG0_RUNNING = 0x03,
+SWITCHTEC_IMG1_RUNNING = 0x07,
+};
+
 struct sys_info_regs {
 u32 device_id;
 u32 device_version;
@@ -129,7 +136,9 @@ struct sys_info_regs {
 u32 table_format_version;
 u32 partition_id;
 u32 cfg_file_fmt_version;
-u32 reserved2[58];
+u16 cfg_running;
+u16 img_running;
+u32 reserved2[57];
 char vendor_id[8];
 char product_id[16];
 char product_revision[4];
@@ -807,6 +816,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
 {
 struct switchtec_ioctl_flash_part_info info = {0};
 struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
+struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
 u32 active_addr = -1;
 
 if (copy_from_user(&info, uinfo, sizeof(info)))
@@ -816,18 +826,26 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
 case SWITCHTEC_IOCTL_PART_CFG0:
 active_addr = ioread32(&fi->active_cfg);
 set_fw_info_part(&info, &fi->cfg0);
+if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
+info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
 break;
 case SWITCHTEC_IOCTL_PART_CFG1:
 active_addr = ioread32(&fi->active_cfg);
 set_fw_info_part(&info, &fi->cfg1);
+if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
+info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
 break;
 case SWITCHTEC_IOCTL_PART_IMG0:
 active_addr = ioread32(&fi->active_img);
 set_fw_info_part(&info, &fi->img0);
+if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
+info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
 break;
 case SWITCHTEC_IOCTL_PART_IMG1:
 active_addr = ioread32(&fi->active_img);
 set_fw_info_part(&info, &fi->img1);
+if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
+info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
 break;
 case SWITCHTEC_IOCTL_PART_NVLOG:
 set_fw_info_part(&info, &fi->nvlog);
@@ -861,7 +879,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
 }
 
 if (info.address == active_addr)
-info.active = 1;
+info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;
 
 if (copy_to_user(uinfo, &info, sizeof(info)))
 return -EFAULT;
@@ -1540,6 +1558,24 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
 SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
 SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
+SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3
+SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3
+SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3
+SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3
+SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3
+SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3
+SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3
+SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3
+SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3
+SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3
+SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3
+SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3
+SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3
+SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3
+SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3
+SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3
+SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3
+SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3
 {0}
 };
 MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
@@ -408,7 +408,7 @@ static void efifb_fixup_resources(struct pci_dev *dev)
 if (!base)
 return;
 
-for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
 struct resource *res = &dev->resource[i];
 
 if (!(res->flags & IORESOURCE_MEM))
@@ -291,7 +291,7 @@ extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
 struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
+int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
 
 #else /* CONFIG_SMP */
 
@@ -331,7 +331,7 @@ irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
 }
 
 static inline int
-irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
+irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
 {
 return maxvec;
 }
@@ -7,6 +7,7 @@
 
 int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
 void pci_disable_pri(struct pci_dev *pdev);
+void pci_restore_pri_state(struct pci_dev *pdev);
 int pci_reset_pri(struct pci_dev *pdev);
 
 #else /* CONFIG_PCI_PRI */
@@ -20,6 +21,10 @@ static inline void pci_disable_pri(struct pci_dev *pdev)
 {
 }
 
+static inline void pci_restore_pri_state(struct pci_dev *pdev)
+{
+}
+
 static inline int pci_reset_pri(struct pci_dev *pdev)
 {
 return -ENODEV;
@@ -31,6 +36,7 @@ static inline int pci_reset_pri(struct pci_dev *pdev)
 
 int pci_enable_pasid(struct pci_dev *pdev, int features);
 void pci_disable_pasid(struct pci_dev *pdev);
+void pci_restore_pasid_state(struct pci_dev *pdev);
 int pci_pasid_features(struct pci_dev *pdev);
 int pci_max_pasids(struct pci_dev *pdev);
 
@@ -45,6 +51,10 @@ static inline void pci_disable_pasid(struct pci_dev *pdev)
 {
 }
 
+static inline void pci_restore_pasid_state(struct pci_dev *pdev)
+{
+}
+
 static inline int pci_pasid_features(struct pci_dev *pdev)
 {
 return -EINVAL;
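The new pci_restore_pri_state()/pci_restore_pasid_state() hooks let the core re-enable PRI and PASID after a Function-Level Reset. For context, a sketch of how a caller (typically an IOMMU driver) enables these features in the first place; the function name and the PRI request count below are assumptions, and error paths are trimmed:

#include <linux/pci.h>
#include <linux/pci-ats.h>

static int my_enable_ats_features(struct pci_dev *pdev)   /* hypothetical caller */
{
	int ret;

	ret = pci_enable_pri(pdev, 32);     /* 32 outstanding page requests: assumed value */
	if (ret)
		return ret;

	ret = pci_enable_pasid(pdev, 0);    /* no optional PASID features requested */
	if (ret) {
		pci_disable_pri(pdev);
		return ret;
	}
	return 0;
}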
@@ -360,6 +360,8 @@ struct pci_dev {
 unsigned int msix_enabled:1;
 unsigned int ari_enabled:1; /* ARI forwarding */
 unsigned int ats_enabled:1; /* Address Translation Service */
+unsigned int pasid_enabled:1; /* Process Address Space ID */
+unsigned int pri_enabled:1; /* Page Request Interface */
 unsigned int is_managed:1;
 unsigned int needs_freset:1; /* Dev requires fundamental reset */
 unsigned int state_saved:1;
@@ -370,7 +372,7 @@
 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
 unsigned int __aer_firmware_first_valid:1;
 unsigned int __aer_firmware_first:1;
-unsigned int broken_intx_masking:1;
+unsigned int broken_intx_masking:1; /* INTx masking can't be used */
 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
 unsigned int irq_managed:1;
 unsigned int has_secondary_link:1;
@@ -403,6 +405,12 @@
 u16 ats_cap; /* ATS Capability offset */
 u8 ats_stu; /* ATS Smallest Translation Unit */
 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
+#endif
+#ifdef CONFIG_PCI_PRI
+u32 pri_reqs_alloc; /* Number of PRI requests allocated */
+#endif
+#ifdef CONFIG_PCI_PASID
+u16 pasid_features;
 #endif
 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
 size_t romlen; /* Length of ROM if it's not from the BAR */
@@ -437,6 +445,8 @@ struct pci_host_bridge {
 void *sysdata;
 int busnr;
 struct list_head windows; /* resource_entry */
+u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */
+int (*map_irq)(const struct pci_dev *, u8, u8);
 void (*release_fn)(struct pci_host_bridge *);
 void *release_data;
 struct msi_controller *msi;
@@ -463,7 +473,9 @@ static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
 }
 
 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
-int pci_register_host_bridge(struct pci_host_bridge *bridge);
+struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
+size_t priv);
+void pci_free_host_bridge(struct pci_host_bridge *bridge);
 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
 
 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
@@ -695,7 +707,8 @@ struct pci_error_handlers {
 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
 
 /* PCI function reset prepare or completed */
-void (*reset_notify)(struct pci_dev *dev, bool prepare);
+void (*reset_prepare)(struct pci_dev *dev);
+void (*reset_done)(struct pci_dev *dev);
 
 /* Device driver may resume normal operations */
 void (*resume)(struct pci_dev *dev);
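A sketch of a driver adopting the split callbacks (driver names are hypothetical; the bodies only indicate the intent of each hook):

static void mydrv_reset_prepare(struct pci_dev *pdev)
{
	/* quiesce the device: stop queues, disable interrupts, pause DMA */
}

static void mydrv_reset_done(struct pci_dev *pdev)
{
	/* re-program the device and resume I/O now that the reset finished */
}

static const struct pci_error_handlers mydrv_err_handler = {
	.reset_prepare = mydrv_reset_prepare,
	.reset_done    = mydrv_reset_done,
};

Compared with the old single reset_notify(dev, prepare) call, the two hooks make the before/after halves explicit instead of multiplexing them on a bool.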
@@ -852,13 +865,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
 void pci_bus_release_busn_res(struct pci_bus *b);
-struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
-struct pci_ops *ops, void *sysdata,
-struct list_head *resources,
-struct msi_controller *msi);
 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
 struct pci_ops *ops, void *sysdata,
 struct list_head *resources);
+int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 int busnr);
 void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
@@ -1008,6 +1018,15 @@ int __must_check pci_reenable_device(struct pci_dev *);
 int __must_check pcim_enable_device(struct pci_dev *pdev);
 void pcim_pin_device(struct pci_dev *pdev);
 
+static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
+{
+/*
+* INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
+* writable and no quirk has marked the feature broken.
+*/
+return !pdev->broken_intx_masking;
+}
+
 static inline int pci_is_enabled(struct pci_dev *pdev)
 {
 return (atomic_read(&pdev->enable_cnt) > 0);
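Usage sketch (not from this diff; the handler and device specifics are hypothetical): a driver that relies on INTx masking checks pci_intx_mask_supported() once at probe time, and its shared interrupt handler then looks roughly like this:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t mydrv_irq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* Mask INTx and report whether this device actually asserted it. */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	/* ... handle the event, then re-enable INTx delivery ... */
	pci_intx(pdev, 1);
	return IRQ_HANDLED;
}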
@@ -1031,7 +1050,6 @@ int __must_check pci_set_mwi(struct pci_dev *dev);
 int pci_try_set_mwi(struct pci_dev *dev);
 void pci_clear_mwi(struct pci_dev *dev);
 void pci_intx(struct pci_dev *dev, int enable);
-bool pci_intx_mask_supported(struct pci_dev *dev);
 bool pci_check_and_mask_intx(struct pci_dev *dev);
 bool pci_check_and_unmask_intx(struct pci_dev *dev);
 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
@@ -1144,6 +1162,7 @@ void pdev_enable_device(struct pci_dev *);
 int pci_enable_resources(struct pci_dev *, int mask);
 void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
 int (*)(const struct pci_dev *, u8, u8));
+void pci_assign_irq(struct pci_dev *dev);
 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
 #define HAVE_PCI_REQ_REGIONS 2
 int __must_check pci_request_regions(struct pci_dev *, const char *);
@@ -1373,6 +1373,8 @@
 #define PCI_DEVICE_ID_TTI_HPT374 0x0008
 #define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */
 
+#define PCI_VENDOR_ID_SIGMA 0x1105
+
 #define PCI_VENDOR_ID_VIA 0x1106
 #define PCI_DEVICE_ID_VIA_8763_0 0x0198
 #define PCI_DEVICE_ID_VIA_8380_0 0x0204
@@ -517,6 +517,7 @@
 #define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
 #define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
 #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
 #define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
 #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
 #define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
@@ -39,6 +39,9 @@ struct switchtec_ioctl_flash_info {
 __u32 padding;
 };
 
+#define SWITCHTEC_IOCTL_PART_ACTIVE 1
+#define SWITCHTEC_IOCTL_PART_RUNNING 2
+
 struct switchtec_ioctl_flash_part_info {
 __u32 flash_partition;
 __u32 address;
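Userspace sketch (not part of this diff): querying a partition and testing the new ACTIVE/RUNNING flags, assuming the driver's SWITCHTEC_IOCTL_FLASH_PART_INFO ioctl and a /dev/switchtec0 node (both are assumptions here):

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/switchtec_ioctl.h>

int main(void)
{
	struct switchtec_ioctl_flash_part_info info = {
		.flash_partition = SWITCHTEC_IOCTL_PART_IMG0,
	};
	int fd = open("/dev/switchtec0", O_RDWR);   /* device node name is an assumption */

	if (fd < 0 || ioctl(fd, SWITCHTEC_IOCTL_FLASH_PART_INFO, &info))
		return 1;

	printf("IMG0 active=%d running=%d\n",
	       !!(info.active & SWITCHTEC_IOCTL_PART_ACTIVE),
	       !!(info.active & SWITCHTEC_IOCTL_PART_RUNNING));
	return 0;
}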
@@ -110,6 +110,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 struct cpumask *masks;
 cpumask_var_t nmsk, *node_to_present_cpumask;
 
+/*
+* If there aren't any vectors left after applying the pre/post
+* vectors don't bother with assigning affinity.
+*/
+if (!affv)
+return NULL;
+
 if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 return NULL;
 
@@ -192,15 +199,19 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
 /**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
+* @minvec: The minimum number of vectors available
 * @maxvec: The maximum number of vectors available
 * @affd: Description of the affinity requirements
 */
-int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
+int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
 {
 int resv = affd->pre_vectors + affd->post_vectors;
 int vecs = maxvec - resv;
 int ret;
 
+if (resv > minvec)
+return 0;
+
 get_online_cpus();
 ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
 put_online_cpus();
 
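The min_vecs-aware irq_calc_affinity_vectors() above fixes the pre/post/min_vecs corner case hit through the PCI MSI path. A sketch of the calling pattern it serves (driver function name and vector counts are hypothetical):

#include <linux/pci.h>
#include <linux/interrupt.h>

static int mydrv_setup_irqs(struct pci_dev *pdev)
{
	struct irq_affinity affd = {
		.pre_vectors = 1,   /* e.g. one non-queue admin/config vector */
	};
	int nvec;

	/* Ask for 2..16 vectors; affinity spreading skips the reserved one. */
	nvec = pci_alloc_irq_vectors_affinity(pdev, 2, 16,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_AFFINITY, &affd);
	return nvec < 0 ? nvec : 0;
}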