author	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-08 19:03:25 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-08 19:03:25 -0700
commit	857f8640147c9fb43f20e43cbca6452710e1ca5d (patch)
tree	76a92068d703b8001ca790ffa096d435fa24ae81
parent	8f3207c7eab9d885cc64c778416537034a7d9c5b (diff)
parent	3146c8f4de9b0858794a902f273aec13f168596e (diff)
Merge tag 'pci-v4.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:

 - add framework for supporting PCIe devices in Endpoint mode (Kishon Vijay Abraham I)
 - use non-postable PCI config space mappings when possible (Lorenzo Pieralisi)
 - clean up and unify mmap of PCI BARs (David Woodhouse)
 - export and unify Function Level Reset support (Christoph Hellwig)
 - avoid FLR for Intel 82579 NICs (Sasha Neftin)
 - add pci_request_irq() and pci_free_irq() helpers (Christoph Hellwig)
 - short-circuit config access failures for disconnected devices (Keith Busch)
 - remove D3 sleep delay when possible (Adrian Hunter)
 - freeze PME scan before suspending devices (Lukas Wunner)
 - stop disabling MSI/MSI-X in pci_device_shutdown() (Prarit Bhargava)
 - disable boot interrupt quirk for ASUS M2N-LR (Stefan Assmann)
 - add arch-specific alignment control to improve device passthrough by avoiding multiple BARs in a page (Yongji Xie)
 - add sysfs sriov_drivers_autoprobe to control VF driver binding (Bodong Wang)
 - allow slots below PCI-to-PCIe "reverse bridges" (Bjorn Helgaas)
 - fix crashes when unbinding host controllers that don't support removal (Brian Norris)
 - add driver for MicroSemi Switchtec management interface (Logan Gunthorpe)
 - add driver for Faraday Technology FTPCI100 host bridge (Linus Walleij)
 - add i.MX7D support (Andrey Smirnov)
 - use generic MSI support for Aardvark (Thomas Petazzoni)
 - make Rockchip driver modular (Brian Norris)
 - advertise 128-byte Read Completion Boundary support for Rockchip (Shawn Lin)
 - advertise PCI_EXP_LNKSTA_SLC for Rockchip root port (Shawn Lin)
 - convert atomic_t to refcount_t in HV driver (Elena Reshetova)
 - add CPU IRQ affinity in HV driver (K. Y. Srinivasan)
 - fix PCI bus removal in HV driver (Long Li)
 - add support for ThunderX2 DMA alias topology (Jayachandran C)
 - add ThunderX pass2.x 2nd node MCFG quirk (Tomasz Nowicki)
 - add ITE 8893 bridge DMA alias quirk (Jarod Wilson)
 - restrict Cavium ACS quirk only to CN81xx/CN83xx/CN88xx devices (Manish Jaggi)

* tag 'pci-v4.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (146 commits)
  PCI: Don't allow unbinding host controllers that aren't prepared
  ARM: DRA7: clockdomain: Change the CLKTRCTRL of CM_PCIE_CLKSTCTRL to SW_WKUP
  MAINTAINERS: Add PCI Endpoint maintainer
  Documentation: PCI: Add userguide for PCI endpoint test function
  tools: PCI: Add sample test script to invoke pcitest
  tools: PCI: Add a userspace tool to test PCI endpoint
  Documentation: misc-devices: Add Documentation for pci-endpoint-test driver
  misc: Add host side PCI driver for PCI test function device
  PCI: Add device IDs for DRA74x and DRA72x
  dt-bindings: PCI: dra7xx: Add DT bindings to enable unaligned access
  PCI: dwc: dra7xx: Workaround for errata id i870
  dt-bindings: PCI: dra7xx: Add DT bindings for PCI dra7xx EP mode
  PCI: dwc: dra7xx: Add EP mode support
  PCI: dwc: dra7xx: Facilitate wrapper and MSI interrupts to be enabled independently
  dt-bindings: PCI: Add DT bindings for PCI designware EP mode
  PCI: dwc: designware: Add EP mode support
  Documentation: PCI: Add binding documentation for pci-test endpoint function
  ixgbe: Use pcie_flr() instead of duplicating it
  IB/hfi1: Use pcie_flr() instead of duplicating it
  PCI: imx6: Fix spelling mistake: "contol" -> "control"
  ...
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci22
-rw-r--r--Documentation/ABI/testing/sysfs-class-switchtec96
-rw-r--r--Documentation/PCI/00-INDEX10
-rw-r--r--Documentation/PCI/endpoint/function/binding/pci-test.txt17
-rw-r--r--Documentation/PCI/endpoint/pci-endpoint-cfs.txt105
-rw-r--r--Documentation/PCI/endpoint/pci-endpoint.txt215
-rw-r--r--Documentation/PCI/endpoint/pci-test-function.txt66
-rw-r--r--Documentation/PCI/endpoint/pci-test-howto.txt179
-rw-r--r--Documentation/PCI/pci-iov-howto.txt12
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt26
-rw-r--r--Documentation/devicetree/bindings/pci/faraday,ftpci100.txt129
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt14
-rw-r--r--Documentation/devicetree/bindings/pci/ti-pci.txt42
-rw-r--r--Documentation/driver-model/devres.txt6
-rw-r--r--Documentation/filesystems/sysfs-pci.txt15
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/misc-devices/pci-endpoint-test.txt35
-rw-r--r--Documentation/switchtec.txt80
-rw-r--r--MAINTAINERS20
-rw-r--r--arch/arm/include/asm/io.h10
-rw-r--r--arch/arm/include/asm/pci.h3
-rw-r--r--arch/arm/kernel/bios32.c19
-rw-r--r--arch/arm/mach-omap2/clockdomains7xx_data.c2
-rw-r--r--arch/arm/mm/ioremap.c7
-rw-r--r--arch/arm/mm/nommu.c12
-rw-r--r--arch/arm64/include/asm/io.h10
-rw-r--r--arch/arm64/include/asm/pci.h2
-rw-r--r--arch/cris/arch-v32/drivers/pci/bios.c22
-rw-r--r--arch/cris/include/asm/pci.h4
-rw-r--r--arch/ia64/include/asm/pci.h5
-rw-r--r--arch/ia64/pci/pci.c46
-rw-r--r--arch/microblaze/include/asm/pci.h6
-rw-r--r--arch/microblaze/pci/pci-common.c2
-rw-r--r--arch/mips/include/asm/pci.h5
-rw-r--r--arch/mips/pci/pci.c24
-rw-r--r--arch/mn10300/include/asm/pci.h4
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.c23
-rw-r--r--arch/parisc/include/asm/pci.h4
-rw-r--r--arch/parisc/kernel/pci.c28
-rw-r--r--arch/powerpc/include/asm/machdep.h2
-rw-r--r--arch/powerpc/include/asm/pci.h9
-rw-r--r--arch/powerpc/kernel/pci-common.c11
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c7
-rw-r--r--arch/sh/drivers/pci/pci.c21
-rw-r--r--arch/sh/include/asm/pci.h4
-rw-r--r--arch/sparc/include/asm/pci_64.h5
-rw-r--r--arch/sparc/kernel/pci.c6
-rw-r--r--arch/unicore32/include/asm/pci.h3
-rw-r--r--arch/unicore32/kernel/pci.c23
-rw-r--r--arch/x86/include/asm/pci.h7
-rw-r--r--arch/x86/pci/i386.c47
-rw-r--r--arch/xtensa/include/asm/pci.h7
-rw-r--r--arch/xtensa/kernel/pci.c24
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/pci_mcfg.c14
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h1
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c30
-rw-r--r--drivers/misc/Kconfig7
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/pci_endpoint_test.c534
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c16
-rw-r--r--drivers/nvme/host/pci.c30
-rw-r--r--drivers/of/of_pci.c45
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c61
-rw-r--r--drivers/pci/dwc/Kconfig36
-rw-r--r--drivers/pci/dwc/Makefile5
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c293
-rw-r--r--drivers/pci/dwc/pci-exynos.c14
-rw-r--r--drivers/pci/dwc/pci-imx6.c199
-rw-r--r--drivers/pci/dwc/pci-keystone-dw.c2
-rw-r--r--drivers/pci/dwc/pci-layerscape.c3
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c3
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c12
-rw-r--r--drivers/pci/dwc/pcie-designware-ep.c342
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c39
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c1
-rw-r--r--drivers/pci/dwc/pcie-designware.c258
-rw-r--r--drivers/pci/dwc/pcie-designware.h135
-rw-r--r--drivers/pci/dwc/pcie-hisi.c9
-rw-r--r--drivers/pci/dwc/pcie-qcom.c2
-rw-r--r--drivers/pci/dwc/pcie-spear13xx.c3
-rw-r--r--drivers/pci/ecam.c6
-rw-r--r--drivers/pci/endpoint/Kconfig31
-rw-r--r--drivers/pci/endpoint/Makefile7
-rw-r--r--drivers/pci/endpoint/functions/Kconfig12
-rw-r--r--drivers/pci/endpoint/functions/Makefile5
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c510
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c509
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c580
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c143
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c359
-rw-r--r--drivers/pci/host/Kconfig10
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-aardvark.c173
-rw-r--r--drivers/pci/host/pci-ftpci100.c563
-rw-r--r--drivers/pci/host/pci-host-generic.c1
-rw-r--r--drivers/pci/host/pci-hyperv.c46
-rw-r--r--drivers/pci/host/pci-mvebu.c22
-rw-r--r--drivers/pci/host/pci-tegra.c4
-rw-r--r--drivers/pci/host/pci-thunder-ecam.c1
-rw-r--r--drivers/pci/host/pci-thunder-pem.c1
-rw-r--r--drivers/pci/host/pci-versatile.c4
-rw-r--r--drivers/pci/host/pci-xgene.c5
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c3
-rw-r--r--drivers/pci/host/pcie-rockchip.c91
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c2
-rw-r--r--drivers/pci/host/pcie-xilinx.c2
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c6
-rw-r--r--drivers/pci/iov.c1
-rw-r--r--drivers/pci/irq.c61
-rw-r--r--drivers/pci/mmap.c99
-rw-r--r--drivers/pci/msi.c17
-rw-r--r--drivers/pci/pci-driver.c24
-rw-r--r--drivers/pci/pci-sysfs.c104
-rw-r--r--drivers/pci/pci.c252
-rw-r--r--drivers/pci/pci.h19
-rw-r--r--drivers/pci/pcie/pcie-dpc.c5
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/proc.c41
-rw-r--r--drivers/pci/quirks.c82
-rw-r--r--drivers/pci/search.c4
-rw-r--r--drivers/pci/setup-bus.c4
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/pci/switch/Kconfig13
-rw-r--r--drivers/pci/switch/Makefile1
-rw-r--r--drivers/pci/switch/switchtec.c1600
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/io.h21
-rw-r--r--include/linux/mfd/syscon/imx7-iomuxc-gpr.h4
-rw-r--r--include/linux/mod_devicetable.h10
-rw-r--r--include/linux/of_pci.h11
-rw-r--r--include/linux/pci-ecam.h3
-rw-r--r--include/linux/pci-ep-cfs.h41
-rw-r--r--include/linux/pci-epc.h144
-rw-r--r--include/linux/pci-epf.h162
-rw-r--r--include/linux/pci.h84
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/pci_regs.h2
-rw-r--r--include/uapi/linux/pcitest.h19
-rw-r--r--include/uapi/linux/switchtec_ioctl.h132
-rw-r--r--kernel/irq/manage.c15
-rw-r--r--lib/devres.c6
-rw-r--r--tools/pci/pcitest.c186
-rw-r--r--tools/pci/pcitest.sh56
148 files changed, 8937 insertions, 1004 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index e4e90104d7c3..44d4b2be92fd 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -301,3 +301,25 @@ Contact: Emil Velikov <emil.l.velikov@gmail.com>
Description:
This file contains the revision field of the PCI device.
The value comes from device config space. The file is read only.
+
+What: /sys/bus/pci/devices/.../sriov_drivers_autoprobe
+Date: April 2017
+Contact:	Bodong Wang <bodong@mellanox.com>
+Description:
+ This file is associated with the PF of a device that
+ supports SR-IOV. It determines whether newly-enabled VFs
+ are immediately bound to a driver. It initially contains
+ 1, which means the kernel automatically binds VFs to a
+ compatible driver immediately after they are enabled. If
+ an application writes 0 to the file before enabling VFs,
+ the kernel will not bind VFs to a driver.
+
+ A typical use case is to write 0 to this file, then enable
+ VFs, then assign the newly-created VFs to virtual machines.
+ Note that changing this file does not affect already-
+ enabled VFs. In this scenario, the user must first disable
+ the VFs, write 0 to sriov_drivers_autoprobe, then re-enable
+ the VFs.
+
+ This is similar to /sys/bus/pci/drivers_autoprobe, but
+ affects only the VFs associated with a specific PF.
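
As an editorial illustration of the sequence described above (not part of the patch), the sketch below disables VF autoprobe for a PF and then enables its VFs from userspace. The sysfs file names come from this documentation; the PF address 0000:03:00.0 and the VF count of 4 are made-up example values.

/*
 * Illustrative sketch: write 0 to sriov_drivers_autoprobe before enabling
 * VFs, so the newly-created VFs are left unbound (e.g. for VM assignment).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_sysfs(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	const char *pf = "/sys/bus/pci/devices/0000:03:00.0"; /* example PF */
	char path[256];

	/* Must be done before the VFs are enabled. */
	snprintf(path, sizeof(path), "%s/sriov_drivers_autoprobe", pf);
	if (write_sysfs(path, "0"))
		return 1;

	/* Enable the VFs; the kernel will not bind them to a driver. */
	snprintf(path, sizeof(path), "%s/sriov_numvfs", pf);
	return write_sysfs(path, "4") ? 1 : 0;
}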
diff --git a/Documentation/ABI/testing/sysfs-class-switchtec b/Documentation/ABI/testing/sysfs-class-switchtec
new file mode 100644
index 000000000000..48cb4c15e430
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-switchtec
@@ -0,0 +1,96 @@
+switchtec - Microsemi Switchtec PCI Switch Management Endpoint
+
+For details on this subsystem look at Documentation/switchtec.txt.
+
+What: /sys/class/switchtec
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: The switchtec class subsystem folder.
+ Each registered switchtec driver is represented by a switchtecX
+ subfolder (X being an integer >= 0).
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/component_id
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Component identifier as stored in the hardware (eg. PM8543)
+ (read only)
+Values: arbitrary string.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/component_revision
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Component revision stored in the hardware (read only)
+Values: integer.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/component_vendor
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Component vendor as stored in the hardware (eg. MICROSEM)
+ (read only)
+Values: arbitrary string.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/device_version
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Device version as stored in the hardware (read only)
+Values: integer.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/fw_version
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Currently running firmware version (read only)
+Values: integer (in hexadecimal).
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/partition
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Partition number for this device in the switch (read only)
+Values: integer.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/partition_count
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Total number of partitions in the switch (read only)
+Values: integer.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/product_id
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Product identifier as stored in the hardware (eg. PSX 48XG3)
+ (read only)
+Values: arbitrary string.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/product_revision
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Product revision stored in the hardware (eg. RevB)
+ (read only)
+Values: arbitrary string.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/product_vendor
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Product vendor as stored in the hardware (eg. MICROSEM)
+ (read only)
+Values: arbitrary string.
diff --git a/Documentation/PCI/00-INDEX b/Documentation/PCI/00-INDEX
index 147231f1613e..00c9a90b6f38 100644
--- a/Documentation/PCI/00-INDEX
+++ b/Documentation/PCI/00-INDEX
@@ -12,3 +12,13 @@ pci.txt
- info on the PCI subsystem for device driver authors
pcieaer-howto.txt
- the PCI Express Advanced Error Reporting Driver Guide HOWTO
+endpoint/pci-endpoint.txt
+ - guide to add endpoint controller driver and endpoint function driver.
+endpoint/pci-endpoint-cfs.txt
+ - guide to use configfs to configure the PCI endpoint function.
+endpoint/pci-test-function.txt
+ - specification of *PCI test* function device.
+endpoint/pci-test-howto.txt
+ - userguide for PCI endpoint test function.
+endpoint/function/binding/
+ - binding documentation for PCI endpoint function
diff --git a/Documentation/PCI/endpoint/function/binding/pci-test.txt b/Documentation/PCI/endpoint/function/binding/pci-test.txt
new file mode 100644
index 000000000000..3b68b955fb50
--- /dev/null
+++ b/Documentation/PCI/endpoint/function/binding/pci-test.txt
@@ -0,0 +1,17 @@
+PCI TEST ENDPOINT FUNCTION
+
+name: Should be "pci_epf_test" to bind to the pci_epf_test driver.
+
+Configurable Fields:
+vendorid : should be 0x104c
+deviceid : should be 0xb500 for DRA74x and 0xb501 for DRA72x
+revid : don't care
+progif_code : don't care
+subclass_code : don't care
+baseclass_code : should be 0xff
+cache_line_size : don't care
+subsys_vendor_id : don't care
+subsys_id : don't care
+interrupt_pin : Should be 1 - INTA, 2 - INTB, 3 - INTC, 4 -INTD
+msi_interrupts : Should be 1 to 32 depending on the number of MSI interrupts
+ to test
diff --git a/Documentation/PCI/endpoint/pci-endpoint-cfs.txt b/Documentation/PCI/endpoint/pci-endpoint-cfs.txt
new file mode 100644
index 000000000000..d740f29960a4
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-endpoint-cfs.txt
@@ -0,0 +1,105 @@
+ CONFIGURING PCI ENDPOINT USING CONFIGFS
+ Kishon Vijay Abraham I <kishon@ti.com>
+
+The PCI Endpoint Core exposes a configfs entry (pci_ep) to configure the
+PCI endpoint function and to bind the endpoint function
+with the endpoint controller. (For other mechanisms to configure the
+PCI Endpoint Function, refer to [1].)
+
+*) Mounting configfs
+
+The PCI Endpoint Core layer creates a pci_ep directory in the mounted configfs
+directory. configfs can be mounted using the following command.
+
+ mount -t configfs none /sys/kernel/config
+
+*) Directory Structure
+
+The pci_ep configfs has two directories at its root: controllers and
+functions. Every EPC device present in the system will have an entry in
+the *controllers* directory and every EPF driver present in the system
+will have an entry in the *functions* directory.
+
+/sys/kernel/config/pci_ep/
+ .. controllers/
+ .. functions/
+
+*) Creating EPF Device
+
+Every registered EPF driver will be listed in the functions directory. The
+entries corresponding to an EPF driver will be created by the EPF core.
+
+/sys/kernel/config/pci_ep/functions/
+ .. <EPF Driver1>/
+ ... <EPF Device 11>/
+ ... <EPF Device 21>/
+ .. <EPF Driver2>/
+ ... <EPF Device 12>/
+ ... <EPF Device 22>/
+
+In order to create a <EPF device> of the type probed by <EPF Driver>, the
+user has to create a directory inside <EPF DriverN>.
+
+Every <EPF device> directory consists of the following entries that can be
+used to configure the standard configuration header of the endpoint function.
+(These entries are created by the framework when any new <EPF Device> is
+created)
+
+ .. <EPF Driver1>/
+ ... <EPF Device 11>/
+ ... vendorid
+ ... deviceid
+ ... revid
+ ... progif_code
+ ... subclass_code
+ ... baseclass_code
+ ... cache_line_size
+ ... subsys_vendor_id
+ ... subsys_id
+ ... interrupt_pin
+
+*) EPC Device
+
+Every registered EPC device will be listed in the controllers directory. The
+entries corresponding to an EPC device will be created by the EPC core.
+
+/sys/kernel/config/pci_ep/controllers/
+ .. <EPC Device1>/
+ ... <Symlink EPF Device11>/
+ ... <Symlink EPF Device12>/
+ ... start
+ .. <EPC Device2>/
+ ... <Symlink EPF Device21>/
+ ... <Symlink EPF Device22>/
+ ... start
+
+The <EPC Device> directory will have a list of symbolic links to
+<EPF Device>. These symbolic links should be created by the user to
+represent the functions present in the endpoint device.
+
+The <EPC Device> directory will also have a *start* field. Once
+"1" is written to this field, the endpoint device will be ready to
+establish the link with the host. This is usually done after
+all the EPF devices are created and linked with the EPC device.
+
+
+ | controllers/
+ | <Directory: EPC name>/
+ | <Symbolic Link: Function>
+ | start
+ | functions/
+ | <Directory: EPF driver>/
+ | <Directory: EPF device>/
+ | vendorid
+ | deviceid
+ | revid
+ | progif_code
+ | subclass_code
+ | baseclass_code
+ | cache_line_size
+ | subsys_vendor_id
+ | subsys_id
+ | interrupt_pin
+ | function
+
+[1] -> Documentation/PCI/endpoint/pci-endpoint.txt
diff --git a/Documentation/PCI/endpoint/pci-endpoint.txt b/Documentation/PCI/endpoint/pci-endpoint.txt
new file mode 100644
index 000000000000..9b1d66829290
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-endpoint.txt
@@ -0,0 +1,215 @@
+ PCI ENDPOINT FRAMEWORK
+ Kishon Vijay Abraham I <kishon@ti.com>
+
+This document is a guide to using the PCI Endpoint Framework to create an
+endpoint controller driver and an endpoint function driver, and to using the
+configfs interface to bind the function driver to the controller driver.
+
+1. Introduction
+
+Linux has a comprehensive PCI subsystem to support PCI controllers that
+operate in Root Complex mode. The subsystem can scan the PCI bus,
+assign memory and IRQ resources, load PCI drivers (based on
+vendor ID and device ID), and support other services like hot-plug,
+power management, advanced error reporting and virtual channels.
+
+However, the PCI controller IP integrated in some SoCs can operate
+either in Root Complex mode or in Endpoint mode. The PCI Endpoint Framework
+adds endpoint mode support to Linux. This makes it possible to run Linux in an
+EP system, which has a wide variety of use cases, from testing and
+validation to co-processor acceleration.
+
+2. PCI Endpoint Core
+
+The PCI Endpoint Core layer comprises 3 components: the Endpoint Controller
+library, the Endpoint Function library, and the configfs layer to bind the
+endpoint function with the endpoint controller.
+
+2.1 PCI Endpoint Controller(EPC) Library
+
+The EPC library provides APIs to be used by the controller that can operate
+in endpoint mode. It also provides APIs to be used by function driver/library
+in order to implement a particular endpoint function.
+
+2.1.1 APIs for the PCI controller Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI controller driver.
+
+*) devm_pci_epc_create()/pci_epc_create()
+
+ The PCI controller driver should implement the following ops:
+ * write_header: ops to populate configuration space header
+ * set_bar: ops to configure the BAR
+ * clear_bar: ops to reset the BAR
+ * alloc_addr_space: ops to allocate in PCI controller address space
+ * free_addr_space: ops to free the allocated address space
+ * raise_irq: ops to raise a legacy or MSI interrupt
+ * start: ops to start the PCI link
+ * stop: ops to stop the PCI link
+
+ The PCI controller driver can then create a new EPC device by invoking
+ devm_pci_epc_create()/pci_epc_create().
+
+*) devm_pci_epc_destroy()/pci_epc_destroy()
+
+ The PCI controller driver can destroy the EPC device created by either
+ devm_pci_epc_create() or pci_epc_create() using devm_pci_epc_destroy() or
+ pci_epc_destroy().
+
+*) pci_epc_linkup()
+
+ In order to notify all the function devices that the EPC device to which
+ they are linked has established a link with the host, the PCI controller
+ driver should invoke pci_epc_linkup().
+
+*) pci_epc_mem_init()
+
+ Initialize the pci_epc_mem structure used for allocating EPC addr space.
+
+*) pci_epc_mem_exit()
+
+ Cleanup the pci_epc_mem structure allocated during pci_epc_mem_init().
+
+2.1.2 APIs for the PCI Endpoint Function Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint function driver.
+
+*) pci_epc_write_header()
+
+ The PCI endpoint function driver should use pci_epc_write_header() to
+ write the standard configuration header to the endpoint controller.
+
+*) pci_epc_set_bar()
+
+ The PCI endpoint function driver should use pci_epc_set_bar() to configure
+ the Base Address Register in order for the host to assign PCI addr space.
+ Register space of the function driver is usually configured
+ using this API.
+
+*) pci_epc_clear_bar()
+
+ The PCI endpoint function driver should use pci_epc_clear_bar() to reset
+ the BAR.
+
+*) pci_epc_raise_irq()
+
+ The PCI endpoint function driver should use pci_epc_raise_irq() to raise
+ Legacy Interrupt or MSI Interrupt.
+
+*) pci_epc_mem_alloc_addr()
+
+  The PCI endpoint function driver should use pci_epc_mem_alloc_addr() to
+  allocate a memory address from the EPC address space, which is required
+  to access the RC's buffer.
+
+*) pci_epc_mem_free_addr()
+
+ The PCI endpoint function driver should use pci_epc_mem_free_addr() to
+ free the memory space allocated using pci_epc_mem_alloc_addr().
+
+2.1.3 Other APIs
+
+There are other APIs provided by the EPC library. These are used for binding
+the EPF device with EPC device. pci-ep-cfs.c can be used as reference for
+using these APIs.
+
+*) pci_epc_get()
+
+ Get a reference to the PCI endpoint controller based on the device name of
+ the controller.
+
+*) pci_epc_put()
+
+ Release the reference to the PCI endpoint controller obtained using
+ pci_epc_get()
+
+*) pci_epc_add_epf()
+
+ Add a PCI endpoint function to a PCI endpoint controller. A PCIe device
+ can have up to 8 functions according to the specification.
+
+*) pci_epc_remove_epf()
+
+ Remove the PCI endpoint function from PCI endpoint controller.
+
+*) pci_epc_start()
+
+ The PCI endpoint function driver should invoke pci_epc_start() once it
+ has configured the endpoint function and wants to start the PCI link.
+
+*) pci_epc_stop()
+
+ The PCI endpoint function driver should invoke pci_epc_stop() to stop
+  the PCI link.
+
+2.2 PCI Endpoint Function(EPF) Library
+
+The EPF library provides APIs to be used by the function driver and the EPC
+library to provide endpoint mode functionality.
+
+2.2.1 APIs for the PCI Endpoint Function Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint function driver.
+
+*) pci_epf_register_driver()
+
+ The PCI Endpoint Function driver should implement the following ops:
+	 * bind: ops to perform when an EPC device has been bound to the EPF device
+	 * unbind: ops to perform when the binding between an EPC device and
+	   an EPF device has been lost
+ * linkup: ops to perform when the EPC device has established a
+ connection with a host system
+
+ The PCI Function driver can then register the PCI EPF driver by using
+ pci_epf_register_driver().
+
+*) pci_epf_unregister_driver()
+
+ The PCI Function driver can unregister the PCI EPF driver by using
+ pci_epf_unregister_driver().
+
+*) pci_epf_alloc_space()
+
+ The PCI Function driver can allocate space for a particular BAR using
+ pci_epf_alloc_space().
+
+*) pci_epf_free_space()
+
+ The PCI Function driver can free the allocated space
+ (using pci_epf_alloc_space) by invoking pci_epf_free_space().
+
+2.2.2 APIs for the PCI Endpoint Controller Library
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint controller library.
+
+*) pci_epf_linkup()
+
+ The PCI endpoint controller library invokes pci_epf_linkup() when the
+ EPC device has established the connection to the host.
+
+2.2.3 Other APIs
+There are other APIs provided by the EPF library. These are used to notify
+the function driver when the EPF device is bound to the EPC device.
+pci-ep-cfs.c can be used as reference for using these APIs.
+
+*) pci_epf_create()
+
+ Create a new PCI EPF device by passing the name of the PCI EPF device.
+  This name will be used to bind the EPF device to an EPF driver.
+
+*) pci_epf_destroy()
+
+ Destroy the created PCI EPF device.
+
+*) pci_epf_bind()
+
+ pci_epf_bind() should be invoked when the EPF device has been bound to
+  an EPC device.
+
+*) pci_epf_unbind()
+
+  pci_epf_unbind() should be invoked when the binding between the EPC device
+  and the EPF device is lost.
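
As an editorial aside (not part of the patch), the following sketch shows the registration pattern from section 2.2.1 above: an endpoint function driver providing bind/unbind/linkup ops and registering itself with pci_epf_register_driver(). The exact callback prototypes and struct pci_epf_driver fields are assumptions here and should be checked against include/linux/pci-epf.h as added by this series.

/* Skeleton endpoint function driver; "my_epf" is a placeholder name. */
#include <linux/module.h>
#include <linux/pci-epf.h>

static int my_epf_bind(struct pci_epf *epf)
{
	/* Configure the EPC: write header, set BARs, allocate EPC memory. */
	return 0;
}

static void my_epf_unbind(struct pci_epf *epf)
{
	/* Undo whatever bind() set up. */
}

static void my_epf_linkup(struct pci_epf *epf)
{
	/* The controller has established a link with the host. */
}

static struct pci_epf_ops my_epf_ops = {
	.bind	= my_epf_bind,
	.unbind	= my_epf_unbind,
	.linkup	= my_epf_linkup,
};

static const struct pci_epf_device_id my_epf_ids[] = {
	{ .name = "my_epf" },
	{ },
};

static struct pci_epf_driver my_epf_driver = {
	.driver.name	= "my_epf",
	.ops		= &my_epf_ops,
	.id_table	= my_epf_ids,
	.owner		= THIS_MODULE,
};

static int __init my_epf_init(void)
{
	return pci_epf_register_driver(&my_epf_driver);
}
module_init(my_epf_init);

static void __exit my_epf_exit(void)
{
	pci_epf_unregister_driver(&my_epf_driver);
}
module_exit(my_epf_exit);

MODULE_LICENSE("GPL v2");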
diff --git a/Documentation/PCI/endpoint/pci-test-function.txt b/Documentation/PCI/endpoint/pci-test-function.txt
new file mode 100644
index 000000000000..0c519c9bf94a
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-test-function.txt
@@ -0,0 +1,66 @@
+ PCI TEST
+ Kishon Vijay Abraham I <kishon@ti.com>
+
+Traditionally, a PCI RC has been validated by using standard
+PCI cards such as Ethernet, USB or SATA PCI cards.
+However, with the addition of the EP core in the Linux kernel, it is possible
+to configure a PCI controller that can operate in EP mode to work as
+a test device.
+
+The PCI endpoint test device is a virtual device (defined in software)
+used to test the endpoint functionality and serve as a sample driver
+for other PCI endpoint devices (to use the EP framework).
+
+The PCI endpoint test device has the following registers:
+
+ 1) PCI_ENDPOINT_TEST_MAGIC
+ 2) PCI_ENDPOINT_TEST_COMMAND
+ 3) PCI_ENDPOINT_TEST_STATUS
+ 4) PCI_ENDPOINT_TEST_SRC_ADDR
+ 5) PCI_ENDPOINT_TEST_DST_ADDR
+ 6) PCI_ENDPOINT_TEST_SIZE
+ 7) PCI_ENDPOINT_TEST_CHECKSUM
+
+*) PCI_ENDPOINT_TEST_MAGIC
+
+This register will be used to test BAR0. A known pattern will be written
+to and read back from the MAGIC register to verify BAR0.
+
+*) PCI_ENDPOINT_TEST_COMMAND:
+
+This register will be used by the host driver to indicate the function
+that the endpoint device must perform.
+
+Bitfield Description:
+ Bit 0 : raise legacy IRQ
+ Bit 1 : raise MSI IRQ
+ Bit 2 - 7 : MSI interrupt number
+ Bit 8 : read command (read data from RC buffer)
+ Bit 9 : write command (write data to RC buffer)
+ Bit 10 : copy command (copy data from one RC buffer to another
+ RC buffer)
+
+*) PCI_ENDPOINT_TEST_STATUS
+
+This register reflects the status of the PCI endpoint device.
+
+Bitfield Description:
+ Bit 0 : read success
+ Bit 1 : read fail
+ Bit 2 : write success
+ Bit 3 : write fail
+ Bit 4 : copy success
+ Bit 5 : copy fail
+ Bit 6 : IRQ raised
+ Bit 7 : source address is invalid
+ Bit 8 : destination address is invalid
+
+*) PCI_ENDPOINT_TEST_SRC_ADDR
+
+This register contains the source address (RC buffer address) for the
+COPY/READ command.
+
+*) PCI_ENDPOINT_TEST_DST_ADDR
+
+This register contains the destination address (RC buffer address) for
+the COPY/WRITE command.
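
For reference (an editorial addition, not part of the patch), the COMMAND and STATUS bitfields described above can be expressed as C masks. The macro names are illustrative only, and register offsets are intentionally omitted because they are not specified here; the driver's actual definitions live in drivers/misc/pci_endpoint_test.c and drivers/pci/endpoint/functions/.

/* COMMAND register bits, as described above. */
#define TEST_COMMAND_RAISE_LEGACY_IRQ	(1 << 0)
#define TEST_COMMAND_RAISE_MSI_IRQ	(1 << 1)
#define TEST_COMMAND_MSI_NUM_MASK	(0x3f << 2)	/* bits 2-7: MSI number */
#define TEST_COMMAND_READ		(1 << 8)	/* read data from RC buffer */
#define TEST_COMMAND_WRITE		(1 << 9)	/* write data to RC buffer */
#define TEST_COMMAND_COPY		(1 << 10)	/* copy RC buffer to RC buffer */

/* STATUS register bits, as described above. */
#define TEST_STATUS_READ_SUCCESS	(1 << 0)
#define TEST_STATUS_READ_FAIL		(1 << 1)
#define TEST_STATUS_WRITE_SUCCESS	(1 << 2)
#define TEST_STATUS_WRITE_FAIL		(1 << 3)
#define TEST_STATUS_COPY_SUCCESS	(1 << 4)
#define TEST_STATUS_COPY_FAIL		(1 << 5)
#define TEST_STATUS_IRQ_RAISED		(1 << 6)
#define TEST_STATUS_SRC_ADDR_INVALID	(1 << 7)
#define TEST_STATUS_DST_ADDR_INVALID	(1 << 8)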
diff --git a/Documentation/PCI/endpoint/pci-test-howto.txt b/Documentation/PCI/endpoint/pci-test-howto.txt
new file mode 100644
index 000000000000..75f48c3bb191
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-test-howto.txt
@@ -0,0 +1,179 @@
+ PCI TEST USERGUIDE
+ Kishon Vijay Abraham I <kishon@ti.com>
+
+This document is a guide to help users use the pci-epf-test function driver
+and the pci_endpoint_test host driver for testing PCI. The steps to
+be followed on the host side and the EP side are given below.
+
+1. Endpoint Device
+
+1.1 Endpoint Controller Devices
+
+To find the list of endpoint controller devices in the system:
+
+ # ls /sys/class/pci_epc/
+ 51000000.pcie_ep
+
+If PCI_ENDPOINT_CONFIGFS is enabled
+ # ls /sys/kernel/config/pci_ep/controllers
+ 51000000.pcie_ep
+
+1.2 Endpoint Function Drivers
+
+To find the list of endpoint function drivers in the system:
+
+ # ls /sys/bus/pci-epf/drivers
+ pci_epf_test
+
+If PCI_ENDPOINT_CONFIGFS is enabled
+ # ls /sys/kernel/config/pci_ep/functions
+ pci_epf_test
+
+1.3 Creating pci-epf-test Device
+
+A PCI endpoint function device can be created using configfs. To create a
+pci-epf-test device, the following commands can be used:
+
+ # mount -t configfs none /sys/kernel/config
+ # cd /sys/kernel/config/pci_ep/
+ # mkdir functions/pci_epf_test/func1
+
+The "mkdir func1" above creates the pci-epf-test function device that will
+be probed by pci_epf_test driver.
+
+The PCI endpoint framework populates the directory with the following
+configurable fields.
+
+ # ls functions/pci_epf_test/func1
+ baseclass_code interrupt_pin revid subsys_vendor_id
+ cache_line_size msi_interrupts subclass_code vendorid
+ deviceid progif_code subsys_id
+
+The PCI endpoint function driver populates these entries with default values
+when the device is bound to the driver. The pci-epf-test driver populates
+vendorid with 0xffff and interrupt_pin with 0x0001:
+
+ # cat functions/pci_epf_test/func1/vendorid
+ 0xffff
+ # cat functions/pci_epf_test/func1/interrupt_pin
+ 0x0001
+
+1.4 Configuring pci-epf-test Device
+
+The user can configure the pci-epf-test device using its configfs entries. In
+order to change the vendorid and the number of MSI interrupts used by the
+function device, the following commands can be used:
+
+ # echo 0x104c > functions/pci_epf_test/func1/vendorid
+ # echo 0xb500 > functions/pci_epf_test/func1/deviceid
+ # echo 16 > functions/pci_epf_test/func1/msi_interrupts
+
+1.5 Binding pci-epf-test Device to EP Controller
+
+In order for the endpoint function device to be useful, it has to be bound to
+a PCI endpoint controller driver. Use the configfs to bind the function
+device to one of the controller drivers present in the system.
+
+ # ln -s functions/pci_epf_test/func1 controllers/51000000.pcie_ep/
+
+Once the above step is completed, the PCI endpoint is ready to establish a link
+with the host.
+
+1.6 Start the Link
+
+In order for the endpoint device to establish a link with the host, the _start_
+field should be populated with '1'.
+
+ # echo 1 > controllers/51000000.pcie_ep/start
+
+2. RootComplex Device
+
+2.1 lspci Output
+
+Note that the devices listed here correspond to the values populated in section 1.4 above.
+
+ 00:00.0 PCI bridge: Texas Instruments Device 8888 (rev 01)
+ 01:00.0 Unassigned class [ff00]: Texas Instruments Device b500
+
+2.2 Using Endpoint Test function Device
+
+pcitest.sh, added in tools/pci/, can be used to run all the default PCI endpoint
+tests. Before pcitest.sh can be used, pcitest.c should be compiled using the
+following commands:
+
+ cd <kernel-dir>
+ make headers_install ARCH=arm
+ arm-linux-gnueabihf-gcc -Iusr/include tools/pci/pcitest.c -o pcitest
+ cp pcitest <rootfs>/usr/sbin/
+ cp tools/pci/pcitest.sh <rootfs>
+
+2.2.1 pcitest.sh Output
+ # ./pcitest.sh
+ BAR tests
+
+ BAR0: OKAY
+ BAR1: OKAY
+ BAR2: OKAY
+ BAR3: OKAY
+ BAR4: NOT OKAY
+ BAR5: NOT OKAY
+
+ Interrupt tests
+
+ LEGACY IRQ: NOT OKAY
+ MSI1: OKAY
+ MSI2: OKAY
+ MSI3: OKAY
+ MSI4: OKAY
+ MSI5: OKAY
+ MSI6: OKAY
+ MSI7: OKAY
+ MSI8: OKAY
+ MSI9: OKAY
+ MSI10: OKAY
+ MSI11: OKAY
+ MSI12: OKAY
+ MSI13: OKAY
+ MSI14: OKAY
+ MSI15: OKAY
+ MSI16: OKAY
+ MSI17: NOT OKAY
+ MSI18: NOT OKAY
+ MSI19: NOT OKAY
+ MSI20: NOT OKAY
+ MSI21: NOT OKAY
+ MSI22: NOT OKAY
+ MSI23: NOT OKAY
+ MSI24: NOT OKAY
+ MSI25: NOT OKAY
+ MSI26: NOT OKAY
+ MSI27: NOT OKAY
+ MSI28: NOT OKAY
+ MSI29: NOT OKAY
+ MSI30: NOT OKAY
+ MSI31: NOT OKAY
+ MSI32: NOT OKAY
+
+ Read Tests
+
+ READ ( 1 bytes): OKAY
+ READ ( 1024 bytes): OKAY
+ READ ( 1025 bytes): OKAY
+ READ (1024000 bytes): OKAY
+ READ (1024001 bytes): OKAY
+
+ Write Tests
+
+ WRITE ( 1 bytes): OKAY
+ WRITE ( 1024 bytes): OKAY
+ WRITE ( 1025 bytes): OKAY
+ WRITE (1024000 bytes): OKAY
+ WRITE (1024001 bytes): OKAY
+
+ Copy Tests
+
+ COPY ( 1 bytes): OKAY
+ COPY ( 1024 bytes): OKAY
+ COPY ( 1025 bytes): OKAY
+ COPY (1024000 bytes): OKAY
+ COPY (1024001 bytes): OKAY
diff --git a/Documentation/PCI/pci-iov-howto.txt b/Documentation/PCI/pci-iov-howto.txt
index 2d91ae251982..d2a84151e99c 100644
--- a/Documentation/PCI/pci-iov-howto.txt
+++ b/Documentation/PCI/pci-iov-howto.txt
@@ -68,6 +68,18 @@ To disable SR-IOV capability:
echo 0 > \
/sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_numvfs
+To enable auto probing VFs by a compatible driver on the host, run the
+command below before enabling SR-IOV capabilities. This is the
+default behavior.
+ echo 1 > \
+ /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_drivers_autoprobe
+
+To disable auto probing VFs by a compatible driver on the host, run the
+command below before enabling SR-IOV capabilities. Updating this
+entry will not affect VFs which are already probed.
+ echo 0 > \
+ /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_drivers_autoprobe
+
3.2 Usage example
Following piece of code illustrates the usage of the SR-IOV API.
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index 1392c705ceca..b2480dd38c11 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -6,30 +6,40 @@ Required properties:
- reg-names: Must be "config" for the PCIe configuration space.
(The old way of getting the configuration address space from "ranges"
is deprecated and should be avoided.)
+- num-lanes: number of lanes to use
+RC mode:
- #address-cells: set to <3>
- #size-cells: set to <2>
- device_type: set to "pci"
- ranges: ranges for the PCI memory and I/O regions
- #interrupt-cells: set to <1>
-- interrupt-map-mask and interrupt-map: standard PCI properties
- to define the mapping of the PCIe interface to interrupt
+- interrupt-map-mask and interrupt-map: standard PCI
+ properties to define the mapping of the PCIe interface to interrupt
numbers.
-- num-lanes: number of lanes to use
+EP mode:
+- num-ib-windows: number of inbound address translation
+ windows
+- num-ob-windows: number of outbound address translation
+ windows
Optional properties:
-- num-viewport: number of view ports configured in hardware. If a platform
- does not specify it, the driver assumes 2.
- num-lanes: number of lanes to use (this property should be specified unless
the link is brought already up in BIOS)
- reset-gpio: gpio pin number of power good signal
-- bus-range: PCI bus numbers covered (it is recommended for new devicetrees to
- specify this property, to keep backwards compatibility a range of 0x00-0xff
- is assumed if not present)
- clocks: Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- "pcie"
- "pcie_bus"
+RC mode:
+- num-viewport: number of view ports configured in
+ hardware. If a platform does not specify it, the driver assumes 2.
+- bus-range: PCI bus numbers covered (it is recommended
+ for new devicetrees to specify this property, to keep backwards
+ compatibility a range of 0x00-0xff is assumed if not present)
+EP mode:
+- max-functions: maximum number of functions that can be
+ configured
Example configuration:
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
new file mode 100644
index 000000000000..35d4a979bb7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
@@ -0,0 +1,129 @@
+Faraday Technology FTPCI100 PCI Host Bridge
+
+This PCI bridge is found inside the Cortina Systems Gemini SoC platform and
+is a generic IP block from Faraday Technology. It exists in two variants:
+plain and dual PCI. The plain version embeds a cascading interrupt controller
+into the host bridge. The dual version routes the interrupts to the host
+chip's interrupt controller.
+
+The host controller appears on the PCI bus with vendor ID 0x159b (Faraday
+Technology) and product ID 0x4321.
+
+Mandatory properties:
+
+- compatible: ranging from specific to generic, should be one of
+ "cortina,gemini-pci", "faraday,ftpci100"
+ "cortina,gemini-pci-dual", "faraday,ftpci100-dual"
+ "faraday,ftpci100"
+ "faraday,ftpci100-dual"
+- reg: memory base and size for the host bridge
+- #address-cells: set to <3>
+- #size-cells: set to <2>
+- #interrupt-cells: set to <1>
+- bus-range: set to <0x00 0xff>
+- device_type, set to "pci"
+- ranges: see pci.txt
+- interrupt-map-mask: see pci.txt
+- interrupt-map: see pci.txt
+- dma-ranges: three ranges for the inbound memory region. The ranges must
+ be aligned to a 1MB boundary, and may be 1MB, 2MB, 4MB, 8MB, 16MB, 32MB, 64MB,
+ 128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as
+ pre-fetchable.
+
+Mandatory subnodes:
+- For "faraday,ftpci100" a node representing the interrupt-controller inside the
+ host bridge is mandatory. It has the following mandatory properties:
+ - interrupt: see interrupt-controller/interrupts.txt
+ - interrupt-parent: see interrupt-controller/interrupts.txt
+ - interrupt-controller: see interrupt-controller/interrupts.txt
+ - #address-cells: set to <0>
+ - #interrupt-cells: set to <1>
+
+I/O space considerations:
+
+The plain variant has 128MiB of non-prefetchable memory space, whereas the
+"dual" variant has 64MiB. Take this into account when describing the ranges.
+
+Interrupt map considerations:
+
+The "dual" variant will get INT A, B, C, D from the system interrupt controller
+and should point to respective interrupt in that controller in its
+interrupt-map.
+
+The code, which is the only documentation of how the Faraday PCI (the non-dual
+variant) assigns the default interrupt mapping/swizzling, has
+typically looked like this, doing the swizzling on the interrupt controller side
+rather than in the interconnect:
+
+interrupt-map-mask = <0xf800 0 0 7>;
+interrupt-map =
+ <0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */
+ <0x4800 0 0 2 &pci_intc 1>,
+ <0x4800 0 0 3 &pci_intc 2>,
+ <0x4800 0 0 4 &pci_intc 3>,
+ <0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */
+ <0x5000 0 0 2 &pci_intc 2>,
+ <0x5000 0 0 3 &pci_intc 3>,
+ <0x5000 0 0 4 &pci_intc 0>,
+ <0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */
+ <0x5800 0 0 2 &pci_intc 3>,
+ <0x5800 0 0 3 &pci_intc 0>,
+ <0x5800 0 0 4 &pci_intc 1>,
+ <0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */
+ <0x6000 0 0 2 &pci_intc 0>,
+ <0x6000 0 0 3 &pci_intc 1>,
+ <0x6000 0 0 4 &pci_intc 2>;
+
+Example:
+
+pci@50000000 {
+ compatible = "cortina,gemini-pci", "faraday,ftpci100";
+ reg = <0x50000000 0x100>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>, /* PCI A */
+ <26 IRQ_TYPE_LEVEL_HIGH>, /* PCI B */
+ <27 IRQ_TYPE_LEVEL_HIGH>, /* PCI C */
+ <28 IRQ_TYPE_LEVEL_HIGH>; /* PCI D */
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ bus-range = <0x00 0xff>;
+ ranges = /* 1MiB I/O space 0x50000000-0x500fffff */
+ <0x01000000 0 0 0x50000000 0 0x00100000>,
+ /* 128MiB non-prefetchable memory 0x58000000-0x5fffffff */
+ <0x02000000 0 0x58000000 0x58000000 0 0x08000000>;
+
+ /* DMA ranges */
+ dma-ranges =
+ /* 128MiB at 0x00000000-0x07ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x08000000>,
+ /* 64MiB at 0x00000000-0x03ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x04000000>,
+ /* 64MiB at 0x00000000-0x03ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x04000000>;
+
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map =
+ <0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */
+ <0x4800 0 0 2 &pci_intc 1>,
+ <0x4800 0 0 3 &pci_intc 2>,
+ <0x4800 0 0 4 &pci_intc 3>,
+ <0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */
+ <0x5000 0 0 2 &pci_intc 2>,
+ <0x5000 0 0 3 &pci_intc 3>,
+ <0x5000 0 0 4 &pci_intc 0>,
+ <0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */
+ <0x5800 0 0 2 &pci_intc 3>,
+ <0x5800 0 0 3 &pci_intc 0>,
+ <0x5800 0 0 4 &pci_intc 1>,
+ <0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */
+ <0x6000 0 0 2 &pci_intc 0>,
+ <0x6000 0 0 3 &pci_intc 0>,
+ <0x6000 0 0 4 &pci_intc 0>;
+ pci_intc: interrupt-controller {
+ interrupt-parent = <&intcon>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index 83aeb1f5a645..e3d5680875b1 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -4,7 +4,11 @@ This PCIe host controller is based on the Synopsis Designware PCIe IP
and thus inherits all the common properties defined in designware-pcie.txt.
Required properties:
-- compatible: "fsl,imx6q-pcie", "fsl,imx6sx-pcie", "fsl,imx6qp-pcie"
+- compatible:
+ - "fsl,imx6q-pcie"
+ - "fsl,imx6sx-pcie",
+ - "fsl,imx6qp-pcie"
+ - "fsl,imx7d-pcie"
- reg: base address and length of the PCIe controller
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
@@ -34,6 +38,14 @@ Additional required properties for imx6sx-pcie:
- clock names: Must include the following additional entries:
- "pcie_inbound_axi"
+Additional required properties for imx7d-pcie:
+- power-domains: Must be set to a phandle pointing to PCIE_PHY power domain
+- resets: Must contain phandles to PCIe-related reset lines exposed by SRC
+ IP block
+- reset-names: Must contain the following entries:
+ - "pciephy"
+ - "apps"
+
Example:
pcie@0x01000000 {
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt
index 60e25161f351..6a07c96227e0 100644
--- a/Documentation/devicetree/bindings/pci/ti-pci.txt
+++ b/Documentation/devicetree/bindings/pci/ti-pci.txt
@@ -1,17 +1,22 @@
TI PCI Controllers
PCIe Designware Controller
- - compatible: Should be "ti,dra7-pcie""
- - reg : Two register ranges as listed in the reg-names property
- - reg-names : The first entry must be "ti-conf" for the TI specific registers
- The second entry must be "rc-dbics" for the designware pcie
- registers
- The third entry must be "config" for the PCIe configuration space
+ - compatible: Should be "ti,dra7-pcie" for RC
+ Should be "ti,dra7-pcie-ep" for EP
- phys : list of PHY specifiers (used by generic PHY framework)
- phy-names : must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
number of PHYs as specified in *phys* property.
- ti,hwmods : Name of the hwmod associated to the pcie, "pcie<X>",
where <X> is the instance number of the pcie from the HW spec.
+ - num-lanes as specified in ../designware-pcie.txt
+
+HOST MODE
+=========
+ - reg : Two register ranges as listed in the reg-names property
+ - reg-names : The first entry must be "ti-conf" for the TI specific registers
+ The second entry must be "rc-dbics" for the DesignWare PCIe
+ registers
+ The third entry must be "config" for the PCIe configuration space
- interrupts : Two interrupt entries must be specified. The first one is for
main interrupt line and the second for MSI interrupt line.
- #address-cells,
@@ -19,13 +24,36 @@ PCIe Designware Controller
#interrupt-cells,
device_type,
ranges,
- num-lanes,
interrupt-map-mask,
interrupt-map : as specified in ../designware-pcie.txt
+DEVICE MODE
+===========
+ - reg : Four register ranges as listed in the reg-names property
+ - reg-names : "ti-conf" for the TI specific registers
+ "ep_dbics" for the standard configuration registers as
+ they are locally accessed within the DIF CS space
+ "ep_dbics2" for the standard configuration registers as
+ they are locally accessed within the DIF CS2 space
+ "addr_space" used to map remote RC address space
+ - interrupts : one interrupt entry must be specified for the main interrupt.
+ - num-ib-windows : number of inbound address translation windows
+ - num-ob-windows : number of outbound address translation windows
+ - ti,syscon-unaligned-access: phandle to the syscon DT node. The 1st argument
+ should contain the register offset within syscon
+ and the 2nd argument should contain the bit field
+ for setting the bit to enable unaligned
+ access.
+
Optional Property:
- gpios : Should be added if a gpio line is required to drive PERST# line
+NOTE: Two DT nodes may be added for each PCI controller; one for host
+mode and another for device mode. So in order for PCI to
+work in host mode, the EP mode DT node should be disabled, and in order for
+PCI to work in EP mode, the host mode DT node should be disabled. Host mode and EP
+mode are mutually exclusive.
+
Example:
axi {
compatible = "simple-bus";
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index bf34d5b3a733..e72587fe477d 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -342,8 +342,10 @@ PER-CPU MEM
devm_free_percpu()
PCI
- pcim_enable_device() : after success, all PCI ops become managed
- pcim_pin_device() : keep PCI device enabled after release
+ devm_pci_remap_cfgspace() : ioremap PCI configuration space
+ devm_pci_remap_cfg_resource() : ioremap PCI configuration space resource
+ pcim_enable_device() : after success, all PCI ops become managed
+ pcim_pin_device() : keep PCI device enabled after release
PHY
devm_usb_get_phy()
diff --git a/Documentation/filesystems/sysfs-pci.txt b/Documentation/filesystems/sysfs-pci.txt
index 6ea1ceda6f52..06f1d64c6f70 100644
--- a/Documentation/filesystems/sysfs-pci.txt
+++ b/Documentation/filesystems/sysfs-pci.txt
@@ -113,9 +113,18 @@ Supporting PCI access on new platforms
--------------------------------------
In order to support PCI resource mapping as described above, Linux platform
-code must define HAVE_PCI_MMAP and provide a pci_mmap_page_range function.
-Platforms are free to only support subsets of the mmap functionality, but
-useful return codes should be provided.
+code should ideally define ARCH_GENERIC_PCI_MMAP_RESOURCE and use the generic
+implementation of that functionality. To support the historical interface of
+mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP.
+
+Alternatively, platforms which set HAVE_PCI_MMAP may provide their own
+implementation of pci_mmap_page_range() instead of defining
+ARCH_GENERIC_PCI_MMAP_RESOURCE.
+
+Platforms which support write-combining maps of PCI resources must define
+arch_can_pci_mmap_wc() which shall evaluate to non-zero at runtime when
+write-combining is permitted. Platforms which support maps of I/O resources
+define arch_can_pci_mmap_io() similarly.
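
As an editorial sketch (not part of the patch), an architecture opting in to this scheme might add definitions along these lines to its <asm/pci.h>; which of them a given architecture actually defines depends on its capabilities (compare the arch/arm and arch/arm64 changes later in this diff).

/* Hypothetical <asm/pci.h> fragment illustrating the macros described above. */
#define HAVE_PCI_MMAP			/* legacy mmap() via /proc/bus/pci */
#define ARCH_GENERIC_PCI_MMAP_RESOURCE	/* use the generic code in drivers/pci/mmap.c */

/* Non-zero when write-combined / I/O space mappings are permitted. */
#define arch_can_pci_mmap_wc()	1
#define arch_can_pci_mmap_io()	0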
Legacy resources are protected by the HAVE_PCI_LEGACY define. Platforms
wishing to support legacy functionality should define it and provide
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index a77ead911956..eccb675a2852 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -191,6 +191,7 @@ Code Seq#(hex) Include File Comments
'W' 00-1F linux/watchdog.h conflict!
'W' 00-1F linux/wanrouter.h conflict! (pre 3.9)
'W' 00-3F sound/asound.h conflict!
+'W' 40-5F drivers/pci/switch/switchtec.c
'X' all fs/xfs/xfs_fs.h conflict!
and fs/xfs/linux-2.6/xfs_ioctl32.h
and include/linux/falloc.h
diff --git a/Documentation/misc-devices/pci-endpoint-test.txt b/Documentation/misc-devices/pci-endpoint-test.txt
new file mode 100644
index 000000000000..4ebc3594b32c
--- /dev/null
+++ b/Documentation/misc-devices/pci-endpoint-test.txt
@@ -0,0 +1,35 @@
+Driver for PCI Endpoint Test Function
+
+This driver should be used as a host side driver if the root complex is
+connected to a configurable PCI endpoint running the *pci_epf_test* function
+driver configured according to [1].
+
+The "pci_endpoint_test" driver can be used to perform the following tests.
+
+The PCI driver for the test device performs the following tests
+ *) verifying addresses programmed in BAR
+ *) raise legacy IRQ
+ *) raise MSI IRQ
+ *) read data
+ *) write data
+ *) copy data
+
+This misc driver creates /dev/pci-endpoint-test.<num> for every
+*pci_epf_test* function connected to the root complex and "ioctls"
+should be used to perform the above tests.
+
+ioctl
+-----
+ PCITEST_BAR: Tests the BAR. The number of the BAR to be tested
+ should be passed as argument.
+ PCITEST_LEGACY_IRQ: Tests legacy IRQ
+ PCITEST_MSI: Tests message signalled interrupts. The MSI number
+ to be tested should be passed as argument.
+ PCITEST_WRITE: Perform write tests. The size of the buffer should be passed
+ as argument.
+ PCITEST_READ: Perform read tests. The size of the buffer should be passed
+ as argument.
+ PCITEST_COPY: Perform copy tests. The size of the buffer should be passed
+ as argument.
+
+[1] -> Documentation/PCI/endpoint/function/binding/pci-test.txt
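
As an editorial sketch (not part of the patch), a minimal userspace client for the ioctls listed above might look like the following. The "return value 1 means the test passed" convention is an assumption; tools/pci/pcitest.c in this series is the reference user of this interface.

/* Exercise the pci_endpoint_test ioctls on the first test device. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pcitest.h>	/* installed by "make headers_install" */

static void report(const char *name, int ret)
{
	printf("%s: %s\n", name, ret == 1 ? "OKAY" : "NOT OKAY");
}

int main(void)
{
	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	report("BAR0 ", ioctl(fd, PCITEST_BAR, 0));	/* BAR number as argument */
	report("MSI1 ", ioctl(fd, PCITEST_MSI, 1));	/* MSI number as argument */
	report("READ ", ioctl(fd, PCITEST_READ, 1024));	/* buffer size as argument */
	report("WRITE", ioctl(fd, PCITEST_WRITE, 1024));
	report("COPY ", ioctl(fd, PCITEST_COPY, 1024));

	close(fd);
	return 0;
}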
diff --git a/Documentation/switchtec.txt b/Documentation/switchtec.txt
new file mode 100644
index 000000000000..a0a9c7b3d4d5
--- /dev/null
+++ b/Documentation/switchtec.txt
@@ -0,0 +1,80 @@
+========================
+Linux Switchtec Support
+========================
+
+Microsemi's "Switchtec" line of PCI switch devices is already
+supported by the kernel with standard PCI switch drivers. However, the
+Switchtec device advertises a special management endpoint which
+enables some additional functionality. This includes:
+
+* Packet and Byte Counters
+* Firmware Upgrades
+* Event and Error logs
+* Querying port link status
+* Custom user firmware commands
+
+The switchtec kernel module implements this functionality.
+
+
+Interface
+=========
+
+The primary means of communicating with the Switchtec management firmware is
+through the Memory-mapped Remote Procedure Call (MRPC) interface.
+Commands are submitted to the interface with a 4-byte command
+identifier and up to 1KB of command specific data. The firmware will
+respond with a 4 bytes return code and up to 1KB of command specific
+data. The interface only processes a single command at a time.
+
+
+Userspace Interface
+===================
+
+The MRPC interface will be exposed to userspace through a simple char
+device: /dev/switchtec#, one for each management endpoint in the system.
+
+The char device has the following semantics:
+
+* A write must consist of at least 4 bytes and no more than 1028 bytes.
+ The first four bytes will be interpreted as the command to run and
+ the remainder will be used as the input data. A write will send the
+ command to the firmware to begin processing.
+
+* Each write must be followed by exactly one read. Any double write will
+ produce an error and any read that doesn't follow a write will
+ produce an error.
+
+* A read will block until the firmware completes the command and will return
+  the four bytes of status plus up to 1024 bytes of output data. (The
+  length will be specified by the size parameter of the read call --
+  reading less than 4 bytes will produce an error.)
+
+* The poll call will also be supported for userspace applications that
+ need to do other things while waiting for the command to complete.
+
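
As an editorial sketch (not part of the patch), a single MRPC exchange using only the write/read semantics described above could look like this; the command identifier and payload are placeholders rather than a real Switchtec command.

/* One MRPC command/response exchange over the char device. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint8_t out[4 + 1024];			/* 4-byte command id + input data */
	uint8_t in[4 + 1024];			/* 4-byte return code + output data */
	uint32_t cmd = 0x01;			/* placeholder command identifier */
	uint32_t rc;
	ssize_t n;
	int fd = open("/dev/switchtec0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memcpy(out, &cmd, 4);			/* first four bytes: the command */
	memset(out + 4, 0, 16);			/* command-specific input data */

	if (write(fd, out, 4 + 16) < 0) {	/* submit the command */
		perror("write");
		return 1;
	}

	n = read(fd, in, sizeof(in));		/* blocks until the firmware completes */
	if (n < 4) {
		perror("read");
		return 1;
	}

	memcpy(&rc, in, 4);			/* first four bytes: firmware return code */
	printf("return code 0x%x, %zd bytes of output data\n", rc, n - 4);
	close(fd);
	return 0;
}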
+The following IOCTLs are also supported by the device:
+
+* SWITCHTEC_IOCTL_FLASH_INFO - Retrieve firmware length and number
+ of partitions in the device.
+
+* SWITCHTEC_IOCTL_FLASH_PART_INFO - Retrieve address and length for
+ any specified partition in flash.
+
+* SWITCHTEC_IOCTL_EVENT_SUMMARY - Read a structure of bitmaps
+ indicating all uncleared events.
+
+* SWITCHTEC_IOCTL_EVENT_CTL - Get the current count, clear and set flags
+ for any event. This ioctl takes in a switchtec_ioctl_event_ctl struct
+ with the event_id, index and flags set (index being the partition or PFF
+ number for non-global events). It returns whether the event has
+ occurred, the number of times and any event specific data. The flags
+ can be used to clear the count or enable and disable actions to
+ happen when the event occurs.
+ By using the SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL flag,
+ you can set an event to trigger a poll command to return with
+ POLLPRI. In this way, userspace can wait for events to occur.
+
+* SWITCHTEC_IOCTL_PFF_TO_PORT and SWITCHTEC_IOCTL_PORT_TO_PFF convert
+ between PCI Function Framework number (used by the event system)
+ and Switchtec Logic Port ID and Partition number (which is more
+ user friendly).
diff --git a/MAINTAINERS b/MAINTAINERS
index 56b1111cc9c1..a4a9e31fed72 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9723,6 +9723,15 @@ F: include/linux/pci*
F: arch/x86/pci/
F: arch/x86/kernel/quirks.c
+PCI ENDPOINT SUBSYSTEM
+M: Kishon Vijay Abraham I <kishon@ti.com>
+L: linux-pci@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
+S: Supported
+F: drivers/pci/endpoint/
+F: drivers/misc/pci_endpoint_test.c
+F: tools/pci/
+
PCI DRIVER FOR ALTERA PCIE IP
M: Ley Foon Tan <lftan@altera.com>
L: rfi@lists.rocketboards.org (moderated for non-subscribers)
@@ -9797,6 +9806,17 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
F: drivers/pci/host/pci-aardvark.c
+PCI DRIVER FOR MICROSEMI SWITCHTEC
+M: Kurt Schwemmer <kurt.schwemmer@microsemi.com>
+M: Stephen Bates <stephen.bates@microsemi.com>
+M: Logan Gunthorpe <logang@deltatee.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/switchtec.txt
+F: Documentation/ABI/testing/sysfs-class-switchtec
+F: drivers/pci/switch/switchtec*
+F: include/uapi/linux/switchtec_ioctl.h
+
PCI DRIVER FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 42871fb8340e..2cfbc531f63b 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -187,6 +187,16 @@ static inline void pci_ioremap_set_mem_type(int mem_type) {}
extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification does not allow configuration write
+ * transactions to be posted. Add an arch specific
+ * pci_remap_cfgspace() definition that is implemented
+ * through strongly ordered memory mappings.
+ */
+#define pci_remap_cfgspace pci_remap_cfgspace
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
+/*
* Now, pick up the machine-defined IO definitions
*/
#ifdef CONFIG_NEED_MACH_IO_H
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 057d381f4e57..396c92bcc0cf 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -29,8 +29,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define PCI_DMA_BUS_IS_PHYS (1)
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 2f0e07735d1d..b259956365a0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -597,25 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return start;
}
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- /*
- * Mark this as IO
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
void __init pci_map_io_early(unsigned long pfn)
{
struct map_desc pci_io_desc = {
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 6c679659cda5..67ebff829cf2 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -524,7 +524,7 @@ static struct clockdomain pcie_7xx_clkdm = {
.dep_bit = DRA7XX_PCIE_STATDEP_SHIFT,
.wkdep_srcs = pcie_wkup_sleep_deps,
.sleepdep_srcs = pcie_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain atl_7xx_clkdm = {
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ff0eed23ddf1..fc91205ff46c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -481,6 +481,13 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
__pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
/*
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 33a45bd96860..3b8e728cc944 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -436,6 +436,18 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
}
EXPORT_SYMBOL(ioremap_wc);
+#ifdef CONFIG_PCI
+
+#include <asm/mach/map.h>
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
+#endif
+
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (void *)phys_addr;
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 0c00c87bb9dd..35b2e50f17fb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -173,6 +173,16 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define iounmap __iounmap
/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification disallows posted write configuration transactions.
+ * Add an arch specific pci_remap_cfgspace() definition that is implemented
+ * through nGnRnE device memory attribute as recommended by the ARM v8
+ * Architecture reference manual Issue A.k B2.8.2 "Device memory".
+ */
+#define pci_remap_cfgspace(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+
+/*
* io{read,write}{16,32,64}be() macros
*/
#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b9a7ba9ca44c..1fc19744ffe9 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -22,6 +22,8 @@
*/
#define PCI_DMA_BUS_IS_PHYS (0)
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index 212266a2c5d9..394c2a73d5e2 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -14,28 +14,6 @@ void pcibios_set_master(struct pci_dev *dev)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /* Leave vm_pgoff as-is, the PCI space address is the physical
- * address on this platform.
- */
- prot = pgprot_val(vma->vm_page_prot);
- vma->vm_page_prot = __pgprot(prot);
-
- /* Write-combine setting is ignored, it is changed via the mtrr
- * interfaces on this platform.
- */
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h
index b1b289df04c7..6e505332b3e3 100644
--- a/arch/cris/include/asm/pci.h
+++ b/arch/cris/include/asm/pci.h
@@ -42,9 +42,7 @@ struct pci_dev;
#define PCI_DMA_BUS_IS_PHYS (1)
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index c0835b0dc722..6459f2d46200 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -51,8 +51,9 @@ extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+#define arch_can_pci_mmap_wc() 1
+
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma,
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 8f6ac2f8ae4c..4068bde623dc 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -418,52 +418,6 @@ pcibios_align_resource (void *data, const struct resource *res,
return res->start;
}
-int
-pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long size = vma->vm_end - vma->vm_start;
- pgprot_t prot;
-
- /*
- * I/O space cannot be accessed via normal processor loads and
- * stores on this platform.
- */
- if (mmap_state == pci_mmap_io)
- /*
- * XXX we could relax this for I/O spaces for which ACPI
- * indicates that the space is 1-to-1 mapped. But at the
- * moment, we don't support multiple PCI address spaces and
- * the legacy I/O space is not 1-to-1 mapped, so this is moot.
- */
- return -EINVAL;
-
- if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
- return -EINVAL;
-
- prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
- vma->vm_page_prot);
-
- /*
- * If the user requested WC, the kernel uses UC or WC for this region,
- * and the chipset supports WC, we can use WC. Otherwise, we have to
- * use the same attribute the kernel uses.
- */
- if (write_combine &&
- ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
- (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
- efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = prot;
-
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
/**
* ia64_pci_get_legacy_mem - generic legacy mem routine
* @bus: bus to get legacy memory base address for
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 2a120bb70e54..efd4983cb697 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -46,12 +46,10 @@ extern int pci_domain_nr(struct pci_bus *bus);
extern int pci_proc_domain(struct pci_bus *bus);
struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP 1
+#define HAVE_PCI_MMAP 1
+#define arch_can_pci_mmap_io() 1
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 13bc93242c0c..404fb38d06b7 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -278,7 +278,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
*
* Returns a negative error code on failure, zero on success.
*/
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t offset =
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 30d1129d8624..1000c1b4c875 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -110,10 +110,7 @@ extern unsigned long PCIBIOS_MIN_MEM;
extern void pcibios_set_master(struct pci_dev *dev);
#define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
/*
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index f6325fa657fb..bd67ac74fe2d 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -57,27 +57,3 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
*start = fixup_bigphys_addr(rsrc->start, size);
*end = rsrc->start + size;
}
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /*
- * I/O space can be accessed via normal processor loads and stores on
- * this platform but for now we elect not to do this and portable
- * drivers should not do this anyway.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- /*
- * Ignore write-combine; for now only return uncached mappings.
- */
- prot = pgprot_val(vma->vm_page_prot);
- prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
- vma->vm_page_prot = __pgprot(prot);
-
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index 51159fff025a..d27654902f28 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -74,9 +74,7 @@ static inline int pci_controller_num(struct pci_dev *dev)
}
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index b7ab8378964c..e0f4617c0c7a 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -210,26 +210,3 @@ void __init pcibios_resource_survey(void)
pcibios_allocate_resources(0);
pcibios_allocate_resources(1);
}
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /* Leave vm_pgoff as-is, the PCI space address is the physical
- * address on this platform.
- */
- vma->vm_flags |= VM_LOCKED;
-
- prot = pgprot_val(vma->vm_page_prot);
- prot &= ~_PAGE_CACHE;
- vma->vm_page_prot = __pgprot(prot);
-
- /* Write-combine setting is ignored */
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index defebd956585..1de1a3f412ec 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -200,8 +200,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __ASM_PARISC_PCI_H */
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 0903c6abd7a4..13ee3569959a 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -227,34 +227,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return start;
}
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /*
- * I/O space can be accessed via normal processor loads and stores on
- * this platform but for now we elect not to do this and portable
- * drivers should not do this anyway.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- if (write_combine)
- return -EINVAL;
-
- /*
- * Ignore write-combine; for now only return uncached mappings.
- */
- prot = pgprot_val(vma->vm_page_prot);
- prot |= _PAGE_NO_CACHE;
- vma->vm_page_prot = __pgprot(prot);
-
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
-
/*
* A driver is enabling the device. We make sure that all the appropriate
* bits are set to allow the device to operate as the driver is expecting.
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 5011b69107a7..f90b22c722e1 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -173,6 +173,8 @@ struct machdep_calls {
/* Called after scan and before resource survey */
void (*pcibios_fixup_phb)(struct pci_controller *hose);
+ resource_size_t (*pcibios_default_alignment)(void);
+
#ifdef CONFIG_PCI_IOV
void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 93eded8d3843..c8975dac535f 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -77,12 +77,11 @@ extern int pci_domain_nr(struct pci_bus *bus);
extern int pci_proc_domain(struct pci_bus *bus);
struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP 1
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() and it does WC */
+#define HAVE_PCI_MMAP 1
+#define arch_can_pci_mmap_io() 1
+#define arch_can_pci_mmap_wc() 1
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index ffda24a38dda..341a7469cab8 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -233,6 +233,14 @@ void pcibios_reset_secondary_bus(struct pci_dev *dev)
pci_reset_secondary_bus(dev);
}
+resource_size_t pcibios_default_alignment(void)
+{
+ if (ppc_md.pcibios_default_alignment)
+ return ppc_md.pcibios_default_alignment();
+
+ return 0;
+}
+
#ifdef CONFIG_PCI_IOV
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
@@ -513,7 +521,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
*
* Returns a negative error code on failure, zero on success.
*/
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t offset =
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6fdbd383f676..283caf1070c9 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3330,6 +3330,11 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
}
}
+static resource_size_t pnv_pci_default_alignment(void)
+{
+ return PAGE_SIZE;
+}
+
#ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
int resno)
@@ -3863,6 +3868,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
hose->controller_ops = pnv_pci_ioda_controller_ops;
}
+ ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
+
#ifdef CONFIG_PCI_IOV
ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 84563e39a5b8..c99ee286b69f 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -269,27 +269,6 @@ void __ref pcibios_report_status(unsigned int status_mask, int warn)
}
}
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- /*
- * I/O space can be accessed via normal processor loads and stores on
- * this platform but for now we elect not to do this and portable
- * drivers should not do this anyway.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- /*
- * Ignore write-combine; for now only return uncached mappings.
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-}
-
#ifndef CONFIG_GENERIC_IOMAP
void __iomem *__pci_ioport_map(struct pci_dev *dev,
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 644314f2b1ef..17fa69bc814d 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -66,8 +66,8 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
struct pci_dev;
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
extern void pcibios_set_master(struct pci_dev *dev);
/* Dynamic DMA mapping stuff.
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 2303635158f5..b957ca5527a3 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -42,13 +42,10 @@ static inline int pci_proc_domain(struct pci_bus *bus)
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
#define HAVE_PCI_MMAP
+#define arch_can_pci_mmap_io() 1
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
#define get_pci_unmapped_area get_fb_unmapped_area
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine);
-
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return PCI_IRQ_NONE;
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 015e55a7495d..7eceaa10836f 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -862,9 +862,9 @@ static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vm
*
* Returns a negative error code on failure, zero on success.
*/
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine)
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
{
int ret;
diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h
index 37e55d018de5..ac5acdf4c4d0 100644
--- a/arch/unicore32/include/asm/pci.h
+++ b/arch/unicore32/include/asm/pci.h
@@ -17,8 +17,7 @@
#include <mach/hardware.h> /* for PCIBIOS_MIN_* */
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */
#endif
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c
index 62137d13c6f9..1053bca1f8aa 100644
--- a/arch/unicore32/kernel/pci.c
+++ b/arch/unicore32/kernel/pci.c
@@ -356,26 +356,3 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
}
return 0;
}
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long phys;
-
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- phys = vma->vm_pgoff;
-
- /*
- * Mark this as IO
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (remap_pfn_range(vma, vma->vm_start, phys,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 1411dbed5e5e..f513cc231151 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -7,6 +7,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
+#include <asm/pat.h>
#include <asm/x86_init.h>
#ifdef __KERNEL__
@@ -102,10 +103,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine);
-
+#define arch_can_pci_mmap_wc() pat_enabled()
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#ifdef CONFIG_PCI
extern void early_quirks(void);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 6fa84d531f4f..7b4307163eac 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -406,50 +406,3 @@ void __init pcibios_resource_survey(void)
*/
ioapic_insert_resources();
}
-
-static const struct vm_operations_struct pci_mmap_ops = {
- .access = generic_access_phys,
-};
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /* I/O space cannot be accessed via normal processor loads and
- * stores on this platform.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- prot = pgprot_val(vma->vm_page_prot);
-
- /*
- * Return error if pat is not enabled and write_combine is requested.
- * Caller can followup with UC MINUS request and add a WC mtrr if there
- * is a free mtrr slot.
- */
- if (!pat_enabled() && write_combine)
- return -EINVAL;
-
- if (pat_enabled() && write_combine)
- prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
- else if (pat_enabled() || boot_cpu_data.x86 > 3)
- /*
- * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
- * To avoid attribute conflicts, request UC MINUS here
- * as well.
- */
- prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
-
- vma->vm_page_prot = __pgprot(prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- vma->vm_ops = &pci_mmap_ops;
-
- return 0;
-}
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 5d6bd932ba4e..e4f366a488d3 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -46,12 +46,9 @@ struct pci_dev;
#define PCI_DMA_BUS_IS_PHYS (1)
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP 1
+#define HAVE_PCI_MMAP 1
+#define arch_can_pci_mmap_io() 1
#endif /* __KERNEL__ */
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index b848cc3dc913..903963ee495d 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -334,25 +334,6 @@ __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
}
/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static __inline__ void
-__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- int prot = pgprot_val(vma->vm_page_prot);
-
- /* Set to write-through */
- prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
-#if 0
- if (!write_combine)
- prot |= _PAGE_WRITETHRU;
-#endif
- vma->vm_page_prot = __pgprot(prot);
-}
-
-/*
* Perform the actual remap of the pages for a PCI device mapping, as
* appropriate for this architecture. The region in the process to map
* is described by vm_start and vm_end members of VMA, the base physical
@@ -362,7 +343,8 @@ __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
*
* Returns a negative error code on failure, zero on success.
*/
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
{
@@ -372,7 +354,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
if (ret < 0)
return ret;
- __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
+ vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,vma->vm_page_prot);
diff --git a/drivers/Makefile b/drivers/Makefile
index 903b19199b69..edba1edc6654 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -14,7 +14,9 @@ obj-$(CONFIG_GENERIC_PHY) += phy/
obj-$(CONFIG_PINCTRL) += pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
obj-y += pwm/
+
obj-$(CONFIG_PCI) += pci/
+obj-$(CONFIG_PCI_ENDPOINT) += pci/endpoint/
# PCI dwc controller drivers
obj-y += pci/dwc/
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 2944353253ed..a4e8432fc2fb 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -54,6 +54,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
#define QCOM_ECAM32(seg) \
{ "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
+
QCOM_ECAM32(0),
QCOM_ECAM32(1),
QCOM_ECAM32(2),
@@ -68,6 +69,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
{ "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
{ "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
{ "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
+
HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops),
@@ -77,6 +79,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
#define THUNDER_PEM_RES(addr, node) \
DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
+
#define THUNDER_PEM_QUIRK(rev, node) \
{ "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \
@@ -90,13 +93,16 @@ static struct mcfg_fixup mcfg_quirks[] = {
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \
{ "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
- /* SoC pass2.x */
- THUNDER_PEM_QUIRK(1, 0),
- THUNDER_PEM_QUIRK(1, 1),
#define THUNDER_ECAM_QUIRK(rev, seg) \
{ "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \
&pci_thunder_ecam_ops }
+
+ /* SoC pass2.x */
+ THUNDER_PEM_QUIRK(1, 0),
+ THUNDER_PEM_QUIRK(1, 1),
+ THUNDER_ECAM_QUIRK(1, 10),
+
/* SoC pass1.x */
THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */
THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */
@@ -112,9 +118,11 @@ static struct mcfg_fixup mcfg_quirks[] = {
#define XGENE_V1_ECAM_MCFG(rev, seg) \
{"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
&xgene_v1_pcie_ecam_ops }
+
#define XGENE_V2_ECAM_MCFG(rev, seg) \
{"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
&xgene_v2_pcie_ecam_ops }
+
/* X-Gene SoC with v1 PCIe controller */
XGENE_V1_ECAM_MCFG(1, 0),
XGENE_V1_ECAM_MCFG(1, 1),
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 0f6916d2d549..39279fd630bc 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -13841,14 +13841,14 @@ static void init_chip(struct hfi1_devdata *dd)
dd_dev_info(dd, "Resetting CSRs with FLR\n");
/* do the FLR, the DC reset will remain */
- hfi1_pcie_flr(dd);
+ pcie_flr(dd->pcidev);
/* restore command and BARs */
restore_pci_variables(dd);
if (is_ax(dd)) {
dd_dev_info(dd, "Resetting CSRs with FLR\n");
- hfi1_pcie_flr(dd);
+ pcie_flr(dd->pcidev);
restore_pci_variables(dd);
}
} else {
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index f06674317abf..14063bd30c2a 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1825,7 +1825,6 @@ int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
void hfi1_pcie_cleanup(struct pci_dev *);
int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
-void hfi1_pcie_flr(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *);
void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *);
void hfi1_enable_intx(struct pci_dev *);
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index e39e01b79382..93faf86d54b6 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -240,36 +240,6 @@ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
iounmap(dd->piobase);
}
-/*
- * Do a Function Level Reset (FLR) on the device.
- * Based on static function drivers/pci/pci.c:pcie_flr().
- */
-void hfi1_pcie_flr(struct hfi1_devdata *dd)
-{
- int i;
- u16 status;
-
- /* no need to check for the capability - we know the device has it */
-
- /* wait for Transaction Pending bit to clear, at most a few ms */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dd_dev_err(dd, "Transaction Pending bit is not clearing, proceeding with reset anyway\n");
-
-clear:
- pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
- /* PCIe spec requires the function to be back within 100ms */
- msleep(100);
-}
-
static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
struct hfi1_msix_entry *hfi1_msix_entry)
{
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 39d1acb27452..2cba76e6fa3c 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -490,6 +490,13 @@ config ASPEED_LPC_CTRL
ioctl()s, the driver also provides a read/write interface to a BMC ram
region where the host LPC read/write region can be buffered.
+config PCI_ENDPOINT_TEST
+ depends on PCI
+ tristate "PCI Endpoint Test driver"
+ ---help---
+ Enable this configuration option to enable the host side test driver
+ for PCI Endpoint.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 4fb10af2ea1c..81ef3e67acc9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
new file mode 100644
index 000000000000..09c10f426b64
--- /dev/null
+++ b/drivers/misc/pci_endpoint_test.c
@@ -0,0 +1,534 @@
+/**
+ * Host side test driver to test endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/pci_regs.h>
+
+#include <uapi/linux/pcitest.h>
+
+#define DRV_MODULE_NAME "pci-endpoint-test"
+
+#define PCI_ENDPOINT_TEST_MAGIC 0x0
+
+#define PCI_ENDPOINT_TEST_COMMAND 0x4
+#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_MSI_IRQ BIT(1)
+#define MSI_NUMBER_SHIFT 2
+/* 6 bits for MSI number */
+#define COMMAND_READ BIT(8)
+#define COMMAND_WRITE BIT(9)
+#define COMMAND_COPY BIT(10)
+
+#define PCI_ENDPOINT_TEST_STATUS 0x8
+#define STATUS_READ_SUCCESS BIT(0)
+#define STATUS_READ_FAIL BIT(1)
+#define STATUS_WRITE_SUCCESS BIT(2)
+#define STATUS_WRITE_FAIL BIT(3)
+#define STATUS_COPY_SUCCESS BIT(4)
+#define STATUS_COPY_FAIL BIT(5)
+#define STATUS_IRQ_RAISED BIT(6)
+#define STATUS_SRC_ADDR_INVALID BIT(7)
+#define STATUS_DST_ADDR_INVALID BIT(8)
+
+#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
+#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
+
+#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
+#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
+
+#define PCI_ENDPOINT_TEST_SIZE 0x1c
+#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
+
+static DEFINE_IDA(pci_endpoint_test_ida);
+
+#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
+ miscdev)
+enum pci_barno {
+ BAR_0,
+ BAR_1,
+ BAR_2,
+ BAR_3,
+ BAR_4,
+ BAR_5,
+};
+
+struct pci_endpoint_test {
+ struct pci_dev *pdev;
+ void __iomem *base;
+ void __iomem *bar[6];
+ struct completion irq_raised;
+ int last_irq;
+ /* mutex to protect the ioctls */
+ struct mutex mutex;
+ struct miscdevice miscdev;
+};
+
+static int bar_size[] = { 4, 512, 1024, 16384, 131072, 1048576 };
+
+static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
+ u32 offset)
+{
+ return readl(test->base + offset);
+}
+
+static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
+ u32 offset, u32 value)
+{
+ writel(value, test->base + offset);
+}
+
+static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
+ int bar, int offset)
+{
+ return readl(test->bar[bar] + offset);
+}
+
+static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
+ int bar, u32 offset, u32 value)
+{
+ writel(value, test->bar[bar] + offset);
+}
+
+static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
+{
+ struct pci_endpoint_test *test = dev_id;
+ u32 reg;
+
+ reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+ if (reg & STATUS_IRQ_RAISED) {
+ test->last_irq = irq;
+ complete(&test->irq_raised);
+ reg &= ~STATUS_IRQ_RAISED;
+ }
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
+ reg);
+
+ return IRQ_HANDLED;
+}
+
+static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+{
+ int j;
+ u32 val;
+ int size;
+
+ if (!test->bar[barno])
+ return false;
+
+ size = bar_size[barno];
+
+ for (j = 0; j < size; j += 4)
+ pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
+
+ for (j = 0; j < size; j += 4) {
+ val = pci_endpoint_test_bar_readl(test, barno, j);
+ if (val != 0xA0A0A0A0)
+ return false;
+ }
+
+ return true;
+}
+
+static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
+{
+ u32 val;
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ COMMAND_RAISE_LEGACY_IRQ);
+ val = wait_for_completion_timeout(&test->irq_raised,
+ msecs_to_jiffies(1000));
+ if (!val)
+ return false;
+
+ return true;
+}
+
+static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
+ u8 msi_num)
+{
+ u32 val;
+ struct pci_dev *pdev = test->pdev;
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ msi_num << MSI_NUMBER_SHIFT |
+ COMMAND_RAISE_MSI_IRQ);
+ val = wait_for_completion_timeout(&test->irq_raised,
+ msecs_to_jiffies(1000));
+ if (!val)
+ return false;
+
+ if (test->last_irq - pdev->irq == msi_num - 1)
+ return true;
+
+ return false;
+}
+
+static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
+{
+ bool ret = false;
+ void *src_addr;
+ void *dst_addr;
+ dma_addr_t src_phys_addr;
+ dma_addr_t dst_phys_addr;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 src_crc32;
+ u32 dst_crc32;
+
+ src_addr = dma_alloc_coherent(dev, size, &src_phys_addr, GFP_KERNEL);
+ if (!src_addr) {
+ dev_err(dev, "failed to allocate source buffer\n");
+ ret = false;
+ goto err;
+ }
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
+ lower_32_bits(src_phys_addr));
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
+ upper_32_bits(src_phys_addr));
+
+ get_random_bytes(src_addr, size);
+ src_crc32 = crc32_le(~0, src_addr, size);
+
+ dst_addr = dma_alloc_coherent(dev, size, &dst_phys_addr, GFP_KERNEL);
+ if (!dst_addr) {
+ dev_err(dev, "failed to allocate destination address\n");
+ ret = false;
+ goto err_src_addr;
+ }
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
+ lower_32_bits(dst_phys_addr));
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
+ upper_32_bits(dst_phys_addr));
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
+ size);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
+
+ wait_for_completion(&test->irq_raised);
+
+ dst_crc32 = crc32_le(~0, dst_addr, size);
+ if (dst_crc32 == src_crc32)
+ ret = true;
+
+ dma_free_coherent(dev, size, dst_addr, dst_phys_addr);
+
+err_src_addr:
+ dma_free_coherent(dev, size, src_addr, src_phys_addr);
+
+err:
+ return ret;
+}
+
+static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
+{
+ bool ret = false;
+ u32 reg;
+ void *addr;
+ dma_addr_t phys_addr;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 crc32;
+
+ addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ if (!addr) {
+ dev_err(dev, "failed to allocate address\n");
+ ret = false;
+ goto err;
+ }
+
+ get_random_bytes(addr, size);
+
+ crc32 = crc32_le(~0, addr, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
+ crc32);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
+ lower_32_bits(phys_addr));
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
+ upper_32_bits(phys_addr));
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
+
+ wait_for_completion(&test->irq_raised);
+
+ reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+ if (reg & STATUS_READ_SUCCESS)
+ ret = true;
+
+ dma_free_coherent(dev, size, addr, phys_addr);
+
+err:
+ return ret;
+}
+
+static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
+{
+ bool ret = false;
+ void *addr;
+ dma_addr_t phys_addr;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 crc32;
+
+ addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ if (!addr) {
+ dev_err(dev, "failed to allocate destination address\n");
+ ret = false;
+ goto err;
+ }
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
+ lower_32_bits(phys_addr));
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
+ upper_32_bits(phys_addr));
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
+
+ wait_for_completion(&test->irq_raised);
+
+ crc32 = crc32_le(~0, addr, size);
+ if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
+ ret = true;
+
+ dma_free_coherent(dev, size, addr, phys_addr);
+err:
+ return ret;
+}
+
+static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ enum pci_barno bar;
+ struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
+
+ mutex_lock(&test->mutex);
+ switch (cmd) {
+ case PCITEST_BAR:
+ bar = arg;
+ if (bar < 0 || bar > 5)
+ goto ret;
+ ret = pci_endpoint_test_bar(test, bar);
+ break;
+ case PCITEST_LEGACY_IRQ:
+ ret = pci_endpoint_test_legacy_irq(test);
+ break;
+ case PCITEST_MSI:
+ ret = pci_endpoint_test_msi_irq(test, arg);
+ break;
+ case PCITEST_WRITE:
+ ret = pci_endpoint_test_write(test, arg);
+ break;
+ case PCITEST_READ:
+ ret = pci_endpoint_test_read(test, arg);
+ break;
+ case PCITEST_COPY:
+ ret = pci_endpoint_test_copy(test, arg);
+ break;
+ }
+
+ret:
+ mutex_unlock(&test->mutex);
+ return ret;
+}
+
+static const struct file_operations pci_endpoint_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = pci_endpoint_test_ioctl,
+};
+
+static int pci_endpoint_test_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int i;
+ int err;
+ int irq;
+ int id;
+ char name[20];
+ enum pci_barno bar;
+ void __iomem *base;
+ struct device *dev = &pdev->dev;
+ struct pci_endpoint_test *test;
+ struct miscdevice *misc_device;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
+ if (!test)
+ return -ENOMEM;
+
+ test->pdev = pdev;
+ init_completion(&test->irq_raised);
+ mutex_init(&test->mutex);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(dev, "Cannot obtain PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
+ if (irq < 0)
+ dev_err(dev, "failed to get MSI interrupts\n");
+
+ err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, test);
+ if (err) {
+ dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
+ goto err_disable_msi;
+ }
+
+ for (i = 1; i < irq; i++) {
+ err = devm_request_irq(dev, pdev->irq + i,
+ pci_endpoint_test_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, test);
+ if (err)
+ dev_err(dev, "failed to request IRQ %d for MSI %d\n",
+ pdev->irq + i, i + 1);
+ }
+
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ base = pci_ioremap_bar(pdev, bar);
+ if (!base) {
+ dev_err(dev, "failed to read BAR%d\n", bar);
+ WARN_ON(bar == BAR_0);
+ }
+ test->bar[bar] = base;
+ }
+
+ test->base = test->bar[0];
+ if (!test->base) {
+ dev_err(dev, "Cannot perform PCI test without BAR0\n");
+ goto err_iounmap;
+ }
+
+ pci_set_drvdata(pdev, test);
+
+ id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(dev, "unable to get id\n");
+ goto err_iounmap;
+ }
+
+ snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
+ misc_device = &test->miscdev;
+ misc_device->minor = MISC_DYNAMIC_MINOR;
+ misc_device->name = name;
+ misc_device->fops = &pci_endpoint_test_fops,
+
+ err = misc_register(misc_device);
+ if (err) {
+ dev_err(dev, "failed to register device\n");
+ goto err_ida_remove;
+ }
+
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&pci_endpoint_test_ida, id);
+
+err_iounmap:
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ if (test->bar[bar])
+ pci_iounmap(pdev, test->bar[bar]);
+ }
+
+err_disable_msi:
+ pci_disable_msi(pdev);
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void pci_endpoint_test_remove(struct pci_dev *pdev)
+{
+ int id;
+ enum pci_barno bar;
+ struct pci_endpoint_test *test = pci_get_drvdata(pdev);
+ struct miscdevice *misc_device = &test->miscdev;
+
+ if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
+ return;
+
+ misc_deregister(&test->miscdev);
+ ida_simple_remove(&pci_endpoint_test_ida, id);
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ if (test->bar[bar])
+ pci_iounmap(pdev, test->bar[bar]);
+ }
+ pci_disable_msi(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
+
+static struct pci_driver pci_endpoint_test_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = pci_endpoint_test_tbl,
+ .probe = pci_endpoint_test_probe,
+ .remove = pci_endpoint_test_remove,
+};
+module_pci_driver(pci_endpoint_test_driver);
+
+MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
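For orientation, a hedged userspace sketch of how this driver's ioctl interface might be exercised. It assumes the misc node name /dev/pci-endpoint-test.0 derived from DRV_MODULE_NAME above and that the PCITEST_* request codes used in the driver's switch statement are exported through <linux/pcitest.h>; the utilities under tools/pci/ listed in the MAINTAINERS entry above are the canonical users.

/* Hedged sketch only: run each endpoint test once and report the result.
 * Device node and header path are assumptions; the ioctl request names
 * match the switch statement in the driver above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/pcitest.h>

int main(void)
{
	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

	if (fd < 0)
		return 1;

	/* the driver returns a truth value (or a negative errno) per test */
	printf("BAR0 test:   %d\n", ioctl(fd, PCITEST_BAR, 0));
	printf("legacy IRQ:  %d\n", ioctl(fd, PCITEST_LEGACY_IRQ, 0));
	printf("MSI 1:       %d\n", ioctl(fd, PCITEST_MSI, 1));
	printf("write 1024:  %d\n", ioctl(fd, PCITEST_WRITE, 1024));
	printf("read 1024:   %d\n", ioctl(fd, PCITEST_READ, 1024));
	printf("copy 1024:   %d\n", ioctl(fd, PCITEST_COPY, 1024));

	return 0;
}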
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 22a29df1d29e..d39cba214320 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7332,18 +7332,6 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
#ifdef CONFIG_PCI_IOV
-static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
- struct pci_dev *vfdev)
-{
- if (!pci_wait_for_pending_transaction(vfdev))
- e_dev_warn("Issuing VFLR with pending transactions\n");
-
- e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
- pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
-
- msleep(100);
-}
-
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -7376,7 +7364,7 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
status_reg & PCI_STATUS_REC_MASTER_ABORT)
- ixgbe_issue_vf_flr(adapter, vfdev);
+ pcie_flr(vfdev);
}
}
@@ -10602,7 +10590,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
* VFLR. Just clean up the AER in that case.
*/
if (vfdev) {
- ixgbe_issue_vf_flr(adapter, vfdev);
+ pcie_flr(vfdev);
/* Free device reference count */
pci_dev_put(vfdev);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 56a315bd4d96..fed803232edc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -132,7 +132,6 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
struct nvme_queue {
struct device *q_dmadev;
struct nvme_dev *dev;
- char irqname[24]; /* nvme4294967295-65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
struct nvme_command __iomem *sq_cmds_io;
@@ -329,11 +328,6 @@ static unsigned int nvme_cmd_size(struct nvme_dev *dev)
nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
}
-static int nvmeq_irq(struct nvme_queue *nvmeq)
-{
- return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
-}
-
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -1078,7 +1072,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
spin_unlock_irq(&nvmeq->q_lock);
return 1;
}
- vector = nvmeq_irq(nvmeq);
+ vector = nvmeq->cq_vector;
nvmeq->dev->online_queues--;
nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -1086,7 +1080,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
- free_irq(vector, nvmeq);
+ pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
return 0;
}
@@ -1171,8 +1165,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev;
- snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
- dev->ctrl.instance, qid);
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
@@ -1195,12 +1187,16 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
static int queue_request_irq(struct nvme_queue *nvmeq)
{
- if (use_threaded_interrupts)
- return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check,
- nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq);
- else
- return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED,
- nvmeq->irqname, nvmeq);
+ struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
+ int nr = nvmeq->dev->ctrl.instance;
+
+ if (use_threaded_interrupts) {
+ return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
+ nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+ } else {
+ return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
+ NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+ }
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1557,7 +1553,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(pci_irq_vector(pdev, 0), adminq);
+ pci_free_irq(pdev, 0, adminq);
/*
* If we enable msix early due to not intx, disable it again before
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 0ee42c3e66a1..c9d4d3a7b0fe 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -285,51 +285,6 @@ parse_failed:
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
-#ifdef CONFIG_PCI_MSI
-
-static LIST_HEAD(of_pci_msi_chip_list);
-static DEFINE_MUTEX(of_pci_msi_chip_mutex);
-
-int of_pci_msi_chip_add(struct msi_controller *chip)
-{
- if (!of_property_read_bool(chip->of_node, "msi-controller"))
- return -EINVAL;
-
- mutex_lock(&of_pci_msi_chip_mutex);
- list_add(&chip->list, &of_pci_msi_chip_list);
- mutex_unlock(&of_pci_msi_chip_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_pci_msi_chip_add);
-
-void of_pci_msi_chip_remove(struct msi_controller *chip)
-{
- mutex_lock(&of_pci_msi_chip_mutex);
- list_del(&chip->list);
- mutex_unlock(&of_pci_msi_chip_mutex);
-}
-EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove);
-
-struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
-{
- struct msi_controller *c;
-
- mutex_lock(&of_pci_msi_chip_mutex);
- list_for_each_entry(c, &of_pci_msi_chip_list, list) {
- if (c->of_node == of_node) {
- mutex_unlock(&of_pci_msi_chip_mutex);
- return c;
- }
- }
- mutex_unlock(&of_pci_msi_chip_mutex);
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);
-
-#endif /* CONFIG_PCI_MSI */
-
/**
* of_pci_map_rid - Translate a requester ID through a downstream mapping.
* @np: root complex device node.
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index df141420c902..e0cacb7b8563 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -134,3 +134,5 @@ config PCI_HYPERV
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/dwc/Kconfig"
source "drivers/pci/host/Kconfig"
+source "drivers/pci/endpoint/Kconfig"
+source "drivers/pci/switch/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 8db5079f09a7..462c1f5f5546 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,7 @@
obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
- irq.o vpd.o setup-bus.o vc.o
+ irq.o vpd.o setup-bus.o vc.o mmap.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
@@ -68,3 +68,4 @@ ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
# PCI host controller drivers
obj-y += host/
+obj-y += switch/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 8b7382705bf2..74cf5fffb1e1 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -629,7 +629,7 @@ void pci_vpd_release(struct pci_dev *dev)
*
* When access is locked, any userspace reads or writes to config
* space and concurrent lock requests will sleep until access is
- * allowed via pci_cfg_access_unlocked again.
+ * allowed via pci_cfg_access_unlock() again.
*/
void pci_cfg_access_lock(struct pci_dev *dev)
{
@@ -700,7 +700,8 @@ static bool pcie_downstream_port(const struct pci_dev *dev)
int type = pci_pcie_type(dev);
return type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_DOWNSTREAM;
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE;
}
bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
@@ -890,3 +891,59 @@ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
+
+int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
+{
+ if (pci_dev_is_disconnected(dev)) {
+ *val = ~0;
+ return -ENODEV;
+ }
+ return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_byte);
+
+int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
+{
+ if (pci_dev_is_disconnected(dev)) {
+ *val = ~0;
+ return -ENODEV;
+ }
+ return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_word);
+
+int pci_read_config_dword(const struct pci_dev *dev, int where,
+ u32 *val)
+{
+ if (pci_dev_is_disconnected(dev)) {
+ *val = ~0;
+ return -ENODEV;
+ }
+ return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_dword);
+
+int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
+{
+ if (pci_dev_is_disconnected(dev))
+ return -ENODEV;
+ return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_byte);
+
+int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
+{
+ if (pci_dev_is_disconnected(dev))
+ return -ENODEV;
+ return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_word);
+
+int pci_write_config_dword(const struct pci_dev *dev, int where,
+ u32 val)
+{
+ if (pci_dev_is_disconnected(dev))
+ return -ENODEV;
+ return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_dword);
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index d2d2ba5b8a68..b7e15526d676 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -9,16 +9,44 @@ config PCIE_DW_HOST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
+config PCIE_DW_EP
+ bool
+ depends on PCI_ENDPOINT
+ select PCIE_DW
+
config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
- depends on PCI
+ depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
depends on OF && HAS_IOMEM && TI_PIPE3
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC. There
+ are two instances of PCIe controller in DRA7xx. This controller can
+ work either as EP or RC. In order to enable host-specific features
+ PCI_DRA7XX_HOST must be selected and in order to enable device-
+ specific features PCI_DRA7XX_EP must be selected. This uses
+ the Designware core.
+
+if PCI_DRA7XX
+
+config PCI_DRA7XX_HOST
+ bool "PCI DRA7xx Host Mode"
+ depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ default y
help
- Enables support for the PCIe controller in the DRA7xx SoC. There
- are two instances of PCIe controller in DRA7xx. This controller can
- act both as EP and RC. This reuses the Designware core.
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ host mode.
+
+config PCI_DRA7XX_EP
+ bool "PCI DRA7xx Endpoint Mode"
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ endpoint mode.
+
+endif
config PCIE_DW_PLAT
bool "Platform bus based DesignWare PCIe Controller"
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index a2df13c28798..f31a8596442a 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -1,7 +1,10 @@
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
-obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),)
+ obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+endif
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 0984baff07e3..8decf46cf525 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -10,12 +10,14 @@
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -24,6 +26,8 @@
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pcie-designware.h"
@@ -57,6 +61,11 @@
#define MSI BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100
+#define DEVICE_TYPE_EP 0x0
+#define DEVICE_TYPE_LEG_EP 0x1
+#define DEVICE_TYPE_RC 0x4
+
#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
#define LTSSM_EN 0x1
@@ -66,6 +75,13 @@
#define EXP_CAP_ID_OFFSET 0x70
+#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
+#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
+
+#define PCIECTRL_TI_CONF_MSI_XMT 0x012c
+#define MSI_REQ_GRANT BIT(0)
+#define MSI_VECTOR_SHIFT 7
+
struct dra7xx_pcie {
struct dw_pcie *pci;
void __iomem *base; /* DT ti_conf */
@@ -73,6 +89,11 @@ struct dra7xx_pcie {
struct phy **phy;
int link_gen;
struct irq_domain *irq_domain;
+ enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+ enum dw_pcie_device_mode mode;
};
#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
@@ -88,6 +109,11 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
+static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+ return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -96,9 +122,19 @@ static int dra7xx_pcie_link_up(struct dw_pcie *pci)
return !!(reg & LINK_UP);
}
-static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = dra7xx->pci;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg &= ~LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
struct device *dev = pci->dev;
u32 reg;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
@@ -132,34 +168,42 @@ static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
-static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
- dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
- ~INTERRUPTS);
- dra7xx_pcie_writel(dra7xx,
- PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
~LEG_EP_INTERRUPTS & ~MSI);
- dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
MSI | LEG_EP_INTERRUPTS);
}
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+ ~INTERRUPTS);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
+ INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+ dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
- pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
- pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
- pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
-
dw_pcie_setup_rc(pp);
- dra7xx_pcie_establish_link(dra7xx);
+ dra7xx_pcie_establish_link(pci);
+ dw_pcie_wait_for_link(pci);
dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
}
@@ -237,6 +281,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
struct dra7xx_pcie *dra7xx = arg;
struct dw_pcie *pci = dra7xx->pci;
struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
@@ -273,8 +318,11 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
if (reg & LINK_REQ_RST)
dev_dbg(dev, "Link Request Reset\n");
- if (reg & LINK_UP_EVT)
+ if (reg & LINK_UP_EVT) {
+ if (dra7xx->mode == DW_PCIE_EP_TYPE)
+ dw_pcie_ep_linkup(ep);
dev_dbg(dev, "Link-up state change\n");
+ }
if (reg & CFG_BME_EVT)
dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
@@ -287,6 +335,94 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+ dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+ mdelay(1);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+ u8 interrupt_num)
+{
+ u32 reg;
+
+ reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+ reg |= MSI_REQ_GRANT;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
+ enum pci_epc_irq_type type, u8 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dra7xx_pcie_raise_legacy_irq(dra7xx);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dra7xx_pcie_ep_init,
+ .raise_irq = dra7xx_pcie_raise_irq,
+};
+
+static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = dra7xx->pci;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
+ pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pci->dbi_base)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
+ pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pci->dbi_base2)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
struct platform_device *pdev)
{
@@ -329,6 +465,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
}
static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+ .start_link = dra7xx_pcie_establish_link,
+ .stop_link = dra7xx_pcie_stop_link,
.link_up = dra7xx_pcie_link_up,
};
@@ -371,6 +510,68 @@ err_phy:
return ret;
}
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+ {
+ .compatible = "ti,dra7-pcie",
+ .data = &dra7xx_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra7-pcie-ep",
+ .data = &dra7xx_pcie_ep_of_data,
+ },
+ {},
+};
+
+/*
+ * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dra7xx: the dra7xx device where the workaround should be applied
+ *
+ * Accesses to the PCIe slave port that are not 32-bit aligned will
+ * result in incorrect mapping to TLP Address and Byte enable fields.
+ * Therefore, byte and half-word accesses are not possible at byte
+ * offsets 0x1, 0x2, and 0x3.
+ *
+ * To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
+ */
+static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
+{
+ int ret;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ struct regmap *regmap;
+
+ regmap = syscon_regmap_lookup_by_phandle(np,
+ "ti,syscon-unaligned-access");
+ if (IS_ERR(regmap)) {
+ dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+ return -EINVAL;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+ 2, 0, &args);
+ if (ret) {
+ dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+ args.args[1]);
+ if (ret)
+ dev_err(dev, "failed to enable unaligned access\n");
+
+ of_node_put(args.np);
+
+ return ret;
+}
+
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
u32 reg;
@@ -388,6 +589,16 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
char name[10];
struct gpio_desc *reset;
+ const struct of_device_id *match;
+ const struct dra7xx_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+
+ match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct dra7xx_pcie_of_data *)match->data;
+ mode = (enum dw_pcie_device_mode)data->mode;
dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
if (!dra7xx)
@@ -409,13 +620,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
return -EINVAL;
}
- ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
- if (ret) {
- dev_err(dev, "failed to request irq\n");
- return ret;
- }
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
base = devm_ioremap_nocache(dev, res->start, resource_size(res));
if (!base)
@@ -473,9 +677,37 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
dra7xx->link_gen = 2;
- ret = dra7xx_add_pcie_port(dra7xx, pdev);
- if (ret < 0)
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+ DEVICE_TYPE_RC);
+ ret = dra7xx_add_pcie_port(dra7xx, pdev);
+ if (ret < 0)
+ goto err_gpio;
+ break;
+ case DW_PCIE_EP_TYPE:
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+ DEVICE_TYPE_EP);
+
+ ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
+ if (ret)
+ goto err_gpio;
+
+ ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+ if (ret < 0)
+ goto err_gpio;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+ dra7xx->mode = mode;
+
+ ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+ IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
goto err_gpio;
+ }
return 0;
@@ -496,6 +728,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
struct dw_pcie *pci = dra7xx->pci;
u32 val;
+ if (dra7xx->mode != DW_PCIE_RC_TYPE)
+ return 0;
+
/* clear MSE */
val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
val &= ~PCI_COMMAND_MEMORY;
@@ -510,6 +745,9 @@ static int dra7xx_pcie_resume(struct device *dev)
struct dw_pcie *pci = dra7xx->pci;
u32 val;
+ if (dra7xx->mode != DW_PCIE_RC_TYPE)
+ return 0;
+
/* set MSE */
val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
val |= PCI_COMMAND_MEMORY;
@@ -548,11 +786,6 @@ static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
dra7xx_pcie_resume_noirq)
};
-static const struct of_device_id of_dra7xx_pcie_match[] = {
- { .compatible = "ti,dra7-pcie", },
- {},
-};
-
static struct platform_driver dra7xx_pcie_driver = {
.driver = {
.name = "dra7-pcie",
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 44f774c12fb2..546082ad5a3f 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -521,23 +521,25 @@ static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
exynos_pcie_msi_init(ep);
}
-static u32 exynos_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val;
exynos_pcie_sideband_dbi_r_mode(ep, true);
- val = readl(pci->dbi_base + reg);
+ dw_pcie_read(base + reg, size, &val);
exynos_pcie_sideband_dbi_r_mode(ep, false);
return val;
}
-static void exynos_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
exynos_pcie_sideband_dbi_w_mode(ep, true);
- writel(val, pci->dbi_base + reg);
+ dw_pcie_write(base + reg, size, val);
exynos_pcie_sideband_dbi_w_mode(ep, false);
}
@@ -644,8 +646,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
}
static const struct dw_pcie_ops dw_pcie_ops = {
- .readl_dbi = exynos_pcie_readl_dbi,
- .writel_dbi = exynos_pcie_writel_dbi,
+ .read_dbi = exynos_pcie_read_dbi,
+ .write_dbi = exynos_pcie_write_dbi,
.link_up = exynos_pcie_link_up,
};
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 801e46cd266d..a98cba55c7f0 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
@@ -27,6 +28,7 @@
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/reset.h>
#include "pcie-designware.h"
@@ -36,6 +38,7 @@ enum imx6_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
+ IMX7D,
};
struct imx6_pcie {
@@ -47,6 +50,8 @@ struct imx6_pcie {
struct clk *pcie_inbound_axi;
struct clk *pcie;
struct regmap *iomuxc_gpr;
+ struct reset_control *pciephy_reset;
+ struct reset_control *apps_reset;
enum imx6_pcie_variants variant;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
@@ -56,6 +61,11 @@ struct imx6_pcie {
int link_gen;
};
+/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000
+#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50
+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+
/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
@@ -248,6 +258,10 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->variant) {
+ case IMX7D:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
@@ -303,11 +317,32 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
+ case IMX7D:
+ break;
}
return ret;
}
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ unsigned int retries;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
+ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
+
+ if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
+ return;
+
+ usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX);
+ }
+
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -351,6 +386,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
}
switch (imx6_pcie->variant) {
+ case IMX7D:
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+ imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+ break;
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
@@ -377,35 +416,44 @@ err_pcie_bus:
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
- if (imx6_pcie->variant == IMX6SX)
+ switch (imx6_pcie->variant) {
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ break;
+ case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
IMX6SX_GPR12_PCIE_RX_EQ_2);
+ /* FALLTHROUGH */
+ default:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ break;
+ }
- /* configure constant input signal to the pcie ctrl and phy */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
}
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
@@ -469,8 +517,11 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+ if (imx6_pcie->variant == IMX7D)
+ reset_control_deassert(imx6_pcie->apps_reset);
+ else
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret)
@@ -482,29 +533,40 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
- } else {
- dev_info(dev, "Link: Gen2 disabled\n");
- }
-
- /*
- * Start Directed Speed Change so the best possible speed both link
- * partners support can be negotiated.
- */
- tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- tmp |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
+ /*
+ * Start Directed Speed Change so the best possible
+ * speed both link partners support can be negotiated.
+ */
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+ if (imx6_pcie->variant != IMX7D) {
+ /*
+ * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+ * from the i.MX6 family when no link speed transition
+ * occurs and the link stays at Gen1: the bit is not
+ * cleared by the hardware, which would cause the
+ * following code to report a false failure.
+ */
+
+ ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
+ goto err_reset_phy;
+ }
+ }
- /* Make sure link training is finished as well! */
- ret = imx6_pcie_wait_for_link(imx6_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
+ /* Make sure link training is finished as well! */
+ ret = imx6_pcie_wait_for_link(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
+ goto err_reset_phy;
+ }
+ } else {
+ dev_info(dev, "Link: Gen2 disabled\n");
}
tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
@@ -544,8 +606,8 @@ static struct dw_pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
-static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
- struct platform_device *pdev)
+static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+ struct platform_device *pdev)
{
struct dw_pcie *pci = imx6_pcie->pci;
struct pcie_port *pp = &pci->pp;
@@ -585,7 +647,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = imx6_pcie_link_up,
};
-static int __init imx6_pcie_probe(struct platform_device *pdev)
+static int imx6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
@@ -609,10 +671,6 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->variant =
(enum imx6_pcie_variants)of_device_get_match_data(dev);
- /* Added for PCI abort handling */
- hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
- "imprecise external abort");
-
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
@@ -632,6 +690,8 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "unable to get reset gpio\n");
return ret;
}
+ } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+ return imx6_pcie->reset_gpio;
}
/* Fetch clocks */
@@ -653,13 +713,31 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->pcie);
}
- if (imx6_pcie->variant == IMX6SX) {
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_inbound_axi);
}
+ break;
+ case IMX7D:
+ imx6_pcie->pciephy_reset = devm_reset_control_get(dev,
+ "pciephy");
+ if (IS_ERR(imx6_pcie->pciephy_reset)) {
+ dev_err(dev, "Failed to get PCIEPHY reset control\n");
+ return PTR_ERR(imx6_pcie->pciephy_reset);
+ }
+
+ imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps");
+ if (IS_ERR(imx6_pcie->apps_reset)) {
+ dev_err(dev, "Failed to get PCIE APPS reset control\n");
+ return PTR_ERR(imx6_pcie->apps_reset);
+ }
+ break;
+ default:
+ break;
}
/* Grab GPR config register range */
@@ -718,6 +796,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
+ { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, },
{},
};
@@ -725,12 +804,24 @@ static struct platform_driver imx6_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
.of_match_table = imx6_pcie_of_match,
+ .suppress_bind_attrs = true,
},
+ .probe = imx6_pcie_probe,
.shutdown = imx6_pcie_shutdown,
};
static int __init imx6_pcie_init(void)
{
- return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+ /*
+ * Since probe() can be deferred, we need to make sure that
+ * hook_fault_code is not called after __init memory is freed
+ * by the kernel. Since imx6q_pcie_abort_handler() is a no-op,
+ * we can install the handler here without risking it
+ * accessing some uninitialized driver state.
+ */
+ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);
diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c
index 6b396f6b4615..8bc626e640c8 100644
--- a/drivers/pci/dwc/pci-keystone-dw.c
+++ b/drivers/pci/dwc/pci-keystone-dw.c
@@ -543,7 +543,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
/* Index 0 is the config reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index c32e392a0ae6..27d638c4e134 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -283,7 +283,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pcie->pci = pci;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
@@ -305,6 +305,7 @@ static struct platform_driver ls_pcie_driver = {
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index f110e3b24a26..495b023042b3 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -230,7 +230,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
- pci->dbi_base = devm_ioremap_resource(dev, base);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "couldn't remap regs base %p\n", base);
ret = PTR_ERR(pci->dbi_base);
@@ -262,6 +262,7 @@ static struct platform_driver armada8k_pcie_driver = {
.driver = {
.name = "armada8k-pcie",
.of_match_table = of_match_ptr(armada8k_pcie_of_match),
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 6d23683c0892..82a04acc42fd 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -78,6 +78,11 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
+static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+ return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR;
+}
+
static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
{
struct dw_pcie *pci = artpec6_pcie->pci;
@@ -142,11 +147,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
*/
dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
- pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
- pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
- pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR;
- pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-
/* setup root complex */
dw_pcie_setup_rc(pp);
@@ -235,6 +235,7 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
}
static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -294,6 +295,7 @@ static struct platform_driver artpec6_pcie_driver = {
.driver = {
.name = "artpec6-pcie",
.of_match_table = artpec6_pcie_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver(artpec6_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
new file mode 100644
index 000000000000..398406393f37
--- /dev/null
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -0,0 +1,342 @@
+/**
+ * Synopsys Designware PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+
+static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+ u32 reg;
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ dw_pcie_writel_dbi2(pci, reg, 0x0);
+ dw_pcie_writel_dbi(pci, reg, 0x0);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc,
+ struct pci_epf_header *hdr)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
+ dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+ hdr->subsys_vendor_id);
+ dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
+
+ return 0;
+}
+
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
+ dma_addr_t cpu_addr,
+ enum dw_pcie_as_type as_type)
+{
+ int ret;
+ u32 free_win;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ free_win = find_first_zero_bit(&ep->ib_window_map,
+ sizeof(ep->ib_window_map));
+ if (free_win >= ep->num_ib_windows) {
+ dev_err(pci->dev, "no free inbound window\n");
+ return -EINVAL;
+ }
+
+ ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+ as_type);
+ if (ret < 0) {
+ dev_err(pci->dev, "Failed to program IB window\n");
+ return ret;
+ }
+
+ ep->bar_to_atu[bar] = free_win;
+ set_bit(free_win, &ep->ib_window_map);
+
+ return 0;
+}
+
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+ u64 pci_addr, size_t size)
+{
+ u32 free_win;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ free_win = find_first_zero_bit(&ep->ob_window_map,
+ sizeof(ep->ob_window_map));
+ if (free_win >= ep->num_ob_windows) {
+ dev_err(pci->dev, "no free outbound window\n");
+ return -EINVAL;
+ }
+
+ dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+
+ set_bit(free_win, &ep->ob_window_map);
+ ep->outbound_addr[free_win] = phys_addr;
+
+ return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 atu_index = ep->bar_to_atu[bar];
+
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+ clear_bit(atu_index, &ep->ib_window_map);
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags)
+{
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum dw_pcie_as_type as_type;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ as_type = DW_PCIE_AS_MEM;
+ else
+ as_type = DW_PCIE_AS_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
+ if (ret)
+ return ret;
+
+ dw_pcie_writel_dbi2(pci, reg, size - 1);
+ dw_pcie_writel_dbi(pci, reg, flags);
+
+ return 0;
+}
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+ u32 *atu_index)
+{
+ u32 index;
+
+ for (index = 0; index < ep->num_ob_windows; index++) {
+ if (ep->outbound_addr[index] != addr)
+ continue;
+ *atu_index = index;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
+{
+ int ret;
+ u32 atu_index;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ ret = dw_pcie_find_index(ep, addr, &atu_index);
+ if (ret < 0)
+ return;
+
+ dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+ clear_bit(atu_index, &ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
+ u64 pci_addr, size_t size)
+{
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+ if (ret) {
+ dev_err(pci->dev, "failed to enable address\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_get_msi(struct pci_epc *epc)
+{
+ int val;
+ u32 lower_addr;
+ u32 upper_addr;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL);
+ val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+
+ lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+ upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
+
+ if (!(lower_addr || upper_addr))
+ return -EINVAL;
+
+ return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
+{
+ int val;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ val = (encode_int << MSI_CAP_MMC_SHIFT);
+ dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+
+ return 0;
+}
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
+ enum pci_epc_irq_type type, u8 interrupt_num)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->raise_irq)
+ return -EINVAL;
+
+ return ep->ops->raise_irq(ep, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ if (!pci->ops->stop_link)
+ return;
+
+ pci->ops->stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ if (!pci->ops->start_link)
+ return -EINVAL;
+
+ return pci->ops->start_link(pci);
+}
+
+static const struct pci_epc_ops epc_ops = {
+ .write_header = dw_pcie_ep_write_header,
+ .set_bar = dw_pcie_ep_set_bar,
+ .clear_bar = dw_pcie_ep_clear_bar,
+ .map_addr = dw_pcie_ep_map_addr,
+ .unmap_addr = dw_pcie_ep_unmap_addr,
+ .set_msi = dw_pcie_ep_set_msi,
+ .get_msi = dw_pcie_ep_get_msi,
+ .raise_irq = dw_pcie_ep_raise_irq,
+ .start = dw_pcie_ep_start,
+ .stop = dw_pcie_ep_stop,
+};
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_mem_exit(epc);
+}
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ void *addr;
+ enum pci_barno bar;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!pci->dbi_base || !pci->dbi_base2) {
+ dev_err(dev, "dbi_base/deb_base2 is not populated\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-ib-windows* property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-ob-windows* property\n");
+ return ret;
+ }
+
+ addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
+ GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ep->outbound_addr = addr;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
+ epc = devm_pci_epc_create(dev, &epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize address space\n");
+ return ret;
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+ dw_pcie_setup(pci);
+
+ return 0;
+}
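dw_pcie_ep_init() registers epc_ops with the endpoint core; function drivers are not expected to call these ops directly but to go through the pci_epc_* wrappers provided by the endpoint framework added in this series. The sketch below only illustrates that call flow; the sketch_* names are assumptions, and the wrapper signatures are inferred from the ops above rather than quoted from the framework.

	#include <linux/pci.h>
	#include <linux/pci-epc.h>
	#include <linux/pci-epf.h>

	/* Hypothetical bind step of an endpoint function driver (sketch only). */
	static int sketch_epf_bind(struct pci_epc *epc, struct pci_epf_header *hdr,
				   dma_addr_t bar_phys, size_t bar_size)
	{
		int ret;

		/* Reaches dw_pcie_ep_write_header() through the write_header op. */
		ret = pci_epc_write_header(epc, hdr);
		if (ret)
			return ret;

		/* Reaches dw_pcie_ep_set_bar(), which programs an inbound iATU window. */
		return pci_epc_set_bar(epc, BAR_0, bar_phys, bar_size,
				       PCI_BASE_ADDRESS_MEM_TYPE_32);
	}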
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 5ba334938b52..28ed32ba4f1b 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -56,24 +56,25 @@ static struct irq_chip dw_msi_irq_chip = {
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
- unsigned long val;
+ u32 val;
int i, pos, irq;
irqreturn_t ret = IRQ_NONE;
for (i = 0; i < MAX_MSI_CTRLS; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
- (u32 *)&val);
- if (val) {
- ret = IRQ_HANDLED;
- pos = 0;
- while ((pos = find_next_bit(&val, 32, pos)) != 32) {
- irq = irq_find_mapping(pp->irq_domain,
- i * 32 + pos);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
- i * 12, 4, 1 << pos);
- generic_handle_irq(irq);
- pos++;
- }
+ &val);
+ if (!val)
+ continue;
+
+ ret = IRQ_HANDLED;
+ pos = 0;
+ while ((pos = find_next_bit((unsigned long *) &val, 32,
+ pos)) != 32) {
+ irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+ 4, 1 << pos);
+ generic_handle_irq(irq);
+ pos++;
}
}
@@ -338,8 +339,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
if (!pci->dbi_base) {
- pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
- resource_size(pp->cfg));
+ pci->dbi_base = devm_pci_remap_cfgspace(dev,
+ pp->cfg->start,
+ resource_size(pp->cfg));
if (!pci->dbi_base) {
dev_err(dev, "error with ioremap\n");
ret = -ENOMEM;
@@ -350,8 +352,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->mem_base = pp->mem->start;
if (!pp->va_cfg0_base) {
- pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
- pp->cfg0_size);
+ pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
+ pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(dev, "error with ioremap in function\n");
ret = -ENOMEM;
@@ -360,7 +362,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
if (!pp->va_cfg1_base) {
- pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
+ pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
+ pp->cfg1_base,
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(dev, "error with ioremap\n");
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index f20d494922ab..32091b32f6e1 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -133,6 +133,7 @@ static struct platform_driver dw_plat_pcie_driver = {
.driver = {
.name = "dw-pcie",
.of_match_table = dw_plat_pcie_of_match,
+ .suppress_bind_attrs = true,
},
.probe = dw_plat_pcie_probe,
};
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 7e1fb7d6643c..0e03af279259 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -61,91 +61,253 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
{
- if (pci->ops->readl_dbi)
- return pci->ops->readl_dbi(pci, reg);
+ int ret;
+ u32 val;
- return readl(pci->dbi_base + reg);
+ if (pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, base, reg, size);
+
+ ret = dw_pcie_read(base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "read DBI address failed\n");
+
+ return val;
}
-void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
{
- if (pci->ops->writel_dbi)
- pci->ops->writel_dbi(pci, reg, val);
- else
- writel(val, pci->dbi_base + reg);
+ int ret;
+
+ if (pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "write DBI address failed\n");
}
-static u32 dw_pcie_readl_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
return dw_pcie_readl_dbi(pci, offset + reg);
}
-static void dw_pcie_writel_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
dw_pcie_writel_dbi(pci, offset + reg, val);
}
+void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u32 size)
+{
+ u32 retries, val;
+
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
+ lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
+ type);
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_ENABLE);
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_ob_unroll(pci, index,
+ PCIE_ATU_UNR_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return;
+
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+ dev_err(pci->dev, "outbound iATU is not being enabled\n");
+}
+
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
u64 cpu_addr, u64 pci_addr, u32 size)
{
u32 retries, val;
+ if (pci->ops->cpu_addr_fixup)
+ cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);
+
if (pci->iatu_unroll_enabled) {
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(pci_addr));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
- type);
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_ENABLE);
- } else {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
- PCIE_ATU_REGION_OUTBOUND | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+ dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
+ pci_addr, size);
+ return;
}
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
+ PCIE_ATU_REGION_OUTBOUND | index);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
+ lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- if (pci->iatu_unroll_enabled)
- val = dw_pcie_readl_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- else
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
-
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
if (val == PCIE_ATU_ENABLE)
return;
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "iATU is not being enabled\n");
+ dev_err(pci->dev, "outbound iATU is not being enabled\n");
+}
+
+static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+ return dw_pcie_readl_dbi(pci, offset + reg);
+}
+
+static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
+{
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+ dw_pcie_writel_dbi(pci, offset + reg, val);
+}
+
+int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar,
+ u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+ int type;
+ u32 retries, val;
+
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ switch (as_type) {
+ case DW_PCIE_AS_MEM:
+ type = PCIE_ATU_TYPE_MEM;
+ break;
+ case DW_PCIE_AS_IO:
+ type = PCIE_ATU_TYPE_IO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_ENABLE |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_ib_unroll(pci, index,
+ PCIE_ATU_UNR_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+ dev_err(pci->dev, "inbound iATU is not being enabled\n");
+
+ return -EBUSY;
+}
+
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+ u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+ int type;
+ u32 retries, val;
+
+ if (pci->iatu_unroll_enabled)
+ return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+ cpu_addr, as_type);
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
+ index);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
+
+ switch (as_type) {
+ case DW_PCIE_AS_MEM:
+ type = PCIE_ATU_TYPE_MEM;
+ break;
+ case DW_PCIE_AS_IO:
+ type = PCIE_ATU_TYPE_IO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
+ | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+ dev_err(pci->dev, "inbound iATU is not being enabled\n");
+
+ return -EBUSY;
+}
+
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+ enum dw_pcie_region_type type)
+{
+ int region;
+
+ switch (type) {
+ case DW_PCIE_REGION_INBOUND:
+ region = PCIE_ATU_REGION_INBOUND;
+ break;
+ case DW_PCIE_REGION_OUTBOUND:
+ region = PCIE_ATU_REGION_OUTBOUND;
+ break;
+ default:
+ return;
+ }
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index cd3b8713fe50..c6a840575796 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -18,6 +18,9 @@
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
@@ -89,6 +92,16 @@
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
((0x3 << 20) | ((region) << 9))
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+ ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
+
+#define MSI_MESSAGE_CONTROL 0x52
+#define MSI_CAP_MMC_SHIFT 1
+#define MSI_CAP_MME_SHIFT 4
+#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
+#define MSI_MESSAGE_ADDR_L32 0x54
+#define MSI_MESSAGE_ADDR_U32 0x58
+
/*
* Maximum number of MSI IRQs can be 256 per controller. But keep
* it 32 as of now. Probably we will never need more than 32. If needed,
@@ -99,6 +112,20 @@
struct pcie_port;
struct dw_pcie;
+struct dw_pcie_ep;
+
+enum dw_pcie_region_type {
+ DW_PCIE_REGION_UNKNOWN,
+ DW_PCIE_REGION_INBOUND,
+ DW_PCIE_REGION_OUTBOUND,
+};
+
+enum dw_pcie_device_mode {
+ DW_PCIE_UNKNOWN_TYPE,
+ DW_PCIE_EP_TYPE,
+ DW_PCIE_LEG_EP_TYPE,
+ DW_PCIE_RC_TYPE,
+};
struct dw_pcie_host_ops {
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
@@ -142,35 +169,116 @@ struct pcie_port {
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
+enum dw_pcie_as_type {
+ DW_PCIE_AS_UNKNOWN,
+ DW_PCIE_AS_MEM,
+ DW_PCIE_AS_IO,
+};
+
+struct dw_pcie_ep_ops {
+ void (*ep_init)(struct dw_pcie_ep *ep);
+ int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
+ u8 interrupt_num);
+};
+
+struct dw_pcie_ep {
+ struct pci_epc *epc;
+ struct dw_pcie_ep_ops *ops;
+ phys_addr_t phys_base;
+ size_t addr_size;
+ u8 bar_to_atu[6];
+ phys_addr_t *outbound_addr;
+ unsigned long ib_window_map;
+ unsigned long ob_window_map;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
+};
+
struct dw_pcie_ops {
- u32 (*readl_dbi)(struct dw_pcie *pcie, u32 reg);
- void (*writel_dbi)(struct dw_pcie *pcie, u32 reg, u32 val);
+ u64 (*cpu_addr_fixup)(u64 cpu_addr);
+ u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size);
+ void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int (*link_up)(struct dw_pcie *pcie);
+ int (*start_link)(struct dw_pcie *pcie);
+ void (*stop_link)(struct dw_pcie *pcie);
};
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ void __iomem *dbi_base2;
u32 num_viewport;
u8 iatu_unroll_enabled;
struct pcie_port pp;
+ struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
+#define to_dw_pcie_from_ep(endpoint) \
+ container_of((endpoint), struct dw_pcie, ep)
+
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
-u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg);
-void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val);
+u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size);
+void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
int type, u64 cpu_addr, u64 pci_addr,
u32 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+ u64 cpu_addr, enum dw_pcie_as_type as_type);
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+ enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
+static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);
+}
+
+static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
+{
+ __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2);
+}
+
+static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
+{
+ __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1);
+}
+
+static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
+}
+
#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
@@ -195,4 +303,23 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
return 0;
}
#endif
+
+#ifdef CONFIG_PCIE_DW_EP
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+#else
+static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+}
+#endif
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c
index cf9d6a9d9fd4..e51acee0ddf3 100644
--- a/drivers/pci/dwc/pcie-hisi.c
+++ b/drivers/pci/dwc/pcie-hisi.c
@@ -99,7 +99,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
return -ENOMEM;
}
- reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
if (!reg_base)
return -ENOMEM;
@@ -296,10 +296,9 @@ static int hisi_pcie_probe(struct platform_device *pdev)
}
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
- pci->dbi_base = devm_ioremap_resource(dev, reg);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
-
platform_set_drvdata(pdev, hisi_pcie);
ret = hisi_add_pcie_port(hisi_pcie, pdev);
@@ -334,6 +333,7 @@ static struct platform_driver hisi_pcie_driver = {
.driver = {
.name = "hisi-pcie",
.of_match_table = hisi_pcie_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver(hisi_pcie_driver);
@@ -360,7 +360,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
return -EINVAL;
}
- reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
if (!reg_base)
return -ENOMEM;
@@ -395,6 +395,7 @@ static struct platform_driver hisi_pcie_almost_ecam_driver = {
.driver = {
.name = "hisi-pcie-almost-ecam",
.of_match_table = hisi_pcie_almost_ecam_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver(hisi_pcie_almost_ecam_driver);
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 67eb7f5926dd..5bf23d432fdb 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -700,7 +700,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->parf);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index eaa4ea8e2ea4..8ff36b3dbbdf 100644
--- a/drivers/pci/dwc/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -273,7 +273,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
ret = PTR_ERR(pci->dbi_base);
@@ -308,6 +308,7 @@ static struct platform_driver spear13xx_pcie_driver = {
.driver = {
.name = "spear-pcie",
.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 2fee61bb6559..c228a2eb7faa 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -84,12 +84,14 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
if (!cfg->winp)
goto err_exit_malloc;
for (i = 0; i < bus_range; i++) {
- cfg->winp[i] = ioremap(cfgres->start + i * bsz, bsz);
+ cfg->winp[i] =
+ pci_remap_cfgspace(cfgres->start + i * bsz,
+ bsz);
if (!cfg->winp[i])
goto err_exit_iomap;
}
} else {
- cfg->win = ioremap(cfgres->start, bus_range * bsz);
+ cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz);
if (!cfg->win)
goto err_exit_iomap;
}
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
new file mode 100644
index 000000000000..c23f146fb5a6
--- /dev/null
+++ b/drivers/pci/endpoint/Kconfig
@@ -0,0 +1,31 @@
+#
+# PCI Endpoint Support
+#
+
+menu "PCI Endpoint"
+
+config PCI_ENDPOINT
+ bool "PCI Endpoint Support"
+ help
+ Enable this configuration option to support a configurable PCI
+ endpoint. This should be enabled if the platform has a PCI
+ controller that can operate in endpoint mode.
+
+ Enabling this option will build the endpoint library, which
+ includes the endpoint controller library and the endpoint
+ function library.
+
+ If in doubt, say "N" to disable Endpoint support.
+
+config PCI_ENDPOINT_CONFIGFS
+ bool "PCI Endpoint Configfs Support"
+ depends on PCI_ENDPOINT
+ select CONFIGFS_FS
+ help
+ This will enable the configfs entry that can be used to
+ configure the endpoint function and to bind the function
+ to an endpoint controller.
+
+source "drivers/pci/endpoint/functions/Kconfig"
+
+endmenu
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile
new file mode 100644
index 000000000000..1041f80a4645
--- /dev/null
+++ b/drivers/pci/endpoint/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for PCI Endpoint Support
+#
+
+obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o
+obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\
+ pci-epc-mem.o functions/
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
new file mode 100644
index 000000000000..175edad42d2f
--- /dev/null
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -0,0 +1,12 @@
+#
+# PCI Endpoint Functions
+#
+
+config PCI_EPF_TEST
+ tristate "PCI Endpoint Test driver"
+ depends on PCI_ENDPOINT
+ help
+ Enable this configuration option to enable the test driver
+ for the PCI endpoint framework.
+
+ If in doubt, say "N" to disable the endpoint test driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
new file mode 100644
index 000000000000..6d94a4801838
--- /dev/null
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for PCI Endpoint Functions
+#
+
+obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
new file mode 100644
index 000000000000..53fff8030337
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -0,0 +1,510 @@
+/**
+ * Test driver to test endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci_ids.h>
+#include <linux/random.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci_regs.h>
+
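+/*
+ * Layout of the command register exposed to the host through BAR0: bits 0-1
+ * select the interrupt type to raise, bits 2-7 carry the MSI vector number,
+ * and bits 8-10 select the read/write/copy operation to perform.
+ */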
+#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_MSI_IRQ BIT(1)
+#define MSI_NUMBER_SHIFT 2
+#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
+#define COMMAND_READ BIT(8)
+#define COMMAND_WRITE BIT(9)
+#define COMMAND_COPY BIT(10)
+
+#define STATUS_READ_SUCCESS BIT(0)
+#define STATUS_READ_FAIL BIT(1)
+#define STATUS_WRITE_SUCCESS BIT(2)
+#define STATUS_WRITE_FAIL BIT(3)
+#define STATUS_COPY_SUCCESS BIT(4)
+#define STATUS_COPY_FAIL BIT(5)
+#define STATUS_IRQ_RAISED BIT(6)
+#define STATUS_SRC_ADDR_INVALID BIT(7)
+#define STATUS_DST_ADDR_INVALID BIT(8)
+
+#define TIMER_RESOLUTION 1
+
+static struct workqueue_struct *kpcitest_workqueue;
+
+struct pci_epf_test {
+ void *reg[6];
+ struct pci_epf *epf;
+ struct delayed_work cmd_handler;
+};
+
+struct pci_epf_test_reg {
+ u32 magic;
+ u32 command;
+ u32 status;
+ u64 src_addr;
+ u64 dst_addr;
+ u32 size;
+ u32 checksum;
+} __packed;
+
+static struct pci_epf_header test_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static int bar_size[] = { 512, 1024, 16384, 131072, 1048576 };
+
+static int pci_epf_test_copy(struct pci_epf_test *epf_test)
+{
+ int ret;
+ void __iomem *src_addr;
+ void __iomem *dst_addr;
+ phys_addr_t src_phys_addr;
+ phys_addr_t dst_phys_addr;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
+ if (!src_addr) {
+ dev_err(dev, "failed to allocate source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "failed to map source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_src_addr;
+ }
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
+ if (!dst_addr) {
+ dev_err(dev, "failed to allocate destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err_src_map_addr;
+ }
+
+ ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "failed to map destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_dst_addr;
+ }
+
+ memcpy(dst_addr, src_addr, reg->size);
+
+ pci_epc_unmap_addr(epc, dst_phys_addr);
+
+err_dst_addr:
+ pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+
+err_src_map_addr:
+ pci_epc_unmap_addr(epc, src_phys_addr);
+
+err_src_addr:
+ pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+
+err:
+ return ret;
+}
+
+static int pci_epf_test_read(struct pci_epf_test *epf_test)
+{
+ int ret;
+ void __iomem *src_addr;
+ void *buf;
+ u32 crc32;
+ phys_addr_t phys_addr;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!src_addr) {
+ dev_err(dev, "failed to allocate address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "failed to map address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_addr;
+ }
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ memcpy(buf, src_addr, reg->size);
+
+ crc32 = crc32_le(~0, buf, reg->size);
+ if (crc32 != reg->checksum)
+ ret = -EIO;
+
+ kfree(buf);
+
+err_map_addr:
+ pci_epc_unmap_addr(epc, phys_addr);
+
+err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
+
+err:
+ return ret;
+}
+
+static int pci_epf_test_write(struct pci_epf_test *epf_test)
+{
+ int ret;
+ void __iomem *dst_addr;
+ void *buf;
+ phys_addr_t phys_addr;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!dst_addr) {
+ dev_err(dev, "failed to allocate address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "failed to map address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_addr;
+ }
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ get_random_bytes(buf, reg->size);
+ reg->checksum = crc32_le(~0, buf, reg->size);
+
+ memcpy(dst_addr, buf, reg->size);
+
+ /*
+ * Wait 1ms for the write to complete. Without this delay, an L3
+ * error is observed in the host system.
+ */
+ mdelay(1);
+
+ kfree(buf);
+
+err_map_addr:
+ pci_epc_unmap_addr(epc, phys_addr);
+
+err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
+
+err:
+ return ret;
+}
+
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
+{
+ u8 irq;
+ u8 msi_count;
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ reg->status |= STATUS_IRQ_RAISED;
+ msi_count = pci_epc_get_msi(epc);
+ irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
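+ /* fall back to a legacy interrupt if MSI is disabled or the requested vector is out of range */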
+ if (irq > msi_count || msi_count <= 0)
+ pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+ else
+ pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
+}
+
+static void pci_epf_test_cmd_handler(struct work_struct *work)
+{
+ int ret;
+ u8 irq;
+ u8 msi_count;
+ struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
+ cmd_handler.work);
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
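+ /* the host writes a command word into the BAR0 register space; this handler polls it every millisecond */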
+ if (!reg->command)
+ goto reset_handler;
+
+ if (reg->command & COMMAND_RAISE_LEGACY_IRQ) {
+ reg->status = STATUS_IRQ_RAISED;
+ pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+ goto reset_handler;
+ }
+
+ if (reg->command & COMMAND_WRITE) {
+ ret = pci_epf_test_write(epf_test);
+ if (ret)
+ reg->status |= STATUS_WRITE_FAIL;
+ else
+ reg->status |= STATUS_WRITE_SUCCESS;
+ pci_epf_test_raise_irq(epf_test);
+ goto reset_handler;
+ }
+
+ if (reg->command & COMMAND_READ) {
+ ret = pci_epf_test_read(epf_test);
+ if (!ret)
+ reg->status |= STATUS_READ_SUCCESS;
+ else
+ reg->status |= STATUS_READ_FAIL;
+ pci_epf_test_raise_irq(epf_test);
+ goto reset_handler;
+ }
+
+ if (reg->command & COMMAND_COPY) {
+ ret = pci_epf_test_copy(epf_test);
+ if (!ret)
+ reg->status |= STATUS_COPY_SUCCESS;
+ else
+ reg->status |= STATUS_COPY_FAIL;
+ pci_epf_test_raise_irq(epf_test);
+ goto reset_handler;
+ }
+
+ if (reg->command & COMMAND_RAISE_MSI_IRQ) {
+ msi_count = pci_epc_get_msi(epc);
+ irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+ if (irq > msi_count || msi_count <= 0)
+ goto reset_handler;
+ reg->status = STATUS_IRQ_RAISED;
+ pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
+ goto reset_handler;
+ }
+
+reset_handler:
+ reg->command = 0;
+
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+}
+
+static void pci_epf_test_linkup(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+}
+
+static void pci_epf_test_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ int bar;
+
+ cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epc_stop(epc);
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ if (epf_test->reg[bar]) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ pci_epc_clear_bar(epc, bar);
+ }
+ }
+}
+
+static int pci_epf_test_set_bar(struct pci_epf *epf)
+{
+ int flags;
+ int bar;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
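+ /* advertise 64-bit memory BARs when the platform uses 64-bit DMA addresses */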
+ flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+ if (sizeof(dma_addr_t) == 0x8)
+ flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ epf_bar = &epf->bar[bar];
+ ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
+ epf_bar->size, flags);
+ if (ret) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ dev_err(dev, "failed to set BAR%d\n", bar);
+ if (bar == BAR_0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_test_alloc_space(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ void *base;
+ int bar;
+
+ base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
+ BAR_0);
+ if (!base) {
+ dev_err(dev, "failed to allocated register space\n");
+ return -ENOMEM;
+ }
+ epf_test->reg[0] = base;
+
+ for (bar = BAR_1; bar <= BAR_5; bar++) {
+ base = pci_epf_alloc_space(epf, bar_size[bar - 1], bar);
+ if (!base)
+ dev_err(dev, "failed to allocate space for BAR%d\n",
+ bar);
+ epf_test->reg[bar] = base;
+ }
+
+ return 0;
+}
+
+static int pci_epf_test_bind(struct pci_epf *epf)
+{
+ int ret;
+ struct pci_epf_header *header = epf->header;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ ret = pci_epc_write_header(epc, header);
+ if (ret) {
+ dev_err(dev, "configuration header write failed\n");
+ return ret;
+ }
+
+ ret = pci_epf_test_alloc_space(epf);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_test_set_bar(epf);
+ if (ret)
+ return ret;
+
+ ret = pci_epc_set_msi(epc, epf->msi_interrupts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pci_epf_test_probe(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test;
+ struct device *dev = &epf->dev;
+
+ epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
+ if (!epf_test)
+ return -ENOMEM;
+
+ epf->header = &test_header;
+ epf_test->epf = epf;
+
+ INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
+
+ epf_set_drvdata(epf, epf_test);
+ return 0;
+}
+
+static int pci_epf_test_remove(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ kfree(epf_test);
+ return 0;
+}
+
+static struct pci_epf_ops ops = {
+ .unbind = pci_epf_test_unbind,
+ .bind = pci_epf_test_bind,
+ .linkup = pci_epf_test_linkup,
+};
+
+static const struct pci_epf_device_id pci_epf_test_ids[] = {
+ {
+ .name = "pci_epf_test",
+ },
+ {},
+};
+
+static struct pci_epf_driver test_driver = {
+ .driver.name = "pci_epf_test",
+ .probe = pci_epf_test_probe,
+ .remove = pci_epf_test_remove,
+ .id_table = pci_epf_test_ids,
+ .ops = &ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init pci_epf_test_init(void)
+{
+ int ret;
+
+ kpcitest_workqueue = alloc_workqueue("kpcitest",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ ret = pci_epf_register_driver(&test_driver);
+ if (ret) {
+ pr_err("failed to register pci epf test driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pci_epf_test_init);
+
+static void __exit pci_epf_test_exit(void)
+{
+ pci_epf_unregister_driver(&test_driver);
+}
+module_exit(pci_epf_test_exit);
+
+MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
new file mode 100644
index 000000000000..424fdd6ed1ca
--- /dev/null
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -0,0 +1,509 @@
+/**
+ * configfs to configure the PCI endpoint
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct config_group *functions_group;
+static struct config_group *controllers_group;
+
+struct pci_epf_group {
+ struct config_group group;
+ struct pci_epf *epf;
+};
+
+struct pci_epc_group {
+ struct config_group group;
+ struct pci_epc *epc;
+ bool start;
+ unsigned long function_num_map;
+};
+
+static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct pci_epf_group, group);
+}
+
+static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct pci_epc_group, group);
+}
+
+static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ int ret;
+ bool start;
+ struct pci_epc *epc;
+ struct pci_epc_group *epc_group = to_pci_epc_group(item);
+
+ epc = epc_group->epc;
+
+ ret = kstrtobool(page, &start);
+ if (ret)
+ return ret;
+
+ if (!start) {
+ pci_epc_stop(epc);
+ return len;
+ }
+
+ ret = pci_epc_start(epc);
+ if (ret) {
+ dev_err(&epc->dev, "failed to start endpoint controller\n");
+ return -EINVAL;
+ }
+
+ epc_group->start = start;
+
+ return len;
+}
+
+static ssize_t pci_epc_start_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n",
+ to_pci_epc_group(item)->start);
+}
+
+CONFIGFS_ATTR(pci_epc_, start);
+
+static struct configfs_attribute *pci_epc_attrs[] = {
+ &pci_epc_attr_start,
+ NULL,
+};
+
+static int pci_epc_epf_link(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ int ret;
+ u32 func_no = 0;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ ret = pci_epc_add_epf(epc, epf);
+ if (ret)
+ goto err_add_epf;
+
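+ /* assign the first free function number on this controller */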
+ func_no = find_first_zero_bit(&epc_group->function_num_map,
+ sizeof(epc_group->function_num_map));
+ set_bit(func_no, &epc_group->function_num_map);
+ epf->func_no = func_no;
+
+ ret = pci_epf_bind(epf);
+ if (ret)
+ goto err_epf_bind;
+
+ return 0;
+
+err_epf_bind:
+ pci_epc_remove_epf(epc, epf);
+
+err_add_epf:
+ clear_bit(func_no, &epc_group->function_num_map);
+
+ return ret;
+}
+
+static void pci_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ clear_bit(epf->func_no, &epc_group->function_num_map);
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf);
+}
+
+static struct configfs_item_operations pci_epc_item_ops = {
+ .allow_link = pci_epc_epf_link,
+ .drop_link = pci_epc_epf_unlink,
+};
+
+static struct config_item_type pci_epc_type = {
+ .ct_item_ops = &pci_epc_item_ops,
+ .ct_attrs = pci_epc_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group *pci_ep_cfs_add_epc_group(const char *name)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct config_group *group;
+ struct pci_epc_group *epc_group;
+
+ epc_group = kzalloc(sizeof(*epc_group), GFP_KERNEL);
+ if (!epc_group) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ group = &epc_group->group;
+
+ config_group_init_type_name(group, name, &pci_epc_type);
+ ret = configfs_register_group(controllers_group, group);
+ if (ret) {
+ pr_err("failed to register configfs group for %s\n", name);
+ goto err_register_group;
+ }
+
+ epc = pci_epc_get(name);
+ if (IS_ERR(epc)) {
+ ret = PTR_ERR(epc);
+ goto err_epc_get;
+ }
+
+ epc_group->epc = epc;
+
+ return group;
+
+err_epc_get:
+ configfs_unregister_group(group);
+
+err_register_group:
+ kfree(epc_group);
+
+err:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pci_ep_cfs_add_epc_group);
+
+void pci_ep_cfs_remove_epc_group(struct config_group *group)
+{
+ struct pci_epc_group *epc_group;
+
+ if (!group)
+ return;
+
+ epc_group = container_of(group, struct pci_epc_group, group);
+ pci_epc_put(epc_group->epc);
+ configfs_unregister_group(&epc_group->group);
+ kfree(epc_group);
+}
+EXPORT_SYMBOL(pci_ep_cfs_remove_epc_group);
+
+#define PCI_EPF_HEADER_R(_name) \
+static ssize_t pci_epf_##_name##_show(struct config_item *item, char *page) \
+{ \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ return sprintf(page, "0x%04x\n", epf->header->_name); \
+}
+
+#define PCI_EPF_HEADER_W_u32(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u32 val; \
+ int ret; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+#define PCI_EPF_HEADER_W_u16(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u16 val; \
+ int ret; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ ret = kstrtou16(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+#define PCI_EPF_HEADER_W_u8(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u8 val; \
+ int ret; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ ret = kstrtou8(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+static ssize_t pci_epf_msi_interrupts_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret)
+ return ret;
+
+ to_pci_epf_group(item)->epf->msi_interrupts = val;
+
+ return len;
+}
+
+static ssize_t pci_epf_msi_interrupts_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%d\n",
+ to_pci_epf_group(item)->epf->msi_interrupts);
+}
+
+PCI_EPF_HEADER_R(vendorid)
+PCI_EPF_HEADER_W_u16(vendorid)
+
+PCI_EPF_HEADER_R(deviceid)
+PCI_EPF_HEADER_W_u16(deviceid)
+
+PCI_EPF_HEADER_R(revid)
+PCI_EPF_HEADER_W_u8(revid)
+
+PCI_EPF_HEADER_R(progif_code)
+PCI_EPF_HEADER_W_u8(progif_code)
+
+PCI_EPF_HEADER_R(subclass_code)
+PCI_EPF_HEADER_W_u8(subclass_code)
+
+PCI_EPF_HEADER_R(baseclass_code)
+PCI_EPF_HEADER_W_u8(baseclass_code)
+
+PCI_EPF_HEADER_R(cache_line_size)
+PCI_EPF_HEADER_W_u8(cache_line_size)
+
+PCI_EPF_HEADER_R(subsys_vendor_id)
+PCI_EPF_HEADER_W_u16(subsys_vendor_id)
+
+PCI_EPF_HEADER_R(subsys_id)
+PCI_EPF_HEADER_W_u16(subsys_id)
+
+PCI_EPF_HEADER_R(interrupt_pin)
+PCI_EPF_HEADER_W_u8(interrupt_pin)
+
+CONFIGFS_ATTR(pci_epf_, vendorid);
+CONFIGFS_ATTR(pci_epf_, deviceid);
+CONFIGFS_ATTR(pci_epf_, revid);
+CONFIGFS_ATTR(pci_epf_, progif_code);
+CONFIGFS_ATTR(pci_epf_, subclass_code);
+CONFIGFS_ATTR(pci_epf_, baseclass_code);
+CONFIGFS_ATTR(pci_epf_, cache_line_size);
+CONFIGFS_ATTR(pci_epf_, subsys_vendor_id);
+CONFIGFS_ATTR(pci_epf_, subsys_id);
+CONFIGFS_ATTR(pci_epf_, interrupt_pin);
+CONFIGFS_ATTR(pci_epf_, msi_interrupts);
+
+static struct configfs_attribute *pci_epf_attrs[] = {
+ &pci_epf_attr_vendorid,
+ &pci_epf_attr_deviceid,
+ &pci_epf_attr_revid,
+ &pci_epf_attr_progif_code,
+ &pci_epf_attr_subclass_code,
+ &pci_epf_attr_baseclass_code,
+ &pci_epf_attr_cache_line_size,
+ &pci_epf_attr_subsys_vendor_id,
+ &pci_epf_attr_subsys_id,
+ &pci_epf_attr_interrupt_pin,
+ &pci_epf_attr_msi_interrupts,
+ NULL,
+};
+
+static void pci_epf_release(struct config_item *item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(item);
+
+ pci_epf_destroy(epf_group->epf);
+ kfree(epf_group);
+}
+
+static struct configfs_item_operations pci_epf_ops = {
+ .release = pci_epf_release,
+};
+
+static struct config_item_type pci_epf_type = {
+ .ct_item_ops = &pci_epf_ops,
+ .ct_attrs = pci_epf_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *pci_epf_make(struct config_group *group,
+ const char *name)
+{
+ struct pci_epf_group *epf_group;
+ struct pci_epf *epf;
+
+ epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
+ if (!epf_group)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
+
+ epf = pci_epf_create(group->cg_item.ci_name);
+ if (IS_ERR(epf)) {
+ pr_err("failed to create endpoint function device\n");
+ kfree(epf_group);
+ return ERR_CAST(epf);
+ }
+
+ epf_group->epf = epf;
+
+ return &epf_group->group;
+}
+
+static void pci_epf_drop(struct config_group *group, struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations pci_epf_group_ops = {
+ .make_group = &pci_epf_make,
+ .drop_item = &pci_epf_drop,
+};
+
+static struct config_item_type pci_epf_group_type = {
+ .ct_group_ops = &pci_epf_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group *pci_ep_cfs_add_epf_group(const char *name)
+{
+ struct config_group *group;
+
+ group = configfs_register_default_group(functions_group, name,
+ &pci_epf_group_type);
+ if (IS_ERR(group))
+ pr_err("failed to register configfs group for %s function\n",
+ name);
+
+ return group;
+}
+EXPORT_SYMBOL(pci_ep_cfs_add_epf_group);
+
+void pci_ep_cfs_remove_epf_group(struct config_group *group)
+{
+ if (IS_ERR_OR_NULL(group))
+ return;
+
+ configfs_unregister_default_group(group);
+}
+EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
+
+static struct config_item_type pci_functions_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type pci_controllers_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type pci_ep_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem pci_ep_cfs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "pci_ep",
+ .ci_type = &pci_ep_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(pci_ep_cfs_subsys.su_mutex),
+};
+
+static int __init pci_ep_cfs_init(void)
+{
+ int ret;
+ struct config_group *root = &pci_ep_cfs_subsys.su_group;
+
+ config_group_init(root);
+
+ ret = configfs_register_subsystem(&pci_ep_cfs_subsys);
+ if (ret) {
+ pr_err("Error %d while registering subsystem %s\n",
+ ret, root->cg_item.ci_namebuf);
+ goto err;
+ }
+
+ functions_group = configfs_register_default_group(root, "functions",
+ &pci_functions_type);
+ if (IS_ERR(functions_group)) {
+ ret = PTR_ERR(functions_group);
+ pr_err("Error %d while registering functions group\n",
+ ret);
+ goto err_functions_group;
+ }
+
+ controllers_group =
+ configfs_register_default_group(root, "controllers",
+ &pci_controllers_type);
+ if (IS_ERR(controllers_group)) {
+ ret = PTR_ERR(controllers_group);
+ pr_err("Error %d while registering controllers group\n",
+ ret);
+ goto err_controllers_group;
+ }
+
+ return 0;
+
+err_controllers_group:
+ configfs_unregister_default_group(functions_group);
+
+err_functions_group:
+ configfs_unregister_subsystem(&pci_ep_cfs_subsys);
+
+err:
+ return ret;
+}
+module_init(pci_ep_cfs_init);
+
+static void __exit pci_ep_cfs_exit(void)
+{
+ configfs_unregister_default_group(controllers_group);
+ configfs_unregister_default_group(functions_group);
+ configfs_unregister_subsystem(&pci_ep_cfs_subsys);
+}
+module_exit(pci_ep_cfs_exit);
+
+MODULE_DESCRIPTION("PCI EP CONFIGFS");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
new file mode 100644
index 000000000000..caa7be10e473
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -0,0 +1,580 @@
+/**
+ * PCI Endpoint *Controller* (EPC) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct class *pci_epc_class;
+
+static void devm_pci_epc_release(struct device *dev, void *res)
+{
+ struct pci_epc *epc = *(struct pci_epc **)res;
+
+ pci_epc_destroy(epc);
+}
+
+static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
+{
+ struct pci_epc **epc = res;
+
+ return *epc == match_data;
+}
+
+/**
+ * pci_epc_put() - release the PCI endpoint controller
+ * @epc: epc returned by pci_epc_get()
+ *
+ * release the refcount the caller obtained by invoking pci_epc_get()
+ */
+void pci_epc_put(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ module_put(epc->ops->owner);
+ put_device(&epc->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epc_put);
+
+/**
+ * pci_epc_get() - get the PCI endpoint controller
+ * @epc_name: device name of the endpoint controller
+ *
+ * Invoke to get struct pci_epc * corresponding to the device name of the
+ * endpoint controller
+ */
+struct pci_epc *pci_epc_get(const char *epc_name)
+{
+ int ret = -EINVAL;
+ struct pci_epc *epc;
+ struct device *dev;
+ struct class_dev_iter iter;
+
+ class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ if (strcmp(epc_name, dev_name(dev)))
+ continue;
+
+ epc = to_pci_epc(dev);
+ if (!try_module_get(epc->ops->owner)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ class_dev_iter_exit(&iter);
+ get_device(&epc->dev);
+ return epc;
+ }
+
+err:
+ class_dev_iter_exit(&iter);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get);
+
+/**
+ * pci_epc_stop() - stop the PCI link
+ * @epc: the EPC device whose link has to be stopped
+ *
+ * Invoke to stop the PCI link
+ */
+void pci_epc_stop(struct pci_epc *epc)
+{
+ unsigned long flags;
+
+ if (IS_ERR(epc) || !epc->ops->stop)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ epc->ops->stop(epc);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_stop);
+
+/**
+ * pci_epc_start() - start the PCI link
+ * @epc: the EPC device whose link has to be started
+ *
+ * Invoke to start the PCI link
+ */
+int pci_epc_start(struct pci_epc *epc)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->start)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->start(epc);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_start);
+
+/**
+ * pci_epc_raise_irq() - interrupt the host system
+ * @epc: the EPC device which has to interrupt the host
+ * @type: specify the type of interrupt; legacy or MSI
+ * @interrupt_num: the MSI interrupt number
+ *
+ * Invoke to raise an MSI or legacy interrupt
+ */
+int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+ u8 interrupt_num)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->raise_irq)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->raise_irq(epc, type, interrupt_num);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
+
+/**
+ * pci_epc_get_msi() - get the number of MSI interrupts allocated
+ * @epc: the EPC device whose allocated MSI interrupt count is requested
+ *
+ * Invoke to get the number of MSI interrupts allocated by the RC
+ */
+int pci_epc_get_msi(struct pci_epc *epc)
+{
+ int interrupt;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return 0;
+
+ if (!epc->ops->get_msi)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ interrupt = epc->ops->get_msi(epc);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ if (interrupt < 0)
+ return 0;
+
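+ /* the controller returns the log2-encoded Multiple Message Enable value; decode it to a vector count */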
+ interrupt = 1 << interrupt;
+
+ return interrupt;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_msi);
+
+/**
+ * pci_epc_set_msi() - set the number of MSI interrupts required
+ * @epc: the EPC device on which MSI has to be configured
+ * @interrupts: number of MSI interrupts required by the EPF
+ *
+ * Invoke to set the required number of MSI interrupts.
+ */
+int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
+{
+ int ret;
+ u8 encode_int;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->set_msi)
+ return 0;
+
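+ /* the MSI capability stores the requested vector count log2-encoded */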
+ encode_int = order_base_2(interrupts);
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->set_msi(epc, encode_int);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_msi);
+
+/**
+ * pci_epc_unmap_addr() - unmap CPU address from PCI address
+ * @epc: the EPC device on which address is allocated
+ * @phys_addr: physical address of the local system
+ *
+ * Invoke to unmap the CPU address from PCI address.
+ */
+void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
+{
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return;
+
+ if (!epc->ops->unmap_addr)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ epc->ops->unmap_addr(epc, phys_addr);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
+
+/**
+ * pci_epc_map_addr() - map CPU address to PCI address
+ * @epc: the EPC device on which address is allocated
+ * @phys_addr: physical address of the local system
+ * @pci_addr: PCI address to which the physical address should be mapped
+ * @size: the size of the allocation
+ *
+ * Invoke to map CPU address with PCI address.
+ */
+int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ u64 pci_addr, size_t size)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->map_addr)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_addr);
+
+/**
+ * pci_epc_clear_bar() - reset the BAR
+ * @epc: the EPC device for which the BAR has to be cleared
+ * @bar: the BAR number that has to be reset
+ *
+ * Invoke to reset the BAR of the endpoint device.
+ */
+void pci_epc_clear_bar(struct pci_epc *epc, int bar)
+{
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return;
+
+ if (!epc->ops->clear_bar)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ epc->ops->clear_bar(epc, bar);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+
+/**
+ * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
+ * @epc: the EPC device on which BAR has to be configured
+ * @bar: the BAR number that has to be configured
+ * @bar_phys: physical address of the memory that backs the BAR
+ * @size: the size of the addr space
+ * @flags: specify memory allocation/IO allocation/32-bit address/64-bit address
+ *
+ * Invoke to configure the BAR of the endpoint device.
+ */
+int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->set_bar)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, irq_flags);
+ ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
+ spin_unlock_irqrestore(&epc->lock, irq_flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_bar);
+
+/**
+ * pci_epc_write_header() - write standard configuration header
+ * @epc: the EPC device to which the configuration header should be written
+ * @header: standard configuration header fields
+ *
+ * Invoke to write the configuration header to the endpoint controller. Every
+ * endpoint controller will have a dedicated location to which the standard
+ * configuration header would be written. The callback function should write
+ * the header fields to this dedicated location.
+ */
+int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->write_header)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->write_header(epc, header);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_write_header);
+
+/**
+ * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
+ * @epc: the EPC device to which the endpoint function should be added
+ * @epf: the endpoint function to be added
+ *
+ * A PCI endpoint device can have one or more functions. In the case of PCIe,
+ * the specification allows up to 8 PCIe endpoint functions. Invoke
+ * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
+ */
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
+{
+ unsigned long flags;
+
+ if (epf->epc)
+ return -EBUSY;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (epf->func_no > epc->max_functions - 1)
+ return -EINVAL;
+
+ epf->epc = epc;
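+ /* the function device inherits the DMA capabilities of its controller */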
+ dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask);
+ epf->dev.dma_mask = epc->dev.dma_mask;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ list_add_tail(&epf->list, &epc->pci_epf);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_add_epf);
+
+/**
+ * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
+ * @epc: the EPC device from which the endpoint function should be removed
+ * @epf: the endpoint function to be removed
+ *
+ * Invoke to remove PCI endpoint function from the endpoint controller.
+ */
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
+{
+ unsigned long flags;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ list_del(&epf->list);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
+
+/**
+ * pci_epc_linkup() - Notify the EPF device that EPC device has established a
+ * connection with the Root Complex.
+ * @epc: the EPC device which has established link with the host
+ *
+ * Invoke to notify the EPF devices that the EPC device has established a
+ * connection with the Root Complex.
+ */
+void pci_epc_linkup(struct pci_epc *epc)
+{
+ unsigned long flags;
+ struct pci_epf *epf;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ pci_epf_linkup(epf);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkup);
+
+/**
+ * pci_epc_destroy() - destroy the EPC device
+ * @epc: the EPC device that has to be destroyed
+ *
+ * Invoke to destroy the PCI EPC device
+ */
+void pci_epc_destroy(struct pci_epc *epc)
+{
+ pci_ep_cfs_remove_epc_group(epc->group);
+ device_unregister(&epc->dev);
+ kfree(epc);
+}
+EXPORT_SYMBOL_GPL(pci_epc_destroy);
+
+/**
+ * devm_pci_epc_destroy() - destroy the EPC device
+ * @dev: device that wants to destroy the EPC
+ * @epc: the EPC device that has to be destroyed
+ *
+ * Invoke to destroy the devres associated with this
+ * pci_epc and destroy the EPC device.
+ */
+void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
+{
+ int r;
+
+ r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
+ epc);
+ dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
+}
+EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
+
+/**
+ * __pci_epc_create() - create a new endpoint controller (EPC) device
+ * @dev: device that is creating the new EPC
+ * @ops: function pointers for performing EPC operations
+ * @owner: the owner of the module that creates the EPC device
+ *
+ * Invoke to create a new EPC device and add it to pci_epc class.
+ */
+struct pci_epc *
+__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner)
+{
+ int ret;
+ struct pci_epc *epc;
+
+ if (WARN_ON(!dev)) {
+ ret = -EINVAL;
+ goto err_ret;
+ }
+
+ epc = kzalloc(sizeof(*epc), GFP_KERNEL);
+ if (!epc) {
+ ret = -ENOMEM;
+ goto err_ret;
+ }
+
+ spin_lock_init(&epc->lock);
+ INIT_LIST_HEAD(&epc->pci_epf);
+
+ device_initialize(&epc->dev);
+ dma_set_coherent_mask(&epc->dev, dev->coherent_dma_mask);
+ epc->dev.class = pci_epc_class;
+ epc->dev.dma_mask = dev->dma_mask;
+ epc->ops = ops;
+
+ ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
+ if (ret)
+ goto put_dev;
+
+ ret = device_add(&epc->dev);
+ if (ret)
+ goto put_dev;
+
+ epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));
+
+ return epc;
+
+put_dev:
+ put_device(&epc->dev);
+ kfree(epc);
+
+err_ret:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(__pci_epc_create);
+
+/**
+ * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
+ * @dev: device that is creating the new EPC
+ * @ops: function pointers for performing EPC operations
+ * @owner: the owner of the module that creates the EPC device
+ *
+ * Invoke to create a new EPC device and add it to pci_epc class.
+ * While at that, it also associates the device with the pci_epc using devres.
+ * On driver detach, release function is invoked on the devres data,
+ * then, devres data is freed.
+ */
+struct pci_epc *
+__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner)
+{
+ struct pci_epc **ptr, *epc;
+
+ ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ epc = __pci_epc_create(dev, ops, owner);
+ if (!IS_ERR(epc)) {
+ *ptr = epc;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return epc;
+}
+EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
+
+static int __init pci_epc_init(void)
+{
+ pci_epc_class = class_create(THIS_MODULE, "pci_epc");
+ if (IS_ERR(pci_epc_class)) {
+ pr_err("failed to create pci epc class --> %ld\n",
+ PTR_ERR(pci_epc_class));
+ return PTR_ERR(pci_epc_class);
+ }
+
+ return 0;
+}
+module_init(pci_epc_init);
+
+static void __exit pci_epc_exit(void)
+{
+ class_destroy(pci_epc_class);
+}
+module_exit(pci_epc_exit);
+
+MODULE_DESCRIPTION("PCI EPC Library");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
new file mode 100644
index 000000000000..3a94cc1caf22
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -0,0 +1,143 @@
+/**
+ * PCI Endpoint *Controller* Address Space Management
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+
+/**
+ * pci_epc_mem_init() - initialize the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_init
+ * @phys_base: the physical address of the base
+ * @size: the size of the address space
+ *
+ * Invoke to initialize the pci_epc_mem structure used by the
+ * endpoint functions to allocate mapped PCI address.
+ */
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size)
+{
+ int ret;
+ struct pci_epc_mem *mem;
+ unsigned long *bitmap;
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!bitmap) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ mem->bitmap = bitmap;
+ mem->phys_base = phys_base;
+ mem->pages = pages;
+ mem->size = size;
+
+ epc->mem = mem;
+
+ return 0;
+
+err_mem:
+ kfree(mem);
+
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_init);
+
+/**
+ * pci_epc_mem_exit() - cleanup the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_exit
+ *
+ * Invoke to cleanup the pci_epc_mem structure allocated in
+ * pci_epc_mem_init().
+ */
+void pci_epc_mem_exit(struct pci_epc *epc)
+{
+ struct pci_epc_mem *mem = epc->mem;
+
+ epc->mem = NULL;
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
+
+/**
+ * pci_epc_mem_alloc_addr() - allocate memory address from EPC addr space
+ * @epc: the EPC device on which memory has to be allocated
+ * @phys_addr: populate the allocated physical address here
+ * @size: the size of the address space that has to be allocated
+ *
+ * Invoke to allocate memory address from the EPC address space. This
+ * is usually done to map the remote RC address into the local system.
+ */
+void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+ phys_addr_t *phys_addr, size_t size)
+{
+ int pageno;
+ void __iomem *virt_addr;
+ struct pci_epc_mem *mem = epc->mem;
+ int order = get_order(size);
+
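+ /* reserve a power-of-two aligned run of pages in the EPC window and ioremap it */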
+ pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
+ if (pageno < 0)
+ return NULL;
+
+ *phys_addr = mem->phys_base + (pageno << PAGE_SHIFT);
+ virt_addr = ioremap(*phys_addr, size);
+ if (!virt_addr)
+ bitmap_release_region(mem->bitmap, pageno, order);
+
+ return virt_addr;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
+
+/**
+ * pci_epc_mem_free_addr() - free the allocated memory address
+ * @epc: the EPC device on which memory was allocated
+ * @phys_addr: the allocated physical address
+ * @virt_addr: virtual address of the allocated mem space
+ * @size: the size of the allocated address space
+ *
+ * Invoke to free the memory allocated using pci_epc_mem_alloc_addr.
+ */
+void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ void __iomem *virt_addr, size_t size)
+{
+ int pageno;
+ int order = get_order(size);
+ struct pci_epc_mem *mem = epc->mem;
+
+ iounmap(virt_addr);
+ pageno = (phys_addr - mem->phys_base) >> PAGE_SHIFT;
+ bitmap_release_region(mem->bitmap, pageno, order);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
+
+MODULE_DESCRIPTION("PCI EPC Address Space Management");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
new file mode 100644
index 000000000000..6877d6a5bcc9
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -0,0 +1,359 @@
+/**
+ * PCI Endpoint *Function* (EPF) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct bus_type pci_epf_bus_type;
+static struct device_type pci_epf_type;
+
+/**
+ * pci_epf_linkup() - Notify the function driver that EPC device has
+ * established a connection with the Root Complex.
+ * @epf: the EPF device bound to the EPC device which has established
+ * the connection with the host
+ *
+ * Invoke to notify the function driver that EPC device has established
+ * a connection with the Root Complex.
+ */
+void pci_epf_linkup(struct pci_epf *epf)
+{
+ if (!epf->driver) {
+ dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ return;
+ }
+
+ epf->driver->ops->linkup(epf);
+}
+EXPORT_SYMBOL_GPL(pci_epf_linkup);
+
+/**
+ * pci_epf_unbind() - Notify the function driver that the binding between the
+ * EPF device and EPC device has been lost
+ * @epf: the EPF device which has lost the binding with the EPC device
+ *
+ * Invoke to notify the function driver that the binding between the EPF device
+ * and EPC device has been lost.
+ */
+void pci_epf_unbind(struct pci_epf *epf)
+{
+ if (!epf->driver) {
+ dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ return;
+ }
+
+ epf->driver->ops->unbind(epf);
+ module_put(epf->driver->owner);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unbind);
+
+/**
+ * pci_epf_bind() - Notify the function driver that the EPF device has been
+ * bound to an EPC device
+ * @epf: the EPF device which has been bound to the EPC device
+ *
+ * Invoke to notify the function driver that it has been bound to an EPC device
+ */
+int pci_epf_bind(struct pci_epf *epf)
+{
+ if (!epf->driver) {
+ dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ return -EINVAL;
+ }
+
+ if (!try_module_get(epf->driver->owner))
+ return -EAGAIN;
+
+ return epf->driver->ops->bind(epf);
+}
+EXPORT_SYMBOL_GPL(pci_epf_bind);
+
+/**
+ * pci_epf_free_space() - free the allocated PCI EPF register space
+ * @epf: the EPF device from which the register space was allocated
+ * @addr: the virtual address of the PCI EPF register space
+ * @bar: the BAR number corresponding to the register space
+ *
+ * Invoke to free the allocated PCI EPF register space.
+ */
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
+{
+ struct device *dev = &epf->dev;
+
+ if (!addr)
+ return;
+
+ dma_free_coherent(dev, epf->bar[bar].size, addr,
+ epf->bar[bar].phys_addr);
+
+ epf->bar[bar].phys_addr = 0;
+ epf->bar[bar].size = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_free_space);
+
+/**
+ * pci_epf_alloc_space() - allocate memory for the PCI EPF register space
+ * @epf: the EPF device for which the register space is allocated
+ * @size: the size of the memory that has to be allocated
+ * @bar: the BAR number corresponding to the allocated register space
+ *
+ * Invoke to allocate memory for the PCI EPF register space.
+ */
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
+{
+ void *space;
+ struct device *dev = &epf->dev;
+ dma_addr_t phys_addr;
+
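+ /* BARs decode power-of-two sized regions; round the request up and enforce a 128-byte minimum */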
+ if (size < 128)
+ size = 128;
+ size = roundup_pow_of_two(size);
+
+ space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ if (!space) {
+ dev_err(dev, "failed to allocate mem space\n");
+ return NULL;
+ }
+
+ epf->bar[bar].phys_addr = phys_addr;
+ epf->bar[bar].size = size;
+
+ return space;
+}
+EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+
+/**
+ * pci_epf_unregister_driver() - unregister the PCI EPF driver
+ * @driver: the PCI EPF driver that has to be unregistered
+ *
+ * Invoke to unregister the PCI EPF driver.
+ */
+void pci_epf_unregister_driver(struct pci_epf_driver *driver)
+{
+ pci_ep_cfs_remove_epf_group(driver->group);
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+
+/**
+ * __pci_epf_register_driver() - register a new PCI EPF driver
+ * @driver: structure representing PCI EPF driver
+ * @owner: the owner of the module that registers the PCI EPF driver
+ *
+ * Invoke to register a new PCI EPF driver.
+ */
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+ struct module *owner)
+{
+ int ret;
+
+ if (!driver->ops)
+ return -EINVAL;
+
+ if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup)
+ return -EINVAL;
+
+ driver->driver.bus = &pci_epf_bus_type;
+ driver->driver.owner = owner;
+
+ ret = driver_register(&driver->driver);
+ if (ret)
+ return ret;
+
+ driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__pci_epf_register_driver);
+
+/**
+ * pci_epf_destroy() - destroy the created PCI EPF device
+ * @epf: the PCI EPF device that has to be destroyed.
+ *
+ * Invoke to destroy the PCI EPF device created by invoking pci_epf_create().
+ */
+void pci_epf_destroy(struct pci_epf *epf)
+{
+ device_unregister(&epf->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epf_destroy);
+
+/**
+ * pci_epf_create() - create a new PCI EPF device
+ * @name: the name of the PCI EPF device. This name will be used to bind the
+ * EPF device to an EPF driver
+ *
+ * Invoke to create a new PCI EPF device by providing the name of the function
+ * device.
+ */
+struct pci_epf *pci_epf_create(const char *name)
+{
+ int ret;
+ struct pci_epf *epf;
+ struct device *dev;
+ char *func_name;
+ char *buf;
+
+ epf = kzalloc(sizeof(*epf), GFP_KERNEL);
+ if (!epf) {
+ ret = -ENOMEM;
+ goto err_ret;
+ }
+
+ buf = kstrdup(name, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto free_epf;
+ }
+
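+ /* strip any ".<instance>" suffix so the device can be matched to its function driver by name */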
+ func_name = buf;
+ buf = strchrnul(buf, '.');
+ *buf = '\0';
+
+ epf->name = kstrdup(func_name, GFP_KERNEL);
+ if (!epf->name) {
+ ret = -ENOMEM;
+ goto free_func_name;
+ }
+
+ dev = &epf->dev;
+ device_initialize(dev);
+ dev->bus = &pci_epf_bus_type;
+ dev->type = &pci_epf_type;
+
+ ret = dev_set_name(dev, "%s", name);
+ if (ret)
+ goto put_dev;
+
+ ret = device_add(dev);
+ if (ret)
+ goto put_dev;
+
+ kfree(func_name);
+ return epf;
+
+put_dev:
+ put_device(dev);
+ kfree(epf->name);
+
+free_func_name:
+ kfree(func_name);
+
+free_epf:
+ kfree(epf);
+
+err_ret:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pci_epf_create);
+
+static void pci_epf_dev_release(struct device *dev)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+
+ kfree(epf->name);
+ kfree(epf);
+}
+
+static struct device_type pci_epf_type = {
+ .release = pci_epf_dev_release,
+};
+
+static int
+pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
+{
+ while (id->name[0]) {
+ if (strcmp(epf->name, id->name) == 0)
+ return true;
+ id++;
+ }
+
+ return false;
+}
+
+static int pci_epf_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(drv);
+
+ if (driver->id_table)
+ return pci_epf_match_id(driver->id_table, epf);
+
+ return !strcmp(epf->name, drv->name);
+}
+
+static int pci_epf_device_probe(struct device *dev)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+ if (!driver->probe)
+ return -ENODEV;
+
+ epf->driver = driver;
+
+ return driver->probe(epf);
+}
+
+static int pci_epf_device_remove(struct device *dev)
+{
+ int ret;
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+ ret = driver->remove(epf);
+ epf->driver = NULL;
+
+ return ret;
+}
+
+static struct bus_type pci_epf_bus_type = {
+ .name = "pci-epf",
+ .match = pci_epf_device_match,
+ .probe = pci_epf_device_probe,
+ .remove = pci_epf_device_remove,
+};
+
+static int __init pci_epf_init(void)
+{
+ int ret;
+
+ ret = bus_register(&pci_epf_bus_type);
+ if (ret) {
+ pr_err("failed to register pci epf bus --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pci_epf_init);
+
+static void __exit pci_epf_exit(void)
+{
+ bus_unregister(&pci_epf_bus_type);
+}
+module_exit(pci_epf_exit);
+
+MODULE_DESCRIPTION("PCI EPF Library");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index f7c1d4d5c665..7f47cd5e10a5 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -27,6 +27,12 @@ config PCIE_XILINX_NWL
or End Point. The current option selection will only
support root port enabling.
+config PCI_FTPCI100
+ bool "Faraday Technology FTPCI100 PCI controller"
+ depends on OF
+ depends on ARM
+ default ARCH_GEMINI
+
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA
@@ -95,6 +101,7 @@ config PCI_VERSATILE
config PCIE_IPROC
tristate
+ select PCI_DOMAINS
help
This enables the iProc PCIe core controller support for Broadcom's
iProc family of SoCs. An appropriate bus interface driver needs
@@ -115,7 +122,6 @@ config PCIE_IPROC_BCMA
depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
select PCIE_IPROC
select BCMA
- select PCI_DOMAINS
default ARCH_BCM_5301X
help
Say Y here if you want to use the Broadcom iProc PCIe controller
@@ -164,7 +170,7 @@ config PCI_HOST_THUNDER_ECAM
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
config PCIE_ROCKCHIP
- bool "Rockchip PCIe controller"
+ tristate "Rockchip PCIe controller"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 4d3686676cc3..cab879578003 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 4fce494271cc..37d0bcd31f8a 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -200,10 +200,12 @@ struct advk_pcie {
struct list_head resources;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
- struct msi_controller msi;
struct irq_domain *msi_domain;
+ struct irq_domain *msi_inner_domain;
+ struct irq_chip msi_bottom_irq_chip;
struct irq_chip msi_irq_chip;
- DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM);
+ struct msi_domain_info msi_domain_info;
+ DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
@@ -545,94 +547,64 @@ static struct pci_ops advk_pcie_ops = {
.write = advk_pcie_wr_conf,
};
-static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
+static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
{
- int hwirq;
+ struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
- mutex_lock(&pcie->msi_used_lock);
- hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM);
- if (hwirq >= MSI_IRQ_NUM)
- hwirq = -ENOSPC;
- else
- set_bit(hwirq, pcie->msi_irq_in_use);
- mutex_unlock(&pcie->msi_used_lock);
-
- return hwirq;
+ msg->address_lo = lower_32_bits(msi_msg);
+ msg->address_hi = upper_32_bits(msi_msg);
+ msg->data = data->irq;
}
-static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
+static int advk_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- struct device *dev = &pcie->pdev->dev;
-
- mutex_lock(&pcie->msi_used_lock);
- if (!test_bit(hwirq, pcie->msi_irq_in_use))
- dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
- else
- clear_bit(hwirq, pcie->msi_irq_in_use);
- mutex_unlock(&pcie->msi_used_lock);
+ return -EINVAL;
}
-static int advk_pcie_setup_msi_irq(struct msi_controller *chip,
- struct pci_dev *pdev,
- struct msi_desc *desc)
+static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
{
- struct advk_pcie *pcie = pdev->bus->sysdata;
- struct msi_msg msg;
- int virq, hwirq;
- phys_addr_t msi_msg_phys;
-
- /* We support MSI, but not MSI-X */
- if (desc->msi_attrib.is_msix)
- return -EINVAL;
-
- hwirq = advk_pcie_alloc_msi(pcie);
- if (hwirq < 0)
- return hwirq;
+ struct advk_pcie *pcie = domain->host_data;
+ int hwirq, i;
- virq = irq_create_mapping(pcie->msi_domain, hwirq);
- if (!virq) {
- advk_pcie_free_msi(pcie, hwirq);
- return -EINVAL;
+ mutex_lock(&pcie->msi_used_lock);
+ hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
+ 0, nr_irqs, 0);
+ if (hwirq >= MSI_IRQ_NUM) {
+ mutex_unlock(&pcie->msi_used_lock);
+ return -ENOSPC;
}
- irq_set_msi_desc(virq, desc);
-
- msi_msg_phys = virt_to_phys(&pcie->msi_msg);
-
- msg.address_lo = lower_32_bits(msi_msg_phys);
- msg.address_hi = upper_32_bits(msi_msg_phys);
- msg.data = virq;
-
- pci_write_msi_msg(virq, &msg);
-
- return 0;
-}
+ bitmap_set(pcie->msi_used, hwirq, nr_irqs);
+ mutex_unlock(&pcie->msi_used_lock);
-static void advk_pcie_teardown_msi_irq(struct msi_controller *chip,
- unsigned int irq)
-{
- struct irq_data *d = irq_get_irq_data(irq);
- struct msi_desc *msi = irq_data_get_msi_desc(d);
- struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi);
- unsigned long hwirq = d->hwirq;
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &pcie->msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
- irq_dispose_mapping(irq);
- advk_pcie_free_msi(pcie, hwirq);
+ return hwirq;
}
-static int advk_pcie_msi_map(struct irq_domain *domain,
- unsigned int virq, irq_hw_number_t hw)
+static void advk_msi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct advk_pcie *pcie = domain->host_data;
- irq_set_chip_and_handler(virq, &pcie->msi_irq_chip,
- handle_simple_irq);
-
- return 0;
+ mutex_lock(&pcie->msi_used_lock);
+ bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
+ mutex_unlock(&pcie->msi_used_lock);
}
-static const struct irq_domain_ops advk_pcie_msi_irq_ops = {
- .map = advk_pcie_msi_map,
+static const struct irq_domain_ops advk_msi_domain_ops = {
+ .alloc = advk_msi_irq_domain_alloc,
+ .free = advk_msi_irq_domain_free,
};
static void advk_pcie_irq_mask(struct irq_data *d)
@@ -680,30 +652,25 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct device_node *node = dev->of_node;
- struct irq_chip *msi_irq_chip;
- struct msi_controller *msi;
+ struct irq_chip *bottom_ic, *msi_ic;
+ struct msi_domain_info *msi_di;
phys_addr_t msi_msg_phys;
- int ret;
- msi_irq_chip = &pcie->msi_irq_chip;
+ mutex_init(&pcie->msi_used_lock);
- msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi",
- dev_name(dev));
- if (!msi_irq_chip->name)
- return -ENOMEM;
+ bottom_ic = &pcie->msi_bottom_irq_chip;
- msi_irq_chip->irq_enable = pci_msi_unmask_irq;
- msi_irq_chip->irq_disable = pci_msi_mask_irq;
- msi_irq_chip->irq_mask = pci_msi_mask_irq;
- msi_irq_chip->irq_unmask = pci_msi_unmask_irq;
+ bottom_ic->name = "MSI";
+ bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
+ bottom_ic->irq_set_affinity = advk_msi_set_affinity;
- msi = &pcie->msi;
+ msi_ic = &pcie->msi_irq_chip;
+ msi_ic->name = "advk-MSI";
- msi->setup_irq = advk_pcie_setup_msi_irq;
- msi->teardown_irq = advk_pcie_teardown_msi_irq;
- msi->of_node = node;
-
- mutex_init(&pcie->msi_used_lock);
+ msi_di = &pcie->msi_domain_info;
+ msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI;
+ msi_di->chip = msi_ic;
msi_msg_phys = virt_to_phys(&pcie->msi_msg);
@@ -712,16 +679,18 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
advk_writel(pcie, upper_32_bits(msi_msg_phys),
PCIE_MSI_ADDR_HIGH_REG);
- pcie->msi_domain =
+ pcie->msi_inner_domain =
irq_domain_add_linear(NULL, MSI_IRQ_NUM,
- &advk_pcie_msi_irq_ops, pcie);
- if (!pcie->msi_domain)
+ &advk_msi_domain_ops, pcie);
+ if (!pcie->msi_inner_domain)
return -ENOMEM;
- ret = of_pci_msi_chip_add(msi);
- if (ret < 0) {
- irq_domain_remove(pcie->msi_domain);
- return ret;
+ pcie->msi_domain =
+ pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi_di, pcie->msi_inner_domain);
+ if (!pcie->msi_domain) {
+ irq_domain_remove(pcie->msi_inner_domain);
+ return -ENOMEM;
}
return 0;
@@ -729,8 +698,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
- of_pci_msi_chip_remove(&pcie->msi);
irq_domain_remove(pcie->msi_domain);
+ irq_domain_remove(pcie->msi_inner_domain);
}
static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
@@ -917,8 +886,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
- struct msi_controller *msi;
- struct device_node *msi_node;
int ret, irq;
pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
@@ -962,14 +929,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
- if (msi_node)
- msi = of_pci_find_msi_chip_by_node(msi_node);
- else
- msi = NULL;
-
- bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
- pcie, &pcie->resources, &pcie->msi);
+ bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops,
+ pcie, &pcie->resources);
if (!bus) {
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
new file mode 100644
index 000000000000..d26501c4145a
--- /dev/null
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -0,0 +1,563 @@
+/*
+ * Support for Faraday Technology FTPCI100 PCI Controller
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the out-of-tree OpenWRT patch for Cortina Gemini:
+ * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
+ * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Based on SL2312 PCI controller code
+ * Storlink (C) 2003
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+
+/*
+ * Special configuration registers directly in the first few words
+ * in I/O space.
+ */
+#define PCI_IOSIZE 0x00
+#define PCI_PROT 0x04 /* AHB protection */
+#define PCI_CTRL 0x08 /* PCI control signal */
+#define PCI_SOFTRST 0x10 /* Soft reset counter and response error enable */
+#define PCI_CONFIG 0x28 /* PCI configuration command register */
+#define PCI_DATA 0x2C
+
+#define FARADAY_PCI_PMC 0x40 /* Power management control */
+#define FARADAY_PCI_PMCSR 0x44 /* Power management status */
+#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */
+#define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */
+#define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */
+#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */
+#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */
+
+/* Bits 31..28 give INTD..INTA status */
+#define PCI_CTRL2_INTSTS_SHIFT 28
+#define PCI_CTRL2_INTMASK_CMDERR BIT(27)
+#define PCI_CTRL2_INTMASK_PARERR BIT(26)
+/* Bits 25..22 mask INTD..INTA */
+#define PCI_CTRL2_INTMASK_SHIFT 22
+#define PCI_CTRL2_INTMASK_MABRT_RX BIT(21)
+#define PCI_CTRL2_INTMASK_TABRT_RX BIT(20)
+#define PCI_CTRL2_INTMASK_TABRT_TX BIT(19)
+#define PCI_CTRL2_INTMASK_RETRY4 BIT(18)
+#define PCI_CTRL2_INTMASK_SERR_RX BIT(17)
+#define PCI_CTRL2_INTMASK_PERR_RX BIT(16)
+/* Bit 15 reserved */
+#define PCI_CTRL2_MSTPRI_REQ6 BIT(14)
+#define PCI_CTRL2_MSTPRI_REQ5 BIT(13)
+#define PCI_CTRL2_MSTPRI_REQ4 BIT(12)
+#define PCI_CTRL2_MSTPRI_REQ3 BIT(11)
+#define PCI_CTRL2_MSTPRI_REQ2 BIT(10)
+#define PCI_CTRL2_MSTPRI_REQ1 BIT(9)
+#define PCI_CTRL2_MSTPRI_REQ0 BIT(8)
+/* Bits 7..4 reserved */
+/* Bits 3..0 TRDYW */
+
+/*
+ * Memory configs:
+ * Bit 31..20 defines the PCI side memory base
+ * Bit 19..16 (4 bits) defines the size per below
+ */
+#define FARADAY_PCI_MEMBASE_MASK 0xfff00000
+#define FARADAY_PCI_MEMSIZE_1MB 0x0
+#define FARADAY_PCI_MEMSIZE_2MB 0x1
+#define FARADAY_PCI_MEMSIZE_4MB 0x2
+#define FARADAY_PCI_MEMSIZE_8MB 0x3
+#define FARADAY_PCI_MEMSIZE_16MB 0x4
+#define FARADAY_PCI_MEMSIZE_32MB 0x5
+#define FARADAY_PCI_MEMSIZE_64MB 0x6
+#define FARADAY_PCI_MEMSIZE_128MB 0x7
+#define FARADAY_PCI_MEMSIZE_256MB 0x8
+#define FARADAY_PCI_MEMSIZE_512MB 0x9
+#define FARADAY_PCI_MEMSIZE_1GB 0xa
+#define FARADAY_PCI_MEMSIZE_2GB 0xb
+#define FARADAY_PCI_MEMSIZE_SHIFT 16
+
+/*
+ * The DMA base is set to 0x0 for all memory segments; this reflects the
+ * fact that the memory of the host system starts at 0x0.
+ */
+#define FARADAY_PCI_DMA_MEM1_BASE 0x00000000
+#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000
+#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000
+
+/* Defines for PCI configuration command register */
+#define PCI_CONF_ENABLE BIT(31)
+#define PCI_CONF_WHERE(r) ((r) & 0xFC)
+#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16)
+#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11)
+#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8)
+
+/**
+ * struct faraday_pci_variant - encodes IP block differences
+ * @cascaded_irq: this host has cascaded IRQs from an interrupt controller
+ * embedded in the host bridge.
+ */
+struct faraday_pci_variant {
+ bool cascaded_irq;
+};
+
+struct faraday_pci {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *irqdomain;
+ struct pci_bus *bus;
+};
+
+static int faraday_res_to_memcfg(resource_size_t mem_base,
+ resource_size_t mem_size, u32 *val)
+{
+ u32 outval;
+
+ switch (mem_size) {
+ case SZ_1M:
+ outval = FARADAY_PCI_MEMSIZE_1MB;
+ break;
+ case SZ_2M:
+ outval = FARADAY_PCI_MEMSIZE_2MB;
+ break;
+ case SZ_4M:
+ outval = FARADAY_PCI_MEMSIZE_4MB;
+ break;
+ case SZ_8M:
+ outval = FARADAY_PCI_MEMSIZE_8MB;
+ break;
+ case SZ_16M:
+ outval = FARADAY_PCI_MEMSIZE_16MB;
+ break;
+ case SZ_32M:
+ outval = FARADAY_PCI_MEMSIZE_32MB;
+ break;
+ case SZ_64M:
+ outval = FARADAY_PCI_MEMSIZE_64MB;
+ break;
+ case SZ_128M:
+ outval = FARADAY_PCI_MEMSIZE_128MB;
+ break;
+ case SZ_256M:
+ outval = FARADAY_PCI_MEMSIZE_256MB;
+ break;
+ case SZ_512M:
+ outval = FARADAY_PCI_MEMSIZE_512MB;
+ break;
+ case SZ_1G:
+ outval = FARADAY_PCI_MEMSIZE_1GB;
+ break;
+ case SZ_2G:
+ outval = FARADAY_PCI_MEMSIZE_2GB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ outval <<= FARADAY_PCI_MEMSIZE_SHIFT;
+
+ /* This is probably not good */
+ if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK))
+ pr_warn("truncated PCI memory base\n");
+ /* Translate to bridge side address space */
+ outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK);
+ pr_debug("Translated pci base @%pap, size %pap to config %08x\n",
+ &mem_base, &mem_size, outval);
+
+ *val = outval;
+ return 0;
+}
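A brief worked example of the base/size encoding (the numbers are illustrative only):

/*
 * Editor's example: a 128 MB window with PCI-side base 0x50000000 encodes as
 *   (0x50000000 & FARADAY_PCI_MEMBASE_MASK) |
 *   (FARADAY_PCI_MEMSIZE_128MB << FARADAY_PCI_MEMSIZE_SHIFT) == 0x50070000,
 * which is the value faraday_res_to_memcfg(0x50000000, SZ_128M, &val)
 * stores in *val.
 */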
+
+static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 *value)
+{
+ struct faraday_pci *p = bus->sysdata;
+
+ writel(PCI_CONF_BUS(bus->number) |
+ PCI_CONF_DEVICE(PCI_SLOT(fn)) |
+ PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+ PCI_CONF_WHERE(config) |
+ PCI_CONF_ENABLE,
+ p->base + PCI_CONFIG);
+
+ *value = readl(p->base + PCI_DATA);
+
+ if (size == 1)
+ *value = (*value >> (8 * (config & 3))) & 0xFF;
+ else if (size == 2)
+ *value = (*value >> (8 * (config & 3))) & 0xFFFF;
+
+ dev_dbg(&bus->dev,
+ "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+
+ return PCIBIOS_SUCCESSFUL;
+}
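As a hedged illustration of the configuration cycle the macros above compose (bus/device/function numbers made up):

/*
 * Editor's example: a dword read at offset 0 of bus 1, device 2, function 0
 * writes
 *   PCI_CONF_ENABLE | PCI_CONF_BUS(1) | PCI_CONF_DEVICE(2) |
 *   PCI_CONF_FUNCTION(0) | PCI_CONF_WHERE(0) == 0x80011000
 * to PCI_CONFIG and then reads the result back from PCI_DATA, exactly as
 * faraday_pci_read_config() does above.
 */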
+
+static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 value)
+{
+ struct faraday_pci *p = bus->sysdata;
+ int ret = PCIBIOS_SUCCESSFUL;
+
+ dev_dbg(&bus->dev,
+ "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+
+ writel(PCI_CONF_BUS(bus->number) |
+ PCI_CONF_DEVICE(PCI_SLOT(fn)) |
+ PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+ PCI_CONF_WHERE(config) |
+ PCI_CONF_ENABLE,
+ p->base + PCI_CONFIG);
+
+ switch (size) {
+ case 4:
+ writel(value, p->base + PCI_DATA);
+ break;
+ case 2:
+ writew(value, p->base + PCI_DATA + (config & 3));
+ break;
+ case 1:
+ writeb(value, p->base + PCI_DATA + (config & 3));
+ break;
+ default:
+ ret = PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return ret;
+}
+
+static struct pci_ops faraday_pci_ops = {
+ .read = faraday_pci_read_config,
+ .write = faraday_pci_write_config,
+};
+
+static void faraday_pci_ack_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+ reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT);
+ faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_mask_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT)
+ | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT));
+ faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_unmask_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+ reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT);
+ faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_irq_handler(struct irq_desc *desc)
+{
+ struct faraday_pci *p = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned int irq_stat, reg, i;
+
+ faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT;
+
+ chained_irq_enter(irqchip, desc);
+
+ for (i = 0; i < 4; i++) {
+ if ((irq_stat & BIT(i)) == 0)
+ continue;
+ generic_handle_irq(irq_find_mapping(p->irqdomain, i));
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static struct irq_chip faraday_pci_irq_chip = {
+ .name = "PCI",
+ .irq_ack = faraday_pci_ack_irq,
+ .irq_mask = faraday_pci_mask_irq,
+ .irq_unmask = faraday_pci_unmask_irq,
+};
+
+static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops faraday_pci_irqdomain_ops = {
+ .map = faraday_pci_irq_map,
+};
+
+static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
+{
+ struct device_node *intc = of_get_next_child(p->dev->of_node, NULL);
+ int irq;
+ int i;
+
+ if (!intc) {
+ dev_err(p->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ /* All PCI IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (!irq) {
+ dev_err(p->dev, "failed to get parent IRQ\n");
+ return -EINVAL;
+ }
+
+ p->irqdomain = irq_domain_add_linear(intc, 4,
+ &faraday_pci_irqdomain_ops, p);
+ if (!p->irqdomain) {
+ dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
+ return -EINVAL;
+ }
+
+ irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p);
+
+ for (i = 0; i < 4; i++)
+ irq_create_mapping(p->irqdomain, i);
+
+ return 0;
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+ parser->end = parser->range + rlen / sizeof(__be32);
+
+ return 0;
+}
+
+static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = p->dev;
+ u32 confreg[3] = {
+ FARADAY_PCI_MEM1_BASE_SIZE,
+ FARADAY_PCI_MEM2_BASE_SIZE,
+ FARADAY_PCI_MEM3_BASE_SIZE,
+ };
+ int i = 0;
+ u32 val;
+
+ if (pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the dma-ranges from the device tree
+ */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.pci_addr + range.size - 1;
+ int ret;
+
+ ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val);
+ if (ret) {
+ dev_err(dev,
+ "DMA range %d: illegal MEM resource size\n", i);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
+ i + 1, range.pci_addr, end, val);
+ if (i <= 2) {
+ faraday_pci_write_config(p->bus, 0, confreg[i],
+ 4, val);
+ } else {
+ dev_err(dev, "ignore extraneous dma-range %d\n", i);
+ break;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int faraday_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct faraday_pci_variant *variant =
+ of_device_get_match_data(dev);
+ struct resource *regs;
+ resource_size_t io_base;
+ struct resource_entry *win;
+ struct faraday_pci *p;
+ struct resource *mem;
+ struct resource *io;
+ struct pci_host_bridge *host;
+ int ret;
+ u32 val;
+ LIST_HEAD(res);
+
+ host = pci_alloc_host_bridge(sizeof(*p));
+ if (!host)
+ return -ENOMEM;
+
+ host->dev.parent = dev;
+ host->ops = &faraday_pci_ops;
+ host->busnr = 0;
+ host->msi = NULL;
+ p = pci_host_bridge_priv(host);
+ host->sysdata = p;
+ p->dev = dev;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(p->base))
+ return PTR_ERR(p->base);
+
+ ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
+ &res, &io_base);
+ if (ret)
+ return ret;
+
+ ret = devm_request_pci_bus_resources(dev, &res);
+ if (ret)
+ return ret;
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "Gemini PCI I/O";
+ if (!faraday_res_to_memcfg(io->start - win->offset,
+ resource_size(io), &val)) {
+ /* setup I/O space size */
+ writel(val, p->base + PCI_IOSIZE);
+ } else {
+ dev_err(dev, "illegal IO mem size\n");
+ return -EINVAL;
+ }
+ ret = pci_remap_iospace(io, io_base);
+ if (ret) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ ret, io);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ mem->name = "Gemini PCI MEM";
+ break;
+ case IORESOURCE_BUS:
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Setup hostbridge */
+ val = readl(p->base + PCI_CTRL);
+ val |= PCI_COMMAND_IO;
+ val |= PCI_COMMAND_MEMORY;
+ val |= PCI_COMMAND_MASTER;
+ writel(val, p->base + PCI_CTRL);
+
+ list_splice_init(&res, &host->windows);
+ ret = pci_register_host_bridge(host);
+ if (ret) {
+ dev_err(dev, "failed to register host: %d\n", ret);
+ return ret;
+ }
+ p->bus = host->bus;
+
+ /* Mask and clear all interrupts */
+ faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+ if (variant->cascaded_irq) {
+ ret = faraday_pci_setup_cascaded_irq(p);
+ if (ret) {
+ dev_err(dev, "failed to setup cascaded IRQ\n");
+ return ret;
+ }
+ }
+
+ ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
+ if (ret)
+ return ret;
+
+ pci_scan_child_bus(p->bus);
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ pci_bus_assign_resources(p->bus);
+ pci_bus_add_devices(p->bus);
+ pci_free_resource_list(&res);
+
+ return 0;
+}
+
+/*
+ * We encode bridge variants here; we have at least two, so it doesn't
+ * hurt to have infrastructure to encompass future variants as well.
+ */
+const struct faraday_pci_variant faraday_regular = {
+ .cascaded_irq = true,
+};
+
+const struct faraday_pci_variant faraday_dual = {
+ .cascaded_irq = false,
+};
+
+static const struct of_device_id faraday_pci_of_match[] = {
+ {
+ .compatible = "faraday,ftpci100",
+ .data = &faraday_regular,
+ },
+ {
+ .compatible = "faraday,ftpci100-dual",
+ .data = &faraday_dual,
+ },
+ {},
+};
+
+static struct platform_driver faraday_pci_driver = {
+ .driver = {
+ .name = "ftpci100",
+ .of_match_table = of_match_ptr(faraday_pci_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = faraday_pci_probe,
+};
+builtin_platform_driver(faraday_pci_driver);
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index c05ea9d72f69..7d709a7e0aa8 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -60,6 +60,7 @@ static struct platform_driver gen_pci_driver = {
.driver = {
.name = "pci-host-generic",
.of_match_table = gen_pci_of_match,
+ .suppress_bind_attrs = true,
},
.probe = gen_pci_probe,
};
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index ada98569b78e..84936383e269 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -56,6 +56,7 @@
#include <asm/apic.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
+#include <linux/refcount.h>
#include <asm/mshyperv.h>
/*
@@ -72,6 +73,7 @@ enum {
PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
};
+#define CPU_AFFINITY_ALL -1ULL
#define PCI_CONFIG_MMIO_LENGTH 0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -350,6 +352,7 @@ enum hv_pcibus_state {
hv_pcibus_init = 0,
hv_pcibus_probed,
hv_pcibus_installed,
+ hv_pcibus_removed,
hv_pcibus_maximum
};
@@ -421,7 +424,7 @@ enum hv_pcidev_ref_reason {
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
- atomic_t refs;
+ refcount_t refs;
enum hv_pcichild_state state;
struct pci_function_description desc;
bool reported_missing;
@@ -876,7 +879,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
hv_int_desc_free(hpdev, int_desc);
}
- int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL);
+ int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
if (!int_desc)
goto drop_reference;
@@ -897,9 +900,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* processors because Hyper-V only supports 64 in a guest.
*/
affinity = irq_data_get_affinity_mask(data);
- for_each_cpu_and(cpu, affinity, cpu_online_mask) {
- int_pkt->int_desc.cpu_mask |=
- (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ if (cpumask_weight(affinity) >= 32) {
+ int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+ } else {
+ for_each_cpu_and(cpu, affinity, cpu_online_mask) {
+ int_pkt->int_desc.cpu_mask |=
+ (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ }
}
ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
@@ -1208,9 +1215,11 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
hbus->pci_bus->msi = &hbus->msi_chip;
hbus->pci_bus->msi->dev = &hbus->hdev->device;
+ pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_bus_assign_resources(hbus->pci_bus);
pci_bus_add_devices(hbus->pci_bus);
+ pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
return 0;
}
@@ -1254,13 +1263,13 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
static void get_pcichild(struct hv_pci_dev *hpdev,
enum hv_pcidev_ref_reason reason)
{
- atomic_inc(&hpdev->refs);
+ refcount_inc(&hpdev->refs);
}
static void put_pcichild(struct hv_pci_dev *hpdev,
enum hv_pcidev_ref_reason reason)
{
- if (atomic_dec_and_test(&hpdev->refs))
+ if (refcount_dec_and_test(&hpdev->refs))
kfree(hpdev);
}
@@ -1314,7 +1323,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
wait_for_completion(&comp_pkt.host_event);
hpdev->desc = *desc;
- get_pcichild(hpdev, hv_pcidev_ref_initial);
+ refcount_set(&hpdev->refs, 1);
get_pcichild(hpdev, hv_pcidev_ref_childlist);
spin_lock_irqsave(&hbus->device_list_lock, flags);
@@ -1504,13 +1513,24 @@ static void pci_devices_present_work(struct work_struct *work)
put_pcichild(hpdev, hv_pcidev_ref_initial);
}
- /* Tell the core to rescan bus because there may have been changes. */
- if (hbus->state == hv_pcibus_installed) {
+ switch (hbus->state) {
+ case hv_pcibus_installed:
+ /*
+ * Tell the core to rescan bus
+ * because there may have been changes.
+ */
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_unlock_rescan_remove();
- } else {
+ break;
+
+ case hv_pcibus_init:
+ case hv_pcibus_probed:
survey_child_resources(hbus);
+ break;
+
+ default:
+ break;
}
up(&hbus->enum_sem);
@@ -1600,8 +1620,10 @@ static void hv_eject_device_work(struct work_struct *work)
pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
wslot);
if (pdev) {
+ pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
pci_dev_put(pdev);
+ pci_unlock_rescan_remove();
}
spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
@@ -2185,6 +2207,7 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
if (!hbus)
return -ENOMEM;
+ hbus->state = hv_pcibus_init;
/*
* The PCI bus "domain" is what is called "segment" in ACPI and
@@ -2348,6 +2371,7 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_stop_root_bus(hbus->pci_bus);
pci_remove_root_bus(hbus->pci_bus);
pci_unlock_rescan_remove();
+ hbus->state = hv_pcibus_removed;
}
hv_pci_bus_exit(hdev);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index cd7d51988738..f353a6eb2f01 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -752,10 +752,11 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
* If the mask is 0xffff0000, then we only want to write
* the link control register, rather than clearing the
* RW1C bits in the link status register. Mask out the
- * status register bits.
+ * RW1C status register bits.
*/
if (mask == 0xffff0000)
- value &= 0xffff;
+ value &= ~((PCI_EXP_LNKSTA_LABS |
+ PCI_EXP_LNKSTA_LBMS) << 16);
mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
@@ -1005,22 +1006,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
return -ENOENT;
}
-static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
-{
- struct device_node *msi_node;
-
- msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
- "msi-parent", 0);
- if (!msi_node)
- return;
-
- pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
- of_node_put(msi_node);
-
- if (pcie->msi)
- pcie->msi->dev = &pcie->pdev->dev;
-}
-
#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
@@ -1298,7 +1283,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
pci_ioremap_io(i, pcie->io.start + i);
- mvebu_pcie_msi_enable(pcie);
mvebu_pcie_enable(pcie);
platform_set_drvdata(pdev, pcie);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index ed8a93f2bfb5..2618f875a600 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -380,7 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct device *dev = pcie->dev;
- pgprot_t prot = pgprot_device(PAGE_KERNEL);
+ pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
phys_addr_t cs = pcie->cs->start;
struct tegra_pcie_bus *bus;
unsigned int i;
@@ -1962,7 +1962,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->pcie = pcie;
rp->np = port;
- rp->base = devm_ioremap_resource(dev, &rp->regs);
+ rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index 3f54a43bbbea..fc0ca03f280e 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -373,6 +373,7 @@ static struct platform_driver thunder_ecam_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = thunder_ecam_of_match,
+ .suppress_bind_attrs = true,
},
.probe = thunder_ecam_probe,
};
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 6e031b522529..6e066f8b74df 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -474,6 +474,7 @@ static struct platform_driver thunder_pem_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = thunder_pem_of_match,
+ .suppress_bind_attrs = true,
},
.probe = thunder_pem_probe,
};
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 5ebee7d37ff5..9281eee2d000 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -138,7 +138,8 @@ static int versatile_pci_probe(struct platform_device *pdev)
return PTR_ERR(versatile_cfg_base[0]);
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res);
+ versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev,
+ res);
if (IS_ERR(versatile_cfg_base[1]))
return PTR_ERR(versatile_cfg_base[1]);
@@ -221,6 +222,7 @@ static struct platform_driver versatile_pci_driver = {
.driver = {
.name = "versatile-pci",
.of_match_table = versatile_pci_of_match,
+ .suppress_bind_attrs = true,
},
.probe = versatile_pci_probe,
};
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 1a6108788f6f..8cae013e7188 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -248,7 +248,7 @@ static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
dev_err(dev, "can't get CSR resource\n");
return ret;
}
- port->csr_base = devm_ioremap_resource(dev, &csr);
+ port->csr_base = devm_pci_remap_cfg_resource(dev, &csr);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
@@ -359,7 +359,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
- port->csr_base = devm_ioremap_resource(dev, res);
+ port->csr_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
@@ -697,6 +697,7 @@ static struct platform_driver xgene_pcie_driver = {
.driver = {
.name = "xgene-pcie",
.of_match_table = of_match_ptr(xgene_pcie_match_table),
+ .suppress_bind_attrs = true,
},
.probe = xgene_pcie_probe_bridge,
};
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 8c6a327ca6cd..90d2bdd94e41 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -67,7 +67,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
- pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
+ pcie->base = devm_pci_remap_cfgspace(dev, reg.start,
+ resource_size(&reg));
if (!pcie->base) {
dev_err(dev, "unable to map controller registers\n");
return -ENOMEM;
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 26ddd3535272..0e020b6e0943 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -26,6 +26,7 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
@@ -223,9 +224,11 @@ struct rockchip_pcie {
int link_gen;
struct device *dev;
struct irq_domain *irq_domain;
- u32 io_size;
int offset;
+ struct pci_bus *root_bus;
+ struct resource *io;
phys_addr_t io_bus_addr;
+ u32 io_size;
void __iomem *msg_region;
u32 mem_size;
phys_addr_t msg_bus_addr;
@@ -425,7 +428,8 @@ static struct pci_ops rockchip_pcie_ops = {
static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
{
- u32 status, curr, scale, power;
+ int curr;
+ u32 status, scale, power;
if (IS_ERR(rockchip->vpcie3v3))
return;
@@ -437,24 +441,25 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
* to the actual power supply.
*/
curr = regulator_get_current_limit(rockchip->vpcie3v3);
- if (curr > 0) {
- scale = 3; /* 0.001x */
- curr = curr / 1000; /* convert to mA */
- power = (curr * 3300) / 1000; /* milliwatt */
- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
- if (!scale) {
- dev_warn(rockchip->dev, "invalid power supply\n");
- return;
- }
- scale--;
- power = power / 10;
- }
+ if (curr <= 0)
+ return;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
}
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
}
/**
@@ -596,7 +601,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
/* Set RC's clock architecture as common clock */
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKCTL_CCC;
+ status |= PCI_EXP_LNKSTA_SLC << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Set RC's RCB to 128 */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RCB;
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
/* Enable Gen1 training */
@@ -822,7 +832,7 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
regs = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
"axi-base");
- rockchip->reg_base = devm_ioremap_resource(dev, regs);
+ rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
if (IS_ERR(rockchip->reg_base))
return PTR_ERR(rockchip->reg_base);
@@ -1359,6 +1369,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err, io);
continue;
}
+ rockchip->io = io;
break;
case IORESOURCE_MEM:
mem = win->res;
@@ -1390,6 +1401,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = -ENOMEM;
goto err_free_res;
}
+ rockchip->root_bus = bus;
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
@@ -1397,7 +1409,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
- return err;
+ return 0;
err_free_res:
pci_free_resource_list(&res);
@@ -1420,6 +1432,34 @@ err_aclk_pcie:
return err;
}
+static int rockchip_pcie_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+
+ pci_stop_root_bus(rockchip->root_bus);
+ pci_remove_root_bus(rockchip->root_bus);
+ pci_unmap_iospace(rockchip->io);
+ irq_domain_remove(rockchip->irq_domain);
+
+ phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
+
+ clk_disable_unprepare(rockchip->clk_pcie_pm);
+ clk_disable_unprepare(rockchip->hclk_pcie);
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+ clk_disable_unprepare(rockchip->aclk_pcie);
+
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
+ return 0;
+}
+
static const struct dev_pm_ops rockchip_pcie_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
rockchip_pcie_resume_noirq)
@@ -1429,6 +1469,7 @@ static const struct of_device_id rockchip_pcie_of_match[] = {
{ .compatible = "rockchip,rk3399-pcie", },
{}
};
+MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
static struct platform_driver rockchip_pcie_driver = {
.driver = {
@@ -1437,6 +1478,10 @@ static struct platform_driver rockchip_pcie_driver = {
.pm = &rockchip_pcie_pm_ops,
},
.probe = rockchip_pcie_probe,
-
+ .remove = rockchip_pcie_remove,
};
-builtin_platform_driver(rockchip_pcie_driver);
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Rockchip Inc");
+MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4c3e0ab35496..4b16b26ae909 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -761,7 +761,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
pcie->phys_pcie_reg_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
- pcie->ecam_base = devm_ioremap_resource(dev, res);
+ pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pcie->ecam_base))
return PTR_ERR(pcie->ecam_base);
pcie->phys_ecam_base = res->start;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 7f030f5d750b..2fe2df51f9f8 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -606,7 +606,7 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
return err;
}
- port->reg_base = devm_ioremap_resource(dev, &regs);
+ port->reg_base = devm_pci_remap_cfg_resource(dev, &regs);
if (IS_ERR(port->reg_base))
return PTR_ERR(port->reg_base);
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 9e69403be632..19f30a9f461d 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -109,6 +109,12 @@ int pciehp_unconfigure_device(struct slot *p_slot)
break;
}
}
+ if (!presence) {
+ pci_dev_set_disconnected(dev, NULL);
+ if (pci_has_subordinate(dev))
+ pci_walk_bus(dev->subordinate,
+ pci_dev_set_disconnected, NULL);
+ }
pci_stop_and_remove_bus_device(dev);
/*
* Ensure that no new Requests will be generated from
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 2479ae876482..d9dc7363ac77 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -450,6 +450,7 @@ found:
iov->total_VFs = total;
iov->pgsz = pgsz;
iov->self = dev;
+ iov->drivers_autoprobe = true;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index f9f2a0324ecc..83d30953ce19 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -1,7 +1,8 @@
/*
- * PCI IRQ failure handing code
+ * PCI IRQ handling code
*
* Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ * Copyright (C) 2017 Christoph Hellwig.
*/
#include <linux/acpi.h>
@@ -59,3 +60,61 @@ enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev)
return PCI_LOST_IRQ_NO_INFORMATION;
}
EXPORT_SYMBOL(pci_lost_interrupt);
+
+/**
+ * pci_request_irq - allocate an interrupt line for a PCI device
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts.
+ * If NULL and thread_fn != NULL the default primary handler is
+ * installed.
+ * @thread_fn: Function called from the IRQ handler thread
+ * If NULL, no IRQ thread is created
+ * @dev_id: Cookie passed back to the handler function
+ * @fmt: Printf-like format string naming the handler
+ *
+ * This call allocates interrupt resources and enables the interrupt line and
+ * IRQ handling. From the point this call is made @handler and @thread_fn may
+ * be invoked. Interrupts requested using this function are always requested
+ * as shared (IRQF_SHARED).
+ *
+ * @dev_id must not be NULL and must be globally unique.
+ */
+int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
+ irq_handler_t thread_fn, void *dev_id, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ char *devname;
+
+ va_start(ap, fmt);
+ devname = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
+ IRQF_SHARED, devname, dev_id);
+ if (ret)
+ kfree(devname);
+ return ret;
+}
+EXPORT_SYMBOL(pci_request_irq);
+
+/**
+ * pci_free_irq - free an interrupt allocated with pci_request_irq
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the interrupt
+ * line is no longer in use by any driver it is disabled. The caller must
+ * ensure the interrupt is disabled on the device before calling this function.
+ * The function does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
+{
+ kfree(free_irq(pci_irq_vector(dev, nr), dev_id));
+}
+EXPORT_SYMBOL(pci_free_irq);
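A hedged sketch of the calling pattern these helpers enable (the device and handler names are assumptions; the vector index is the same 0-based index used with pci_irq_vector()):

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_irqs(struct pci_dev *pdev, void *priv)
{
	int i, ret, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* devname built from the format string; freed by pci_free_irq() */
		ret = pci_request_irq(pdev, i, example_irq_handler, NULL,
				      priv, "example-dev[%d]", i);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		pci_free_irq(pdev, i, priv);
	pci_free_irq_vectors(pdev);
	return ret;
}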
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
new file mode 100644
index 000000000000..9a5e5a9055eb
--- /dev/null
+++ b/drivers/pci/mmap.c
@@ -0,0 +1,99 @@
+/*
+ * mmap.c — generic PCI resource mmap helper
+ *
+ * Copyright © 2017 Amazon.com, Inc. or its affiliates.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+/*
+ * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
+ * pci_mmap_page_range() (if needed) as a wrapper round it.
+ */
+
+#ifdef HAVE_PCI_MMAP
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ resource_size_t start, end;
+
+ pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
+
+ /* Adjust vm_pgoff to be the offset within the resource */
+ vma->vm_pgoff -= start >> PAGE_SHIFT;
+ return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
+ write_combine);
+}
+#endif
+
+static const struct vm_operations_struct pci_phys_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ unsigned long size;
+ int ret;
+
+ size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1;
+ if (vma->vm_pgoff + vma_pages(vma) > size)
+ return -EINVAL;
+
+ if (write_combine)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+
+ if (mmap_state == pci_mmap_io) {
+ ret = pci_iobar_pfn(pdev, bar, vma);
+ if (ret)
+ return ret;
+ } else
+ vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
+
+ vma->vm_ops = &pci_phys_vm_ops;
+
+ return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
+
+/*
+ * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
+ * the architecture's pci_mmap_page_range(), converting to "user visible"
+ * addresses as necessary.
+ */
+
+int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ resource_size_t start, end;
+
+ /*
+ * pci_mmap_page_range() expects the same kind of entry as coming
+ * from /proc/bus/pci/ which is a "user visible" value. If this is
+ * different from the resource itself, arch will do necessary fixup.
+ */
+ pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
+ vma->vm_pgoff += start >> PAGE_SHIFT;
+ return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
+}
+#endif
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0042c365b29b..ba44fdfda66b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -298,7 +298,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
struct pci_dev *dev = msi_desc_to_pci_dev(entry);
- if (dev->current_state != PCI_D0) {
+ if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
@@ -541,7 +541,8 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
if (affd) {
masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
- pr_err("Unable to allocate affinity masks, ignoring\n");
+ dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
+ nvec);
}
/* MSI Entry Initialization */
@@ -681,7 +682,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
if (affd) {
masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
- pr_err("Unable to allocate affinity masks, ignoring\n");
+ dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
+ nvec);
}
for (i = 0, curmsk = masks; i < nvec; i++) {
@@ -882,7 +884,7 @@ int pci_msi_vec_count(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_msi_vec_count);
-void pci_msi_shutdown(struct pci_dev *dev)
+static void pci_msi_shutdown(struct pci_dev *dev)
{
struct msi_desc *desc;
u32 mask;
@@ -973,13 +975,18 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
return msix_capability_init(dev, entries, nvec, affd);
}
-void pci_msix_shutdown(struct pci_dev *dev)
+static void pci_msix_shutdown(struct pci_dev *dev)
{
struct msi_desc *entry;
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
+ if (pci_dev_is_disconnected(dev)) {
+ dev->msix_enabled = 0;
+ return;
+ }
+
/* Return the device with MSI-X masked as initial states */
for_each_pci_msi_entry(entry, dev) {
/* Keep cached states to be restored */
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index afa72717a979..192e7b681b96 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -394,6 +394,18 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
{
}
+#ifdef CONFIG_PCI_IOV
+static inline bool pci_device_can_probe(struct pci_dev *pdev)
+{
+ return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
+}
+#else
+static inline bool pci_device_can_probe(struct pci_dev *pdev)
+{
+ return true;
+}
+#endif
+
static int pci_device_probe(struct device *dev)
{
int error;
@@ -405,10 +417,12 @@ static int pci_device_probe(struct device *dev)
return error;
pci_dev_get(pci_dev);
- error = __pci_device_probe(drv, pci_dev);
- if (error) {
- pcibios_free_irq(pci_dev);
- pci_dev_put(pci_dev);
+ if (pci_device_can_probe(pci_dev)) {
+ error = __pci_device_probe(drv, pci_dev);
+ if (error) {
+ pcibios_free_irq(pci_dev);
+ pci_dev_put(pci_dev);
+ }
}
return error;
@@ -461,8 +475,6 @@ static void pci_device_shutdown(struct device *dev)
if (drv && drv->shutdown)
drv->shutdown(pci_dev);
- pci_msi_shutdown(pci_dev);
- pci_msix_shutdown(pci_dev);
/*
* If this is a kexec reboot, turn off Bus Master bit on the
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 25d010d449a3..31e99613a12e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -526,10 +526,37 @@ exit:
return count;
}
+static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
+}
+
+static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool drivers_autoprobe;
+
+ if (kstrtobool(buf, &drivers_autoprobe) < 0)
+ return -EINVAL;
+
+ pdev->sriov->drivers_autoprobe = drivers_autoprobe;
+
+ return count;
+}
+
static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
sriov_numvfs_show, sriov_numvfs_store);
+static struct device_attribute sriov_drivers_autoprobe_attr =
+ __ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
+ sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
#endif /* CONFIG_PCI_IOV */
static ssize_t driver_override_store(struct device *dev,
@@ -980,20 +1007,24 @@ void pci_remove_legacy_files(struct pci_bus *b)
}
#endif /* HAVE_PCI_LEGACY */
-#ifdef HAVE_PCI_MMAP
+#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
enum pci_mmap_api mmap_api)
{
- unsigned long nr, start, size, pci_start;
+ unsigned long nr, start, size;
+ resource_size_t pci_start = 0, pci_end;
if (pci_resource_len(pdev, resno) == 0)
return 0;
nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
- pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+ if (mmap_api == PCI_MMAP_PROCFS) {
+ pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+ &pci_start, &pci_end);
+ pci_start >>= PAGE_SHIFT;
+ }
if (start >= pci_start && start < pci_start + size &&
start + nr <= pci_start + size)
return 1;
@@ -1013,37 +1044,24 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
struct vm_area_struct *vma, int write_combine)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- struct resource *res = attr->private;
+ int bar = (unsigned long)attr->private;
enum pci_mmap_state mmap_type;
- resource_size_t start, end;
- int i;
-
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
- if (res == &pdev->resource[i])
- break;
- if (i >= PCI_ROM_RESOURCE)
- return -ENODEV;
+ struct resource *res = &pdev->resource[bar];
if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
return -EINVAL;
- if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
+ if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS)) {
WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
- pci_name(pdev), i,
- (u64)pci_resource_start(pdev, i),
- (u64)pci_resource_len(pdev, i));
+ pci_name(pdev), bar,
+ (u64)pci_resource_start(pdev, bar),
+ (u64)pci_resource_len(pdev, bar));
return -EINVAL;
}
-
- /* pci_mmap_page_range() expects the same kind of entry as coming
- * from /proc/bus/pci/ which is a "user visible" value. If this is
- * different from the resource itself, arch will do necessary fixup.
- */
- pci_resource_to_user(pdev, i, res, &start, &end);
- vma->vm_pgoff += start >> PAGE_SHIFT;
mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
- return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
+
+ return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}
static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
@@ -1065,22 +1083,18 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
loff_t off, size_t count, bool write)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- struct resource *res = attr->private;
+ int bar = (unsigned long)attr->private;
+ struct resource *res;
unsigned long port = off;
- int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
- if (res == &pdev->resource[i])
- break;
- if (i >= PCI_ROM_RESOURCE)
- return -ENODEV;
+ res = &pdev->resource[bar];
- port += pci_resource_start(pdev, i);
+ port += pci_resource_start(pdev, bar);
- if (port > pci_resource_end(pdev, i))
+ if (port > pci_resource_end(pdev, bar))
return 0;
- if (port + count - 1 > pci_resource_end(pdev, i))
+ if (port + count - 1 > pci_resource_end(pdev, bar))
return -EINVAL;
switch (count) {
@@ -1170,16 +1184,19 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
} else {
pdev->res_attr[num] = res_attr;
sprintf(res_attr_name, "resource%d", num);
- res_attr->mmap = pci_mmap_resource_uc;
- }
- if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
- res_attr->read = pci_read_resource_io;
- res_attr->write = pci_write_resource_io;
+ if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
+ res_attr->read = pci_read_resource_io;
+ res_attr->write = pci_write_resource_io;
+ if (arch_can_pci_mmap_io())
+ res_attr->mmap = pci_mmap_resource_uc;
+ } else {
+ res_attr->mmap = pci_mmap_resource_uc;
+ }
}
res_attr->attr.name = res_attr_name;
res_attr->attr.mode = S_IRUSR | S_IWUSR;
res_attr->size = pci_resource_len(pdev, num);
- res_attr->private = &pdev->resource[num];
+ res_attr->private = (void *)(unsigned long)num;
retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
if (retval)
kfree(res_attr);
@@ -1207,9 +1224,9 @@ static int pci_create_resource_files(struct pci_dev *pdev)
retval = pci_create_attr(pdev, i, 0);
/* for prefetchable resources, create a WC mappable file */
- if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH)
+ if (!retval && arch_can_pci_mmap_wc() &&
+ pdev->resource[i].flags & IORESOURCE_PREFETCH)
retval = pci_create_attr(pdev, i, 1);
-
if (retval) {
pci_remove_resource_files(pdev);
return retval;
@@ -1549,6 +1566,7 @@ static struct attribute_group pci_dev_hp_attr_group = {
static struct attribute *sriov_dev_attrs[] = {
&sriov_totalvfs_attr.attr,
&sriov_numvfs_attr.attr,
+ &sriov_drivers_autoprobe_attr.attr,
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7904d02ffdb9..b01bd5bba8e6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -66,7 +66,8 @@ static void pci_dev_d3_sleep(struct pci_dev *dev)
if (delay < pci_pm_d3_delay)
delay = pci_pm_d3_delay;
- msleep(delay);
+ if (delay)
+ msleep(delay);
}
#ifdef CONFIG_PCI_DOMAINS
@@ -827,7 +828,8 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
* because have already delayed for the bridge.
*/
if (dev->runtime_d3cold) {
- msleep(dev->d3cold_delay);
+ if (dev->d3cold_delay)
+ msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
* whole hierarchy may be powered on into
@@ -1782,8 +1784,8 @@ static void pci_pme_list_scan(struct work_struct *work)
}
}
if (!list_empty(&pci_pme_list))
- schedule_delayed_work(&pci_pme_work,
- msecs_to_jiffies(PME_TIMEOUT));
+ queue_delayed_work(system_freezable_wq, &pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
}
@@ -1848,8 +1850,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
mutex_lock(&pci_pme_list_mutex);
list_add(&pme_dev->list, &pci_pme_list);
if (list_is_singular(&pci_pme_list))
- schedule_delayed_work(&pci_pme_work,
- msecs_to_jiffies(PME_TIMEOUT));
+ queue_delayed_work(system_freezable_wq,
+ &pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
} else {
mutex_lock(&pci_pme_list_mutex);
@@ -3363,7 +3366,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
* Only architectures that have memory mapped IO functions defined
* (and the PCI_IOBASE value defined) should call this function.
*/
-int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
@@ -3383,6 +3386,7 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
return -ENODEV;
#endif
}
+EXPORT_SYMBOL(pci_remap_iospace);
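
With the __weak removed and the symbol exported, modular host-bridge drivers can map their I/O window directly. A minimal sketch, with placeholder addresses rather than values from any real platform:

	/* Hedged sketch of a host-bridge driver mapping its I/O window with
	 * the newly exported helper; base and size are illustrative only. */
	static int example_map_io_window(struct device *dev)
	{
		struct resource io_res = {
			.name  = "example PCI I/O",
			.start = 0,		/* logical I/O port base */
			.end   = 0xffff,	/* 64 KiB of port space */
			.flags = IORESOURCE_IO,
		};
		phys_addr_t cpu_addr = 0xf8000000;	/* assumed CPU address */
		int ret;

		ret = pci_remap_iospace(&io_res, cpu_addr);
		if (ret)
			dev_err(dev, "failed to map I/O range %pR: %d\n",
				&io_res, ret);
		return ret;
	}
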
/**
* pci_unmap_iospace - Unmap the memory mapped I/O space
@@ -3400,6 +3404,89 @@ void pci_unmap_iospace(struct resource *res)
unmap_kernel_range(vaddr, resource_size(res));
#endif
}
+EXPORT_SYMBOL(pci_unmap_iospace);
+
+/**
+ * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = pci_remap_cfgspace(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfgspace);
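
A minimal sketch of using the managed mapping directly when a driver already knows the window's offset and size (the names and values below are assumptions for illustration; a driver holding a struct resource would normally use devm_pci_remap_cfg_resource(), introduced just below):

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *cfg;

		/* illustrative offset/size; returns NULL on failure */
		cfg = devm_pci_remap_cfgspace(&pdev->dev, 0x40000000, 0x100000);
		if (!cfg)
			return -ENOMEM;

		/* the mapping is torn down automatically on driver detach */
		return 0;
	}
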
+
+/**
+ * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
+ * @dev: generic device to handle the resource for
+ * @res: configuration space resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps with pci_remap_cfgspace() API that ensures the
+ * proper PCI configuration space memory attributes are guaranteed.
+ *
+ * All operations are managed and will be undone on driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example:
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res)
+{
+ resource_size_t size;
+ const char *name;
+ void __iomem *dest_ptr;
+
+ BUG_ON(!dev);
+
+ if (!res || resource_type(res) != IORESOURCE_MEM) {
+ dev_err(dev, "invalid resource\n");
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+ name = res->name ?: dev_name(dev);
+
+ if (!devm_request_mem_region(dev, res->start, size, name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EBUSY);
+ }
+
+ dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
+ if (!dest_ptr) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ devm_release_mem_region(dev, res->start, size);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ return dest_ptr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
@@ -3773,24 +3860,41 @@ static void pci_flr_wait(struct pci_dev *dev)
(i - 1) * 100);
}
-static int pcie_flr(struct pci_dev *dev, int probe)
+/**
+ * pcie_has_flr - check if a device supports function level resets
+ * @dev: device to check
+ *
+ * Returns true if the device advertises support for PCIe function level
+ * resets.
+ */
+static bool pcie_has_flr(struct pci_dev *dev)
{
u32 cap;
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
- if (!(cap & PCI_EXP_DEVCAP_FLR))
- return -ENOTTY;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+ return false;
- if (probe)
- return 0;
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+ return cap & PCI_EXP_DEVCAP_FLR;
+}
+/**
+ * pcie_flr - initiate a PCIe function level reset
+ * @dev: device to reset
+ *
+ * Initiate a function level reset on @dev. The caller should ensure the
+ * device supports FLR before calling this function, e.g. by using the
+ * pcie_has_flr() helper.
+ */
+void pcie_flr(struct pci_dev *dev)
+{
if (!pci_wait_for_pending_transaction(dev))
dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
pci_flr_wait(dev);
- return 0;
}
+EXPORT_SYMBOL_GPL(pcie_flr);
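
Within the PCI core the old probe argument is replaced by the pcie_has_flr()/pcie_flr() pair (pcie_has_flr() stays local to drivers/pci/pci.c); drivers that already know their function advertises FLR, such as the ixgbe and hfi1 conversions in this series, call the exported pcie_flr() directly. A hedged sketch of the core's probe/reset split:

	/* Sketch only: mirrors how __pci_dev_reset() below uses the helpers. */
	static int example_flr_reset(struct pci_dev *dev, int probe)
	{
		if (!pcie_has_flr(dev))
			return -ENOTTY;	/* fall back to other reset methods */

		if (!probe)
			pcie_flr(dev);	/* waits for completion before returning */
		return 0;
	}
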
static int pci_af_flr(struct pci_dev *dev, int probe)
{
@@ -3801,6 +3905,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
if (!pos)
return -ENOTTY;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+ return -ENOTTY;
+
pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
@@ -3971,9 +4078,12 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe)
if (rc != -ENOTTY)
goto done;
- rc = pcie_flr(dev, probe);
- if (rc != -ENOTTY)
+ if (pcie_has_flr(dev)) {
+ if (!probe)
+ pcie_flr(dev);
+ rc = 0;
goto done;
+ }
rc = pci_af_flr(dev, probe);
if (rc != -ENOTTY)
@@ -4932,6 +5042,8 @@ bool pci_device_is_present(struct pci_dev *pdev)
{
u32 v;
+ if (pci_dev_is_disconnected(pdev))
+ return false;
return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);
@@ -4947,6 +5059,11 @@ void pci_ignore_hotplug(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
+resource_size_t __weak pcibios_default_alignment(void)
+{
+ return 0;
+}
+
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
@@ -4954,22 +5071,25 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
/**
* pci_specified_resource_alignment - get resource alignment specified by user.
* @dev: the PCI device to get
+ * @resize: whether or not to change resources' size when reassigning alignment
*
* RETURNS: Resource alignment if it is specified.
* Zero if it is not specified.
*/
-static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
+static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
+ bool *resize)
{
int seg, bus, slot, func, align_order, count;
unsigned short vendor, device, subsystem_vendor, subsystem_device;
- resource_size_t align = 0;
+ resource_size_t align = pcibios_default_alignment();
char *p;
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
- if (!*p)
+ if (!*p && !align)
goto out;
if (pci_has_flag(PCI_PROBE_ONLY)) {
+ align = 0;
pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
goto out;
}
@@ -4999,6 +5119,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
(!device || (device == dev->device)) &&
(!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
(!subsystem_device || (subsystem_device == dev->subsystem_device))) {
+ *resize = true;
if (align_order == -1)
align = PAGE_SIZE;
else
@@ -5024,6 +5145,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
bus == dev->bus->number &&
slot == PCI_SLOT(dev->devfn) &&
func == PCI_FUNC(dev->devfn)) {
+ *resize = true;
if (align_order == -1)
align = PAGE_SIZE;
else
@@ -5043,6 +5165,68 @@ out:
return align;
}
+static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
+ resource_size_t align, bool resize)
+{
+ struct resource *r = &dev->resource[bar];
+ resource_size_t size;
+
+ if (!(r->flags & IORESOURCE_MEM))
+ return;
+
+ if (r->flags & IORESOURCE_PCI_FIXED) {
+ dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
+ bar, r, (unsigned long long)align);
+ return;
+ }
+
+ size = resource_size(r);
+ if (size >= align)
+ return;
+
+ /*
+ * Increase the alignment of the resource. There are two ways we
+ * can do this:
+ *
+ * 1) Increase the size of the resource. BARs are aligned on their
+ * size, so when we reallocate space for this resource, we'll
+ * allocate it with the larger alignment. This also prevents
+ * assignment of any other BARs inside the alignment region, so
+ * if we're requesting page alignment, this means no other BARs
+ * will share the page.
+ *
+ * The disadvantage is that this makes the resource larger than
+ * the hardware BAR, which may break drivers that compute things
+ * based on the resource size, e.g., to find registers at a
+ * fixed offset before the end of the BAR.
+ *
+ * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
+ * set r->start to the desired alignment. By itself this
+ * doesn't prevent other BARs being put inside the alignment
+ * region, but if we realign *every* resource of every device in
+ * the system, none of them will share an alignment region.
+ *
+ * When the user has requested alignment for only some devices via
+ * the "pci=resource_alignment" argument, "resize" is true and we
+ * use the first method. Otherwise we assume we're aligning all
+ * devices and we use the second.
+ */
+
+ dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
+ bar, r, (unsigned long long)align);
+
+ if (resize) {
+ r->start = 0;
+ r->end = align - 1;
+ } else {
+ r->flags &= ~IORESOURCE_SIZEALIGN;
+ r->flags |= IORESOURCE_STARTALIGN;
+ r->start = align;
+ r->end = r->start + size - 1;
+ }
+ r->flags |= IORESOURCE_UNSET;
+}
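
To make the two strategies above concrete (values chosen purely for illustration; the per-device request uses the usual pci=resource_alignment=<order>@<BDF> boot-parameter form, e.g. 20@0000:01:00.0 for 2^20-byte alignment):

	/* Illustration only: a 16 KiB memory BAR, requested alignment 1 MiB.
	 *
	 * resize == true  (user asked via "pci=resource_alignment="):
	 *	r->start = 0;  r->end = 0x100000 - 1;	resource grows to 1 MiB
	 *
	 * resize == false (arch-wide pcibios_default_alignment()):
	 *	r->flags &= ~IORESOURCE_SIZEALIGN;
	 *	r->flags |= IORESOURCE_STARTALIGN;
	 *	r->start = 0x100000;  r->end = r->start + 0x4000 - 1;
	 *
	 * Either way IORESOURCE_UNSET is set so the BAR is reassigned later.
	 */
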
+
/*
* This function disables memory decoding and releases memory resources
* of the device specified by kernel's boot parameter 'pci=resource_alignment='.
@@ -5054,8 +5238,9 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
int i;
struct resource *r;
- resource_size_t align, size;
+ resource_size_t align;
u16 command;
+ bool resize = false;
/*
* VF BARs are read-only zero according to SR-IOV spec r1.1, sec
@@ -5067,7 +5252,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
return;
/* check if specified PCI is target device to reassign */
- align = pci_specified_resource_alignment(dev);
+ align = pci_specified_resource_alignment(dev, &resize);
if (!align)
return;
@@ -5084,28 +5269,11 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
command &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(dev, PCI_COMMAND, command);
- for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
- r = &dev->resource[i];
- if (!(r->flags & IORESOURCE_MEM))
- continue;
- if (r->flags & IORESOURCE_PCI_FIXED) {
- dev_info(&dev->dev, "Ignoring requested alignment for BAR%d: %pR\n",
- i, r);
- continue;
- }
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ pci_request_resource_alignment(dev, i, align, resize);
- size = resource_size(r);
- if (size < align) {
- size = align;
- dev_info(&dev->dev,
- "Rounding up size of resource #%d to %#llx.\n",
- i, (unsigned long long)size);
- }
- r->flags |= IORESOURCE_UNSET;
- r->end = size - 1;
- r->start = 0;
- }
- /* Need to disable bridge's resource window,
+ /*
+ * Need to disable bridge's resource window,
* to enable the kernel to reassign new resource
* window later on.
*/
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4dbf9f96ae5b..f8113e5b9812 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -23,14 +23,14 @@ void pci_create_firmware_label_files(struct pci_dev *pdev);
void pci_remove_firmware_label_files(struct pci_dev *pdev);
#endif
void pci_cleanup_rom(struct pci_dev *dev);
-#ifdef HAVE_PCI_MMAP
+
enum pci_mmap_api {
PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
};
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
enum pci_mmap_api mmap_api);
-#endif
+
int pci_probe_reset_function(struct pci_dev *dev);
/**
@@ -274,8 +274,23 @@ struct pci_sriov {
struct pci_dev *self; /* this PF */
struct mutex lock; /* lock for setting sriov_numvfs in sysfs */
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
+ bool drivers_autoprobe; /* auto probing of VFs by driver */
};
+/* pci_dev priv_flags */
+#define PCI_DEV_DISCONNECTED 0
+
+static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
+{
+ set_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+ return 0;
+}
+
+static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+{
+ return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+}
+
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index d4d70ef4a2d7..77d2ca99d2ec 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pcieport_if.h>
+#include "../pci.h"
struct dpc_dev {
struct pcie_device *dev;
@@ -66,6 +67,10 @@ static void interrupt_event_handler(struct work_struct *work)
list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
bus_list) {
pci_dev_get(dev);
+ pci_dev_set_disconnected(dev, NULL);
+ if (pci_has_subordinate(dev))
+ pci_walk_bus(dev->subordinate,
+ pci_dev_set_disconnected, NULL);
pci_stop_and_remove_bus_device(dev);
pci_dev_put(dev);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 90592d424e9b..01eb8038fceb 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -175,7 +175,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int pos)
{
- u32 l, sz, mask;
+ u32 l = 0, sz = 0, mask;
u64 l64, sz64, mask64;
u16 orig_cmd;
struct pci_bus_region region, inverted_region;
@@ -231,7 +231,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
- mask64 = (u32)PCI_ROM_ADDRESS_MASK;
+ mask64 = PCI_ROM_ADDRESS_MASK;
}
if (res->flags & IORESOURCE_MEM_64) {
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index f82710a8694d..098360d7ff81 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -202,6 +202,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
case PCIIOC_MMAP_IS_IO:
+ if (!arch_can_pci_mmap_io())
+ return -EINVAL;
fpriv->mmap_state = pci_mmap_io;
break;
@@ -210,14 +212,15 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
break;
case PCIIOC_WRITE_COMBINE:
- if (arg)
- fpriv->write_combine = 1;
- else
- fpriv->write_combine = 0;
- break;
-
+ if (arch_can_pci_mmap_wc()) {
+ if (arg)
+ fpriv->write_combine = 1;
+ else
+ fpriv->write_combine = 0;
+ break;
+ }
+ /* If arch decided it can't, fall through... */
#endif /* HAVE_PCI_MMAP */
-
default:
ret = -EINVAL;
break;
@@ -231,25 +234,35 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pci_dev *dev = PDE_DATA(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
- int i, ret, write_combine;
+ int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (fpriv->mmap_state == pci_mmap_io) {
+ if (!arch_can_pci_mmap_io())
+ return -EINVAL;
+ res_bit = IORESOURCE_IO;
+ }
+
/* Make sure the caller is mapping a real resource for this device */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
+ if (dev->resource[i].flags & res_bit &&
+ pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
- if (fpriv->mmap_state == pci_mmap_mem)
- write_combine = fpriv->write_combine;
- else
- write_combine = 0;
- ret = pci_mmap_page_range(dev, vma,
+ if (fpriv->mmap_state == pci_mmap_mem &&
+ fpriv->write_combine) {
+ if (dev->resource[i].flags & IORESOURCE_PREFETCH)
+ write_combine = 1;
+ else
+ return -EINVAL;
+ }
+ ret = pci_mmap_page_range(dev, i, vma,
fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 673683660b5c..085fb787aa9e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1685,6 +1685,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
#ifdef CONFIG_X86_IO_APIC
+static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
+{
+ noioapicreroute = 1;
+ pr_info("%s detected: disable boot interrupt reroute\n", d->ident);
+
+ return 0;
+}
+
+static struct dmi_system_id boot_interrupt_dmi_table[] = {
+ /*
+ * Systems to exclude from boot interrupt reroute quirks
+ */
+ {
+ .callback = dmi_disable_ioapicreroute,
+ .ident = "ASUSTek Computer INC. M2N-LR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
+ },
+ },
+ {}
+};
+
/*
* Boot interrupts on some chipsets cannot be turned off. For these chipsets,
* remap the original interrupt in the linux kernel to the boot interrupt, so
@@ -1693,6 +1716,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
*/
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
+ dmi_check_system(boot_interrupt_dmi_table);
if (noioapicquirk || noioapicreroute)
return;
@@ -3642,19 +3666,11 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
*
* The 82599 supports FLR on VFs, but FLR support is reported only
* in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
- * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP.
+ * Thus we must call pcie_flr() directly without first checking if it is
+ * supported.
*/
-
- if (probe)
- return 0;
-
- if (!pci_wait_for_pending_transaction(dev))
- dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
-
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
-
- msleep(100);
-
+ if (!probe)
+ pcie_flr(dev);
return 0;
}
@@ -3759,20 +3775,7 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL);
- /*
- * Start of pcie_flr() code sequence. This reset code is a copy of
- * the guts of pcie_flr() because that's not an exported function.
- */
-
- if (!pci_wait_for_pending_transaction(dev))
- dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
-
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
- msleep(100);
-
- /*
- * End of pcie_flr() code sequence.
- */
+ pcie_flr(dev);
/*
* Restore the configuration information (BAR values, etc.) including
@@ -3939,6 +3942,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
/* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+/* ITE 8893 has the same problem as the 8892 */
+DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);
/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
@@ -3958,6 +3963,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
/*
+ * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are
+ * associated not at the root bus, but at a bridge below. This quirk avoids
+ * generating invalid DMA aliases.
+ */
+static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
+{
+ pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
+ quirk_bridge_cavm_thrx2_pcie_root);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
+ quirk_bridge_cavm_thrx2_pcie_root);
+
+/*
* Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
* class code. Fix it.
*/
@@ -4095,6 +4114,9 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+ if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
+ return -ENOTTY;
+
return acs_flags ? 0 : 1;
}
@@ -4634,3 +4656,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
+
+/* FLR may cause some 82579 devices to hang. */
+static void quirk_intel_no_flr(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 33e0f033a48e..4c6044ad7368 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -60,6 +60,10 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
tmp = bus->self;
+ /* stop at bridge where translation unit is associated */
+ if (tmp->dev_flags & PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT)
+ return ret;
+
/*
* PCIe-to-PCI/X bridges alias transactions from downstream
* devices using the subordinate bus number (PCI Express to
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index cb389277df41..958da7db9033 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1066,10 +1066,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
r->flags = 0;
continue;
}
- size += r_size;
+ size += max(r_size, align);
/* Exclude ranges with size > align from
calculation of the alignment. */
- if (r_size == align)
+ if (r_size <= align)
aligns[order] += align;
if (order > max_order)
max_order = order;
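
A worked illustration of this window-sizing change (numbers invented): a child BAR of size 0x800 that has been realigned to 0x1000 previously contributed only 0x800 to the bridge window even though it occupies a full 0x1000-aligned slot; summing max(r_size, align) = 0x1000, and letting r_size <= align count toward aligns[order], keeps the computed window large enough for such start-aligned resources.
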
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 4bc589ee78d0..85774b7a316a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
} else if (resno == PCI_ROM_RESOURCE) {
- mask = (u32)PCI_ROM_ADDRESS_MASK;
+ mask = PCI_ROM_ADDRESS_MASK;
} else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/pci/switch/Kconfig b/drivers/pci/switch/Kconfig
new file mode 100644
index 000000000000..4c49648e0646
--- /dev/null
+++ b/drivers/pci/switch/Kconfig
@@ -0,0 +1,13 @@
+menu "PCI switch controller drivers"
+ depends on PCI
+
+config PCI_SW_SWITCHTEC
+ tristate "MicroSemi Switchtec PCIe Switch Management Driver"
+ help
+ Enables support for the management interface for the MicroSemi
+ Switchtec series of PCIe switches. Supports userspace access
+ to submit MRPC commands to the switch via /dev/switchtecX
+ devices. See <file:Documentation/switchtec.txt> for more
+ information.
+
+endmenu
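
The char-device protocol the help text refers to is implemented by switchtec_dev_write() and switchtec_dev_read() in the new driver below: user space writes a 32-bit MRPC command number followed by up to 1024 bytes of input, then reads back a 32-bit return code followed by the output data. A hedged user-space sketch (the command number is a placeholder, not a real MRPC command):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		uint8_t buf[4 + 1024];
		uint32_t cmd = 0;	/* placeholder MRPC command number */
		int fd = open("/dev/switchtec0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		memcpy(buf, &cmd, sizeof(cmd));
		/* any input payload would follow the command word */
		if (write(fd, buf, sizeof(cmd)) != (ssize_t)sizeof(cmd)) {
			perror("write");
			return 1;
		}

		/* blocks until the command completes unless O_NONBLOCK is set */
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n >= (ssize_t)sizeof(uint32_t)) {
			uint32_t ret;

			memcpy(&ret, buf, sizeof(ret));
			printf("MRPC return code: %u, %zd output bytes\n",
			       ret, n - (ssize_t)sizeof(ret));
		}

		close(fd);
		return 0;
	}
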
diff --git a/drivers/pci/switch/Makefile b/drivers/pci/switch/Makefile
new file mode 100644
index 000000000000..37d8cfb03f3f
--- /dev/null
+++ b/drivers/pci/switch/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PCI_SW_SWITCHTEC) += switchtec.o
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
new file mode 100644
index 000000000000..cc6e085008fb
--- /dev/null
+++ b/drivers/pci/switch/switchtec.c
@@ -0,0 +1,1600 @@
+/*
+ * Microsemi Switchtec(tm) PCIe Management Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/switchtec_ioctl.h>
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+
+MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Microsemi Corporation");
+
+static int max_devices = 16;
+module_param(max_devices, int, 0644);
+MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
+
+static dev_t switchtec_devt;
+static struct class *switchtec_class;
+static DEFINE_IDA(switchtec_minor_ida);
+
+#define MICROSEMI_VENDOR_ID 0x11f8
+#define MICROSEMI_NTB_CLASSCODE 0x068000
+#define MICROSEMI_MGMT_CLASSCODE 0x058000
+
+#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
+#define SWITCHTEC_MAX_PFF_CSR 48
+
+#define SWITCHTEC_EVENT_OCCURRED BIT(0)
+#define SWITCHTEC_EVENT_CLEAR BIT(0)
+#define SWITCHTEC_EVENT_EN_LOG BIT(1)
+#define SWITCHTEC_EVENT_EN_CLI BIT(2)
+#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
+#define SWITCHTEC_EVENT_FATAL BIT(4)
+
+enum {
+ SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
+ SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
+ SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
+ SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
+ SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
+ SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
+ SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
+ SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
+};
+
+struct mrpc_regs {
+ u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+ u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+ u32 cmd;
+ u32 status;
+ u32 ret_value;
+} __packed;
+
+enum mrpc_status {
+ SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
+ SWITCHTEC_MRPC_STATUS_DONE = 2,
+ SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
+ SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
+};
+
+struct sw_event_regs {
+ u64 event_report_ctrl;
+ u64 reserved1;
+ u64 part_event_bitmap;
+ u64 reserved2;
+ u32 global_summary;
+ u32 reserved3[3];
+ u32 stack_error_event_hdr;
+ u32 stack_error_event_data;
+ u32 reserved4[4];
+ u32 ppu_error_event_hdr;
+ u32 ppu_error_event_data;
+ u32 reserved5[4];
+ u32 isp_error_event_hdr;
+ u32 isp_error_event_data;
+ u32 reserved6[4];
+ u32 sys_reset_event_hdr;
+ u32 reserved7[5];
+ u32 fw_exception_hdr;
+ u32 reserved8[5];
+ u32 fw_nmi_hdr;
+ u32 reserved9[5];
+ u32 fw_non_fatal_hdr;
+ u32 reserved10[5];
+ u32 fw_fatal_hdr;
+ u32 reserved11[5];
+ u32 twi_mrpc_comp_hdr;
+ u32 twi_mrpc_comp_data;
+ u32 reserved12[4];
+ u32 twi_mrpc_comp_async_hdr;
+ u32 twi_mrpc_comp_async_data;
+ u32 reserved13[4];
+ u32 cli_mrpc_comp_hdr;
+ u32 cli_mrpc_comp_data;
+ u32 reserved14[4];
+ u32 cli_mrpc_comp_async_hdr;
+ u32 cli_mrpc_comp_async_data;
+ u32 reserved15[4];
+ u32 gpio_interrupt_hdr;
+ u32 gpio_interrupt_data;
+ u32 reserved16[4];
+} __packed;
+
+struct sys_info_regs {
+ u32 device_id;
+ u32 device_version;
+ u32 firmware_version;
+ u32 reserved1;
+ u32 vendor_table_revision;
+ u32 table_format_version;
+ u32 partition_id;
+ u32 cfg_file_fmt_version;
+ u32 reserved2[58];
+ char vendor_id[8];
+ char product_id[16];
+ char product_revision[4];
+ char component_vendor[8];
+ u16 component_id;
+ u8 component_revision;
+} __packed;
+
+struct flash_info_regs {
+ u32 flash_part_map_upd_idx;
+
+ struct active_partition_info {
+ u32 address;
+ u32 build_version;
+ u32 build_string;
+ } active_img;
+
+ struct active_partition_info active_cfg;
+ struct active_partition_info inactive_img;
+ struct active_partition_info inactive_cfg;
+
+ u32 flash_length;
+
+ struct partition_info {
+ u32 address;
+ u32 length;
+ } cfg0;
+
+ struct partition_info cfg1;
+ struct partition_info img0;
+ struct partition_info img1;
+ struct partition_info nvlog;
+ struct partition_info vendor[8];
+};
+
+struct ntb_info_regs {
+ u8 partition_count;
+ u8 partition_id;
+ u16 reserved1;
+ u64 ep_map;
+ u16 requester_id;
+} __packed;
+
+struct part_cfg_regs {
+ u32 status;
+ u32 state;
+ u32 port_cnt;
+ u32 usp_port_mode;
+ u32 usp_pff_inst_id;
+ u32 vep_pff_inst_id;
+ u32 dsp_pff_inst_id[47];
+ u32 reserved1[11];
+ u16 vep_vector_number;
+ u16 usp_vector_number;
+ u32 port_event_bitmap;
+ u32 reserved2[3];
+ u32 part_event_summary;
+ u32 reserved3[3];
+ u32 part_reset_hdr;
+ u32 part_reset_data[5];
+ u32 mrpc_comp_hdr;
+ u32 mrpc_comp_data[5];
+ u32 mrpc_comp_async_hdr;
+ u32 mrpc_comp_async_data[5];
+ u32 dyn_binding_hdr;
+ u32 dyn_binding_data[5];
+ u32 reserved4[159];
+} __packed;
+
+enum {
+ SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
+ SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
+ SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
+ SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
+};
+
+struct pff_csr_regs {
+ u16 vendor_id;
+ u16 device_id;
+ u32 pci_cfg_header[15];
+ u32 pci_cap_region[48];
+ u32 pcie_cap_region[448];
+ u32 indirect_gas_window[128];
+ u32 indirect_gas_window_off;
+ u32 reserved[127];
+ u32 pff_event_summary;
+ u32 reserved2[3];
+ u32 aer_in_p2p_hdr;
+ u32 aer_in_p2p_data[5];
+ u32 aer_in_vep_hdr;
+ u32 aer_in_vep_data[5];
+ u32 dpc_hdr;
+ u32 dpc_data[5];
+ u32 cts_hdr;
+ u32 cts_data[5];
+ u32 reserved3[6];
+ u32 hotplug_hdr;
+ u32 hotplug_data[5];
+ u32 ier_hdr;
+ u32 ier_data[5];
+ u32 threshold_hdr;
+ u32 threshold_data[5];
+ u32 power_mgmt_hdr;
+ u32 power_mgmt_data[5];
+ u32 tlp_throttling_hdr;
+ u32 tlp_throttling_data[5];
+ u32 force_speed_hdr;
+ u32 force_speed_data[5];
+ u32 credit_timeout_hdr;
+ u32 credit_timeout_data[5];
+ u32 link_state_hdr;
+ u32 link_state_data[5];
+ u32 reserved4[174];
+} __packed;
+
+struct switchtec_dev {
+ struct pci_dev *pdev;
+ struct device dev;
+ struct cdev cdev;
+
+ int partition;
+ int partition_count;
+ int pff_csr_count;
+ char pff_local[SWITCHTEC_MAX_PFF_CSR];
+
+ void __iomem *mmio;
+ struct mrpc_regs __iomem *mmio_mrpc;
+ struct sw_event_regs __iomem *mmio_sw_event;
+ struct sys_info_regs __iomem *mmio_sys_info;
+ struct flash_info_regs __iomem *mmio_flash_info;
+ struct ntb_info_regs __iomem *mmio_ntb;
+ struct part_cfg_regs __iomem *mmio_part_cfg;
+ struct part_cfg_regs __iomem *mmio_part_cfg_all;
+ struct pff_csr_regs __iomem *mmio_pff_csr;
+
+ /*
+ * The mrpc mutex must be held when accessing the other
+ * mrpc_ fields, alive flag and stuser->state field
+ */
+ struct mutex mrpc_mutex;
+ struct list_head mrpc_queue;
+ int mrpc_busy;
+ struct work_struct mrpc_work;
+ struct delayed_work mrpc_timeout;
+ bool alive;
+
+ wait_queue_head_t event_wq;
+ atomic_t event_cnt;
+};
+
+static struct switchtec_dev *to_stdev(struct device *dev)
+{
+ return container_of(dev, struct switchtec_dev, dev);
+}
+
+enum mrpc_state {
+ MRPC_IDLE = 0,
+ MRPC_QUEUED,
+ MRPC_RUNNING,
+ MRPC_DONE,
+};
+
+struct switchtec_user {
+ struct switchtec_dev *stdev;
+
+ enum mrpc_state state;
+
+ struct completion comp;
+ struct kref kref;
+ struct list_head list;
+
+ u32 cmd;
+ u32 status;
+ u32 return_code;
+ size_t data_len;
+ size_t read_len;
+ unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+ int event_cnt;
+};
+
+static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
+{
+ struct switchtec_user *stuser;
+
+ stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
+ if (!stuser)
+ return ERR_PTR(-ENOMEM);
+
+ get_device(&stdev->dev);
+ stuser->stdev = stdev;
+ kref_init(&stuser->kref);
+ INIT_LIST_HEAD(&stuser->list);
+ init_completion(&stuser->comp);
+ stuser->event_cnt = atomic_read(&stdev->event_cnt);
+
+ dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
+
+ return stuser;
+}
+
+static void stuser_free(struct kref *kref)
+{
+ struct switchtec_user *stuser;
+
+ stuser = container_of(kref, struct switchtec_user, kref);
+
+ dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
+
+ put_device(&stuser->stdev->dev);
+ kfree(stuser);
+}
+
+static void stuser_put(struct switchtec_user *stuser)
+{
+ kref_put(&stuser->kref, stuser_free);
+}
+
+static void stuser_set_state(struct switchtec_user *stuser,
+ enum mrpc_state state)
+{
+ /* requires the mrpc_mutex to already be held when called */
+
+ const char * const state_names[] = {
+ [MRPC_IDLE] = "IDLE",
+ [MRPC_QUEUED] = "QUEUED",
+ [MRPC_RUNNING] = "RUNNING",
+ [MRPC_DONE] = "DONE",
+ };
+
+ stuser->state = state;
+
+ dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
+ stuser, state_names[state]);
+}
+
+static void mrpc_complete_cmd(struct switchtec_dev *stdev);
+
+static void mrpc_cmd_submit(struct switchtec_dev *stdev)
+{
+ /* requires the mrpc_mutex to already be held when called */
+
+ struct switchtec_user *stuser;
+
+ if (stdev->mrpc_busy)
+ return;
+
+ if (list_empty(&stdev->mrpc_queue))
+ return;
+
+ stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
+ list);
+
+ stuser_set_state(stuser, MRPC_RUNNING);
+ stdev->mrpc_busy = 1;
+ memcpy_toio(&stdev->mmio_mrpc->input_data,
+ stuser->data, stuser->data_len);
+ iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
+
+ stuser->status = ioread32(&stdev->mmio_mrpc->status);
+ if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
+ mrpc_complete_cmd(stdev);
+
+ schedule_delayed_work(&stdev->mrpc_timeout,
+ msecs_to_jiffies(500));
+}
+
+static int mrpc_queue_cmd(struct switchtec_user *stuser)
+{
+ /* requires the mrpc_mutex to already be held when called */
+
+ struct switchtec_dev *stdev = stuser->stdev;
+
+ kref_get(&stuser->kref);
+ stuser->read_len = sizeof(stuser->data);
+ stuser_set_state(stuser, MRPC_QUEUED);
+ init_completion(&stuser->comp);
+ list_add_tail(&stuser->list, &stdev->mrpc_queue);
+
+ mrpc_cmd_submit(stdev);
+
+ return 0;
+}
+
+static void mrpc_complete_cmd(struct switchtec_dev *stdev)
+{
+ /* requires the mrpc_mutex to already be held when called */
+ struct switchtec_user *stuser;
+
+ if (list_empty(&stdev->mrpc_queue))
+ return;
+
+ stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
+ list);
+
+ stuser->status = ioread32(&stdev->mmio_mrpc->status);
+ if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
+ return;
+
+ stuser_set_state(stuser, MRPC_DONE);
+ stuser->return_code = 0;
+
+ if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
+ goto out;
+
+ stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
+ if (stuser->return_code != 0)
+ goto out;
+
+ memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
+ stuser->read_len);
+
+out:
+ complete_all(&stuser->comp);
+ list_del_init(&stuser->list);
+ stuser_put(stuser);
+ stdev->mrpc_busy = 0;
+
+ mrpc_cmd_submit(stdev);
+}
+
+static void mrpc_event_work(struct work_struct *work)
+{
+ struct switchtec_dev *stdev;
+
+ stdev = container_of(work, struct switchtec_dev, mrpc_work);
+
+ dev_dbg(&stdev->dev, "%s\n", __func__);
+
+ mutex_lock(&stdev->mrpc_mutex);
+ cancel_delayed_work(&stdev->mrpc_timeout);
+ mrpc_complete_cmd(stdev);
+ mutex_unlock(&stdev->mrpc_mutex);
+}
+
+static void mrpc_timeout_work(struct work_struct *work)
+{
+ struct switchtec_dev *stdev;
+ u32 status;
+
+ stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
+
+ dev_dbg(&stdev->dev, "%s\n", __func__);
+
+ mutex_lock(&stdev->mrpc_mutex);
+
+ status = ioread32(&stdev->mmio_mrpc->status);
+ if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
+ schedule_delayed_work(&stdev->mrpc_timeout,
+ msecs_to_jiffies(500));
+ goto out;
+ }
+
+ mrpc_complete_cmd(stdev);
+
+out:
+ mutex_unlock(&stdev->mrpc_mutex);
+}
+
+static ssize_t device_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ u32 ver;
+
+ ver = ioread32(&stdev->mmio_sys_info->device_version);
+
+ return sprintf(buf, "%x\n", ver);
+}
+static DEVICE_ATTR_RO(device_version);
+
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ u32 ver;
+
+ ver = ioread32(&stdev->mmio_sys_info->firmware_version);
+
+ return sprintf(buf, "%08x\n", ver);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
+{
+ int i;
+
+ memcpy_fromio(buf, attr, len);
+ buf[len] = '\n';
+ buf[len + 1] = 0;
+
+ for (i = len - 1; i > 0; i--) {
+ if (buf[i] != ' ')
+ break;
+ buf[i] = '\n';
+ buf[i + 1] = 0;
+ }
+
+ return strlen(buf);
+}
+
+#define DEVICE_ATTR_SYS_INFO_STR(field) \
+static ssize_t field ## _show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct switchtec_dev *stdev = to_stdev(dev); \
+ return io_string_show(buf, &stdev->mmio_sys_info->field, \
+ sizeof(stdev->mmio_sys_info->field)); \
+} \
+\
+static DEVICE_ATTR_RO(field)
+
+DEVICE_ATTR_SYS_INFO_STR(vendor_id);
+DEVICE_ATTR_SYS_INFO_STR(product_id);
+DEVICE_ATTR_SYS_INFO_STR(product_revision);
+DEVICE_ATTR_SYS_INFO_STR(component_vendor);
+
+static ssize_t component_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ int id = ioread16(&stdev->mmio_sys_info->component_id);
+
+ return sprintf(buf, "PM%04X\n", id);
+}
+static DEVICE_ATTR_RO(component_id);
+
+static ssize_t component_revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ int rev = ioread8(&stdev->mmio_sys_info->component_revision);
+
+ return sprintf(buf, "%d\n", rev);
+}
+static DEVICE_ATTR_RO(component_revision);
+
+static ssize_t partition_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+
+ return sprintf(buf, "%d\n", stdev->partition);
+}
+static DEVICE_ATTR_RO(partition);
+
+static ssize_t partition_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+
+ return sprintf(buf, "%d\n", stdev->partition_count);
+}
+static DEVICE_ATTR_RO(partition_count);
+
+static struct attribute *switchtec_device_attrs[] = {
+ &dev_attr_device_version.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_vendor_id.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_product_revision.attr,
+ &dev_attr_component_vendor.attr,
+ &dev_attr_component_id.attr,
+ &dev_attr_component_revision.attr,
+ &dev_attr_partition.attr,
+ &dev_attr_partition_count.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(switchtec_device);
+
+static int switchtec_dev_open(struct inode *inode, struct file *filp)
+{
+ struct switchtec_dev *stdev;
+ struct switchtec_user *stuser;
+
+ stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
+
+ stuser = stuser_create(stdev);
+ if (IS_ERR(stuser))
+ return PTR_ERR(stuser);
+
+ filp->private_data = stuser;
+ nonseekable_open(inode, filp);
+
+ dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
+
+ return 0;
+}
+
+static int switchtec_dev_release(struct inode *inode, struct file *filp)
+{
+ struct switchtec_user *stuser = filp->private_data;
+
+ stuser_put(stuser);
+
+ return 0;
+}
+
+static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
+{
+ if (mutex_lock_interruptible(&stdev->mrpc_mutex))
+ return -EINTR;
+
+ if (!stdev->alive) {
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
+ size_t size, loff_t *off)
+{
+ struct switchtec_user *stuser = filp->private_data;
+ struct switchtec_dev *stdev = stuser->stdev;
+ int rc;
+
+ if (size < sizeof(stuser->cmd) ||
+ size > sizeof(stuser->cmd) + sizeof(stuser->data))
+ return -EINVAL;
+
+ stuser->data_len = size - sizeof(stuser->cmd);
+
+ rc = lock_mutex_and_test_alive(stdev);
+ if (rc)
+ return rc;
+
+ if (stuser->state != MRPC_IDLE) {
+ rc = -EBADE;
+ goto out;
+ }
+
+ rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ data += sizeof(stuser->cmd);
+ rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = mrpc_queue_cmd(stuser);
+
+out:
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ if (rc)
+ return rc;
+
+ return size;
+}
+
+static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
+ size_t size, loff_t *off)
+{
+ struct switchtec_user *stuser = filp->private_data;
+ struct switchtec_dev *stdev = stuser->stdev;
+ int rc;
+
+ if (size < sizeof(stuser->cmd) ||
+ size > sizeof(stuser->cmd) + sizeof(stuser->data))
+ return -EINVAL;
+
+ rc = lock_mutex_and_test_alive(stdev);
+ if (rc)
+ return rc;
+
+ if (stuser->state == MRPC_IDLE) {
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -EBADE;
+ }
+
+ stuser->read_len = size - sizeof(stuser->return_code);
+
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ if (filp->f_flags & O_NONBLOCK) {
+ if (!try_wait_for_completion(&stuser->comp))
+ return -EAGAIN;
+ } else {
+ rc = wait_for_completion_interruptible(&stuser->comp);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = lock_mutex_and_test_alive(stdev);
+ if (rc)
+ return rc;
+
+ if (stuser->state != MRPC_DONE) {
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -EBADE;
+ }
+
+ rc = copy_to_user(data, &stuser->return_code,
+ sizeof(stuser->return_code));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ data += sizeof(stuser->return_code);
+ rc = copy_to_user(data, &stuser->data,
+ size - sizeof(stuser->return_code));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ stuser_set_state(stuser, MRPC_IDLE);
+
+out:
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
+ return size;
+ else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
+ return -ENXIO;
+ else
+ return -EBADMSG;
+}
+
+static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
+{
+ struct switchtec_user *stuser = filp->private_data;
+ struct switchtec_dev *stdev = stuser->stdev;
+ int ret = 0;
+
+ poll_wait(filp, &stuser->comp.wait, wait);
+ poll_wait(filp, &stdev->event_wq, wait);
+
+ if (lock_mutex_and_test_alive(stdev))
+ return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;
+
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ if (try_wait_for_completion(&stuser->comp))
+ ret |= POLLIN | POLLRDNORM;
+
+ if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
+ ret |= POLLPRI | POLLRDBAND;
+
+ return ret;
+}
+
+static int ioctl_flash_info(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_flash_info __user *uinfo)
+{
+ struct switchtec_ioctl_flash_info info = {0};
+ struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
+
+ info.flash_length = ioread32(&fi->flash_length);
+ info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;
+
+ if (copy_to_user(uinfo, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
+ struct partition_info __iomem *pi)
+{
+ info->address = ioread32(&pi->address);
+ info->length = ioread32(&pi->length);
+}
+
+static int ioctl_flash_part_info(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_flash_part_info __user *uinfo)
+{
+ struct switchtec_ioctl_flash_part_info info = {0};
+ struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
+ u32 active_addr = -1;
+
+ if (copy_from_user(&info, uinfo, sizeof(info)))
+ return -EFAULT;
+
+ switch (info.flash_partition) {
+ case SWITCHTEC_IOCTL_PART_CFG0:
+ active_addr = ioread32(&fi->active_cfg);
+ set_fw_info_part(&info, &fi->cfg0);
+ break;
+ case SWITCHTEC_IOCTL_PART_CFG1:
+ active_addr = ioread32(&fi->active_cfg);
+ set_fw_info_part(&info, &fi->cfg1);
+ break;
+ case SWITCHTEC_IOCTL_PART_IMG0:
+ active_addr = ioread32(&fi->active_img);
+ set_fw_info_part(&info, &fi->img0);
+ break;
+ case SWITCHTEC_IOCTL_PART_IMG1:
+ active_addr = ioread32(&fi->active_img);
+ set_fw_info_part(&info, &fi->img1);
+ break;
+ case SWITCHTEC_IOCTL_PART_NVLOG:
+ set_fw_info_part(&info, &fi->nvlog);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR0:
+ set_fw_info_part(&info, &fi->vendor[0]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR1:
+ set_fw_info_part(&info, &fi->vendor[1]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR2:
+ set_fw_info_part(&info, &fi->vendor[2]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR3:
+ set_fw_info_part(&info, &fi->vendor[3]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR4:
+ set_fw_info_part(&info, &fi->vendor[4]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR5:
+ set_fw_info_part(&info, &fi->vendor[5]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR6:
+ set_fw_info_part(&info, &fi->vendor[6]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR7:
+ set_fw_info_part(&info, &fi->vendor[7]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (info.address == active_addr)
+ info.active = 1;
+
+ if (copy_to_user(uinfo, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ioctl_event_summary(struct switchtec_dev *stdev,
+ struct switchtec_user *stuser,
+ struct switchtec_ioctl_event_summary __user *usum)
+{
+ struct switchtec_ioctl_event_summary s = {0};
+ int i;
+ u32 reg;
+
+ s.global = ioread32(&stdev->mmio_sw_event->global_summary);
+ s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
+
+ for (i = 0; i < stdev->partition_count; i++) {
+ reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
+ s.part[i] = reg;
+ }
+
+ for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
+ reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
+ if (reg != MICROSEMI_VENDOR_ID)
+ break;
+
+ reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
+ s.pff[i] = reg;
+ }
+
+ if (copy_to_user(usum, &s, sizeof(s)))
+ return -EFAULT;
+
+ stuser->event_cnt = atomic_read(&stdev->event_cnt);
+
+ return 0;
+}
+
+static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
+ size_t offset, int index)
+{
+ return (void __iomem *)stdev->mmio_sw_event + offset;
+}
+
+static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
+ size_t offset, int index)
+{
+ return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
+}
+
+static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
+ size_t offset, int index)
+{
+ return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
+}
+
+#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
+#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
+#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
+
+const struct event_reg {
+ size_t offset;
+ u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
+ size_t offset, int index);
+} event_regs[] = {
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
+ twi_mrpc_comp_async_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
+ cli_mrpc_comp_async_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
+};
+
+static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
+ int event_id, int index)
+{
+ size_t off;
+
+ if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
+ return ERR_PTR(-EINVAL);
+
+ off = event_regs[event_id].offset;
+
+ if (event_regs[event_id].map_reg == part_ev_reg) {
+ if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
+ index = stdev->partition;
+ else if (index < 0 || index >= stdev->partition_count)
+ return ERR_PTR(-EINVAL);
+ } else if (event_regs[event_id].map_reg == pff_ev_reg) {
+ if (index < 0 || index >= stdev->pff_csr_count)
+ return ERR_PTR(-EINVAL);
+ }
+
+ return event_regs[event_id].map_reg(stdev, off, index);
+}
+
+static int event_ctl(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_event_ctl *ctl)
+{
+ int i;
+ u32 __iomem *reg;
+ u32 hdr;
+
+ reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ hdr = ioread32(reg);
+ for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
+ ctl->data[i] = ioread32(&reg[i + 1]);
+
+ ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
+ ctl->count = (hdr >> 5) & 0xFF;
+
+ if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
+ hdr &= ~SWITCHTEC_EVENT_CLEAR;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
+ hdr |= SWITCHTEC_EVENT_EN_IRQ;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
+ hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
+ hdr |= SWITCHTEC_EVENT_EN_LOG;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
+ hdr &= ~SWITCHTEC_EVENT_EN_LOG;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
+ hdr |= SWITCHTEC_EVENT_EN_CLI;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
+ hdr &= ~SWITCHTEC_EVENT_EN_CLI;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
+ hdr |= SWITCHTEC_EVENT_FATAL;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
+ hdr &= ~SWITCHTEC_EVENT_FATAL;
+
+ if (ctl->flags)
+ iowrite32(hdr, reg);
+
+ ctl->flags = 0;
+ if (hdr & SWITCHTEC_EVENT_EN_IRQ)
+ ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
+ if (hdr & SWITCHTEC_EVENT_EN_LOG)
+ ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
+ if (hdr & SWITCHTEC_EVENT_EN_CLI)
+ ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
+ if (hdr & SWITCHTEC_EVENT_FATAL)
+ ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;
+
+ return 0;
+}
+
+static int ioctl_event_ctl(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_event_ctl __user *uctl)
+{
+ int ret;
+ int nr_idxs;
+ struct switchtec_ioctl_event_ctl ctl;
+
+ if (copy_from_user(&ctl, uctl, sizeof(ctl)))
+ return -EFAULT;
+
+ if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
+ return -EINVAL;
+
+ if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
+ return -EINVAL;
+
+ if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
+ if (event_regs[ctl.event_id].map_reg == global_ev_reg)
+ nr_idxs = 1;
+ else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
+ nr_idxs = stdev->partition_count;
+ else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
+ nr_idxs = stdev->pff_csr_count;
+ else
+ return -EINVAL;
+
+ for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
+ ret = event_ctl(stdev, &ctl);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ ret = event_ctl(stdev, &ctl);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (copy_to_user(uctl, &ctl, sizeof(ctl)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ioctl_pff_to_port(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_pff_port *up)
+{
+ int i, part;
+ u32 reg;
+ struct part_cfg_regs *pcfg;
+ struct switchtec_ioctl_pff_port p;
+
+ if (copy_from_user(&p, up, sizeof(p)))
+ return -EFAULT;
+
+ p.port = -1;
+ for (part = 0; part < stdev->partition_count; part++) {
+ pcfg = &stdev->mmio_part_cfg_all[part];
+ p.partition = part;
+
+ reg = ioread32(&pcfg->usp_pff_inst_id);
+ if (reg == p.pff) {
+ p.port = 0;
+ break;
+ }
+
+ reg = ioread32(&pcfg->vep_pff_inst_id);
+ if (reg == p.pff) {
+ p.port = SWITCHTEC_IOCTL_PFF_VEP;
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
+ reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
+ if (reg != p.pff)
+ continue;
+
+ p.port = i + 1;
+ break;
+ }
+
+ if (p.port != -1)
+ break;
+ }
+
+ if (copy_to_user(up, &p, sizeof(p)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ioctl_port_to_pff(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_pff_port *up)
+{
+ struct switchtec_ioctl_pff_port p;
+ struct part_cfg_regs *pcfg;
+
+ if (copy_from_user(&p, up, sizeof(p)))
+ return -EFAULT;
+
+ if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
+ pcfg = stdev->mmio_part_cfg;
+ else if (p.partition < stdev->partition_count)
+ pcfg = &stdev->mmio_part_cfg_all[p.partition];
+ else
+ return -EINVAL;
+
+ switch (p.port) {
+ case 0:
+ p.pff = ioread32(&pcfg->usp_pff_inst_id);
+ break;
+ case SWITCHTEC_IOCTL_PFF_VEP:
+ p.pff = ioread32(&pcfg->vep_pff_inst_id);
+ break;
+ default:
+ if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
+ return -EINVAL;
+ p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
+ break;
+ }
+
+ if (copy_to_user(up, &p, sizeof(p)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct switchtec_user *stuser = filp->private_data;
+ struct switchtec_dev *stdev = stuser->stdev;
+ int rc;
+ void __user *argp = (void __user *)arg;
+
+ rc = lock_mutex_and_test_alive(stdev);
+ if (rc)
+ return rc;
+
+ switch (cmd) {
+ case SWITCHTEC_IOCTL_FLASH_INFO:
+ rc = ioctl_flash_info(stdev, argp);
+ break;
+ case SWITCHTEC_IOCTL_FLASH_PART_INFO:
+ rc = ioctl_flash_part_info(stdev, argp);
+ break;
+ case SWITCHTEC_IOCTL_EVENT_SUMMARY:
+ rc = ioctl_event_summary(stdev, stuser, argp);
+ break;
+ case SWITCHTEC_IOCTL_EVENT_CTL:
+ rc = ioctl_event_ctl(stdev, argp);
+ break;
+ case SWITCHTEC_IOCTL_PFF_TO_PORT:
+ rc = ioctl_pff_to_port(stdev, argp);
+ break;
+ case SWITCHTEC_IOCTL_PORT_TO_PFF:
+ rc = ioctl_port_to_pff(stdev, argp);
+ break;
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ mutex_unlock(&stdev->mrpc_mutex);
+ return rc;
+}
+
+static const struct file_operations switchtec_fops = {
+ .owner = THIS_MODULE,
+ .open = switchtec_dev_open,
+ .release = switchtec_dev_release,
+ .write = switchtec_dev_write,
+ .read = switchtec_dev_read,
+ .poll = switchtec_dev_poll,
+ .unlocked_ioctl = switchtec_dev_ioctl,
+ .compat_ioctl = switchtec_dev_ioctl,
+};
+
+static void stdev_release(struct device *dev)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+
+ kfree(stdev);
+}
+
+static void stdev_kill(struct switchtec_dev *stdev)
+{
+ struct switchtec_user *stuser, *tmpuser;
+
+ pci_clear_master(stdev->pdev);
+
+ cancel_delayed_work_sync(&stdev->mrpc_timeout);
+
+ /* Mark the hardware as unavailable and complete all completions */
+ mutex_lock(&stdev->mrpc_mutex);
+ stdev->alive = false;
+
+ /* Wake up and kill any users waiting on an MRPC request */
+ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
+ complete_all(&stuser->comp);
+ list_del_init(&stuser->list);
+ stuser_put(stuser);
+ }
+
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ /* Wake up any users waiting on event_wq */
+ wake_up_interruptible(&stdev->event_wq);
+}
+
+static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
+{
+ struct switchtec_dev *stdev;
+ int minor;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!stdev)
+ return ERR_PTR(-ENOMEM);
+
+ stdev->alive = true;
+ stdev->pdev = pdev;
+ INIT_LIST_HEAD(&stdev->mrpc_queue);
+ mutex_init(&stdev->mrpc_mutex);
+ stdev->mrpc_busy = 0;
+ INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
+ INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
+ init_waitqueue_head(&stdev->event_wq);
+ atomic_set(&stdev->event_cnt, 0);
+
+ dev = &stdev->dev;
+ device_initialize(dev);
+ dev->class = switchtec_class;
+ dev->parent = &pdev->dev;
+ dev->groups = switchtec_device_groups;
+ dev->release = stdev_release;
+
+ minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
+ GFP_KERNEL);
+ if (minor < 0) {
+ rc = minor;
+ goto err_put;
+ }
+
+ dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
+ dev_set_name(dev, "switchtec%d", minor);
+
+ cdev = &stdev->cdev;
+ cdev_init(cdev, &switchtec_fops);
+ cdev->owner = THIS_MODULE;
+ cdev->kobj.parent = &dev->kobj;
+
+ return stdev;
+
+err_put:
+ put_device(&stdev->dev);
+ return ERR_PTR(rc);
+}
+
+static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
+{
+ size_t off = event_regs[eid].offset;
+ u32 __iomem *hdr_reg;
+ u32 hdr;
+
+ hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
+ hdr = ioread32(hdr_reg);
+
+ if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
+ return 0;
+
+ dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
+ hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
+ iowrite32(hdr, hdr_reg);
+
+ return 1;
+}
+
+static int mask_all_events(struct switchtec_dev *stdev, int eid)
+{
+ int idx;
+ int count = 0;
+
+ if (event_regs[eid].map_reg == part_ev_reg) {
+ for (idx = 0; idx < stdev->partition_count; idx++)
+ count += mask_event(stdev, eid, idx);
+ } else if (event_regs[eid].map_reg == pff_ev_reg) {
+ for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+ if (!stdev->pff_local[idx])
+ continue;
+ count += mask_event(stdev, eid, idx);
+ }
+ } else {
+ count += mask_event(stdev, eid, 0);
+ }
+
+ return count;
+}
+
+static irqreturn_t switchtec_event_isr(int irq, void *dev)
+{
+ struct switchtec_dev *stdev = dev;
+ u32 reg;
+ irqreturn_t ret = IRQ_NONE;
+ int eid, event_count = 0;
+
+ reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
+ if (reg & SWITCHTEC_EVENT_OCCURRED) {
+ dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
+ ret = IRQ_HANDLED;
+ schedule_work(&stdev->mrpc_work);
+ iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
+ }
+
+ for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
+ event_count += mask_all_events(stdev, eid);
+
+ if (event_count) {
+ atomic_inc(&stdev->event_cnt);
+ wake_up_interruptible(&stdev->event_wq);
+ dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
+ event_count);
+ return IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int switchtec_init_isr(struct switchtec_dev *stdev)
+{
+ int nvecs;
+ int event_irq;
+
+ nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nvecs < 0)
+ return nvecs;
+
+ event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
+ if (event_irq < 0 || event_irq >= nvecs)
+ return -EFAULT;
+
+ event_irq = pci_irq_vector(stdev->pdev, event_irq);
+ if (event_irq < 0)
+ return event_irq;
+
+ return devm_request_irq(&stdev->pdev->dev, event_irq,
+ switchtec_event_isr, 0,
+ KBUILD_MODNAME, stdev);
+}
+
+static void init_pff(struct switchtec_dev *stdev)
+{
+ int i;
+ u32 reg;
+ struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
+
+ for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
+ reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
+ if (reg != MICROSEMI_VENDOR_ID)
+ break;
+ }
+
+ stdev->pff_csr_count = i;
+
+ reg = ioread32(&pcfg->usp_pff_inst_id);
+ if (reg < SWITCHTEC_MAX_PFF_CSR)
+ stdev->pff_local[reg] = 1;
+
+ reg = ioread32(&pcfg->vep_pff_inst_id);
+ if (reg < SWITCHTEC_MAX_PFF_CSR)
+ stdev->pff_local[reg] = 1;
+
+ for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
+ reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
+ if (reg < SWITCHTEC_MAX_PFF_CSR)
+ stdev->pff_local[reg] = 1;
+ }
+}
+
+static int switchtec_init_pci(struct switchtec_dev *stdev,
+ struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
+ if (rc)
+ return rc;
+
+ pci_set_master(pdev);
+
+ stdev->mmio = pcim_iomap_table(pdev)[0];
+ stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
+ stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
+ stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
+ stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
+ stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
+ stdev->partition = ioread8(&stdev->mmio_ntb->partition_id);
+ stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
+ stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
+ stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
+ stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
+
+ init_pff(stdev);
+
+ pci_set_drvdata(pdev, stdev);
+
+ return 0;
+}
+
+static int switchtec_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct switchtec_dev *stdev;
+ int rc;
+
+ stdev = stdev_create(pdev);
+ if (IS_ERR(stdev))
+ return PTR_ERR(stdev);
+
+ rc = switchtec_init_pci(stdev, pdev);
+ if (rc)
+ goto err_put;
+
+ rc = switchtec_init_isr(stdev);
+ if (rc) {
+ dev_err(&stdev->dev, "failed to init isr.\n");
+ goto err_put;
+ }
+
+ iowrite32(SWITCHTEC_EVENT_CLEAR |
+ SWITCHTEC_EVENT_EN_IRQ,
+ &stdev->mmio_part_cfg->mrpc_comp_hdr);
+
+ rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1);
+ if (rc)
+ goto err_put;
+
+ rc = device_add(&stdev->dev);
+ if (rc)
+ goto err_devadd;
+
+ dev_info(&stdev->dev, "Management device registered.\n");
+
+ return 0;
+
+err_devadd:
+ cdev_del(&stdev->cdev);
+ stdev_kill(stdev);
+err_put:
+ ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ put_device(&stdev->dev);
+ return rc;
+}
+
+static void switchtec_pci_remove(struct pci_dev *pdev)
+{
+ struct switchtec_dev *stdev = pci_get_drvdata(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ device_del(&stdev->dev);
+ cdev_del(&stdev->cdev);
+ ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ dev_info(&stdev->dev, "unregistered.\n");
+
+ stdev_kill(stdev);
+ put_device(&stdev->dev);
+}
+
+#define SWITCHTEC_PCI_DEVICE(device_id) \
+ { \
+ .vendor = MICROSEMI_VENDOR_ID, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = MICROSEMI_MGMT_CLASSCODE, \
+ .class_mask = 0xFFFFFFFF, \
+ }, \
+ { \
+ .vendor = MICROSEMI_VENDOR_ID, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = MICROSEMI_NTB_CLASSCODE, \
+ .class_mask = 0xFFFFFFFF, \
+ }
+
+static const struct pci_device_id switchtec_pci_tbl[] = {
+ SWITCHTEC_PCI_DEVICE(0x8531), //PFX 24xG3
+ SWITCHTEC_PCI_DEVICE(0x8532), //PFX 32xG3
+ SWITCHTEC_PCI_DEVICE(0x8533), //PFX 48xG3
+ SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3
+ SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3
+ SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3
+ SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3
+ SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
+ SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
+ SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
+
+static struct pci_driver switchtec_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = switchtec_pci_tbl,
+ .probe = switchtec_pci_probe,
+ .remove = switchtec_pci_remove,
+};
+
+static int __init switchtec_init(void)
+{
+ int rc;
+
+ rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
+ "switchtec");
+ if (rc)
+ return rc;
+
+ switchtec_class = class_create(THIS_MODULE, "switchtec");
+ if (IS_ERR(switchtec_class)) {
+ rc = PTR_ERR(switchtec_class);
+ goto err_create_class;
+ }
+
+ rc = pci_register_driver(&switchtec_pci_driver);
+ if (rc)
+ goto err_pci_register;
+
+ pr_info(KBUILD_MODNAME ": loaded.\n");
+
+ return 0;
+
+err_pci_register:
+ class_destroy(switchtec_class);
+
+err_create_class:
+ unregister_chrdev_region(switchtec_devt, max_devices);
+
+ return rc;
+}
+module_init(switchtec_init);
+
+static void __exit switchtec_exit(void)
+{
+ pci_unregister_driver(&switchtec_pci_driver);
+ class_destroy(switchtec_class);
+ unregister_chrdev_region(switchtec_devt, max_devices);
+ ida_destroy(&switchtec_minor_ida);
+
+ pr_info(KBUILD_MODNAME ": unloaded.\n");
+}
+module_exit(switchtec_exit);
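
For context, here is a minimal userspace sketch (not part of the patch) of the event path implemented above: poll() blocks until switchtec_event_isr() increments event_cnt and wakes event_wq, after which SWITCHTEC_IOCTL_EVENT_SUMMARY reports which events fired. The device path is illustrative, and the POLLPRI assumption reflects this sketch rather than a documented guarantee.

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/switchtec_ioctl.h>

    int wait_for_switchtec_event(void)
    {
            struct switchtec_ioctl_event_summary sum;
            struct pollfd pfd;
            int rc, fd = open("/dev/switchtec0", O_RDWR);

            if (fd < 0)
                    return -1;

            pfd.fd = fd;
            pfd.events = POLLPRI;   /* assumed event indication for this sketch */
            rc = poll(&pfd, 1, -1);
            if (rc < 0 || ioctl(fd, SWITCHTEC_IOCTL_EVENT_SUMMARY, &sum) < 0) {
                    close(fd);
                    return -1;
            }

            printf("global event bitmap: 0x%llx\n", (unsigned long long)sum.global);
            close(fd);
            return 0;
    }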
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 53144e78a369..a6fba4804672 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -155,7 +155,7 @@ extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
-extern void free_irq(unsigned int, void *);
+extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;
diff --git a/include/linux/io.h b/include/linux/io.h
index 82ef36eac8a1..2195d9ea4aaa 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -90,6 +90,27 @@ void devm_memunmap(struct device *dev, void *addr);
void *__devm_memremap_pages(struct device *dev, struct resource *res);
+#ifdef CONFIG_PCI
+/*
+ * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
+ * Posting") mandate non-posted configuration transactions. There is
+ * no ioremap API in the kernel that can guarantee non-posted write
+ * semantics across arches so provide a default implementation for
+ * mapping PCI config space that defaults to ioremap_nocache(); arches
+ * should override it if they have memory mapping implementations that
+ * guarantee non-posted write semantics to make the memory mapping

+ * compliant with the PCI specification.
+ */
+#ifndef pci_remap_cfgspace
+#define pci_remap_cfgspace pci_remap_cfgspace
+static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
+ size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+#endif
+
/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
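
As a hedged illustration of the pci_remap_cfgspace() helper added above: a host-bridge driver that needs the non-posted write semantics can use it in place of a bare ioremap(); the window base and size below are invented. The devm_pci_remap_cfgspace() and devm_pci_remap_cfg_resource() wrappers declared later in this series (in include/linux/pci.h) are the managed variants such drivers would normally prefer.

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/sizes.h>

    static void __iomem *example_cfg_base;

    static int example_map_cfg_window(void)
    {
            /* hypothetical 1 MiB ECAM-style window at 0xd0000000 */
            example_cfg_base = pci_remap_cfgspace(0xd0000000, SZ_1M);
            if (!example_cfg_base)
                    return -ENOMEM;

            return 0;
    }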
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
index 4585d6105d68..abbd52466573 100644
--- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -44,4 +44,8 @@
#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4)
+#define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5)
+
+#define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31)
+
#endif /* __LINUX_IMX7_IOMUXC_GPR_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8850fcaf50db..566fda587fcf 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -428,6 +428,16 @@ struct i2c_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */
};
+/* pci_epf */
+
+#define PCI_EPF_NAME_SIZE 20
+#define PCI_EPF_MODULE_PREFIX "pci_epf:"
+
+struct pci_epf_device_id {
+ char name[PCI_EPF_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
/* spi */
#define SPI_NAME_SIZE 32
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 0e0974eceb80..518c8d20647a 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -85,15 +85,4 @@ static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
}
#endif
-#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
-int of_pci_msi_chip_add(struct msi_controller *chip);
-void of_pci_msi_chip_remove(struct msi_controller *chip);
-struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
-#else
-static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
-static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
-static inline struct msi_controller *
-of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
-#endif
-
#endif
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index f0d2b9451270..809c2f1873ac 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -16,6 +16,7 @@
#ifndef DRIVERS_PCI_ECAM_H
#define DRIVERS_PCI_ECAM_H
+#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -68,7 +69,7 @@ extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
#endif
-#ifdef CONFIG_PCI_HOST_GENERIC
+#ifdef CONFIG_PCI_HOST_COMMON
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops);
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
new file mode 100644
index 000000000000..263b89ea5705
--- /dev/null
+++ b/include/linux/pci-ep-cfs.h
@@ -0,0 +1,41 @@
+/**
+ * PCI Endpoint ConfigFS header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EP_CFS_H
+#define __LINUX_PCI_EP_CFS_H
+
+#include <linux/configfs.h>
+
+#ifdef CONFIG_PCI_ENDPOINT_CONFIGFS
+struct config_group *pci_ep_cfs_add_epc_group(const char *name);
+void pci_ep_cfs_remove_epc_group(struct config_group *group);
+struct config_group *pci_ep_cfs_add_epf_group(const char *name);
+void pci_ep_cfs_remove_epf_group(struct config_group *group);
+#else
+static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name)
+{
+ return NULL;
+}
+
+static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
+{
+}
+
+static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name)
+{
+ return NULL;
+}
+
+static inline void pci_ep_cfs_remove_epf_group(struct config_group *group)
+{
+}
+#endif
+#endif /* __LINUX_PCI_EP_CFS_H */
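
A brief, hedged sketch of how an EPC core might use the helper above to expose a controller in configfs; the naming scheme is illustrative, not the actual core code:

    #include <linux/pci-ep-cfs.h>
    #include <linux/pci-epc.h>

    static void example_expose_epc(struct pci_epc *epc)
    {
            /* hang a configfs group named after the EPC device (illustrative) */
            epc->group = pci_ep_cfs_add_epc_group(dev_name(&epc->dev));
    }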
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
new file mode 100644
index 000000000000..af5edbf3eea3
--- /dev/null
+++ b/include/linux/pci-epc.h
@@ -0,0 +1,144 @@
+/**
+ * PCI Endpoint *Controller* (EPC) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EPC_H
+#define __LINUX_PCI_EPC_H
+
+#include <linux/pci-epf.h>
+
+struct pci_epc;
+
+enum pci_epc_irq_type {
+ PCI_EPC_IRQ_UNKNOWN,
+ PCI_EPC_IRQ_LEGACY,
+ PCI_EPC_IRQ_MSI,
+};
+
+/**
+ * struct pci_epc_ops - set of function pointers for performing EPC operations
+ * @write_header: ops to populate configuration space header
+ * @set_bar: ops to configure the BAR
+ * @clear_bar: ops to reset the BAR
+ * @map_addr: ops to map CPU address to PCI address
+ * @unmap_addr: ops to unmap CPU address and PCI address
+ * @set_msi: ops to set the requested number of MSI interrupts in the MSI
+ * capability register
+ * @get_msi: ops to get the number of MSI interrupts allocated by the RC from
+ * the MSI capability register
+ * @raise_irq: ops to raise a legacy or MSI interrupt
+ * @start: ops to start the PCI link
+ * @stop: ops to stop the PCI link
+ * @owner: the module owner containing the ops
+ */
+struct pci_epc_ops {
+ int (*write_header)(struct pci_epc *pci_epc,
+ struct pci_epf_header *hdr);
+ int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags);
+ void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
+ int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
+ u64 pci_addr, size_t size);
+ void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
+ int (*set_msi)(struct pci_epc *epc, u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc);
+ int (*raise_irq)(struct pci_epc *pci_epc,
+ enum pci_epc_irq_type type, u8 interrupt_num);
+ int (*start)(struct pci_epc *epc);
+ void (*stop)(struct pci_epc *epc);
+ struct module *owner;
+};
+
+/**
+ * struct pci_epc_mem - address space of the endpoint controller
+ * @phys_base: physical base address of the PCI address space
+ * @size: the size of the PCI address space
+ * @bitmap: bitmap to manage the PCI address space
+ * @pages: number of bits representing the address region
+ */
+struct pci_epc_mem {
+ phys_addr_t phys_base;
+ size_t size;
+ unsigned long *bitmap;
+ int pages;
+};
+
+/**
+ * struct pci_epc - represents the PCI EPC device
+ * @dev: PCI EPC device
+ * @pci_epf: list of endpoint functions present in this EPC device
+ * @ops: function pointers for performing endpoint operations
+ * @mem: address space of the endpoint controller
+ * @max_functions: max number of functions that can be configured in this EPC
+ * @group: configfs group representing the PCI EPC device
+ * @lock: spinlock to protect pci_epc ops
+ */
+struct pci_epc {
+ struct device dev;
+ struct list_head pci_epf;
+ const struct pci_epc_ops *ops;
+ struct pci_epc_mem *mem;
+ u8 max_functions;
+ struct config_group *group;
+ /* spinlock to protect against concurrent access of EP controller */
+ spinlock_t lock;
+};
+
+#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
+
+#define pci_epc_create(dev, ops) \
+ __pci_epc_create((dev), (ops), THIS_MODULE)
+#define devm_pci_epc_create(dev, ops) \
+ __devm_pci_epc_create((dev), (ops), THIS_MODULE)
+
+static inline void epc_set_drvdata(struct pci_epc *epc, void *data)
+{
+ dev_set_drvdata(&epc->dev, data);
+}
+
+static inline void *epc_get_drvdata(struct pci_epc *epc)
+{
+ return dev_get_drvdata(&epc->dev);
+}
+
+struct pci_epc *
+__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner);
+struct pci_epc *
+__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner);
+void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
+void pci_epc_destroy(struct pci_epc *epc);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
+int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags);
+void pci_epc_clear_bar(struct pci_epc *epc, int bar);
+int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ u64 pci_addr, size_t size);
+void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
+int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc);
+int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+ u8 interrupt_num);
+int pci_epc_start(struct pci_epc *epc);
+void pci_epc_stop(struct pci_epc *epc);
+struct pci_epc *pci_epc_get(const char *epc_name);
+void pci_epc_put(struct pci_epc *epc);
+
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size);
+void pci_epc_mem_exit(struct pci_epc *epc);
+void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+ phys_addr_t *phys_addr, size_t size);
+void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ void __iomem *virt_addr, size_t size);
+#endif /* __LINUX_PCI_EPC_H */
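
To show how a controller driver plugs into this API, here is a minimal, hedged sketch of an EPC platform driver registering a pci_epc_ops table via devm_pci_epc_create(); all example_* names, register programming and addresses are invented stubs:

    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/pci-epc.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>

    static int example_epc_write_header(struct pci_epc *epc,
                                        struct pci_epf_header *hdr)
    {
            /* program the controller's standard config header registers here */
            return 0;
    }

    static int example_epc_start(struct pci_epc *epc)
    {
            /* start link training / enable the LTSSM here */
            return 0;
    }

    static const struct pci_epc_ops example_epc_ops = {
            .write_header   = example_epc_write_header,
            .start          = example_epc_start,
    };

    static int example_epc_probe(struct platform_device *pdev)
    {
            struct pci_epc *epc;

            epc = devm_pci_epc_create(&pdev->dev, &example_epc_ops);
            if (IS_ERR(epc))
                    return PTR_ERR(epc);

            /* hypothetical outbound address space used for map_addr windows */
            return pci_epc_mem_init(epc, 0x20000000, SZ_16M);
    }

    static struct platform_driver example_epc_driver = {
            .probe  = example_epc_probe,
            .driver = {
                    .name = "example-pci-epc",
            },
    };
    module_platform_driver(example_epc_driver);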
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
new file mode 100644
index 000000000000..0d529cb90143
--- /dev/null
+++ b/include/linux/pci-epf.h
@@ -0,0 +1,162 @@
+/**
+ * PCI Endpoint *Function* (EPF) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EPF_H
+#define __LINUX_PCI_EPF_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct pci_epf;
+
+enum pci_interrupt_pin {
+ PCI_INTERRUPT_UNKNOWN,
+ PCI_INTERRUPT_INTA,
+ PCI_INTERRUPT_INTB,
+ PCI_INTERRUPT_INTC,
+ PCI_INTERRUPT_INTD,
+};
+
+enum pci_barno {
+ BAR_0,
+ BAR_1,
+ BAR_2,
+ BAR_3,
+ BAR_4,
+ BAR_5,
+};
+
+/**
+ * struct pci_epf_header - represents standard configuration header
+ * @vendorid: identifies device manufacturer
+ * @deviceid: identifies a particular device
+ * @revid: specifies a device-specific revision identifier
+ * @progif_code: identifies a specific register-level programming interface
+ * @subclass_code: identifies more specifically the function of the device
+ * @baseclass_code: broadly classifies the type of function the device performs
+ * @cache_line_size: specifies the system cacheline size in units of DWORDs
+ * @subsys_vendor_id: vendor of the add-in card or subsystem
+ * @subsys_id: id specific to vendor
+ * @interrupt_pin: interrupt pin the device (or device function) uses
+ */
+struct pci_epf_header {
+ u16 vendorid;
+ u16 deviceid;
+ u8 revid;
+ u8 progif_code;
+ u8 subclass_code;
+ u8 baseclass_code;
+ u8 cache_line_size;
+ u16 subsys_vendor_id;
+ u16 subsys_id;
+ enum pci_interrupt_pin interrupt_pin;
+};
+
+/**
+ * struct pci_epf_ops - set of function pointers for performing EPF operations
+ * @bind: ops to perform when an EPC device has been bound to the EPF device
+ * @unbind: ops to perform when the binding has been lost between an EPC device
+ * and the EPF device
+ * @linkup: ops to perform when the EPC device has established a connection with
+ * a host system
+ */
+struct pci_epf_ops {
+ int (*bind)(struct pci_epf *epf);
+ void (*unbind)(struct pci_epf *epf);
+ void (*linkup)(struct pci_epf *epf);
+};
+
+/**
+ * struct pci_epf_driver - represents the PCI EPF driver
+ * @probe: ops to perform when a new EPF device has been bound to the EPF driver
+ * @remove: ops to perform when the binding between the EPF device and EPF
+ * driver is broken
+ * @driver: PCI EPF driver
+ * @ops: set of function pointers for performing EPF operations
+ * @owner: the owner of the module that registers the PCI EPF driver
+ * @group: configfs group corresponding to the PCI EPF driver
+ * @id_table: identifies EPF devices for probing
+ */
+struct pci_epf_driver {
+ int (*probe)(struct pci_epf *epf);
+ int (*remove)(struct pci_epf *epf);
+
+ struct device_driver driver;
+ struct pci_epf_ops *ops;
+ struct module *owner;
+ struct config_group *group;
+ const struct pci_epf_device_id *id_table;
+};
+
+#define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \
+ driver))
+
+/**
+ * struct pci_epf_bar - represents the BAR of EPF device
+ * @phys_addr: physical address that should be mapped to the BAR
+ * @size: the size of the address space present in BAR
+ */
+struct pci_epf_bar {
+ dma_addr_t phys_addr;
+ size_t size;
+};
+
+/**
+ * struct pci_epf - represents the PCI EPF device
+ * @dev: the PCI EPF device
+ * @name: the name of the PCI EPF device
+ * @header: represents standard configuration header
+ * @bar: represents the BAR of EPF device
+ * @msi_interrupts: number of MSI interrupts required by this function
+ * @func_no: unique function number within this endpoint device
+ * @epc: the EPC device to which this EPF device is bound
+ * @driver: the EPF driver to which this EPF device is bound
+ * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
+ */
+struct pci_epf {
+ struct device dev;
+ const char *name;
+ struct pci_epf_header *header;
+ struct pci_epf_bar bar[6];
+ u8 msi_interrupts;
+ u8 func_no;
+
+ struct pci_epc *epc;
+ struct pci_epf_driver *driver;
+ struct list_head list;
+};
+
+#define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
+
+#define pci_epf_register_driver(driver) \
+ __pci_epf_register_driver((driver), THIS_MODULE)
+
+static inline void epf_set_drvdata(struct pci_epf *epf, void *data)
+{
+ dev_set_drvdata(&epf->dev, data);
+}
+
+static inline void *epf_get_drvdata(struct pci_epf *epf)
+{
+ return dev_get_drvdata(&epf->dev);
+}
+
+struct pci_epf *pci_epf_create(const char *name);
+void pci_epf_destroy(struct pci_epf *epf);
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+ struct module *owner);
+void pci_epf_unregister_driver(struct pci_epf_driver *driver);
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+int pci_epf_bind(struct pci_epf *epf);
+void pci_epf_unbind(struct pci_epf *epf);
+void pci_epf_linkup(struct pci_epf *epf);
+#endif /* __LINUX_PCI_EPF_H */
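
On the function side, a hedged skeleton of an EPF driver built on the interfaces above; the bind hook is where the function hands its configuration header (and, in a real driver, its BARs) to the bound controller. Every example_* name and ID below is invented, and matching is done by name via the pci_epf_device_id table:

    #include <linux/module.h>
    #include <linux/pci-epc.h>
    #include <linux/pci-epf.h>

    static struct pci_epf_header example_epf_header = {
            .vendorid       = 0x104c,       /* hypothetical IDs for the sketch */
            .deviceid       = 0xb500,
            .baseclass_code = 0xff,
            .interrupt_pin  = PCI_INTERRUPT_INTA,
    };

    static int example_epf_bind(struct pci_epf *epf)
    {
            /* a real driver would also allocate BAR space and call pci_epc_set_bar() */
            return pci_epc_write_header(epf->epc, epf->header);
    }

    static void example_epf_unbind(struct pci_epf *epf)
    {
            pci_epc_clear_bar(epf->epc, BAR_0);     /* illustrative cleanup */
    }

    static void example_epf_linkup(struct pci_epf *epf)
    {
            /* the host has established the link; start servicing the function */
    }

    static struct pci_epf_ops example_epf_ops = {
            .bind   = example_epf_bind,
            .unbind = example_epf_unbind,
            .linkup = example_epf_linkup,
    };

    static int example_epf_probe(struct pci_epf *epf)
    {
            epf->header = &example_epf_header;
            return 0;
    }

    static const struct pci_epf_device_id example_epf_ids[] = {
            { .name = "pci_epf_example" },  /* hypothetical function name */
            { },
    };

    static struct pci_epf_driver example_epf_driver = {
            .probe          = example_epf_probe,
            .ops            = &example_epf_ops,
            .id_table       = example_epf_ids,
            .driver         = {
                    .name   = "pci_epf_example",
            },
    };

    static int __init example_epf_init(void)
    {
            return pci_epf_register_driver(&example_epf_driver);
    }
    module_init(example_epf_init);

    static void __exit example_epf_exit(void)
    {
            pci_epf_unregister_driver(&example_epf_driver);
    }
    module_exit(example_epf_exit);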
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f27be8432e82..33c2b0b77429 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -28,6 +28,7 @@
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <uapi/linux/pci.h>
@@ -178,6 +179,10 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
/* Get VPD from function 0 VPD */
PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ /* a non-root bridge where translation occurs, stop alias search here */
+ PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
+ /* Do not use FLR even if device advertises PCI_AF_CAP */
+ PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
};
enum pci_irq_reroute_variant {
@@ -397,6 +402,8 @@ struct pci_dev {
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
char *driver_override; /* Driver name to force a match */
+
+ unsigned long priv_flags; /* Private flags for the pci driver */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -941,32 +948,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
-static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
-{
- return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
-{
- return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
- u32 *val)
-{
- return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
-{
- return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
-{
- return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
- u32 val)
-{
- return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
-}
+int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
+int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
+int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
+int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
+int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
+int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
@@ -1053,6 +1040,7 @@ int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
+void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
@@ -1073,6 +1061,11 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
+int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
+ irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
+ const char *fmt, ...);
+void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
+
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
@@ -1200,6 +1193,11 @@ unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size);
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
@@ -1298,10 +1296,8 @@ struct msix_entry {
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
-void pci_msi_shutdown(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
-void pci_msix_shutdown(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
int pci_msi_enabled(void);
@@ -1327,10 +1323,8 @@ int pci_irq_get_node(struct pci_dev *pdev, int vec);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline void pci_msi_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline void pci_msix_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
static inline int pci_msi_enabled(void) { return 0; }
@@ -1623,6 +1617,36 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#include <asm/pci.h>
+/* These two functions provide almost identical functionality. Depending
+ * on the architecture, one will be implemented as a wrapper around the
+ * other (in drivers/pci/mmap.c).
+ *
+ * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
+ * is expected to be an offset within that region.
+ *
+ * pci_mmap_page_range() is the legacy architecture-specific interface,
+ * which accepts a "user visible" resource address converted by
+ * pci_resource_to_user(), as used in the legacy mmap() interface in
+ * /proc/bus/pci/.
+ */
+int pci_mmap_resource_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+#ifndef arch_can_pci_mmap_wc
+#define arch_can_pci_mmap_wc() 0
+#endif
+
+#ifndef arch_can_pci_mmap_io
+#define arch_can_pci_mmap_io() 0
+#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
+#else
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
+#endif
+
#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus) NULL
#endif
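
A hedged sketch of the new pci_request_irq()/pci_free_irq() helpers declared above, used together with pci_alloc_irq_vectors(); the handler is a stub and all example_* names are invented. The printf-style format string becomes the devname shown in /proc/interrupts:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static irqreturn_t example_isr(int irq, void *data)
    {
            /* acknowledge and handle the device interrupt here */
            return IRQ_HANDLED;
    }

    static int example_setup_irq(struct pci_dev *pdev, void *priv)
    {
            int nvecs;

            nvecs = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
            if (nvecs < 0)
                    return nvecs;

            return pci_request_irq(pdev, 0, example_isr, NULL, priv,
                                   "example-%s", pci_name(pdev));
    }

    static void example_teardown_irq(struct pci_dev *pdev, void *priv)
    {
            pci_free_irq(pdev, 0, priv);
            pci_free_irq_vectors(pdev);
    }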
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a4f77feecbb0..5f6b71d15393 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -862,6 +862,8 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+#define PCI_DEVICE_ID_TI_DRA74x 0xb500
+#define PCI_DEVICE_ID_TI_DRA72x 0xb501
#define PCI_VENDOR_ID_SONY 0x104d
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6b0e2758585f..662c592b74dd 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -333,6 +333,7 @@ header-y += parport.h
header-y += patchkey.h
header-y += pci.h
header-y += pci_regs.h
+header-y += pcitest.h
header-y += perf_event.h
header-y += personality.h
header-y += pfkeyv2.h
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 18a26c16bd80..d56bb0051009 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -114,7 +114,7 @@
#define PCI_SUBSYSTEM_ID 0x2e
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
#define PCI_ROM_ADDRESS_ENABLE 0x01
-#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
+#define PCI_ROM_ADDRESS_MASK (~0x7ffU)
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
new file mode 100644
index 000000000000..a6aa10c45ad1
--- /dev/null
+++ b/include/uapi/linux/pcitest.h
@@ -0,0 +1,19 @@
+/**
+ * pcitest.h - PCI test uapi defines
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ */
+
+#ifndef __UAPI_LINUX_PCITEST_H
+#define __UAPI_LINUX_PCITEST_H
+
+#define PCITEST_BAR _IO('P', 0x1)
+#define PCITEST_LEGACY_IRQ _IO('P', 0x2)
+#define PCITEST_MSI _IOW('P', 0x3, int)
+#define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
+#define PCITEST_READ _IOW('P', 0x5, unsigned long)
+#define PCITEST_COPY _IOW('P', 0x6, unsigned long)
+
+#endif /* __UAPI_LINUX_PCITEST_H */
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
new file mode 100644
index 000000000000..3e824e1a6495
--- /dev/null
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -0,0 +1,132 @@
+/*
+ * Microsemi Switchtec PCIe Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SWITCHTEC_IOCTL_H
+#define _UAPI_LINUX_SWITCHTEC_IOCTL_H
+
+#include <linux/types.h>
+
+#define SWITCHTEC_IOCTL_PART_CFG0 0
+#define SWITCHTEC_IOCTL_PART_CFG1 1
+#define SWITCHTEC_IOCTL_PART_IMG0 2
+#define SWITCHTEC_IOCTL_PART_IMG1 3
+#define SWITCHTEC_IOCTL_PART_NVLOG 4
+#define SWITCHTEC_IOCTL_PART_VENDOR0 5
+#define SWITCHTEC_IOCTL_PART_VENDOR1 6
+#define SWITCHTEC_IOCTL_PART_VENDOR2 7
+#define SWITCHTEC_IOCTL_PART_VENDOR3 8
+#define SWITCHTEC_IOCTL_PART_VENDOR4 9
+#define SWITCHTEC_IOCTL_PART_VENDOR5 10
+#define SWITCHTEC_IOCTL_PART_VENDOR6 11
+#define SWITCHTEC_IOCTL_PART_VENDOR7 12
+#define SWITCHTEC_IOCTL_NUM_PARTITIONS 13
+
+struct switchtec_ioctl_flash_info {
+ __u64 flash_length;
+ __u32 num_partitions;
+ __u32 padding;
+};
+
+struct switchtec_ioctl_flash_part_info {
+ __u32 flash_partition;
+ __u32 address;
+ __u32 length;
+ __u32 active;
+};
+
+struct switchtec_ioctl_event_summary {
+ __u64 global;
+ __u64 part_bitmap;
+ __u32 local_part;
+ __u32 padding;
+ __u32 part[48];
+ __u32 pff[48];
+};
+
+#define SWITCHTEC_IOCTL_EVENT_STACK_ERROR 0
+#define SWITCHTEC_IOCTL_EVENT_PPU_ERROR 1
+#define SWITCHTEC_IOCTL_EVENT_ISP_ERROR 2
+#define SWITCHTEC_IOCTL_EVENT_SYS_RESET 3
+#define SWITCHTEC_IOCTL_EVENT_FW_EXC 4
+#define SWITCHTEC_IOCTL_EVENT_FW_NMI 5
+#define SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL 6
+#define SWITCHTEC_IOCTL_EVENT_FW_FATAL 7
+#define SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP 8
+#define SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC 9
+#define SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP 10
+#define SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC 11
+#define SWITCHTEC_IOCTL_EVENT_GPIO_INT 12
+#define SWITCHTEC_IOCTL_EVENT_PART_RESET 13
+#define SWITCHTEC_IOCTL_EVENT_MRPC_COMP 14
+#define SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC 15
+#define SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP 16
+#define SWITCHTEC_IOCTL_EVENT_AER_IN_P2P 17
+#define SWITCHTEC_IOCTL_EVENT_AER_IN_VEP 18
+#define SWITCHTEC_IOCTL_EVENT_DPC 19
+#define SWITCHTEC_IOCTL_EVENT_CTS 20
+#define SWITCHTEC_IOCTL_EVENT_HOTPLUG 21
+#define SWITCHTEC_IOCTL_EVENT_IER 22
+#define SWITCHTEC_IOCTL_EVENT_THRESH 23
+#define SWITCHTEC_IOCTL_EVENT_POWER_MGMT 24
+#define SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING 25
+#define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26
+#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27
+#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28
+#define SWITCHTEC_IOCTL_MAX_EVENTS 29
+
+#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
+#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
+
+#define SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR (1 << 0)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL (1 << 1)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG (1 << 2)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI (1 << 3)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL (1 << 4)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL (1 << 5)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG (1 << 6)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI (1 << 7)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL (1 << 8)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED (~0x1ff)
+
+struct switchtec_ioctl_event_ctl {
+ __u32 event_id;
+ __s32 index;
+ __u32 flags;
+ __u32 occurred;
+ __u32 count;
+ __u32 data[5];
+};
+
+#define SWITCHTEC_IOCTL_PFF_VEP 100
+struct switchtec_ioctl_pff_port {
+ __u32 pff;
+ __u32 partition;
+ __u32 port;
+};
+
+#define SWITCHTEC_IOCTL_FLASH_INFO \
+ _IOR('W', 0x40, struct switchtec_ioctl_flash_info)
+#define SWITCHTEC_IOCTL_FLASH_PART_INFO \
+ _IOWR('W', 0x41, struct switchtec_ioctl_flash_part_info)
+#define SWITCHTEC_IOCTL_EVENT_SUMMARY \
+ _IOR('W', 0x42, struct switchtec_ioctl_event_summary)
+#define SWITCHTEC_IOCTL_EVENT_CTL \
+ _IOWR('W', 0x43, struct switchtec_ioctl_event_ctl)
+#define SWITCHTEC_IOCTL_PFF_TO_PORT \
+ _IOWR('W', 0x44, struct switchtec_ioctl_pff_port)
+#define SWITCHTEC_IOCTL_PORT_TO_PFF \
+ _IOWR('W', 0x45, struct switchtec_ioctl_pff_port)
+
+#endif
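
A hedged userspace sketch of the translation ioctls defined above: given a PFF instance number, SWITCHTEC_IOCTL_PFF_TO_PORT fills in the owning partition and port. The device path is illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/switchtec_ioctl.h>

    int pff_to_port(unsigned int pff)
    {
            struct switchtec_ioctl_pff_port p = { .pff = pff };
            int fd = open("/dev/switchtec0", O_RDWR);

            if (fd < 0)
                    return -1;

            if (ioctl(fd, SWITCHTEC_IOCTL_PFF_TO_PORT, &p) < 0) {
                    close(fd);
                    return -1;
            }

            printf("pff %u -> partition %u port %u\n", p.pff, p.partition, p.port);
            close(fd);
            return 0;
    }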
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ae1c90f20381..070be980c37a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1559,7 +1559,7 @@ void remove_irq(unsigned int irq, struct irqaction *act)
struct irq_desc *desc = irq_to_desc(irq);
if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
- __free_irq(irq, act->dev_id);
+ __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
@@ -1576,20 +1576,27 @@ EXPORT_SYMBOL_GPL(remove_irq);
* have completed.
*
* This function must not be called from interrupt context.
+ *
+ * Returns the devname argument passed to request_irq.
*/
-void free_irq(unsigned int irq, void *dev_id)
+const void *free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ const char *devname;
if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
- return;
+ return NULL;
#ifdef CONFIG_SMP
if (WARN_ON(desc->affinity_notify))
desc->affinity_notify = NULL;
#endif
- kfree(__free_irq(irq, dev_id));
+ action = __free_irq(irq, dev_id);
+ if (!action)
+ return NULL;
+ devname = action->name;
+ kfree(action);
+ return devname;
}
EXPORT_SYMBOL(free_irq);
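
The point of the new return value is that a caller which built the devname dynamically for request_irq() (as the pci_request_irq()/pci_free_irq() helpers elsewhere in this series do) can reclaim it on free. A minimal, hedged sketch, assuming the name was allocated with kasprintf() at request time:

    #include <linux/interrupt.h>
    #include <linux/slab.h>

    static void example_free_named_irq(unsigned int irq, void *dev_id)
    {
            /* free_irq() now hands back the devname passed to request_irq() */
            kfree(free_irq(irq, dev_id));
    }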
diff --git a/lib/devres.c b/lib/devres.c
index cb1464c411a2..78eca713b1d9 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -17,7 +17,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
/**
* devm_ioremap - Managed ioremap()
* @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
+ * @offset: Resource address to map
* @size: Size of map
*
* Managed ioremap(). Map is automatically unmapped on driver detach.
@@ -45,7 +45,7 @@ EXPORT_SYMBOL(devm_ioremap);
/**
* devm_ioremap_nocache - Managed ioremap_nocache()
* @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
+ * @offset: Resource address to map
* @size: Size of map
*
* Managed ioremap_nocache(). Map is automatically unmapped on driver
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
/**
* devm_ioremap_wc - Managed ioremap_wc()
* @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
+ * @offset: Resource address to map
* @size: Size of map
*
* Managed ioremap_wc(). Map is automatically unmapped on driver detach.
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
new file mode 100644
index 000000000000..ad54a58d7dda
--- /dev/null
+++ b/tools/pci/pcitest.c
@@ -0,0 +1,186 @@
+/**
+ * Userspace PCI Endpoint Test Module
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <linux/pcitest.h>
+
+#define BILLION 1E9
+
+static char *result[] = { "NOT OKAY", "OKAY" };
+
+struct pci_test {
+ char *device;
+ char barnum;
+ bool legacyirq;
+ unsigned int msinum;
+ bool read;
+ bool write;
+ bool copy;
+ unsigned long size;
+};
+
+static int run_test(struct pci_test *test)
+{
+ long ret;
+ int fd;
+ struct timespec start, end;
+ double time;
+
+ fd = open(test->device, O_RDWR);
+ if (fd < 0) {
+ perror("can't open PCI Endpoint Test device");
+ return fd;
+ }
+
+ if (test->barnum >= 0 && test->barnum <= 5) {
+ ret = ioctl(fd, PCITEST_BAR, test->barnum);
+ fprintf(stdout, "BAR%d:\t\t", test->barnum);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->legacyirq) {
+ ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
+ fprintf(stdout, "LEGACY IRQ:\t");
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->msinum > 0 && test->msinum <= 32) {
+ ret = ioctl(fd, PCITEST_MSI, test->msinum);
+ fprintf(stdout, "MSI%d:\t\t", test->msinum);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->write) {
+ ret = ioctl(fd, PCITEST_WRITE, test->size);
+ fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->read) {
+ ret = ioctl(fd, PCITEST_READ, test->size);
+ fprintf(stdout, "READ (%7ld bytes):\t\t", test->size);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->copy) {
+ ret = ioctl(fd, PCITEST_COPY, test->size);
+ fprintf(stdout, "COPY (%7ld bytes):\t\t", test->size);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ fflush(stdout);
+
+ close(fd);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int c;
+ struct pci_test *test;
+
+ test = calloc(1, sizeof(*test));
+ if (!test) {
+ perror("Fail to allocate memory for pci_test\n");
+ return -ENOMEM;
+ }
+
+ /* since '0' is a valid BAR number, initialize it to -1 */
+ test->barnum = -1;
+
+ /* set default size as 100KB */
+ test->size = 0x19000;
+
+ /* set default endpoint device */
+ test->device = "/dev/pci-endpoint-test.0";
+
+ while ((c = getopt(argc, argv, "D:b:m:lrwcs:")) != EOF)
+ switch (c) {
+ case 'D':
+ test->device = optarg;
+ continue;
+ case 'b':
+ test->barnum = atoi(optarg);
+ if (test->barnum < 0 || test->barnum > 5)
+ goto usage;
+ continue;
+ case 'l':
+ test->legacyirq = true;
+ continue;
+ case 'm':
+ test->msinum = atoi(optarg);
+ if (test->msinum < 1 || test->msinum > 32)
+ goto usage;
+ continue;
+ case 'r':
+ test->read = true;
+ continue;
+ case 'w':
+ test->write = true;
+ continue;
+ case 'c':
+ test->copy = true;
+ continue;
+ case 's':
+ test->size = strtoul(optarg, NULL, 0);
+ continue;
+ case '?':
+ case 'h':
+ default:
+usage:
+ fprintf(stderr,
+ "usage: %s [options]\n"
+ "Options:\n"
+ "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n"
+ "\t-b <bar num> BAR test (bar number between 0..5)\n"
+ "\t-m <msi num> MSI test (msi number between 1..32)\n"
+ "\t-r Read buffer test\n"
+ "\t-w Write buffer test\n"
+ "\t-c Copy buffer test\n"
+ "\t-s <size> Size of buffer {default: 100KB}\n",
+ argv[0]);
+ return -EINVAL;
+ }
+
+ run_test(test);
+ return 0;
+}
diff --git a/tools/pci/pcitest.sh b/tools/pci/pcitest.sh
new file mode 100644
index 000000000000..5442bbea4c22
--- /dev/null
+++ b/tools/pci/pcitest.sh
@@ -0,0 +1,56 @@
+#!/bin/sh
+
+echo "BAR tests"
+echo
+
+bar=0
+
+while [ $bar -lt 6 ]
+do
+ pcitest -b $bar
+ bar=`expr $bar + 1`
+done
+echo
+
+echo "Interrupt tests"
+echo
+
+pcitest -l
+msi=1
+
+while [ $msi -lt 33 ]
+do
+ pcitest -m $msi
+ msi=`expr $msi + 1`
+done
+echo
+
+echo "Read Tests"
+echo
+
+pcitest -r -s 1
+pcitest -r -s 1024
+pcitest -r -s 1025
+pcitest -r -s 1024000
+pcitest -r -s 1024001
+echo
+
+echo "Write Tests"
+echo
+
+pcitest -w -s 1
+pcitest -w -s 1024
+pcitest -w -s 1025
+pcitest -w -s 1024000
+pcitest -w -s 1024001
+echo
+
+echo "Copy Tests"
+echo
+
+pcitest -c -s 1
+pcitest -c -s 1024
+pcitest -c -s 1025
+pcitest -c -s 1024000
+pcitest -c -s 1024001
+echo