
Merge tag 'pci-v4.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

Pull PCI updates from Bjorn Helgaas:

 - add framework for supporting PCIe devices in Endpoint mode (Kishon
   Vijay Abraham I)

 - use non-postable PCI config space mappings when possible (Lorenzo
   Pieralisi)

 - clean up and unify mmap of PCI BARs (David Woodhouse)

 - export and unify Function Level Reset support (Christoph Hellwig)

 - avoid FLR for Intel 82579 NICs (Sasha Neftin)

 - add pci_request_irq() and pci_free_irq() helpers (Christoph Hellwig)

 - short-circuit config access failures for disconnected devices (Keith
   Busch)

 - remove D3 sleep delay when possible (Adrian Hunter)

 - freeze PME scan before suspending devices (Lukas Wunner)

 - stop disabling MSI/MSI-X in pci_device_shutdown() (Prarit Bhargava)

 - disable boot interrupt quirk for ASUS M2N-LR (Stefan Assmann)

 - add arch-specific alignment control to improve device passthrough by
   avoiding multiple BARs in a page (Yongji Xie)

 - add sysfs sriov_drivers_autoprobe to control VF driver binding
   (Bodong Wang)

 - allow slots below PCI-to-PCIe "reverse bridges" (Bjorn Helgaas)

 - fix crashes when unbinding host controllers that don't support
   removal (Brian Norris)

 - add driver for MicroSemi Switchtec management interface (Logan
   Gunthorpe)

 - add driver for Faraday Technology FTPCI100 host bridge (Linus
   Walleij)

 - add i.MX7D support (Andrey Smirnov)

 - use generic MSI support for Aardvark (Thomas Petazzoni)

 - make Rockchip driver modular (Brian Norris)

 - advertise 128-byte Read Completion Boundary support for Rockchip
   (Shawn Lin)

 - advertise PCI_EXP_LNKSTA_SLC for Rockchip root port (Shawn Lin)

 - convert atomic_t to refcount_t in HV driver (Elena Reshetova)

 - add CPU IRQ affinity in HV driver (K. Y. Srinivasan)

 - fix PCI bus removal in HV driver (Long Li)

 - add support for ThunderX2 DMA alias topology (Jayachandran C)

 - add ThunderX pass2.x 2nd node MCFG quirk (Tomasz Nowicki)

 - add ITE 8893 bridge DMA alias quirk (Jarod Wilson)

 - restrict Cavium ACS quirk only to CN81xx/CN83xx/CN88xx devices
   (Manish Jaggi)

* tag 'pci-v4.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (146 commits)
  PCI: Don't allow unbinding host controllers that aren't prepared
  ARM: DRA7: clockdomain: Change the CLKTRCTRL of CM_PCIE_CLKSTCTRL to SW_WKUP
  MAINTAINERS: Add PCI Endpoint maintainer
  Documentation: PCI: Add userguide for PCI endpoint test function
  tools: PCI: Add sample test script to invoke pcitest
  tools: PCI: Add a userspace tool to test PCI endpoint
  Documentation: misc-devices: Add Documentation for pci-endpoint-test driver
  misc: Add host side PCI driver for PCI test function device
  PCI: Add device IDs for DRA74x and DRA72x
  dt-bindings: PCI: dra7xx: Add DT bindings to enable unaligned access
  PCI: dwc: dra7xx: Workaround for errata id i870
  dt-bindings: PCI: dra7xx: Add DT bindings for PCI dra7xx EP mode
  PCI: dwc: dra7xx: Add EP mode support
  PCI: dwc: dra7xx: Facilitate wrapper and MSI interrupts to be enabled independently
  dt-bindings: PCI: Add DT bindings for PCI designware EP mode
  PCI: dwc: designware: Add EP mode support
  Documentation: PCI: Add binding documentation for pci-test endpoint function
  ixgbe: Use pcie_flr() instead of duplicating it
  IB/hfi1: Use pcie_flr() instead of duplicating it
  PCI: imx6: Fix spelling mistake: "contol" -> "control"
  ...
Linus Torvalds
commit 857f864014

100 changed files with 5807 additions and 759 deletions
  1. Documentation/ABI/testing/sysfs-bus-pci (+22 -0)
  2. Documentation/ABI/testing/sysfs-class-switchtec (+96 -0)
  3. Documentation/PCI/00-INDEX (+10 -0)
  4. Documentation/PCI/endpoint/function/binding/pci-test.txt (+17 -0)
  5. Documentation/PCI/endpoint/pci-endpoint-cfs.txt (+105 -0)
  6. Documentation/PCI/endpoint/pci-endpoint.txt (+215 -0)
  7. Documentation/PCI/endpoint/pci-test-function.txt (+66 -0)
  8. Documentation/PCI/endpoint/pci-test-howto.txt (+179 -0)
  9. Documentation/PCI/pci-iov-howto.txt (+12 -0)
  10. Documentation/devicetree/bindings/pci/designware-pcie.txt (+18 -8)
  11. Documentation/devicetree/bindings/pci/faraday,ftpci100.txt (+129 -0)
  12. Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt (+13 -1)
  13. Documentation/devicetree/bindings/pci/ti-pci.txt (+35 -7)
  14. Documentation/driver-model/devres.txt (+4 -2)
  15. Documentation/filesystems/sysfs-pci.txt (+12 -3)
  16. Documentation/ioctl/ioctl-number.txt (+1 -0)
  17. Documentation/misc-devices/pci-endpoint-test.txt (+35 -0)
  18. Documentation/switchtec.txt (+80 -0)
  19. MAINTAINERS (+20 -0)
  20. arch/arm/include/asm/io.h (+10 -0)
  21. arch/arm/include/asm/pci.h (+1 -2)
  22. arch/arm/kernel/bios32.c (+0 -19)
  23. arch/arm/mach-omap2/clockdomains7xx_data.c (+1 -1)
  24. arch/arm/mm/ioremap.c (+7 -0)
  25. arch/arm/mm/nommu.c (+12 -0)
  26. arch/arm64/include/asm/io.h (+10 -0)
  27. arch/arm64/include/asm/pci.h (+2 -0)
  28. arch/cris/arch-v32/drivers/pci/bios.c (+0 -22)
  29. arch/cris/include/asm/pci.h (+1 -3)
  30. arch/ia64/include/asm/pci.h (+3 -2)
  31. arch/ia64/pci/pci.c (+0 -46)
  32. arch/microblaze/include/asm/pci.h (+2 -4)
  33. arch/microblaze/pci/pci-common.c (+1 -1)
  34. arch/mips/include/asm/pci.h (+1 -4)
  35. arch/mips/pci/pci.c (+0 -24)
  36. arch/mn10300/include/asm/pci.h (+1 -3)
  37. arch/mn10300/unit-asb2305/pci-asb2305.c (+0 -23)
  38. arch/parisc/include/asm/pci.h (+1 -3)
  39. arch/parisc/kernel/pci.c (+0 -28)
  40. arch/powerpc/include/asm/machdep.h (+2 -0)
  41. arch/powerpc/include/asm/pci.h (+4 -5)
  42. arch/powerpc/kernel/pci-common.c (+10 -1)
  43. arch/powerpc/platforms/powernv/pci-ioda.c (+7 -0)
  44. arch/sh/drivers/pci/pci.c (+0 -21)
  45. arch/sh/include/asm/pci.h (+2 -2)
  46. arch/sparc/include/asm/pci_64.h (+1 -4)
  47. arch/sparc/kernel/pci.c (+3 -3)
  48. arch/unicore32/include/asm/pci.h (+1 -2)
  49. arch/unicore32/kernel/pci.c (+0 -23)
  50. arch/x86/include/asm/pci.h (+3 -4)
  51. arch/x86/pci/i386.c (+0 -47)
  52. arch/xtensa/include/asm/pci.h (+2 -5)
  53. arch/xtensa/kernel/pci.c (+3 -21)
  54. drivers/Makefile (+2 -0)
  55. drivers/acpi/pci_mcfg.c (+11 -3)
  56. drivers/infiniband/hw/hfi1/chip.c (+2 -2)
  57. drivers/infiniband/hw/hfi1/hfi.h (+0 -1)
  58. drivers/infiniband/hw/hfi1/pcie.c (+0 -30)
  59. drivers/misc/Kconfig (+7 -0)
  60. drivers/misc/Makefile (+1 -0)
  61. drivers/misc/pci_endpoint_test.c (+534 -0)
  62. drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (+2 -14)
  63. drivers/nvme/host/pci.c (+13 -17)
  64. drivers/of/of_pci.c (+0 -45)
  65. drivers/pci/Kconfig (+2 -0)
  66. drivers/pci/Makefile (+2 -1)
  67. drivers/pci/access.c (+59 -2)
  68. drivers/pci/dwc/Kconfig (+32 -4)
  69. drivers/pci/dwc/Makefile (+4 -1)
  70. drivers/pci/dwc/pci-dra7xx.c (+263 -30)
  71. drivers/pci/dwc/pci-exynos.c (+8 -6)
  72. drivers/pci/dwc/pci-imx6.c (+145 -54)
  73. drivers/pci/dwc/pci-keystone-dw.c (+1 -1)
  74. drivers/pci/dwc/pci-layerscape.c (+2 -1)
  75. drivers/pci/dwc/pcie-armada8k.c (+2 -1)
  76. drivers/pci/dwc/pcie-artpec6.c (+7 -5)
  77. drivers/pci/dwc/pcie-designware-ep.c (+342 -0)
  78. drivers/pci/dwc/pcie-designware-host.c (+21 -18)
  79. drivers/pci/dwc/pcie-designware-plat.c (+1 -0)
  80. drivers/pci/dwc/pcie-designware.c (+210 -48)
  81. drivers/pci/dwc/pcie-designware.h (+131 -4)
  82. drivers/pci/dwc/pcie-hisi.c (+5 -4)
  83. drivers/pci/dwc/pcie-qcom.c (+1 -1)
  84. drivers/pci/dwc/pcie-spear13xx.c (+2 -1)
  85. drivers/pci/ecam.c (+4 -2)
  86. drivers/pci/endpoint/Kconfig (+31 -0)
  87. drivers/pci/endpoint/Makefile (+7 -0)
  88. drivers/pci/endpoint/functions/Kconfig (+12 -0)
  89. drivers/pci/endpoint/functions/Makefile (+5 -0)
  90. drivers/pci/endpoint/functions/pci-epf-test.c (+510 -0)
  91. drivers/pci/endpoint/pci-ep-cfs.c (+509 -0)
  92. drivers/pci/endpoint/pci-epc-core.c (+580 -0)
  93. drivers/pci/endpoint/pci-epc-mem.c (+143 -0)
  94. drivers/pci/endpoint/pci-epf-core.c (+359 -0)
  95. drivers/pci/host/Kconfig (+8 -2)
  96. drivers/pci/host/Makefile (+1 -0)
  97. drivers/pci/host/pci-aardvark.c (+67 -106)
  98. drivers/pci/host/pci-ftpci100.c (+563 -0)
  99. drivers/pci/host/pci-host-generic.c (+1 -0)
  100. drivers/pci/host/pci-hyperv.c (+35 -11)

+ 22 - 0
Documentation/ABI/testing/sysfs-bus-pci

@@ -301,3 +301,25 @@ Contact:	Emil Velikov <emil.l.velikov@gmail.com>
 Description:
 		This file contains the revision field of the PCI device.
 		The value comes from device config space. The file is read only.
+
+What:		/sys/bus/pci/devices/.../sriov_drivers_autoprobe
+Date:		April 2017
+Contact:	Bodong Wang <bodong@mellanox.com>
+Description:
+		This file is associated with the PF of a device that
+		supports SR-IOV.  It determines whether newly-enabled VFs
+		are immediately bound to a driver.  It initially contains
+		1, which means the kernel automatically binds VFs to a
+		compatible driver immediately after they are enabled.  If
+		an application writes 0 to the file before enabling VFs,
+		the kernel will not bind VFs to a driver.
+
+		A typical use case is to write 0 to this file, then enable
+		VFs, then assign the newly-created VFs to virtual machines.
+		Note that changing this file does not affect already-
+		enabled VFs.  In this scenario, the user must first disable
+		the VFs, write 0 to sriov_drivers_autoprobe, then re-enable
+		the VFs.
+
+		This is similar to /sys/bus/pci/drivers_autoprobe, but
+		affects only the VFs associated with a specific PF.

+ 96 - 0
Documentation/ABI/testing/sysfs-class-switchtec

@@ -0,0 +1,96 @@
+switchtec - Microsemi Switchtec PCI Switch Management Endpoint
+
+For details on this subsystem look at Documentation/switchtec.txt.
+
+What: 		/sys/class/switchtec
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	The switchtec class subsystem folder.
+		Each registered switchtec driver is represented by a switchtecX
+		subfolder (X being an integer >= 0).
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/component_id
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Component identifier as stored in the hardware (eg. PM8543)
+		(read only)
+Values: 	arbitrary string.
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/component_revision
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Component revision stored in the hardware (read only)
+Values: 	integer.
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/component_vendor
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Component vendor as stored in the hardware (eg. MICROSEM)
+		(read only)
+Values: 	arbitrary string.
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/device_version
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Device version as stored in the hardware (read only)
+Values: 	integer.
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/fw_version
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Currently running firmware version (read only)
+Values: 	integer (in hexadecimal).
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/partition
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Partition number for this device in the switch (read only)
+Values: 	integer.
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/partition_count
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Total number of partitions in the switch (read only)
+Values: 	integer.
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/product_id
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Product identifier as stored in the hardware (eg. PSX 48XG3)
+		(read only)
+Values: 	arbitrary string.
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/product_revision
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Product revision stored in the hardware (eg. RevB)
+		(read only)
+Values: 	arbitrary string.
+
+
+What:		/sys/class/switchtec/switchtec[0-9]+/product_vendor
+Date:		05-Jan-2017
+KernelVersion:	v4.11
+Contact:	Logan Gunthorpe <logang@deltatee.com>
+Description:	Product vendor as stored in the hardware (eg. MICROSEM)
+		(read only)
+Values: 	arbitrary string.

+ 10 - 0
Documentation/PCI/00-INDEX

@@ -12,3 +12,13 @@ pci.txt
 	- info on the PCI subsystem for device driver authors
 pcieaer-howto.txt
 	- the PCI Express Advanced Error Reporting Driver Guide HOWTO
+endpoint/pci-endpoint.txt
+	- guide to add endpoint controller driver and endpoint function driver.
+endpoint/pci-endpoint-cfs.txt
+	- guide to use configfs to configure the PCI endpoint function.
+endpoint/pci-test-function.txt
+	- specification of *PCI test* function device.
+endpoint/pci-test-howto.txt
+	- userguide for PCI endpoint test function.
+endpoint/function/binding/
+	- binding documentation for PCI endpoint function

+ 17 - 0
Documentation/PCI/endpoint/function/binding/pci-test.txt

@@ -0,0 +1,17 @@
+PCI TEST ENDPOINT FUNCTION
+
+name: Should be "pci_epf_test" to bind to the pci_epf_test driver.
+
+Configurable Fields:
+vendorid	 : should be 0x104c
+deviceid	 : should be 0xb500 for DRA74x and 0xb501 for DRA72x
+revid		 : don't care
+progif_code	 : don't care
+subclass_code	 : don't care
+baseclass_code	 : should be 0xff
+cache_line_size	 : don't care
+subsys_vendor_id : don't care
+subsys_id	 : don't care
+interrupt_pin	 : Should be 1 - INTA, 2 - INTB, 3 - INTC, 4 - INTD
+msi_interrupts	 : Should be 1 to 32 depending on the number of MSI interrupts
+		   to test

+ 105 - 0
Documentation/PCI/endpoint/pci-endpoint-cfs.txt

@@ -0,0 +1,105 @@
+                   CONFIGURING PCI ENDPOINT USING CONFIGFS
+                    Kishon Vijay Abraham I <kishon@ti.com>
+
+The PCI Endpoint Core exposes a configfs entry (pci_ep) to configure the
+PCI endpoint function and to bind the endpoint function
+with the endpoint controller. (For other mechanisms to configure the
+PCI Endpoint Function, refer to [1].)
+
+*) Mounting configfs
+
+The PCI Endpoint Core layer creates pci_ep directory in the mounted configfs
+directory. configfs can be mounted using the following command.
+
+	mount -t configfs none /sys/kernel/config
+
+*) Directory Structure
+
+The pci_ep configfs has two directories at its root: controllers and
+functions. Every EPC device present in the system will have an entry in
+the *controllers* directory and every EPF driver present in the system
+will have an entry in the *functions* directory.
+
+/sys/kernel/config/pci_ep/
+	.. controllers/
+	.. functions/
+
+*) Creating EPF Device
+
+Every registered EPF driver will be listed in the functions directory. The
+entries corresponding to the EPF driver will be created by the EPF core.
+
+/sys/kernel/config/pci_ep/functions/
+	.. <EPF Driver1>/
+		... <EPF Device 11>/
+		... <EPF Device 21>/
+	.. <EPF Driver2>/
+		... <EPF Device 12>/
+		... <EPF Device 22>/
+
+In order to create a <EPF device> of the type probed by <EPF Driver>, the
+user has to create a directory inside <EPF DriverN>.
+
+Every <EPF device> directory consists of the following entries that can be
+used to configure the standard configuration header of the endpoint function.
+(These entries are created by the framework when any new <EPF Device> is
+created)
+
+	.. <EPF Driver1>/
+		... <EPF Device 11>/
+			... vendorid
+			... deviceid
+			... revid
+			... progif_code
+			... subclass_code
+			... baseclass_code
+			... cache_line_size
+			... subsys_vendor_id
+			... subsys_id
+			... interrupt_pin
+
+*) EPC Device
+
+Every registered EPC device will be listed in the controllers directory. The
+entries corresponding to the EPC device will be created by the EPC core.
+
+/sys/kernel/config/pci_ep/controllers/
+	.. <EPC Device1>/
+		... <Symlink EPF Device11>/
+		... <Symlink EPF Device12>/
+		... start
+	.. <EPC Device2>/
+		... <Symlink EPF Device21>/
+		... <Symlink EPF Device22>/
+		... start
+
+The <EPC Device> directory will have a list of symbolic links to
+<EPF Device>. These symbolic links should be created by the user to
+represent the functions present in the endpoint device.
+
+The <EPC Device> directory will also have a *start* field. Once
+"1" is written to this field, the endpoint device will be ready to
+establish the link with the host. This is usually done after
+all the EPF devices are created and linked with the EPC device.
+
+
+			 | controllers/
+				| <Directory: EPC name>/
+					| <Symbolic Link: Function>
+					| start
+			 | functions/
+				| <Directory: EPF driver>/
+					| <Directory: EPF device>/
+						| vendorid
+						| deviceid
+						| revid
+						| progif_code
+						| subclass_code
+						| baseclass_code
+						| cache_line_size
+						| subsys_vendor_id
+						| subsys_id
+						| interrupt_pin
+						| function
+
+[1] -> Documentation/PCI/endpoint/pci-endpoint.txt

+ 215 - 0
Documentation/PCI/endpoint/pci-endpoint.txt

@@ -0,0 +1,215 @@
+			    PCI ENDPOINT FRAMEWORK
+		    Kishon Vijay Abraham I <kishon@ti.com>
+
+This document is a guide to using the PCI Endpoint Framework to create an
+endpoint controller driver and an endpoint function driver, and to using the
+configfs interface to bind the function driver to the controller driver.
+
+1. Introduction
+
+Linux has a comprehensive PCI subsystem to support PCI controllers that
+operate in Root Complex mode. The subsystem has the capability to scan the
+PCI bus, assign memory and IRQ resources, load PCI drivers (based on
+vendor ID and device ID), and support other services like hot-plug, power
+management, advanced error reporting and virtual channels.
+
+However, the PCI controller IP integrated in some SoCs is capable of operating
+either in Root Complex mode or in Endpoint mode. The PCI Endpoint Framework
+adds endpoint mode support to Linux. This makes it possible to run Linux in
+an EP system, which has a wide variety of use cases, from testing and
+validation to co-processor acceleration.
+
+2. PCI Endpoint Core
+
+The PCI Endpoint Core layer comprises 3 components: the Endpoint Controller
+library, the Endpoint Function library, and the configfs layer to bind the
+endpoint function with the endpoint controller.
+
+2.1 PCI Endpoint Controller(EPC) Library
+
+The EPC library provides APIs to be used by the controller that can operate
+in endpoint mode. It also provides APIs to be used by function driver/library
+in order to implement a particular endpoint function.
+
+2.1.1 APIs for the PCI controller Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI controller driver.
+
+*) devm_pci_epc_create()/pci_epc_create()
+
+   The PCI controller driver should implement the following ops:
+	 * write_header: ops to populate configuration space header
+	 * set_bar: ops to configure the BAR
+	 * clear_bar: ops to reset the BAR
+	 * alloc_addr_space: ops to allocate in PCI controller address space
+	 * free_addr_space: ops to free the allocated address space
+	 * raise_irq: ops to raise a legacy or MSI interrupt
+	 * start: ops to start the PCI link
+	 * stop: ops to stop the PCI link
+
+   The PCI controller driver can then create a new EPC device by invoking
+   devm_pci_epc_create()/pci_epc_create().
+
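A rough sketch (not part of this patch set) of the glue in a controller
driver could look like the following. The ops follow the list above, the
my_epc_* names are placeholders, and the exact prototypes should be taken
from include/linux/pci-epc.h:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>

static int my_epc_start(struct pci_epc *epc)
{
	/* assumed hook: start the PCI link in hardware */
	return 0;
}

static void my_epc_stop(struct pci_epc *epc)
{
	/* assumed hook: stop the PCI link */
}

static const struct pci_epc_ops my_epc_ops = {
	/* .write_header, .set_bar, .clear_bar, .raise_irq, ... */
	.start	= my_epc_start,
	.stop	= my_epc_stop,
};

static int my_epc_probe(struct platform_device *pdev)
{
	struct pci_epc *epc;

	/* register this controller with the EPC core */
	epc = devm_pci_epc_create(&pdev->dev, &my_epc_ops);
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	/* then pci_epc_mem_init() for the outbound window pool, and
	 * pci_epc_linkup(epc) once the hardware reports link up
	 */
	return 0;
}
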
+*) devm_pci_epc_destroy()/pci_epc_destroy()
+
+   The PCI controller driver can destroy the EPC device created by either
+   devm_pci_epc_create() or pci_epc_create() using devm_pci_epc_destroy() or
+   pci_epc_destroy().
+
+*) pci_epc_linkup()
+
+   In order to notify all the function devices that the EPC device to which
+   they are linked has established a link with the host, the PCI controller
+   driver should invoke pci_epc_linkup().
+
+*) pci_epc_mem_init()
+
+   Initialize the pci_epc_mem structure used for allocating EPC addr space.
+
+*) pci_epc_mem_exit()
+
+   Cleanup the pci_epc_mem structure allocated during pci_epc_mem_init().
+
+2.1.2 APIs for the PCI Endpoint Function Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint function driver.
+
+*) pci_epc_write_header()
+
+   The PCI endpoint function driver should use pci_epc_write_header() to
+   write the standard configuration header to the endpoint controller.
+
+*) pci_epc_set_bar()
+
+   The PCI endpoint function driver should use pci_epc_set_bar() to configure
+   the Base Address Register in order for the host to assign PCI addr space.
+   Register space of the function driver is usually configured
+   using this API.
+
+*) pci_epc_clear_bar()
+
+   The PCI endpoint function driver should use pci_epc_clear_bar() to reset
+   the BAR.
+
+*) pci_epc_raise_irq()
+
+   The PCI endpoint function driver should use pci_epc_raise_irq() to raise
+   Legacy Interrupt or MSI Interrupt.
+
+*) pci_epc_mem_alloc_addr()
+
+   The PCI endpoint function driver should use pci_epc_mem_alloc_addr() to
+   allocate a memory address from EPC addr space, which is required to
+   access the RC's buffer.
+
+*) pci_epc_mem_free_addr()
+
+   The PCI endpoint function driver should use pci_epc_mem_free_addr() to
+   free the memory space allocated using pci_epc_mem_alloc_addr().
+
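Putting the calls above together, a minimal sketch (not part of this patch
set) of a bind-time setup routine could look like the following; BAR0 is
assumed to have been allocated with pci_epf_alloc_space(), and the exact
prototypes should be checked against include/linux/pci-epc.h:

#include <linux/sizes.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int my_epf_setup(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;
	void __iomem *buf;
	phys_addr_t phys;
	int ret;

	/* 1. program the standard configuration space header */
	ret = pci_epc_write_header(epc, epf->header);
	if (ret)
		return ret;

	/* 2. expose the register space allocated for BAR0 to the host */
	ret = pci_epc_set_bar(epc, BAR_0, epf->bar[BAR_0].phys_addr,
			      epf->bar[BAR_0].size, 0);
	if (ret)
		return ret;

	/* 3. carve a window out of EPC address space to reach an RC buffer */
	buf = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
	if (!buf)
		return -ENOMEM;

	/* ... access the host buffer through 'buf' ... */

	pci_epc_mem_free_addr(epc, phys, buf, SZ_4K);

	/* 4. signal completion to the host with MSI vector 1 */
	return pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, 1);
}
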
+2.1.3 Other APIs
+
+There are other APIs provided by the EPC library. These are used for binding
+the EPF device with EPC device. pci-ep-cfs.c can be used as reference for
+using these APIs.
+
+*) pci_epc_get()
+
+   Get a reference to the PCI endpoint controller based on the device name of
+   the controller.
+
+*) pci_epc_put()
+
+   Release the reference to the PCI endpoint controller obtained using
+   pci_epc_get()
+
+*) pci_epc_add_epf()
+
+   Add a PCI endpoint function to a PCI endpoint controller. A PCIe device
+   can have up to 8 functions according to the specification.
+
+*) pci_epc_remove_epf()
+
+   Remove the PCI endpoint function from PCI endpoint controller.
+
+*) pci_epc_start()
+
+   The PCI endpoint function driver should invoke pci_epc_start() once it
+   has configured the endpoint function and wants to start the PCI link.
+
+*) pci_epc_stop()
+
+   The PCI endpoint function driver should invoke pci_epc_stop() to stop
+   the PCI link.
+
+2.2 PCI Endpoint Function(EPF) Library
+
+The EPF library provides APIs to be used by the function driver and the EPC
+library to provide endpoint mode functionality.
+
+2.2.1 APIs for the PCI Endpoint Function Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint function driver.
+
+*) pci_epf_register_driver()
+
+   The PCI Endpoint Function driver should implement the following ops:
+	 * bind: ops to perform when an EPC device has been bound to the EPF
+	   device
+	 * unbind: ops to perform when a binding has been lost between an EPC
+	   device and an EPF device
+	 * linkup: ops to perform when the EPC device has established a
+	   connection with a host system
+
+  The PCI Function driver can then register the PCI EPF driver by using
+  pci_epf_register_driver().
+
+*) pci_epf_unregister_driver()
+
+  The PCI Function driver can unregister the PCI EPF driver by using
+  pci_epf_unregister_driver().
+
+*) pci_epf_alloc_space()
+
+  The PCI Function driver can allocate space for a particular BAR using
+  pci_epf_alloc_space().
+
+*) pci_epf_free_space()
+
+  The PCI Function driver can free the allocated space
+  (using pci_epf_alloc_space) by invoking pci_epf_free_space().
+
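A minimal sketch (not part of this patch set) of such a function driver
registration, using the ops listed above; the "pci_epf_dummy" name and the
empty callbacks are placeholders, and pci-epf-test.c is the complete
in-tree example:

#include <linux/module.h>
#include <linux/pci-epf.h>

static int dummy_bind(struct pci_epf *epf)
{
	/* program the config header, BARs, etc. through the EPC APIs */
	return 0;
}

static void dummy_unbind(struct pci_epf *epf)
{
	/* undo whatever bind() set up */
}

static void dummy_linkup(struct pci_epf *epf)
{
	/* the link to the host is up; start servicing requests */
}

static struct pci_epf_ops dummy_ops = {
	.bind	= dummy_bind,
	.unbind	= dummy_unbind,
	.linkup	= dummy_linkup,
};

static const struct pci_epf_device_id dummy_ids[] = {
	{ .name = "pci_epf_dummy" },
	{ },
};

static struct pci_epf_driver dummy_driver = {
	/* a real driver also provides .probe/.remove */
	.driver.name	= "pci_epf_dummy",
	.ops		= &dummy_ops,
	.id_table	= dummy_ids,
};

static int __init dummy_init(void)
{
	return pci_epf_register_driver(&dummy_driver);
}
module_init(dummy_init);

static void __exit dummy_exit(void)
{
	pci_epf_unregister_driver(&dummy_driver);
}
module_exit(dummy_exit);

MODULE_LICENSE("GPL v2");
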
+2.2.2 APIs for the PCI Endpoint Controller Library
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint controller library.
+
+*) pci_epf_linkup()
+
+   The PCI endpoint controller library invokes pci_epf_linkup() when the
+   EPC device has established the connection to the host.
+
+2.2.3 Other APIs
+
+There are other APIs provided by the EPF library. These are used to notify
+the function driver when the EPF device is bound to the EPC device.
+pci-ep-cfs.c can be used as reference for using these APIs.
+
+*) pci_epf_create()
+
+   Create a new PCI EPF device by passing the name of the PCI EPF device.
+   This name will be used to bind the EPF device to an EPF driver.
+
+*) pci_epf_destroy()
+
+   Destroy the created PCI EPF device.
+
+*) pci_epf_bind()
+
+   pci_epf_bind() should be invoked when the EPF device has been bound to
+   an EPC device.
+
+*) pci_epf_unbind()
+
+   pci_epf_unbind() should be invoked when the binding between EPC device
+   and EPF device is lost.

+ 66 - 0
Documentation/PCI/endpoint/pci-test-function.txt

@@ -0,0 +1,66 @@
+				PCI TEST
+		    Kishon Vijay Abraham I <kishon@ti.com>
+
+Traditionally, a PCI RC has been validated by using standard
+PCI cards such as Ethernet, USB or SATA PCI cards.
+However, with the addition of the EP core in the Linux kernel, it is possible
+to configure a PCI controller that can operate in EP mode to work as
+a test device.
+
+The PCI endpoint test device is a virtual device (defined in software)
+used to test the endpoint functionality and serve as a sample driver
+for other PCI endpoint devices (to use the EP framework).
+
+The PCI endpoint test device has the following registers:
+
+	1) PCI_ENDPOINT_TEST_MAGIC
+	2) PCI_ENDPOINT_TEST_COMMAND
+	3) PCI_ENDPOINT_TEST_STATUS
+	4) PCI_ENDPOINT_TEST_SRC_ADDR
+	5) PCI_ENDPOINT_TEST_DST_ADDR
+	6) PCI_ENDPOINT_TEST_SIZE
+	7) PCI_ENDPOINT_TEST_CHECKSUM
+
+*) PCI_ENDPOINT_TEST_MAGIC
+
+This register will be used to test BAR0. A known pattern will be written
+and read back from MAGIC register to verify BAR0.
+
+*) PCI_ENDPOINT_TEST_COMMAND:
+
+This register will be used by the host driver to indicate the function
+that the endpoint device must perform.
+
+Bitfield Description:
+  Bit 0		: raise legacy IRQ
+  Bit 1		: raise MSI IRQ
+  Bit 2 - 7	: MSI interrupt number
+  Bit 8		: read command (read data from RC buffer)
+  Bit 9		: write command (write data to RC buffer)
+  Bit 10	: copy command (copy data from one RC buffer to another
+		  RC buffer)
+
+*) PCI_ENDPOINT_TEST_STATUS
+
+This register reflects the status of the PCI endpoint device.
+
+Bitfield Description:
+  Bit 0		: read success
+  Bit 1		: read fail
+  Bit 2		: write success
+  Bit 3		: write fail
+  Bit 4		: copy success
+  Bit 5		: copy fail
+  Bit 6		: IRQ raised
+  Bit 7		: source address is invalid
+  Bit 8		: destination address is invalid
+
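Expressed as C bit definitions, the two register layouts above look like the
following sketch; the macro names here are illustrative only (the in-tree
host and endpoint drivers define their own):

#define COMMAND_RAISE_LEGACY_IRQ	(1 << 0)
#define COMMAND_RAISE_MSI_IRQ		(1 << 1)
#define COMMAND_MSI_NUMBER_MASK		(0x3f << 2)	/* bits 2-7 */
#define COMMAND_READ			(1 << 8)
#define COMMAND_WRITE			(1 << 9)
#define COMMAND_COPY			(1 << 10)

#define STATUS_READ_SUCCESS		(1 << 0)
#define STATUS_READ_FAIL		(1 << 1)
#define STATUS_WRITE_SUCCESS		(1 << 2)
#define STATUS_WRITE_FAIL		(1 << 3)
#define STATUS_COPY_SUCCESS		(1 << 4)
#define STATUS_COPY_FAIL		(1 << 5)
#define STATUS_IRQ_RAISED		(1 << 6)
#define STATUS_SRC_ADDR_INVALID		(1 << 7)
#define STATUS_DST_ADDR_INVALID		(1 << 8)
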
+*) PCI_ENDPOINT_TEST_SRC_ADDR
+
+This register contains the source address (RC buffer address) for the
+COPY/READ command.
+
+*) PCI_ENDPOINT_TEST_DST_ADDR
+
+This register contains the destination address (RC buffer address) for
+the COPY/WRITE command.

+ 179 - 0
Documentation/PCI/endpoint/pci-test-howto.txt

@@ -0,0 +1,179 @@
+			    PCI TEST USERGUIDE
+		    Kishon Vijay Abraham I <kishon@ti.com>
+
+This document is a guide to help users use the pci-epf-test function driver
+and the pci_endpoint_test host driver for testing PCI. The steps to
+be followed on the host side and the EP side are given below.
+
+1. Endpoint Device
+
+1.1 Endpoint Controller Devices
+
+To find the list of endpoint controller devices in the system:
+
+	# ls /sys/class/pci_epc/
+	  51000000.pcie_ep
+
+If PCI_ENDPOINT_CONFIGFS is enabled
+	# ls /sys/kernel/config/pci_ep/controllers
+	  51000000.pcie_ep
+
+1.2 Endpoint Function Drivers
+
+To find the list of endpoint function drivers in the system:
+
+	# ls /sys/bus/pci-epf/drivers
+	  pci_epf_test
+
+If PCI_ENDPOINT_CONFIGFS is enabled
+	# ls /sys/kernel/config/pci_ep/functions
+	  pci_epf_test
+
+1.3 Creating pci-epf-test Device
+
+A PCI endpoint function device can be created using configfs. To create a
+pci-epf-test device, the following commands can be used:
+
+	# mount -t configfs none /sys/kernel/config
+	# cd /sys/kernel/config/pci_ep/
+	# mkdir functions/pci_epf_test/func1
+
+The "mkdir func1" above creates the pci-epf-test function device that will
+be probed by the pci_epf_test driver.
+
+The PCI endpoint framework populates the directory with the following
+configurable fields.
+
+	# ls functions/pci_epf_test/func1
+	  baseclass_code	interrupt_pin	revid		subsys_vendor_id
+	  cache_line_size	msi_interrupts	subclass_code	vendorid
+	  deviceid          	progif_code	subsys_id
+
+The PCI endpoint function driver populates these entries with default values
+when the device is bound to the driver. The pci-epf-test driver populates
+vendorid with 0xffff and interrupt_pin with 0x0001:
+
+	# cat functions/pci_epf_test/func1/vendorid
+	  0xffff
+	# cat functions/pci_epf_test/func1/interrupt_pin
+	  0x0001
+
+1.4 Configuring pci-epf-test Device
+
+The user can configure the pci-epf-test device using its configfs entries. In order
+to change the vendorid and the number of MSI interrupts used by the function
+device, the following commands can be used.
+
+	# echo 0x104c > functions/pci_epf_test/func1/vendorid
+	# echo 0xb500 > functions/pci_epf_test/func1/deviceid
+	# echo 16 > functions/pci_epf_test/func1/msi_interrupts
+
+1.5 Binding pci-epf-test Device to EP Controller
+
+In order for the endpoint function device to be useful, it has to be bound to
+a PCI endpoint controller driver. Use the configfs to bind the function
+device to one of the controller drivers present in the system.
+
+	# ln -s functions/pci_epf_test/func1 controllers/51000000.pcie_ep/
+
+Once the above step is completed, the PCI endpoint is ready to establish a link
+with the host.
+
+1.6 Start the Link
+
+In order for the endpoint device to establish a link with the host, the _start_
+field should be populated with '1'.
+
+	# echo 1 > controllers/51000000.pcie_ep/start
+
+2. RootComplex Device
+
+2.1 lspci Output
+
+Note that the devices listed here correspond to the values populated in 1.4 above.
+
+	00:00.0 PCI bridge: Texas Instruments Device 8888 (rev 01)
+	01:00.0 Unassigned class [ff00]: Texas Instruments Device b500
+
+2.2 Using Endpoint Test function Device
+
+pcitest.sh added in tools/pci/ can be used to run all the default PCI endpoint
+tests. Before pcitest.sh can be used, pcitest.c should be compiled using the
+following commands.
+
+	cd <kernel-dir>
+	make headers_install ARCH=arm
+	arm-linux-gnueabihf-gcc -Iusr/include tools/pci/pcitest.c -o pcitest
+	cp pcitest  <rootfs>/usr/sbin/
+	cp tools/pci/pcitest.sh <rootfs>
+
+2.2.1 pcitest.sh Output
+	# ./pcitest.sh
+	BAR tests
+
+	BAR0:           OKAY
+	BAR1:           OKAY
+	BAR2:           OKAY
+	BAR3:           OKAY
+	BAR4:           NOT OKAY
+	BAR5:           NOT OKAY
+
+	Interrupt tests
+
+	LEGACY IRQ:     NOT OKAY
+	MSI1:           OKAY
+	MSI2:           OKAY
+	MSI3:           OKAY
+	MSI4:           OKAY
+	MSI5:           OKAY
+	MSI6:           OKAY
+	MSI7:           OKAY
+	MSI8:           OKAY
+	MSI9:           OKAY
+	MSI10:          OKAY
+	MSI11:          OKAY
+	MSI12:          OKAY
+	MSI13:          OKAY
+	MSI14:          OKAY
+	MSI15:          OKAY
+	MSI16:          OKAY
+	MSI17:          NOT OKAY
+	MSI18:          NOT OKAY
+	MSI19:          NOT OKAY
+	MSI20:          NOT OKAY
+	MSI21:          NOT OKAY
+	MSI22:          NOT OKAY
+	MSI23:          NOT OKAY
+	MSI24:          NOT OKAY
+	MSI25:          NOT OKAY
+	MSI26:          NOT OKAY
+	MSI27:          NOT OKAY
+	MSI28:          NOT OKAY
+	MSI29:          NOT OKAY
+	MSI30:          NOT OKAY
+	MSI31:          NOT OKAY
+	MSI32:          NOT OKAY
+
+	Read Tests
+
+	READ (      1 bytes):           OKAY
+	READ (   1024 bytes):           OKAY
+	READ (   1025 bytes):           OKAY
+	READ (1024000 bytes):           OKAY
+	READ (1024001 bytes):           OKAY
+
+	Write Tests
+
+	WRITE (      1 bytes):          OKAY
+	WRITE (   1024 bytes):          OKAY
+	WRITE (   1025 bytes):          OKAY
+	WRITE (1024000 bytes):          OKAY
+	WRITE (1024001 bytes):          OKAY
+
+	Copy Tests
+
+	COPY (      1 bytes):           OKAY
+	COPY (   1024 bytes):           OKAY
+	COPY (   1025 bytes):           OKAY
+	COPY (1024000 bytes):           OKAY
+	COPY (1024001 bytes):           OKAY

+ 12 - 0
Documentation/PCI/pci-iov-howto.txt

@@ -68,6 +68,18 @@ To disable SR-IOV capability:
 	echo  0 > \
         /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_numvfs
 
+To enable auto probing of VFs by a compatible driver on the host, run the
+command below before enabling SR-IOV capabilities. This is the
+default behavior.
+	echo 1 > \
+        /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_drivers_autoprobe
+
+To disable auto probing of VFs by a compatible driver on the host, run the
+command below before enabling SR-IOV capabilities. Updating this
+entry will not affect VFs which are already probed.
+	echo  0 > \
+        /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_drivers_autoprobe
+
 3.2 Usage example
 
 Following piece of code illustrates the usage of the SR-IOV API.

+ 18 - 8
Documentation/devicetree/bindings/pci/designware-pcie.txt

@@ -6,30 +6,40 @@ Required properties:
 - reg-names: Must be "config" for the PCIe configuration space.
     (The old way of getting the configuration address space from "ranges"
     is deprecated and should be avoided.)
+- num-lanes: number of lanes to use
+RC mode:
 - #address-cells: set to <3>
 - #size-cells: set to <2>
 - device_type: set to "pci"
 - ranges: ranges for the PCI memory and I/O regions
 - #interrupt-cells: set to <1>
-- interrupt-map-mask and interrupt-map: standard PCI properties
-	to define the mapping of the PCIe interface to interrupt
+- interrupt-map-mask and interrupt-map: standard PCI
+	properties to define the mapping of the PCIe interface to interrupt
 	numbers.
-- num-lanes: number of lanes to use
+EP mode:
+- num-ib-windows: number of inbound address translation
+        windows
+- num-ob-windows: number of outbound address translation
+        windows
 
 Optional properties:
-- num-viewport: number of view ports configured in hardware.  If a platform
-  does not specify it, the driver assumes 2.
 - num-lanes: number of lanes to use (this property should be specified unless
   the link is brought already up in BIOS)
 - reset-gpio: gpio pin number of power good signal
-- bus-range: PCI bus numbers covered (it is recommended for new devicetrees to
-  specify this property, to keep backwards compatibility a range of 0x00-0xff
-  is assumed if not present)
 - clocks: Must contain an entry for each entry in clock-names.
 	See ../clocks/clock-bindings.txt for details.
 - clock-names: Must include the following entries:
 	- "pcie"
 	- "pcie_bus"
+RC mode:
+- num-viewport: number of view ports configured in
+  hardware. If a platform does not specify it, the driver assumes 2.
+- bus-range: PCI bus numbers covered (it is recommended
+  for new devicetrees to specify this property, to keep backwards
+  compatibility a range of 0x00-0xff is assumed if not present)
+EP mode:
+- max-functions: maximum number of functions that can be
+  configured
 
 Example configuration:
 

+ 129 - 0
Documentation/devicetree/bindings/pci/faraday,ftpci100.txt

@@ -0,0 +1,129 @@
+Faraday Technology FTPCI100 PCI Host Bridge
+
+This PCI bridge is found inside the Cortina Systems Gemini SoC platform and
+is a generic IP block from Faraday Technology. It exists in two variants:
+plain and dual PCI. The plain version embeds a cascading interrupt controller
+into the host bridge. The dual version routes the interrupts to the host
+chip's interrupt controller.
+
+The host controller appears on the PCI bus with vendor ID 0x159b (Faraday
+Technology) and product ID 0x4321.
+
+Mandatory properties:
+
+- compatible: ranging from specific to generic, should be one of
+  "cortina,gemini-pci", "faraday,ftpci100"
+  "cortina,gemini-pci-dual", "faraday,ftpci100-dual"
+  "faraday,ftpci100"
+  "faraday,ftpci100-dual"
+- reg: memory base and size for the host bridge
+- #address-cells: set to <3>
+- #size-cells: set to <2>
+- #interrupt-cells: set to <1>
+- bus-range: set to <0x00 0xff>
+- device_type, set to "pci"
+- ranges: see pci.txt
+- interrupt-map-mask: see pci.txt
+- interrupt-map: see pci.txt
+- dma-ranges: three ranges for the inbound memory region. The ranges must
+  be aligned to a 1MB boundary, and may be 1MB, 2MB, 4MB, 8MB, 16MB, 32MB, 64MB,
+  128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as
+  pre-fetchable.
+
+Mandatory subnodes:
+- For "faraday,ftpci100" a node representing the interrupt-controller inside the
+  host bridge is mandatory. It has the following mandatory properties:
+  - interrupt: see interrupt-controller/interrupts.txt
+  - interrupt-parent: see interrupt-controller/interrupts.txt
+  - interrupt-controller: see interrupt-controller/interrupts.txt
+  - #address-cells: set to <0>
+  - #interrupt-cells: set to <1>
+
+I/O space considerations:
+
+The plain variant has 128MiB of non-prefetchable memory space, whereas the
+"dual" variant has 64MiB. Take this into account when describing the ranges.
+
+Interrupt map considerations:
+
+The "dual" variant will get INT A, B, C, D from the system interrupt controller
+and should point to respective interrupt in that controller in its
+interrupt-map.
+
+The only documentation of how the Faraday PCI (the non-dual variant)
+assigns the default interrupt mapping/swizzling is the existing code, which
+has typically looked like this, doing the swizzling on the interrupt
+controller side rather than in the interconnect:
+
+interrupt-map-mask = <0xf800 0 0 7>;
+interrupt-map =
+	<0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */
+	<0x4800 0 0 2 &pci_intc 1>,
+	<0x4800 0 0 3 &pci_intc 2>,
+	<0x4800 0 0 4 &pci_intc 3>,
+	<0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */
+	<0x5000 0 0 2 &pci_intc 2>,
+	<0x5000 0 0 3 &pci_intc 3>,
+	<0x5000 0 0 4 &pci_intc 0>,
+	<0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */
+	<0x5800 0 0 2 &pci_intc 3>,
+	<0x5800 0 0 3 &pci_intc 0>,
+	<0x5800 0 0 4 &pci_intc 1>,
+	<0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */
+	<0x6000 0 0 2 &pci_intc 0>,
+	<0x6000 0 0 3 &pci_intc 1>,
+	<0x6000 0 0 4 &pci_intc 2>;
+
+Example:
+
+pci@50000000 {
+	compatible = "cortina,gemini-pci", "faraday,ftpci100";
+	reg = <0x50000000 0x100>;
+	interrupts = <8 IRQ_TYPE_LEVEL_HIGH>, /* PCI A */
+			<26 IRQ_TYPE_LEVEL_HIGH>, /* PCI B */
+			<27 IRQ_TYPE_LEVEL_HIGH>, /* PCI C */
+			<28 IRQ_TYPE_LEVEL_HIGH>; /* PCI D */
+	#address-cells = <3>;
+	#size-cells = <2>;
+	#interrupt-cells = <1>;
+
+	bus-range = <0x00 0xff>;
+	ranges = /* 1MiB I/O space 0x50000000-0x500fffff */
+		 <0x01000000 0 0          0x50000000 0 0x00100000>,
+		 /* 128MiB non-prefetchable memory 0x58000000-0x5fffffff */
+		 <0x02000000 0 0x58000000 0x58000000 0 0x08000000>;
+
+	/* DMA ranges */
+	dma-ranges =
+	/* 128MiB at 0x00000000-0x07ffffff */
+	<0x02000000 0 0x00000000 0x00000000 0 0x08000000>,
+	/* 64MiB at 0x00000000-0x03ffffff */
+	<0x02000000 0 0x00000000 0x00000000 0 0x04000000>,
+	/* 64MiB at 0x00000000-0x03ffffff */
+	<0x02000000 0 0x00000000 0x00000000 0 0x04000000>;
+
+	interrupt-map-mask = <0xf800 0 0 7>;
+	interrupt-map =
+		<0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */
+		<0x4800 0 0 2 &pci_intc 1>,
+		<0x4800 0 0 3 &pci_intc 2>,
+		<0x4800 0 0 4 &pci_intc 3>,
+		<0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */
+		<0x5000 0 0 2 &pci_intc 2>,
+		<0x5000 0 0 3 &pci_intc 3>,
+		<0x5000 0 0 4 &pci_intc 0>,
+		<0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */
+		<0x5800 0 0 2 &pci_intc 3>,
+		<0x5800 0 0 3 &pci_intc 0>,
+		<0x5800 0 0 4 &pci_intc 1>,
+		<0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */
+		<0x6000 0 0 2 &pci_intc 0>,
+		<0x6000 0 0 3 &pci_intc 0>,
+		<0x6000 0 0 4 &pci_intc 0>;
+	pci_intc: interrupt-controller {
+		interrupt-parent = <&intcon>;
+		interrupt-controller;
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+	};
+};

+ 13 - 1
Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt

@@ -4,7 +4,11 @@ This PCIe host controller is based on the Synopsis Designware PCIe IP
 and thus inherits all the common properties defined in designware-pcie.txt.
 
 Required properties:
-- compatible: "fsl,imx6q-pcie", "fsl,imx6sx-pcie", "fsl,imx6qp-pcie"
+- compatible:
+	- "fsl,imx6q-pcie"
+	- "fsl,imx6sx-pcie",
+	- "fsl,imx6qp-pcie"
+	- "fsl,imx7d-pcie"
 - reg: base address and length of the PCIe controller
 - interrupts: A list of interrupt outputs of the controller. Must contain an
   entry for each entry in the interrupt-names property.
@@ -34,6 +38,14 @@ Additional required properties for imx6sx-pcie:
 - clock names: Must include the following additional entries:
 	- "pcie_inbound_axi"
 
+Additional required properties for imx7d-pcie:
+- power-domains: Must be set to a phandle pointing to PCIE_PHY power domain
+- resets: Must contain phandles to PCIe-related reset lines exposed by SRC
+  IP block
+- reset-names: Must contain the following entries:
+	       - "pciephy"
+	       - "apps"
+
 Example:
 
 	pcie@0x01000000 {

+ 35 - 7
Documentation/devicetree/bindings/pci/ti-pci.txt

@@ -1,17 +1,22 @@
 TI PCI Controllers
 
 PCIe Designware Controller
- - compatible: Should be "ti,dra7-pcie""
- - reg : Two register ranges as listed in the reg-names property
- - reg-names : The first entry must be "ti-conf" for the TI specific registers
-	       The second entry must be "rc-dbics" for the designware pcie
-	       registers
-	       The third entry must be "config" for the PCIe configuration space
+ - compatible: Should be "ti,dra7-pcie" for RC
+	       Should be "ti,dra7-pcie-ep" for EP
  - phys : list of PHY specifiers (used by generic PHY framework)
  - phy-names : must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
 	       number of PHYs as specified in *phys* property.
  - ti,hwmods : Name of the hwmod associated to the pcie, "pcie<X>",
 	       where <X> is the instance number of the pcie from the HW spec.
+ - num-lanes as specified in ../designware-pcie.txt
+
+HOST MODE
+=========
+ - reg : Two register ranges as listed in the reg-names property
+ - reg-names : The first entry must be "ti-conf" for the TI specific registers
+	       The second entry must be "rc-dbics" for the DesignWare PCIe
+	       registers
+	       The third entry must be "config" for the PCIe configuration space
  - interrupts : Two interrupt entries must be specified. The first one is for
 		main interrupt line and the second for MSI interrupt line.
  - #address-cells,
@@ -19,13 +24,36 @@ PCIe Designware Controller
    #interrupt-cells,
    device_type,
    ranges,
-   num-lanes,
    interrupt-map-mask,
    interrupt-map : as specified in ../designware-pcie.txt
 
+DEVICE MODE
+===========
+ - reg : Four register ranges as listed in the reg-names property
+ - reg-names : "ti-conf" for the TI specific registers
+	       "ep_dbics" for the standard configuration registers as
+		they are locally accessed within the DIF CS space
+	       "ep_dbics2" for the standard configuration registers as
+		they are locally accessed within the DIF CS2 space
+	       "addr_space" used to map remote RC address space
+ - interrupts : one interrupt entry must be specified for the main interrupt.
+ - num-ib-windows : number of inbound address translation windows
+ - num-ob-windows : number of outbound address translation windows
+ - ti,syscon-unaligned-access: phandle to the syscon DT node. The 1st argument
+			       should contain the register offset within syscon
+			       and the 2nd argument should contain the bit field
+			       for setting the bit to enable unaligned
+			       access.
+
 Optional Property:
  - gpios : Should be added if a gpio line is required to drive PERST# line
 
+NOTE: Two DT nodes may be added for each PCI controller; one for host
+mode and another for device mode. So in order for PCI to
+work in host mode, the EP mode DT node should be disabled, and in order for
+PCI to work in EP mode, the host mode DT node should be disabled. Host mode
+and EP mode are mutually exclusive.
+
 Example:
 axi {
 	compatible = "simple-bus";

+ 4 - 2
Documentation/driver-model/devres.txt

@@ -342,8 +342,10 @@ PER-CPU MEM
   devm_free_percpu()
 
 PCI
-  pcim_enable_device()	: after success, all PCI ops become managed
-  pcim_pin_device()	: keep PCI device enabled after release
+  devm_pci_remap_cfgspace()	: ioremap PCI configuration space
+  devm_pci_remap_cfg_resource()	: ioremap PCI configuration space resource
+  pcim_enable_device()		: after success, all PCI ops become managed
+  pcim_pin_device()		: keep PCI device enabled after release
 
 PHY
   devm_usb_get_phy()

+ 12 - 3
Documentation/filesystems/sysfs-pci.txt

@@ -113,9 +113,18 @@ Supporting PCI access on new platforms
 --------------------------------------
 
 In order to support PCI resource mapping as described above, Linux platform
-code must define HAVE_PCI_MMAP and provide a pci_mmap_page_range function.
-Platforms are free to only support subsets of the mmap functionality, but
-useful return codes should be provided.
+code should ideally define ARCH_GENERIC_PCI_MMAP_RESOURCE and use the generic
+implementation of that functionality. To support the historical interface of
+mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP.
+
+Alternatively, platforms which set HAVE_PCI_MMAP may provide their own
+implementation of pci_mmap_page_range() instead of defining
+ARCH_GENERIC_PCI_MMAP_RESOURCE.
+
+Platforms which support write-combining maps of PCI resources must define
+arch_can_pci_mmap_wc() which shall evaluate to non-zero at runtime when
+write-combining is permitted. Platforms which support maps of I/O resources
+define arch_can_pci_mmap_io() similarly.
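
As an illustrative sketch only (not any particular architecture's actual
header), the relevant <asm/pci.h> fragment could look like:

#define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE

/* only if the architecture can support these kinds of mappings */
#define arch_can_pci_mmap_wc()	1
#define arch_can_pci_mmap_io()	1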
 
 Legacy resources are protected by the HAVE_PCI_LEGACY define.  Platforms
 wishing to support legacy functionality should define it and provide

+ 1 - 0
Documentation/ioctl/ioctl-number.txt

@@ -191,6 +191,7 @@ Code  Seq#(hex)	Include File		Comments
 'W'	00-1F	linux/watchdog.h	conflict!
 'W'	00-1F	linux/wanrouter.h	conflict!		(pre 3.9)
 'W'	00-3F	sound/asound.h		conflict!
+'W'	40-5F   drivers/pci/switch/switchtec.c
 'X'	all	fs/xfs/xfs_fs.h		conflict!
 		and fs/xfs/linux-2.6/xfs_ioctl32.h
 		and include/linux/falloc.h

+ 35 - 0
Documentation/misc-devices/pci-endpoint-test.txt

@@ -0,0 +1,35 @@
+Driver for PCI Endpoint Test Function
+
+This driver should be used as a host side driver if the root complex is
+connected to a configurable PCI endpoint running *pci_epf_test* function
+driver configured according to [1].
+
+The "pci_endpoint_test" driver can be used to perform the following tests.
+
+The PCI driver for the test device performs the following tests
+	*) verifying addresses programmed in BAR
+	*) raise legacy IRQ
+	*) raise MSI IRQ
+	*) read data
+	*) write data
+	*) copy data
+
+This misc driver creates /dev/pci-endpoint-test.<num> for every
+*pci_epf_test* function connected to the root complex and "ioctls"
+should be used to perform the above tests.
+
+ioctl
+-----
+ PCITEST_BAR: Tests the BAR. The number of the BAR to be tested
+	      should be passed as argument.
+ PCITEST_LEGACY_IRQ: Tests legacy IRQ
+ PCITEST_MSI: Tests message signalled interrupts. The MSI number
+	      to be tested should be passed as argument.
+ PCITEST_WRITE: Perform write tests. The size of the buffer should be passed
+		as argument.
+ PCITEST_READ: Perform read tests. The size of the buffer should be passed
+	       as argument.
+ PCITEST_COPY: Perform copy tests. The size of the buffer should be passed
+	       as argument.
+
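A minimal userspace sketch (not part of this patch set) that exercises a
couple of these ioctls; it assumes the uapi header has been installed as
<linux/pcitest.h> via "make headers_install", and that the test ioctls
return 1 on success as in tools/pci/pcitest.c:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pcitest.h>

int main(void)
{
	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* test BAR0, then a 1024-byte write through the endpoint */
	printf("BAR0:  %s\n", ioctl(fd, PCITEST_BAR, 0) == 1 ? "OKAY" : "NOT OKAY");
	printf("WRITE: %s\n", ioctl(fd, PCITEST_WRITE, 1024) == 1 ? "OKAY" : "NOT OKAY");

	close(fd);
	return 0;
}
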
+[1] -> Documentation/PCI/endpoint/function/binding/pci-test.txt

+ 80 - 0
Documentation/switchtec.txt

@@ -0,0 +1,80 @@
+========================
+Linux Switchtec Support
+========================
+
+Microsemi's "Switchtec" line of PCI switch devices is already
+supported by the kernel with standard PCI switch drivers. However, the
+Switchtec device advertises a special management endpoint which
+enables some additional functionality. This includes:
+
+* Packet and Byte Counters
+* Firmware Upgrades
+* Event and Error logs
+* Querying port link status
+* Custom user firmware commands
+
+The switchtec kernel module implements this functionality.
+
+
+Interface
+=========
+
+The primary means of communicating with the Switchtec management firmware is
+through the Memory-mapped Remote Procedure Call (MRPC) interface.
+Commands are submitted to the interface with a 4-byte command
+identifier and up to 1KB of command specific data. The firmware will
+respond with a 4-byte return code and up to 1KB of command specific
+data. The interface only processes a single command at a time.
+
+
+Userspace Interface
+===================
+
+The MRPC interface will be exposed to userspace through a simple char
+device: /dev/switchtec#, one for each management endpoint in the system.
+
+The char device has the following semantics:
+
+* A write must consist of at least 4 bytes and no more than 1028 bytes.
+  The first four bytes will be interpreted as the command to run and
+  the remainder will be used as the input data. A write will send the
+  command to the firmware to begin processing.
+
+* Each write must be followed by exactly one read. Any double write will
+  produce an error and any read that doesn't follow a write will
+  produce an error.
+
+* A read will block until the firmware completes the command and returns
+  the four bytes of status plus up to 1024 bytes of output data. (The
+  length will be specified by the size parameter of the read call --
+  reading less than 4 bytes will produce an error.)
+
+* The poll call will also be supported for userspace applications that
+  need to do other things while waiting for the command to complete.
+
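A minimal userspace sketch (not part of this patch set) of the
write-then-read sequence described above; the command identifier used here
is a placeholder, not a documented Switchtec command:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint8_t out[4 + 1024];	/* 4-byte command id + optional input data */
	uint8_t in[4 + 1024];	/* 4-byte return code + output data */
	uint32_t cmd_id = 0;	/* placeholder command identifier */
	ssize_t n;
	int fd = open("/dev/switchtec0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memcpy(out, &cmd_id, sizeof(cmd_id));	/* first 4 bytes pick the command */
	if (write(fd, out, 4) != 4) {		/* submit the command */
		perror("write");
		return 1;
	}

	n = read(fd, in, sizeof(in));		/* blocks until firmware answers */
	if (n < 4) {
		perror("read");
		return 1;
	}

	printf("return code %u, %zd bytes of output data\n",
	       (unsigned)*(uint32_t *)in, n - 4);
	close(fd);
	return 0;
}
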
+The following IOCTLs are also supported by the device:
+
+* SWITCHTEC_IOCTL_FLASH_INFO - Retrieve firmware length and number
+  of partitions in the device.
+
+* SWITCHTEC_IOCTL_FLASH_PART_INFO - Retrieve address and length for
+  any specified partition in flash.
+
+* SWITCHTEC_IOCTL_EVENT_SUMMARY - Read a structure of bitmaps
+  indicating all uncleared events.
+
+* SWITCHTEC_IOCTL_EVENT_CTL - Get the current count, clear and set flags
+  for any event. This ioctl takes in a switchtec_ioctl_event_ctl struct
+  with the event_id, index and flags set (index being the partition or PFF
+  number for non-global events). It returns whether the event has
+  occurred, the number of times and any event specific data. The flags
+  can be used to clear the count or enable and disable actions to
+  happen when the event occurs.
+  By using the SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL flag,
+  you can set an event to trigger a poll command to return with
+  POLLPRI. In this way, userspace can wait for events to occur.
+
+* SWITCHTEC_IOCTL_PFF_TO_PORT and SWITCHTEC_IOCTL_PORT_TO_PFF convert
+  between PCI Function Framework number (used by the event system)
+  and Switchtec Logic Port ID and Partition number (which is more
+  user friendly).

+ 20 - 0
MAINTAINERS

@@ -9723,6 +9723,15 @@ F:	include/linux/pci*
 F:	arch/x86/pci/
 F:	arch/x86/kernel/quirks.c
 
+PCI ENDPOINT SUBSYSTEM
+M:	Kishon Vijay Abraham I <kishon@ti.com>
+L:	linux-pci@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
+S:	Supported
+F:	drivers/pci/endpoint/
+F:	drivers/misc/pci_endpoint_test.c
+F:	tools/pci/
+
 PCI DRIVER FOR ALTERA PCIE IP
 M:	Ley Foon Tan <lftan@altera.com>
 L:	rfi@lists.rocketboards.org (moderated for non-subscribers)
@@ -9797,6 +9806,17 @@ S:	Maintained
 F:	Documentation/devicetree/bindings/pci/aardvark-pci.txt
 F:	drivers/pci/host/pci-aardvark.c
 
+PCI DRIVER FOR MICROSEMI SWITCHTEC
+M:	Kurt Schwemmer <kurt.schwemmer@microsemi.com>
+M:	Stephen Bates <stephen.bates@microsemi.com>
+M:	Logan Gunthorpe <logang@deltatee.com>
+L:	linux-pci@vger.kernel.org
+S:	Maintained
+F:	Documentation/switchtec.txt
+F:	Documentation/ABI/testing/sysfs-class-switchtec
+F:	drivers/pci/switch/switchtec*
+F:	include/uapi/linux/switchtec_ioctl.h
+
 PCI DRIVER FOR NVIDIA TEGRA
 M:	Thierry Reding <thierry.reding@gmail.com>
 L:	linux-tegra@vger.kernel.org

+ 10 - 0
arch/arm/include/asm/io.h

@@ -186,6 +186,16 @@ static inline void pci_ioremap_set_mem_type(int mem_type) {}
 
 extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
 
+/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification does not allow configuration write
+ * transactions to be posted. Add an arch specific
+ * pci_remap_cfgspace() definition that is implemented
+ * through strongly ordered memory mappings.
+ */
+#define pci_remap_cfgspace pci_remap_cfgspace
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
 /*
  * Now, pick up the machine-defined IO definitions
  */

+ 1 - 2
arch/arm/include/asm/pci.h

@@ -29,8 +29,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 #define PCI_DMA_BUS_IS_PHYS     (1)
 
 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                               enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
 
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {

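Each architecture converted in this series stops providing its own pci_mmap_page_range() and instead defines ARCH_GENERIC_PCI_MMAP_RESOURCE, letting the PCI core (the new drivers/pci/mmap.c added to the PCI Makefile further down) map BARs for it. The following is a simplified, hedged sketch of what such a generic BAR mmap helper does; it is not the kernel's actual implementation.

/* Simplified sketch of a generic BAR mmap helper of the kind
 * ARCH_GENERIC_PCI_MMAP_RESOURCE selects; not the kernel's real code.
 */
static int example_pci_mmap_resource(struct pci_dev *pdev, int bar,
				     struct vm_area_struct *vma,
				     int write_combine)
{
	struct resource *res = &pdev->resource[bar];
	unsigned long pfn;

	/* vm_pgoff is the page offset into the BAR; turn it into a
	 * CPU page frame number within the resource.
	 */
	pfn = (res->start >> PAGE_SHIFT) + vma->vm_pgoff;

	if (write_combine)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}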
+ 0 - 19
arch/arm/kernel/bios32.c

@@ -597,25 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 	return start;
 	return start;
 }
 }
 
 
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	if (mmap_state == pci_mmap_io)
-		return -EINVAL;
-
-	/*
-	 * Mark this as IO
-	 */
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			     vma->vm_end - vma->vm_start,
-			     vma->vm_page_prot))
-		return -EAGAIN;
-
-	return 0;
-}
-
 void __init pci_map_io_early(unsigned long pfn)
 void __init pci_map_io_early(unsigned long pfn)
 {
 {
 	struct map_desc pci_io_desc = {
 	struct map_desc pci_io_desc = {

+ 1 - 1
arch/arm/mach-omap2/clockdomains7xx_data.c

@@ -524,7 +524,7 @@ static struct clockdomain pcie_7xx_clkdm = {
 	.dep_bit	  = DRA7XX_PCIE_STATDEP_SHIFT,
 	.dep_bit	  = DRA7XX_PCIE_STATDEP_SHIFT,
 	.wkdep_srcs	  = pcie_wkup_sleep_deps,
 	.wkdep_srcs	  = pcie_wkup_sleep_deps,
 	.sleepdep_srcs	  = pcie_wkup_sleep_deps,
 	.sleepdep_srcs	  = pcie_wkup_sleep_deps,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.flags		  = CLKDM_CAN_SWSUP,
 };
 };
 
 
 static struct clockdomain atl_7xx_clkdm = {
 static struct clockdomain atl_7xx_clkdm = {

+ 7 - 0
arch/arm/mm/ioremap.c

@@ -481,6 +481,13 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 }
 }
 EXPORT_SYMBOL_GPL(pci_ioremap_io);
 EXPORT_SYMBOL_GPL(pci_ioremap_io);
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
 #endif
 #endif
 
 
 /*
 /*

+ 12 - 0
arch/arm/mm/nommu.c

@@ -436,6 +436,18 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 }
 }
 EXPORT_SYMBOL(ioremap_wc);
 EXPORT_SYMBOL(ioremap_wc);
 
 
+#ifdef CONFIG_PCI
+
+#include <asm/mach/map.h>
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
+#endif
+
 void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
 void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
 {
 {
 	return (void *)phys_addr;
 	return (void *)phys_addr;

+ 10 - 0
arch/arm64/include/asm/io.h

@@ -172,6 +172,16 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 #define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define iounmap				__iounmap
 #define iounmap				__iounmap
 
 
+/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification disallows posted write configuration transactions.
+ * Add an arch specific pci_remap_cfgspace() definition that is implemented
+ * through nGnRnE device memory attribute as recommended by the ARM v8
+ * Architecture reference manual Issue A.k B2.8.2 "Device memory".
+ */
+#define pci_remap_cfgspace(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+
 /*
 /*
  * io{read,write}{16,32,64}be() macros
  * io{read,write}{16,32,64}be() macros
  */
  */

+ 2 - 0
arch/arm64/include/asm/pci.h

@@ -22,6 +22,8 @@
  */
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
 
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE	1
+
 extern int isa_dma_bridge_buggy;
 extern int isa_dma_bridge_buggy;
 
 
 #ifdef CONFIG_PCI
 #ifdef CONFIG_PCI

+ 0 - 22
arch/cris/arch-v32/drivers/pci/bios.c

@@ -14,28 +14,6 @@ void pcibios_set_master(struct pci_dev *dev)
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }
 }
 
 
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	unsigned long prot;
-
-	/* Leave vm_pgoff as-is, the PCI space address is the physical
-	 * address on this platform.
-	 */
-	prot = pgprot_val(vma->vm_page_prot);
-	vma->vm_page_prot = __pgprot(prot);
-
-	/* Write-combine setting is ignored, it is changed via the mtrr
-	 * interfaces on this platform.
-	 */
-	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			     vma->vm_end - vma->vm_start,
-			     vma->vm_page_prot))
-		return -EAGAIN;
-
-	return 0;
-}
-
 resource_size_t
 resource_size_t
 pcibios_align_resource(void *data, const struct resource *res,
 pcibios_align_resource(void *data, const struct resource *res,
 		       resource_size_t size, resource_size_t align)
 		       resource_size_t size, resource_size_t align)

+ 1 - 3
arch/cris/include/asm/pci.h

@@ -42,9 +42,7 @@ struct pci_dev;
 #define PCI_DMA_BUS_IS_PHYS	(1)
 #define PCI_DMA_BUS_IS_PHYS	(1)
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			       enum pci_mmap_state mmap_state, int write_combine);
-
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
 
 
 #endif /* __KERNEL__ */
 #endif /* __KERNEL__ */
 
 

+ 3 - 2
arch/ia64/include/asm/pci.h

@@ -51,8 +51,9 @@ extern unsigned long ia64_max_iommu_merge_mask;
 #define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
 #define PCI_DMA_BUS_IS_PHYS	(ia64_max_iommu_merge_mask == ~0UL)
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
-				enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+#define arch_can_pci_mmap_wc()	1
+
 #define HAVE_PCI_LEGACY
 #define HAVE_PCI_LEGACY
 extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
 extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
 				      struct vm_area_struct *vma,
 				      struct vm_area_struct *vma,

+ 0 - 46
arch/ia64/pci/pci.c

@@ -418,52 +418,6 @@ pcibios_align_resource (void *data, const struct resource *res,
 	return res->start;
 	return res->start;
 }
 }
 
 
-int
-pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
-		     enum pci_mmap_state mmap_state, int write_combine)
-{
-	unsigned long size = vma->vm_end - vma->vm_start;
-	pgprot_t prot;
-
-	/*
-	 * I/O space cannot be accessed via normal processor loads and
-	 * stores on this platform.
-	 */
-	if (mmap_state == pci_mmap_io)
-		/*
-		 * XXX we could relax this for I/O spaces for which ACPI
-		 * indicates that the space is 1-to-1 mapped.  But at the
-		 * moment, we don't support multiple PCI address spaces and
-		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
-		 */
-		return -EINVAL;
-
-	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
-		return -EINVAL;
-
-	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
-				    vma->vm_page_prot);
-
-	/*
-	 * If the user requested WC, the kernel uses UC or WC for this region,
-	 * and the chipset supports WC, we can use WC. Otherwise, we have to
-	 * use the same attribute the kernel uses.
-	 */
-	if (write_combine &&
-	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
-	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
-	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	else
-		vma->vm_page_prot = prot;
-
-	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
-		return -EAGAIN;
-
-	return 0;
-}
-
 /**
 /**
  * ia64_pci_get_legacy_mem - generic legacy mem routine
  * ia64_pci_get_legacy_mem - generic legacy mem routine
  * @bus: bus to get legacy memory base address for
  * @bus: bus to get legacy memory base address for

+ 2 - 4
arch/microblaze/include/asm/pci.h

@@ -46,12 +46,10 @@ extern int pci_domain_nr(struct pci_bus *bus);
 extern int pci_proc_domain(struct pci_bus *bus);
 extern int pci_proc_domain(struct pci_bus *bus);
 
 
 struct vm_area_struct;
 struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine);
 
 
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP	1
+#define HAVE_PCI_MMAP		1
+#define arch_can_pci_mmap_io()	1
 
 
 extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
 extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
 			   size_t count);
 			   size_t count);

+ 1 - 1
arch/microblaze/pci/pci-common.c

@@ -278,7 +278,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
  *
  *
  * Returns a negative error code on failure, zero on success.
  * Returns a negative error code on failure, zero on success.
  */
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 {
 	resource_size_t offset =
 	resource_size_t offset =

+ 1 - 4
arch/mips/include/asm/pci.h

@@ -110,10 +110,7 @@ extern unsigned long PCIBIOS_MIN_MEM;
 extern void pcibios_set_master(struct pci_dev *dev);
 extern void pcibios_set_master(struct pci_dev *dev);
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-	enum pci_mmap_state mmap_state, int write_combine);
-
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
 
 
 /*
 /*

+ 0 - 24
arch/mips/pci/pci.c

@@ -57,27 +57,3 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
 	*start = fixup_bigphys_addr(rsrc->start, size);
 	*start = fixup_bigphys_addr(rsrc->start, size);
 	*end = rsrc->start + size;
 	*end = rsrc->start + size;
 }
 }
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	unsigned long prot;
-
-	/*
-	 * I/O space can be accessed via normal processor loads and stores on
-	 * this platform but for now we elect not to do this and portable
-	 * drivers should not do this anyway.
-	 */
-	if (mmap_state == pci_mmap_io)
-		return -EINVAL;
-
-	/*
-	 * Ignore write-combine; for now only return uncached mappings.
-	 */
-	prot = pgprot_val(vma->vm_page_prot);
-	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
-	vma->vm_page_prot = __pgprot(prot);
-
-	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-		vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}

+ 1 - 3
arch/mn10300/include/asm/pci.h

@@ -74,9 +74,7 @@ static inline int pci_controller_num(struct pci_dev *dev)
 }
 }
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			       enum pci_mmap_state mmap_state,
-			       int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
 
 
 #endif /* __KERNEL__ */
 #endif /* __KERNEL__ */
 
 

+ 0 - 23
arch/mn10300/unit-asb2305/pci-asb2305.c

@@ -210,26 +210,3 @@ void __init pcibios_resource_survey(void)
 	pcibios_allocate_resources(0);
 	pcibios_allocate_resources(0);
 	pcibios_allocate_resources(1);
 	pcibios_allocate_resources(1);
 }
 }
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	unsigned long prot;
-
-	/* Leave vm_pgoff as-is, the PCI space address is the physical
-	 * address on this platform.
-	 */
-	vma->vm_flags |= VM_LOCKED;
-
-	prot = pgprot_val(vma->vm_page_prot);
-	prot &= ~_PAGE_CACHE;
-	vma->vm_page_prot = __pgprot(prot);
-
-	/* Write-combine setting is ignored */
-	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			       vma->vm_end - vma->vm_start,
-			       vma->vm_page_prot))
-		return -EAGAIN;
-
-	return 0;
-}

+ 1 - 3
arch/parisc/include/asm/pci.h

@@ -200,8 +200,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 }
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-	enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
 
 
 #endif /* __ASM_PARISC_PCI_H */
 #endif /* __ASM_PARISC_PCI_H */

+ 0 - 28
arch/parisc/kernel/pci.c

@@ -227,34 +227,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 	return start;
 	return start;
 }
 }
 
 
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	unsigned long prot;
-
-	/*
-	 * I/O space can be accessed via normal processor loads and stores on
-	 * this platform but for now we elect not to do this and portable
-	 * drivers should not do this anyway.
-	 */
-	if (mmap_state == pci_mmap_io)
-		return -EINVAL;
-
-	if (write_combine)
-		return -EINVAL;
-
-	/*
-	 * Ignore write-combine; for now only return uncached mappings.
-	 */
-	prot = pgprot_val(vma->vm_page_prot);
-	prot |= _PAGE_NO_CACHE;
-	vma->vm_page_prot = __pgprot(prot);
-
-	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-		vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
-
 /*
 /*
  * A driver is enabling the device.  We make sure that all the appropriate
  * A driver is enabling the device.  We make sure that all the appropriate
  * bits are set to allow the device to operate as the driver is expecting.
  * bits are set to allow the device to operate as the driver is expecting.

+ 2 - 0
arch/powerpc/include/asm/machdep.h

@@ -173,6 +173,8 @@ struct machdep_calls {
 	/* Called after scan and before resource survey */
 	/* Called after scan and before resource survey */
 	void (*pcibios_fixup_phb)(struct pci_controller *hose);
 	void (*pcibios_fixup_phb)(struct pci_controller *hose);
 
 
+	resource_size_t (*pcibios_default_alignment)(void);
+
 #ifdef CONFIG_PCI_IOV
 #ifdef CONFIG_PCI_IOV
 	void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
 	void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
 	resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);
 	resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);

+ 4 - 5
arch/powerpc/include/asm/pci.h

@@ -77,12 +77,11 @@ extern int pci_domain_nr(struct pci_bus *bus);
 extern int pci_proc_domain(struct pci_bus *bus);
 extern int pci_proc_domain(struct pci_bus *bus);
 
 
 struct vm_area_struct;
 struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine);
 
 
-/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP	1
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() and it does WC */
+#define HAVE_PCI_MMAP		1
+#define arch_can_pci_mmap_io()	1
+#define arch_can_pci_mmap_wc()	1
 
 
 extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
 extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
 			   size_t count);
 			   size_t count);

+ 10 - 1
arch/powerpc/kernel/pci-common.c

@@ -233,6 +233,14 @@ void pcibios_reset_secondary_bus(struct pci_dev *dev)
 	pci_reset_secondary_bus(dev);
 	pci_reset_secondary_bus(dev);
 }
 }
 
 
+resource_size_t pcibios_default_alignment(void)
+{
+	if (ppc_md.pcibios_default_alignment)
+		return ppc_md.pcibios_default_alignment();
+
+	return 0;
+}
+
 #ifdef CONFIG_PCI_IOV
 #ifdef CONFIG_PCI_IOV
 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
 {
 {
@@ -513,7 +521,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
  *
  *
  * Returns a negative error code on failure, zero on success.
  * Returns a negative error code on failure, zero on success.
  */
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 {
 	resource_size_t offset =
 	resource_size_t offset =

+ 7 - 0
arch/powerpc/platforms/powernv/pci-ioda.c

@@ -3330,6 +3330,11 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
 	}
 	}
 }
 }
 
 
+static resource_size_t pnv_pci_default_alignment(void)
+{
+	return PAGE_SIZE;
+}
+
 #ifdef CONFIG_PCI_IOV
 #ifdef CONFIG_PCI_IOV
 static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
 static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
 						      int resno)
 						      int resno)
@@ -3863,6 +3868,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 		hose->controller_ops = pnv_pci_ioda_controller_ops;
 		hose->controller_ops = pnv_pci_ioda_controller_ops;
 	}
 	}
 
 
+	ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
+
 #ifdef CONFIG_PCI_IOV
 #ifdef CONFIG_PCI_IOV
 	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
 	ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
 	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
 	ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;

+ 0 - 21
arch/sh/drivers/pci/pci.c

@@ -269,27 +269,6 @@ void __ref pcibios_report_status(unsigned int status_mask, int warn)
 	}
 	}
 }
 }
 
 
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	/*
-	 * I/O space can be accessed via normal processor loads and stores on
-	 * this platform but for now we elect not to do this and portable
-	 * drivers should not do this anyway.
-	 */
-	if (mmap_state == pci_mmap_io)
-		return -EINVAL;
-
-	/*
-	 * Ignore write-combine; for now only return uncached mappings.
-	 */
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			       vma->vm_end - vma->vm_start,
-			       vma->vm_page_prot);
-}
-
 #ifndef CONFIG_GENERIC_IOMAP
 #ifndef CONFIG_GENERIC_IOMAP
 
 
 void __iomem *__pci_ioport_map(struct pci_dev *dev,
 void __iomem *__pci_ioport_map(struct pci_dev *dev,

+ 2 - 2
arch/sh/include/asm/pci.h

@@ -66,8 +66,8 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
 struct pci_dev;
 struct pci_dev;
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-	enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
 extern void pcibios_set_master(struct pci_dev *dev);
 extern void pcibios_set_master(struct pci_dev *dev);
 
 
 /* Dynamic DMA mapping stuff.
 /* Dynamic DMA mapping stuff.

+ 1 - 4
arch/sparc/include/asm/pci_64.h

@@ -42,13 +42,10 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 /* Platform support for /proc/bus/pci/X/Y mmap()s. */
 /* Platform support for /proc/bus/pci/X/Y mmap()s. */
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
+#define arch_can_pci_mmap_io()	1
 #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
 #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
 #define get_pci_unmapped_area get_fb_unmapped_area
 #define get_pci_unmapped_area get_fb_unmapped_area
 
 
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state,
-			int write_combine);
-
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {
 {
 	return PCI_IRQ_NONE;
 	return PCI_IRQ_NONE;

+ 3 - 3
arch/sparc/kernel/pci.c

@@ -862,9 +862,9 @@ static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vm
  *
  *
  * Returns a negative error code on failure, zero on success.
  * Returns a negative error code on failure, zero on success.
  */
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state,
-			int write_combine)
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine)
 {
 {
 	int ret;
 	int ret;
 
 

+ 1 - 2
arch/unicore32/include/asm/pci.h

@@ -17,8 +17,7 @@
 #include <mach/hardware.h> /* for PCIBIOS_MIN_* */
 #include <mach/hardware.h> /* for PCIBIOS_MIN_* */
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-	enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
 
 
 #endif /* __KERNEL__ */
 #endif /* __KERNEL__ */
 #endif
 #endif

+ 0 - 23
arch/unicore32/kernel/pci.c

@@ -356,26 +356,3 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	}
 	}
 	return 0;
 	return 0;
 }
 }
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	unsigned long phys;
-
-	if (mmap_state == pci_mmap_io)
-		return -EINVAL;
-
-	phys = vma->vm_pgoff;
-
-	/*
-	 * Mark this as IO
-	 */
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (remap_pfn_range(vma, vma->vm_start, phys,
-			     vma->vm_end - vma->vm_start,
-			     vma->vm_page_prot))
-		return -EAGAIN;
-
-	return 0;
-}

+ 3 - 4
arch/x86/include/asm/pci.h

@@ -7,6 +7,7 @@
 #include <linux/string.h>
 #include <linux/string.h>
 #include <linux/scatterlist.h>
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/io.h>
+#include <asm/pat.h>
 #include <asm/x86_init.h>
 #include <asm/x86_init.h>
 
 
 #ifdef __KERNEL__
 #ifdef __KERNEL__
@@ -102,10 +103,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
 
 
 
 
 #define HAVE_PCI_MMAP
 #define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			       enum pci_mmap_state mmap_state,
-			       int write_combine);
-
+#define arch_can_pci_mmap_wc()	pat_enabled()
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
 
 
 #ifdef CONFIG_PCI
 #ifdef CONFIG_PCI
 extern void early_quirks(void);
 extern void early_quirks(void);

+ 0 - 47
arch/x86/pci/i386.c

@@ -406,50 +406,3 @@ void __init pcibios_resource_survey(void)
 	 */
 	 */
 	ioapic_insert_resources();
 	ioapic_insert_resources();
 }
 }
-
-static const struct vm_operations_struct pci_mmap_ops = {
-	.access = generic_access_phys,
-};
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine)
-{
-	unsigned long prot;
-
-	/* I/O space cannot be accessed via normal processor loads and
-	 * stores on this platform.
-	 */
-	if (mmap_state == pci_mmap_io)
-		return -EINVAL;
-
-	prot = pgprot_val(vma->vm_page_prot);
-
-	/*
- 	 * Return error if pat is not enabled and write_combine is requested.
- 	 * Caller can followup with UC MINUS request and add a WC mtrr if there
- 	 * is a free mtrr slot.
- 	 */
-	if (!pat_enabled() && write_combine)
-		return -EINVAL;
-
-	if (pat_enabled() && write_combine)
-		prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
-	else if (pat_enabled() || boot_cpu_data.x86 > 3)
-		/*
-		 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
-		 * To avoid attribute conflicts, request UC MINUS here
-		 * as well.
-		 */
-		prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
-
-	vma->vm_page_prot = __pgprot(prot);
-
-	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-			       vma->vm_end - vma->vm_start,
-			       vma->vm_page_prot))
-		return -EAGAIN;
-
-	vma->vm_ops = &pci_mmap_ops;
-
-	return 0;
-}

+ 2 - 5
arch/xtensa/include/asm/pci.h

@@ -46,12 +46,9 @@ struct pci_dev;
 
 
 #define PCI_DMA_BUS_IS_PHYS	(1)
 #define PCI_DMA_BUS_IS_PHYS	(1)
 
 
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state, int write_combine);
-
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP	1
+#define HAVE_PCI_MMAP		1
+#define arch_can_pci_mmap_io()	1
 
 
 #endif /* __KERNEL__ */
 #endif /* __KERNEL__ */
 
 

+ 3 - 21
arch/xtensa/kernel/pci.c

@@ -333,25 +333,6 @@ __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
 	return -EINVAL;
 	return -EINVAL;
 }
 }
 
 
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static __inline__ void
-__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
-		      enum pci_mmap_state mmap_state, int write_combine)
-{
-	int prot = pgprot_val(vma->vm_page_prot);
-
-	/* Set to write-through */
-	prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
-#if 0
-	if (!write_combine)
-		prot |= _PAGE_WRITETHRU;
-#endif
-	vma->vm_page_prot = __pgprot(prot);
-}
-
 /*
 /*
  * Perform the actual remap of the pages for a PCI device mapping, as
  * Perform the actual remap of the pages for a PCI device mapping, as
  * appropriate for this architecture.  The region in the process to map
  * appropriate for this architecture.  The region in the process to map
@@ -362,7 +343,8 @@ __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
  *
  *
  * Returns a negative error code on failure, zero on success.
  * Returns a negative error code on failure, zero on success.
  */
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state,
 			enum pci_mmap_state mmap_state,
 			int write_combine)
 			int write_combine)
 {
 {
@@ -372,7 +354,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	if (ret < 0)
 	if (ret < 0)
 		return ret;
 		return ret;
 
 
-	__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
+	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
 
 
 	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			         vma->vm_end - vma->vm_start,vma->vm_page_prot);
 			         vma->vm_end - vma->vm_start,vma->vm_page_prot);

+ 2 - 0
drivers/Makefile

@@ -14,7 +14,9 @@ obj-$(CONFIG_GENERIC_PHY)	+= phy/
 obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-$(CONFIG_GPIOLIB)		+= gpio/
 obj-$(CONFIG_GPIOLIB)		+= gpio/
 obj-y				+= pwm/
 obj-y				+= pwm/
+
 obj-$(CONFIG_PCI)		+= pci/
 obj-$(CONFIG_PCI)		+= pci/
+obj-$(CONFIG_PCI_ENDPOINT)	+= pci/endpoint/
 # PCI dwc controller drivers
 # PCI dwc controller drivers
 obj-y				+= pci/dwc/
 obj-y				+= pci/dwc/
 
 

+ 11 - 3
drivers/acpi/pci_mcfg.c

@@ -54,6 +54,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
 
 
 #define QCOM_ECAM32(seg) \
 #define QCOM_ECAM32(seg) \
 	{ "QCOM  ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
 	{ "QCOM  ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
+
 	QCOM_ECAM32(0),
 	QCOM_ECAM32(0),
 	QCOM_ECAM32(1),
 	QCOM_ECAM32(1),
 	QCOM_ECAM32(2),
 	QCOM_ECAM32(2),
@@ -68,6 +69,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
 	{ "HISI  ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
 	{ "HISI  ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
 	{ "HISI  ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
 	{ "HISI  ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
 	{ "HISI  ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
 	{ "HISI  ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
+
 	HISI_QUAD_DOM("HIP05   ",  0, &hisi_pcie_ops),
 	HISI_QUAD_DOM("HIP05   ",  0, &hisi_pcie_ops),
 	HISI_QUAD_DOM("HIP06   ",  0, &hisi_pcie_ops),
 	HISI_QUAD_DOM("HIP06   ",  0, &hisi_pcie_ops),
 	HISI_QUAD_DOM("HIP07   ",  0, &hisi_pcie_ops),
 	HISI_QUAD_DOM("HIP07   ",  0, &hisi_pcie_ops),
@@ -77,6 +79,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
 
 
 #define THUNDER_PEM_RES(addr, node) \
 #define THUNDER_PEM_RES(addr, node) \
 	DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
 	DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
+
 #define THUNDER_PEM_QUIRK(rev, node) \
 #define THUNDER_PEM_QUIRK(rev, node) \
 	{ "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY,	    \
 	{ "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY,	    \
 	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) },  \
 	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) },  \
@@ -90,13 +93,16 @@ static struct mcfg_fixup mcfg_quirks[] = {
 	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) },  \
 	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) },  \
 	{ "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY,	    \
 	{ "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY,	    \
 	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
 	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
-	/* SoC pass2.x */
-	THUNDER_PEM_QUIRK(1, 0),
-	THUNDER_PEM_QUIRK(1, 1),
 
 
 #define THUNDER_ECAM_QUIRK(rev, seg)					\
 #define THUNDER_ECAM_QUIRK(rev, seg)					\
 	{ "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY,			\
 	{ "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY,			\
 	&pci_thunder_ecam_ops }
 	&pci_thunder_ecam_ops }
+
+	/* SoC pass2.x */
+	THUNDER_PEM_QUIRK(1, 0),
+	THUNDER_PEM_QUIRK(1, 1),
+	THUNDER_ECAM_QUIRK(1, 10),
+
 	/* SoC pass1.x */
 	/* SoC pass1.x */
 	THUNDER_PEM_QUIRK(2, 0),	/* off-chip devices */
 	THUNDER_PEM_QUIRK(2, 0),	/* off-chip devices */
 	THUNDER_PEM_QUIRK(2, 1),	/* off-chip devices */
 	THUNDER_PEM_QUIRK(2, 1),	/* off-chip devices */
@@ -112,9 +118,11 @@ static struct mcfg_fixup mcfg_quirks[] = {
 #define XGENE_V1_ECAM_MCFG(rev, seg) \
 #define XGENE_V1_ECAM_MCFG(rev, seg) \
 	{"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
 	{"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
 		&xgene_v1_pcie_ecam_ops }
 		&xgene_v1_pcie_ecam_ops }
+
 #define XGENE_V2_ECAM_MCFG(rev, seg) \
 #define XGENE_V2_ECAM_MCFG(rev, seg) \
 	{"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
 	{"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
 		&xgene_v2_pcie_ecam_ops }
 		&xgene_v2_pcie_ecam_ops }
+
 	/* X-Gene SoC with v1 PCIe controller */
 	/* X-Gene SoC with v1 PCIe controller */
 	XGENE_V1_ECAM_MCFG(1, 0),
 	XGENE_V1_ECAM_MCFG(1, 0),
 	XGENE_V1_ECAM_MCFG(1, 1),
 	XGENE_V1_ECAM_MCFG(1, 1),

+ 2 - 2
drivers/infiniband/hw/hfi1/chip.c

@@ -13841,14 +13841,14 @@ static void init_chip(struct hfi1_devdata *dd)
 		dd_dev_info(dd, "Resetting CSRs with FLR\n");
 		dd_dev_info(dd, "Resetting CSRs with FLR\n");
 
 
 		/* do the FLR, the DC reset will remain */
 		/* do the FLR, the DC reset will remain */
-		hfi1_pcie_flr(dd);
+		pcie_flr(dd->pcidev);
 
 
 		/* restore command and BARs */
 		/* restore command and BARs */
 		restore_pci_variables(dd);
 		restore_pci_variables(dd);
 
 
 		if (is_ax(dd)) {
 		if (is_ax(dd)) {
 			dd_dev_info(dd, "Resetting CSRs with FLR\n");
 			dd_dev_info(dd, "Resetting CSRs with FLR\n");
-			hfi1_pcie_flr(dd);
+			pcie_flr(dd->pcidev);
 			restore_pci_variables(dd);
 			restore_pci_variables(dd);
 		}
 		}
 	} else {
 	} else {

+ 0 - 1
drivers/infiniband/hw/hfi1/hfi.h

@@ -1825,7 +1825,6 @@ int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
 void hfi1_pcie_cleanup(struct pci_dev *);
 void hfi1_pcie_cleanup(struct pci_dev *);
 int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
 int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
 void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
 void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
-void hfi1_pcie_flr(struct hfi1_devdata *);
 int pcie_speeds(struct hfi1_devdata *);
 int pcie_speeds(struct hfi1_devdata *);
 void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *);
 void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *);
 void hfi1_enable_intx(struct pci_dev *);
 void hfi1_enable_intx(struct pci_dev *);

+ 0 - 30
drivers/infiniband/hw/hfi1/pcie.c

@@ -240,36 +240,6 @@ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
 		iounmap(dd->piobase);
 		iounmap(dd->piobase);
 }
 }
 
 
-/*
- * Do a Function Level Reset (FLR) on the device.
- * Based on static function drivers/pci/pci.c:pcie_flr().
- */
-void hfi1_pcie_flr(struct hfi1_devdata *dd)
-{
-	int i;
-	u16 status;
-
-	/* no need to check for the capability - we know the device has it */
-
-	/* wait for Transaction Pending bit to clear, at most a few ms */
-	for (i = 0; i < 4; i++) {
-		if (i)
-			msleep((1 << (i - 1)) * 100);
-
-		pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVSTA, &status);
-		if (!(status & PCI_EXP_DEVSTA_TRPND))
-			goto clear;
-	}
-
-	dd_dev_err(dd, "Transaction Pending bit is not clearing, proceeding with reset anyway\n");
-
-clear:
-	pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL,
-				 PCI_EXP_DEVCTL_BCR_FLR);
-	/* PCIe spec requires the function to be back within 100ms */
-	msleep(100);
-}
-
 static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
 static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
 		       struct hfi1_msix_entry *hfi1_msix_entry)
 		       struct hfi1_msix_entry *hfi1_msix_entry)
 {
 {

+ 7 - 0
drivers/misc/Kconfig

@@ -490,6 +490,13 @@ config ASPEED_LPC_CTRL
 	  ioctl()s, the driver also provides a read/write interface to a BMC ram
 	  ioctl()s, the driver also provides a read/write interface to a BMC ram
 	  region where the host LPC read/write region can be buffered.
 	  region where the host LPC read/write region can be buffered.
 
 
+config PCI_ENDPOINT_TEST
+	depends on PCI
+	tristate "PCI Endpoint Test driver"
+	---help---
+           Enable this configuration option to enable the host side test driver
+           for PCI Endpoint.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
 source "drivers/misc/cb710/Kconfig"

+ 1 - 0
drivers/misc/Makefile

@@ -53,6 +53,7 @@ obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
 obj-$(CONFIG_CXL_BASE)		+= cxl/
 obj-$(CONFIG_ASPEED_LPC_CTRL)	+= aspeed-lpc-ctrl.o
 obj-$(CONFIG_ASPEED_LPC_CTRL)	+= aspeed-lpc-ctrl.o
+obj-$(CONFIG_PCI_ENDPOINT_TEST)	+= pci_endpoint_test.o
 
 
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_core.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_core.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_bugs.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_bugs.o

+ 534 - 0
drivers/misc/pci_endpoint_test.c

@@ -0,0 +1,534 @@
+/**
+ * Host side test driver to test endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/pci_regs.h>
+
+#include <uapi/linux/pcitest.h>
+
+#define DRV_MODULE_NAME			"pci-endpoint-test"
+
+#define PCI_ENDPOINT_TEST_MAGIC		0x0
+
+#define PCI_ENDPOINT_TEST_COMMAND	0x4
+#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
+#define COMMAND_RAISE_MSI_IRQ		BIT(1)
+#define MSI_NUMBER_SHIFT		2
+/* 6 bits for MSI number */
+#define COMMAND_READ                    BIT(8)
+#define COMMAND_WRITE                   BIT(9)
+#define COMMAND_COPY                    BIT(10)
+
+#define PCI_ENDPOINT_TEST_STATUS	0x8
+#define STATUS_READ_SUCCESS             BIT(0)
+#define STATUS_READ_FAIL                BIT(1)
+#define STATUS_WRITE_SUCCESS            BIT(2)
+#define STATUS_WRITE_FAIL               BIT(3)
+#define STATUS_COPY_SUCCESS             BIT(4)
+#define STATUS_COPY_FAIL                BIT(5)
+#define STATUS_IRQ_RAISED               BIT(6)
+#define STATUS_SRC_ADDR_INVALID         BIT(7)
+#define STATUS_DST_ADDR_INVALID         BIT(8)
+
+#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0xc
+#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
+
+#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
+#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
+
+#define PCI_ENDPOINT_TEST_SIZE		0x1c
+#define PCI_ENDPOINT_TEST_CHECKSUM	0x20
+
+static DEFINE_IDA(pci_endpoint_test_ida);
+
+#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
+					    miscdev)
+enum pci_barno {
+	BAR_0,
+	BAR_1,
+	BAR_2,
+	BAR_3,
+	BAR_4,
+	BAR_5,
+};
+
+struct pci_endpoint_test {
+	struct pci_dev	*pdev;
+	void __iomem	*base;
+	void __iomem	*bar[6];
+	struct completion irq_raised;
+	int		last_irq;
+	/* mutex to protect the ioctls */
+	struct mutex	mutex;
+	struct miscdevice miscdev;
+};
+
+static int bar_size[] = { 4, 512, 1024, 16384, 131072, 1048576 };
+
+static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
+					  u32 offset)
+{
+	return readl(test->base + offset);
+}
+
+static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
+					    u32 offset, u32 value)
+{
+	writel(value, test->base + offset);
+}
+
+static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
+					      int bar, int offset)
+{
+	return readl(test->bar[bar] + offset);
+}
+
+static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
+						int bar, u32 offset, u32 value)
+{
+	writel(value, test->bar[bar] + offset);
+}
+
+static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
+{
+	struct pci_endpoint_test *test = dev_id;
+	u32 reg;
+
+	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+	if (reg & STATUS_IRQ_RAISED) {
+		test->last_irq = irq;
+		complete(&test->irq_raised);
+		reg &= ~STATUS_IRQ_RAISED;
+	}
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
+				 reg);
+
+	return IRQ_HANDLED;
+}
+
+static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
+				  enum pci_barno barno)
+{
+	int j;
+	u32 val;
+	int size;
+
+	if (!test->bar[barno])
+		return false;
+
+	size = bar_size[barno];
+
+	for (j = 0; j < size; j += 4)
+		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
+
+	for (j = 0; j < size; j += 4) {
+		val = pci_endpoint_test_bar_readl(test, barno, j);
+		if (val != 0xA0A0A0A0)
+			return false;
+	}
+
+	return true;
+}
+
+static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
+{
+	u32 val;
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+				 COMMAND_RAISE_LEGACY_IRQ);
+	val = wait_for_completion_timeout(&test->irq_raised,
+					  msecs_to_jiffies(1000));
+	if (!val)
+		return false;
+
+	return true;
+}
+
+static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
+				      u8 msi_num)
+{
+	u32 val;
+	struct pci_dev *pdev = test->pdev;
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+				 msi_num << MSI_NUMBER_SHIFT |
+				 COMMAND_RAISE_MSI_IRQ);
+	val = wait_for_completion_timeout(&test->irq_raised,
+					  msecs_to_jiffies(1000));
+	if (!val)
+		return false;
+
+	if (test->last_irq - pdev->irq == msi_num - 1)
+		return true;
+
+	return false;
+}
+
+static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
+{
+	bool ret = false;
+	void *src_addr;
+	void *dst_addr;
+	dma_addr_t src_phys_addr;
+	dma_addr_t dst_phys_addr;
+	struct pci_dev *pdev = test->pdev;
+	struct device *dev = &pdev->dev;
+	u32 src_crc32;
+	u32 dst_crc32;
+
+	src_addr = dma_alloc_coherent(dev, size, &src_phys_addr, GFP_KERNEL);
+	if (!src_addr) {
+		dev_err(dev, "failed to allocate source buffer\n");
+		ret = false;
+		goto err;
+	}
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
+				 lower_32_bits(src_phys_addr));
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
+				 upper_32_bits(src_phys_addr));
+
+	get_random_bytes(src_addr, size);
+	src_crc32 = crc32_le(~0, src_addr, size);
+
+	dst_addr = dma_alloc_coherent(dev, size, &dst_phys_addr, GFP_KERNEL);
+	if (!dst_addr) {
+		dev_err(dev, "failed to allocate destination address\n");
+		ret = false;
+		goto err_src_addr;
+	}
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
+				 lower_32_bits(dst_phys_addr));
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
+				 upper_32_bits(dst_phys_addr));
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
+				 size);
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+				 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
+
+	wait_for_completion(&test->irq_raised);
+
+	dst_crc32 = crc32_le(~0, dst_addr, size);
+	if (dst_crc32 == src_crc32)
+		ret = true;
+
+	dma_free_coherent(dev, size, dst_addr, dst_phys_addr);
+
+err_src_addr:
+	dma_free_coherent(dev, size, src_addr, src_phys_addr);
+
+err:
+	return ret;
+}
+
+static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
+{
+	bool ret = false;
+	u32 reg;
+	void *addr;
+	dma_addr_t phys_addr;
+	struct pci_dev *pdev = test->pdev;
+	struct device *dev = &pdev->dev;
+	u32 crc32;
+
+	addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+	if (!addr) {
+		dev_err(dev, "failed to allocate address\n");
+		ret = false;
+		goto err;
+	}
+
+	get_random_bytes(addr, size);
+
+	crc32 = crc32_le(~0, addr, size);
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
+				 crc32);
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
+				 lower_32_bits(phys_addr));
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
+				 upper_32_bits(phys_addr));
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+				 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
+
+	wait_for_completion(&test->irq_raised);
+
+	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+	if (reg & STATUS_READ_SUCCESS)
+		ret = true;
+
+	dma_free_coherent(dev, size, addr, phys_addr);
+
+err:
+	return ret;
+}
+
+static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
+{
+	bool ret = false;
+	void *addr;
+	dma_addr_t phys_addr;
+	struct pci_dev *pdev = test->pdev;
+	struct device *dev = &pdev->dev;
+	u32 crc32;
+
+	addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+	if (!addr) {
+		dev_err(dev, "failed to allocate destination address\n");
+		ret = false;
+		goto err;
+	}
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
+				 lower_32_bits(phys_addr));
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
+				 upper_32_bits(phys_addr));
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+
+	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+				 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
+
+	wait_for_completion(&test->irq_raised);
+
+	crc32 = crc32_le(~0, addr, size);
+	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
+		ret = true;
+
+	dma_free_coherent(dev, size, addr, phys_addr);
+err:
+	return ret;
+}
+
+static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	int ret = -EINVAL;
+	enum pci_barno bar;
+	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
+
+	mutex_lock(&test->mutex);
+	switch (cmd) {
+	case PCITEST_BAR:
+		bar = arg;
+		if (bar < 0 || bar > 5)
+			goto ret;
+		ret = pci_endpoint_test_bar(test, bar);
+		break;
+	case PCITEST_LEGACY_IRQ:
+		ret = pci_endpoint_test_legacy_irq(test);
+		break;
+	case PCITEST_MSI:
+		ret = pci_endpoint_test_msi_irq(test, arg);
+		break;
+	case PCITEST_WRITE:
+		ret = pci_endpoint_test_write(test, arg);
+		break;
+	case PCITEST_READ:
+		ret = pci_endpoint_test_read(test, arg);
+		break;
+	case PCITEST_COPY:
+		ret = pci_endpoint_test_copy(test, arg);
+		break;
+	}
+
+ret:
+	mutex_unlock(&test->mutex);
+	return ret;
+}
+
+static const struct file_operations pci_endpoint_test_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = pci_endpoint_test_ioctl,
+};
+
+static int pci_endpoint_test_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
+{
+	int i;
+	int err;
+	int irq;
+	int id;
+	char name[20];
+	enum pci_barno bar;
+	void __iomem *base;
+	struct device *dev = &pdev->dev;
+	struct pci_endpoint_test *test;
+	struct miscdevice *misc_device;
+
+	if (pci_is_bridge(pdev))
+		return -ENODEV;
+
+	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
+	if (!test)
+		return -ENOMEM;
+
+	test->pdev = pdev;
+	init_completion(&test->irq_raised);
+	mutex_init(&test->mutex);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(dev, "Cannot enable PCI device\n");
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (err) {
+		dev_err(dev, "Cannot obtain PCI resources\n");
+		goto err_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
+	if (irq < 0)
+		dev_err(dev, "failed to get MSI interrupts\n");
+
+	err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
+			       IRQF_SHARED, DRV_MODULE_NAME, test);
+	if (err) {
+		dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
+		goto err_disable_msi;
+	}
+
+	for (i = 1; i < irq; i++) {
+		err = devm_request_irq(dev, pdev->irq + i,
+				       pci_endpoint_test_irqhandler,
+				       IRQF_SHARED, DRV_MODULE_NAME, test);
+		if (err)
+			dev_err(dev, "failed to request IRQ %d for MSI %d\n",
+				pdev->irq + i, i + 1);
+	}
+
+	for (bar = BAR_0; bar <= BAR_5; bar++) {
+		base = pci_ioremap_bar(pdev, bar);
+		if (!base) {
+			dev_err(dev, "failed to read BAR%d\n", bar);
+			WARN_ON(bar == BAR_0);
+		}
+		test->bar[bar] = base;
+	}
+
+	test->base = test->bar[0];
+	if (!test->base) {
+		dev_err(dev, "Cannot perform PCI test without BAR0\n");
+		goto err_iounmap;
+	}
+
+	pci_set_drvdata(pdev, test);
+
+	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
+	if (id < 0) {
+		dev_err(dev, "unable to get id\n");
+		goto err_iounmap;
+	}
+
+	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
+	misc_device = &test->miscdev;
+	misc_device->minor = MISC_DYNAMIC_MINOR;
+	misc_device->name = name;
+	misc_device->fops = &pci_endpoint_test_fops,
+
+	err = misc_register(misc_device);
+	if (err) {
+		dev_err(dev, "failed to register device\n");
+		goto err_ida_remove;
+	}
+
+	return 0;
+
+err_ida_remove:
+	ida_simple_remove(&pci_endpoint_test_ida, id);
+
+err_iounmap:
+	for (bar = BAR_0; bar <= BAR_5; bar++) {
+		if (test->bar[bar])
+			pci_iounmap(pdev, test->bar[bar]);
+	}
+
+err_disable_msi:
+	pci_disable_msi(pdev);
+	pci_release_regions(pdev);
+
+err_disable_pdev:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void pci_endpoint_test_remove(struct pci_dev *pdev)
+{
+	int id;
+	enum pci_barno bar;
+	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
+	struct miscdevice *misc_device = &test->miscdev;
+
+	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
+		return;
+
+	misc_deregister(&test->miscdev);
+	ida_simple_remove(&pci_endpoint_test_ida, id);
+	for (bar = BAR_0; bar <= BAR_5; bar++) {
+		if (test->bar[bar])
+			pci_iounmap(pdev, test->bar[bar]);
+	}
+	pci_disable_msi(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static const struct pci_device_id pci_endpoint_test_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
+
+static struct pci_driver pci_endpoint_test_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= pci_endpoint_test_tbl,
+	.probe		= pci_endpoint_test_probe,
+	.remove		= pci_endpoint_test_remove,
+};
+module_pci_driver(pci_endpoint_test_driver);
+
+MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
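
The driver registers a misc device named pci-endpoint-test.<id>, so the first instance appears as /dev/pci-endpoint-test.0. A hedged userspace sketch that drives it through the PCITEST_* ioctls handled above follows; the BAR, MSI number and transfer sizes are arbitrary examples, and the uapi header is assumed to be installed as <linux/pcitest.h>.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pcitest.h>

int main(void)
{
	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
	long ret;

	if (fd < 0)
		return 1;

	/* Each ioctl returns 1 on success, 0 on failure, <0 on error. */
	ret = ioctl(fd, PCITEST_BAR, 2);		/* test BAR2 */
	printf("BAR2:       %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
	printf("LEGACY IRQ: %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	ret = ioctl(fd, PCITEST_MSI, 1);		/* raise MSI #1 */
	printf("MSI1:       %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	ret = ioctl(fd, PCITEST_WRITE, 1024);		/* 1 KB write test */
	printf("WRITE 1k:   %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	ret = ioctl(fd, PCITEST_READ, 1024);
	printf("READ 1k:    %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	ret = ioctl(fd, PCITEST_COPY, 1024);
	printf("COPY 1k:    %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	close(fd);
	return 0;
}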

+ 2 - 14
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -7332,18 +7332,6 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
 }
 }
 
 
 #ifdef CONFIG_PCI_IOV
 #ifdef CONFIG_PCI_IOV
-static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
-				      struct pci_dev *vfdev)
-{
-	if (!pci_wait_for_pending_transaction(vfdev))
-		e_dev_warn("Issuing VFLR with pending transactions\n");
-
-	e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
-	pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
-
-	msleep(100);
-}
-
 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
 {
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -7376,7 +7364,7 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
 		pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
 		pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
 		if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
 		if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
 		    status_reg & PCI_STATUS_REC_MASTER_ABORT)
 		    status_reg & PCI_STATUS_REC_MASTER_ABORT)
-			ixgbe_issue_vf_flr(adapter, vfdev);
+			pcie_flr(vfdev);
 	}
 	}
 }
 }
 
 
@@ -10602,7 +10590,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 		 * VFLR.  Just clean up the AER in that case.
 		 * VFLR.  Just clean up the AER in that case.
 		 */
 		 */
 		if (vfdev) {
 		if (vfdev) {
-			ixgbe_issue_vf_flr(adapter, vfdev);
+			pcie_flr(vfdev);
 			/* Free device reference count */
 			/* Free device reference count */
 			pci_dev_put(vfdev);
 			pci_dev_put(vfdev);
 		}
 		}

+ 13 - 17
drivers/nvme/host/pci.c

@@ -132,7 +132,6 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
 struct nvme_queue {
 struct nvme_queue {
 	struct device *q_dmadev;
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
 	struct nvme_dev *dev;
-	char irqname[24];	/* nvme4294967295-65535\0 */
 	spinlock_t q_lock;
 	spinlock_t q_lock;
 	struct nvme_command *sq_cmds;
 	struct nvme_command *sq_cmds;
 	struct nvme_command __iomem *sq_cmds_io;
 	struct nvme_command __iomem *sq_cmds_io;
@@ -329,11 +328,6 @@ static unsigned int nvme_cmd_size(struct nvme_dev *dev)
 		nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
 		nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
 }
 }
 
 
-static int nvmeq_irq(struct nvme_queue *nvmeq)
-{
-	return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
-}
-
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 				unsigned int hctx_idx)
 				unsigned int hctx_idx)
 {
 {
@@ -1078,7 +1072,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 		spin_unlock_irq(&nvmeq->q_lock);
 		spin_unlock_irq(&nvmeq->q_lock);
 		return 1;
 		return 1;
 	}
 	}
-	vector = nvmeq_irq(nvmeq);
+	vector = nvmeq->cq_vector;
 	nvmeq->dev->online_queues--;
 	nvmeq->dev->online_queues--;
 	nvmeq->cq_vector = -1;
 	nvmeq->cq_vector = -1;
 	spin_unlock_irq(&nvmeq->q_lock);
 	spin_unlock_irq(&nvmeq->q_lock);
@@ -1086,7 +1080,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
 	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
 		blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
 		blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
 
 
-	free_irq(vector, nvmeq);
+	pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -1171,8 +1165,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 
 
 	nvmeq->q_dmadev = dev->dev;
 	nvmeq->q_dmadev = dev->dev;
 	nvmeq->dev = dev;
 	nvmeq->dev = dev;
-	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
-			dev->ctrl.instance, qid);
 	spin_lock_init(&nvmeq->q_lock);
 	spin_lock_init(&nvmeq->q_lock);
 	nvmeq->cq_head = 0;
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->cq_phase = 1;
@@ -1195,12 +1187,16 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 
 
 static int queue_request_irq(struct nvme_queue *nvmeq)
 static int queue_request_irq(struct nvme_queue *nvmeq)
 {
 {
-	if (use_threaded_interrupts)
-		return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check,
-				nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq);
-	else
-		return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED,
-				nvmeq->irqname, nvmeq);
+	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
+	int nr = nvmeq->dev->ctrl.instance;
+
+	if (use_threaded_interrupts) {
+		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
+				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+	} else {
+		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
+				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+	}
 }
 }
 
 
 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1557,7 +1553,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	}
 	}
 
 
 	/* Deregister the admin queue's interrupt */
 	/* Deregister the admin queue's interrupt */
-	free_irq(pci_irq_vector(pdev, 0), adminq);
+	pci_free_irq(pdev, 0, adminq);
 
 
 	/*
 	/*
 	 * If we enable msix early due to not intx, disable it again before
 	 * If we enable msix early due to not intx, disable it again before
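
The helpers used in this conversion take a printf-style name, so drivers no longer have to keep their own irqname buffers alive for the lifetime of the interrupt. A hedged sketch of the general request/free pattern in some driver follows; all "example_*" names and the vector counts are illustrative, not taken from the nvme code.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hedged sketch of the pci_request_irq()/pci_free_irq() lifecycle. */
static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup_irq(struct pci_dev *pdev, void *priv)
{
	int nvec, err;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	/* The irq name is built from the format string; no per-queue
	 * name buffer has to live in the driver any more.
	 */
	err = pci_request_irq(pdev, 0, example_irq, NULL, priv,
			      "example%d-q0", 0);
	if (err)
		pci_free_irq_vectors(pdev);
	return err;
}

static void example_teardown_irq(struct pci_dev *pdev, void *priv)
{
	pci_free_irq(pdev, 0, priv);
	pci_free_irq_vectors(pdev);
}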

+ 0 - 45
drivers/of/of_pci.c

@@ -285,51 +285,6 @@ parse_failed:
 EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
 EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
 #endif /* CONFIG_OF_ADDRESS */
 #endif /* CONFIG_OF_ADDRESS */
 
 
-#ifdef CONFIG_PCI_MSI
-
-static LIST_HEAD(of_pci_msi_chip_list);
-static DEFINE_MUTEX(of_pci_msi_chip_mutex);
-
-int of_pci_msi_chip_add(struct msi_controller *chip)
-{
-	if (!of_property_read_bool(chip->of_node, "msi-controller"))
-		return -EINVAL;
-
-	mutex_lock(&of_pci_msi_chip_mutex);
-	list_add(&chip->list, &of_pci_msi_chip_list);
-	mutex_unlock(&of_pci_msi_chip_mutex);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(of_pci_msi_chip_add);
-
-void of_pci_msi_chip_remove(struct msi_controller *chip)
-{
-	mutex_lock(&of_pci_msi_chip_mutex);
-	list_del(&chip->list);
-	mutex_unlock(&of_pci_msi_chip_mutex);
-}
-EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove);
-
-struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
-{
-	struct msi_controller *c;
-
-	mutex_lock(&of_pci_msi_chip_mutex);
-	list_for_each_entry(c, &of_pci_msi_chip_list, list) {
-		if (c->of_node == of_node) {
-			mutex_unlock(&of_pci_msi_chip_mutex);
-			return c;
-		}
-	}
-	mutex_unlock(&of_pci_msi_chip_mutex);
-
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);
-
-#endif /* CONFIG_PCI_MSI */
-
 /**
 /**
  * of_pci_map_rid - Translate a requester ID through a downstream mapping.
  * of_pci_map_rid - Translate a requester ID through a downstream mapping.
  * @np: root complex device node.
  * @np: root complex device node.

+ 2 - 0
drivers/pci/Kconfig

@@ -134,3 +134,5 @@ config PCI_HYPERV
 source "drivers/pci/hotplug/Kconfig"
 source "drivers/pci/hotplug/Kconfig"
 source "drivers/pci/dwc/Kconfig"
 source "drivers/pci/dwc/Kconfig"
 source "drivers/pci/host/Kconfig"
 source "drivers/pci/host/Kconfig"
+source "drivers/pci/endpoint/Kconfig"
+source "drivers/pci/switch/Kconfig"

+ 2 - 1
drivers/pci/Makefile

@@ -4,7 +4,7 @@
 
 
 obj-y		+= access.o bus.o probe.o host-bridge.o remove.o pci.o \
 obj-y		+= access.o bus.o probe.o host-bridge.o remove.o pci.o \
 			pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
 			pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
-			irq.o vpd.o setup-bus.o vc.o
+			irq.o vpd.o setup-bus.o vc.o mmap.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSFS) += slot.o
 obj-$(CONFIG_SYSFS) += slot.o
 
 
@@ -68,3 +68,4 @@ ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
 
 # PCI host controller drivers
 obj-y += host/
+obj-y += switch/

+ 59 - 2
drivers/pci/access.c

@@ -629,7 +629,7 @@ void pci_vpd_release(struct pci_dev *dev)
  *
  * When access is locked, any userspace reads or writes to config
  * space and concurrent lock requests will sleep until access is
- * allowed via pci_cfg_access_unlocked again.
+ * allowed via pci_cfg_access_unlock() again.
  */
 void pci_cfg_access_lock(struct pci_dev *dev)
 {
@@ -700,7 +700,8 @@ static bool pcie_downstream_port(const struct pci_dev *dev)
 	int type = pci_pcie_type(dev);
 
 	return type == PCI_EXP_TYPE_ROOT_PORT ||
-	       type == PCI_EXP_TYPE_DOWNSTREAM;
+	       type == PCI_EXP_TYPE_DOWNSTREAM ||
+	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
 }
 
 bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
@@ -890,3 +891,59 @@ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
 	return ret;
 }
 EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
+
+int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
+{
+	if (pci_dev_is_disconnected(dev)) {
+		*val = ~0;
+		return -ENODEV;
+	}
+	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_byte);
+
+int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
+{
+	if (pci_dev_is_disconnected(dev)) {
+		*val = ~0;
+		return -ENODEV;
+	}
+	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_word);
+
+int pci_read_config_dword(const struct pci_dev *dev, int where,
+					u32 *val)
+{
+	if (pci_dev_is_disconnected(dev)) {
+		*val = ~0;
+		return -ENODEV;
+	}
+	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_dword);
+
+int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
+{
+	if (pci_dev_is_disconnected(dev))
+		return -ENODEV;
+	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_byte);
+
+int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
+{
+	if (pci_dev_is_disconnected(dev))
+		return -ENODEV;
+	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_word);
+
+int pci_write_config_dword(const struct pci_dev *dev, int where,
+					 u32 val)
+{
+	if (pci_dev_is_disconnected(dev))
+		return -ENODEV;
+	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_dword);

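For reference, a sketch of how a caller might rely on the disconnected-device behaviour added above (config reads now fail with -ENODEV and return all-ones data without touching the bus). The helper and its use of PCI_VENDOR_ID are illustrative, not part of this series:

#include <linux/pci.h>

/* Illustrative check: does the device still answer config reads? */
static bool mydrv_device_present(struct pci_dev *pdev)
{
	u16 vendor;

	/*
	 * A disconnected device short-circuits: the read returns -ENODEV
	 * and *vendor is set to all ones.
	 */
	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
		return false;

	return vendor != 0xffff;
}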
+ 32 - 4
drivers/pci/dwc/Kconfig

@@ -9,16 +9,44 @@ config PCIE_DW_HOST
 	depends on PCI_MSI_IRQ_DOMAIN
         select PCIE_DW
 
+config PCIE_DW_EP
+	bool
+	depends on PCI_ENDPOINT
+	select PCIE_DW
+
 config PCI_DRA7XX
 	bool "TI DRA7xx PCIe controller"
-	depends on PCI
+	depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
 	depends on OF && HAS_IOMEM && TI_PIPE3
+	help
+	 Enables support for the PCIe controller in the DRA7xx SoC. There
+	 are two instances of PCIe controller in DRA7xx. This controller can
+	 work either as EP or RC. In order to enable host-specific features
+	 PCI_DRA7XX_HOST must be selected and in order to enable device-
+	 specific features PCI_DRA7XX_EP must be selected. This uses
+	 the Designware core.
+
+if PCI_DRA7XX
+
+config PCI_DRA7XX_HOST
+	bool "PCI DRA7xx Host Mode"
+	depends on PCI
 	depends on PCI_MSI_IRQ_DOMAIN
 	select PCIE_DW_HOST
+	default y
 	help
-	 Enables support for the PCIe controller in the DRA7xx SoC.  There
-	 are two instances of PCIe controller in DRA7xx.  This controller can
-	 act both as EP and RC.  This reuses the Designware core.
+	 Enables support for the PCIe controller in the DRA7xx SoC to work in
+	 host mode.
+
+config PCI_DRA7XX_EP
+	bool "PCI DRA7xx Endpoint Mode"
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	help
+	 Enables support for the PCIe controller in the DRA7xx SoC to work in
+	 endpoint mode.
+
+endif
 
 config PCIE_DW_PLAT
 	bool "Platform bus based DesignWare PCIe Controller"

+ 4 - 1
drivers/pci/dwc/Makefile

@@ -1,7 +1,10 @@
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
 obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
 obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
-obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),)
+        obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+endif
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o

+ 263 - 30
drivers/pci/dwc/pci-dra7xx.c

@@ -10,12 +10,14 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
@@ -24,6 +26,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/resource.h>
 #include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #include "pcie-designware.h"
 
@@ -57,6 +61,11 @@
 #define	MSI						BIT(4)
 #define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
 
+#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
+#define	DEVICE_TYPE_EP					0x0
+#define	DEVICE_TYPE_LEG_EP				0x1
+#define	DEVICE_TYPE_RC					0x4
+
 #define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
 #define	LTSSM_EN					0x1
 
@@ -66,6 +75,13 @@
 
 #define EXP_CAP_ID_OFFSET				0x70
 
+#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
+#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128
+
+#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
+#define MSI_REQ_GRANT					BIT(0)
+#define MSI_VECTOR_SHIFT				7
+
 struct dra7xx_pcie {
 	struct dw_pcie		*pci;
 	void __iomem		*base;		/* DT ti_conf */
@@ -73,6 +89,11 @@ struct dra7xx_pcie {
 	struct phy		**phy;
 	int			link_gen;
 	struct irq_domain	*irq_domain;
+	enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+	enum dw_pcie_device_mode mode;
 };
 
 #define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)
@@ -88,6 +109,11 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
 	writel(value, pcie->base + offset);
 }
 
+static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
 static int dra7xx_pcie_link_up(struct dw_pcie *pci)
 {
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -96,9 +122,19 @@ static int dra7xx_pcie_link_up(struct dw_pcie *pci)
 	return !!(reg & LINK_UP);
 }
 
-static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
 {
-	struct dw_pcie *pci = dra7xx->pci;
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	u32 reg;
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+	reg &= ~LTSSM_EN;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 	struct device *dev = pci->dev;
 	u32 reg;
 	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
@@ -132,34 +168,42 @@ static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
 	reg |= LTSSM_EN;
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
 
-	return dw_pcie_wait_for_link(pci);
+	return 0;
 }
 
-static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
 {
-	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
-			   ~INTERRUPTS);
-	dra7xx_pcie_writel(dra7xx,
-			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
 			   ~LEG_EP_INTERRUPTS & ~MSI);
-	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+
+	dra7xx_pcie_writel(dra7xx,
+			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
 			   MSI | LEG_EP_INTERRUPTS);
 }
 
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+			   ~INTERRUPTS);
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
+			   INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+	dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
 static void dra7xx_pcie_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 
-	pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
-	pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
-	pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
-	pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
-
 	dw_pcie_setup_rc(pp);
 
-	dra7xx_pcie_establish_link(dra7xx);
+	dra7xx_pcie_establish_link(pci);
+	dw_pcie_wait_for_link(pci);
 	dw_pcie_msi_init(pp);
 	dra7xx_pcie_enable_interrupts(dra7xx);
 }
@@ -237,6 +281,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	struct dra7xx_pcie *dra7xx = arg;
 	struct dw_pcie *pci = dra7xx->pci;
 	struct device *dev = pci->dev;
+	struct dw_pcie_ep *ep = &pci->ep;
 	u32 reg;
 
 	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
@@ -273,8 +318,11 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	if (reg & LINK_REQ_RST)
 		dev_dbg(dev, "Link Request Reset\n");
 
-	if (reg & LINK_UP_EVT)
+	if (reg & LINK_UP_EVT) {
+		if (dra7xx->mode == DW_PCIE_EP_TYPE)
+			dw_pcie_ep_linkup(ep);
 		dev_dbg(dev, "Link-up state change\n");
+	}
 
 	if (reg & CFG_BME_EVT)
 		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
@@ -287,6 +335,94 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+	mdelay(1);
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+				      u8 interrupt_num)
+{
+	u32 reg;
+
+	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+	reg |= MSI_REQ_GRANT;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
+				 enum pci_epc_irq_type type, u8 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		dra7xx_pcie_raise_legacy_irq(dra7xx);
+		break;
+	case PCI_EPC_IRQ_MSI:
+		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+		break;
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = dra7xx_pcie_ep_init,
+	.raise_irq = dra7xx_pcie_raise_irq,
+};
+
+static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+				     struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = dra7xx->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
+	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!pci->dbi_base)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
+	pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
+	if (!pci->dbi_base2)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 				       struct platform_device *pdev)
 {
@@ -329,6 +465,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+	.start_link = dra7xx_pcie_establish_link,
+	.stop_link = dra7xx_pcie_stop_link,
 	.link_up = dra7xx_pcie_link_up,
 };
 
@@ -371,6 +510,68 @@ err_phy:
 	return ret;
 }
 
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+	{
+		.compatible = "ti,dra7-pcie",
+		.data = &dra7xx_pcie_rc_of_data,
+	},
+	{
+		.compatible = "ti,dra7-pcie-ep",
+		.data = &dra7xx_pcie_ep_of_data,
+	},
+	{},
+};
+
+/*
+ * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dra7xx: the dra7xx device where the workaround should be applied
+ *
+ * Access to the PCIe slave port that are not 32-bit aligned will result
+ * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
+ * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
+ * 0x3.
+ *
+ * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
+ */
+static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
+{
+	int ret;
+	struct device_node *np = dev->of_node;
+	struct of_phandle_args args;
+	struct regmap *regmap;
+
+	regmap = syscon_regmap_lookup_by_phandle(np,
+						 "ti,syscon-unaligned-access");
+	if (IS_ERR(regmap)) {
+		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+		return -EINVAL;
+	}
+
+	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+					       2, 0, &args);
+	if (ret) {
+		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+				 args.args[1]);
+	if (ret)
+		dev_err(dev, "failed to enable unaligned access\n");
+
+	of_node_put(args.np);
+
+	return ret;
+}
+
 static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 {
 	u32 reg;
@@ -388,6 +589,16 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 	struct device_node *np = dev->of_node;
 	char name[10];
 	struct gpio_desc *reset;
+	const struct of_device_id *match;
+	const struct dra7xx_pcie_of_data *data;
+	enum dw_pcie_device_mode mode;
+
+	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct dra7xx_pcie_of_data *)match->data;
+	mode = (enum dw_pcie_device_mode)data->mode;
 
 
 	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
 	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
 	if (!dra7xx)
 	if (!dra7xx)
@@ -409,13 +620,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
-			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
-	if (ret) {
-		dev_err(dev, "failed to request irq\n");
-		return ret;
-	}
-
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
 	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
 	if (!base)
@@ -473,9 +677,37 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
 		dra7xx->link_gen = 2;
 
-	if (ret < 0)
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+				   DEVICE_TYPE_RC);
+		ret = dra7xx_add_pcie_port(dra7xx, pdev);
+		if (ret < 0)
+			goto err_gpio;
+		break;
+	case DW_PCIE_EP_TYPE:
+		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+				   DEVICE_TYPE_EP);
+
+		ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
+		if (ret)
+			goto err_gpio;
+
+		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+		if (ret < 0)
+			goto err_gpio;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+	}
+	dra7xx->mode = mode;
+
+	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
 		goto err_gpio;
 		goto err_gpio;
+	}
 
 	return 0;
 
@@ -496,6 +728,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
 	struct dw_pcie *pci = dra7xx->pci;
 	u32 val;
 
+	if (dra7xx->mode != DW_PCIE_RC_TYPE)
+		return 0;
+
 	/* clear MSE */
 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
 	val &= ~PCI_COMMAND_MEMORY;
@@ -510,6 +745,9 @@ static int dra7xx_pcie_resume(struct device *dev)
 	struct dw_pcie *pci = dra7xx->pci;
 	u32 val;
 
+	if (dra7xx->mode != DW_PCIE_RC_TYPE)
+		return 0;
+
 	/* set MSE */
 	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
 	val |= PCI_COMMAND_MEMORY;
@@ -548,11 +786,6 @@ static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
 				      dra7xx_pcie_resume_noirq)
 };
 
-static const struct of_device_id of_dra7xx_pcie_match[] = {
-	{ .compatible = "ti,dra7-pcie", },
-	{},
-};
-
 static struct platform_driver dra7xx_pcie_driver = {
 	.driver = {
 		.name	= "dra7-pcie",

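A condensed sketch of what a DesignWare-based glue driver provides for the new endpoint mode, modelled on the dra7xx changes above; the "myplat" names are placeholders and the SoC-specific register work is only hinted at in comments:

#include <linux/platform_device.h>

#include "pcie-designware.h"

/* Placeholder callbacks; a real driver pokes SoC-specific wrapper registers. */
static void myplat_pcie_ep_init(struct dw_pcie_ep *ep)
{
	/* e.g. unmask the wrapper interrupts needed in EP mode */
}

static int myplat_pcie_raise_irq(struct dw_pcie_ep *ep,
				 enum pci_epc_irq_type type, u8 interrupt_num)
{
	/* trigger a legacy or MSI interrupt toward the RC, as dra7xx does */
	return 0;
}

static struct dw_pcie_ep_ops myplat_pcie_ep_ops = {
	.ep_init	= myplat_pcie_ep_init,
	.raise_irq	= myplat_pcie_raise_irq,
};

static int myplat_add_pcie_ep(struct dw_pcie *pci, struct platform_device *pdev)
{
	struct dw_pcie_ep *ep = &pci->ep;
	struct resource *res;

	ep->ops = &myplat_pcie_ep_ops;

	/* pci->dbi_base and pci->dbi_base2 must already be mapped from DT */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	return dw_pcie_ep_init(ep);
}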
+ 8 - 6
drivers/pci/dwc/pci-exynos.c

@@ -521,23 +521,25 @@ static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
 		exynos_pcie_msi_init(ep);
 }
 
-static u32 exynos_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+				u32 reg, size_t size)
 {
 	struct exynos_pcie *ep = to_exynos_pcie(pci);
 	u32 val;
 
 	exynos_pcie_sideband_dbi_r_mode(ep, true);
-	val = readl(pci->dbi_base + reg);
+	dw_pcie_read(base + reg, size, &val);
 	exynos_pcie_sideband_dbi_r_mode(ep, false);
 	return val;
 }
 
-static void exynos_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				  u32 reg, size_t size, u32 val)
 {
 	struct exynos_pcie *ep = to_exynos_pcie(pci);
 
 	exynos_pcie_sideband_dbi_w_mode(ep, true);
-	writel(val, pci->dbi_base + reg);
+	dw_pcie_write(base + reg, size, val);
 	exynos_pcie_sideband_dbi_w_mode(ep, false);
 }
 
@@ -644,8 +646,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
-	.readl_dbi = exynos_pcie_readl_dbi,
-	.writel_dbi = exynos_pcie_writel_dbi,
+	.read_dbi = exynos_pcie_read_dbi,
+	.write_dbi = exynos_pcie_write_dbi,
 	.link_up = exynos_pcie_link_up,
 };
 

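The exynos hunk above illustrates the switch from the old readl_dbi/writel_dbi hooks to the size-aware read_dbi/write_dbi ops. A minimal sketch of a glue driver supplying the new hooks; "myplat" is hypothetical and the sideband gating exynos needs is only noted in a comment:

#include "pcie-designware.h"

static u32 myplat_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
				u32 reg, size_t size)
{
	u32 val;

	/* a real driver could gate sideband access around this, as exynos does */
	dw_pcie_read(base + reg, size, &val);
	return val;
}

static void myplat_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
				  u32 reg, size_t size, u32 val)
{
	dw_pcie_write(base + reg, size, val);
}

static const struct dw_pcie_ops myplat_pcie_ops = {
	.read_dbi	= myplat_pcie_read_dbi,
	.write_dbi	= myplat_pcie_write_dbi,
};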
+ 145 - 54
drivers/pci/dwc/pci-imx6.c

@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
@@ -27,6 +28,7 @@
 #include <linux/signal.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
+#include <linux/reset.h>
 
 #include "pcie-designware.h"
 
@@ -36,6 +38,7 @@ enum imx6_pcie_variants {
 	IMX6Q,
 	IMX6SX,
 	IMX6QP,
+	IMX7D,
 };
 
 struct imx6_pcie {
@@ -47,6 +50,8 @@ struct imx6_pcie {
 	struct clk		*pcie_inbound_axi;
 	struct clk		*pcie;
 	struct regmap		*iomuxc_gpr;
+	struct reset_control	*pciephy_reset;
+	struct reset_control	*apps_reset;
 	enum imx6_pcie_variants variant;
 	u32			tx_deemph_gen1;
 	u32			tx_deemph_gen2_3p5db;
@@ -56,6 +61,11 @@ struct imx6_pcie {
 	int			link_gen;
 };
 
+/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
+#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
+
 /* PCIe Root Complex registers (memory-mapped) */
 #define PCIE_RC_LCR				0x7c
 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
@@ -248,6 +258,10 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
 static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
 {
 	switch (imx6_pcie->variant) {
+	case IMX7D:
+		reset_control_assert(imx6_pcie->pciephy_reset);
+		reset_control_assert(imx6_pcie->apps_reset);
+		break;
 	case IMX6SX:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
@@ -303,11 +317,32 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
 		break;
+	case IMX7D:
+		break;
 	}
 
 	return ret;
 }
 
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+	u32 val;
+	unsigned int retries;
+	struct device *dev = imx6_pcie->pci->dev;
+
+	for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
+		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
+
+		if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
+			return;
+
+		usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
+			     PHY_PLL_LOCK_WAIT_USLEEP_MAX);
+	}
+
+	dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
 static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 {
 	struct dw_pcie *pci = imx6_pcie->pci;
@@ -351,6 +386,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 	}
 
 	switch (imx6_pcie->variant) {
+	case IMX7D:
+		reset_control_deassert(imx6_pcie->pciephy_reset);
+		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+		break;
 	case IMX6SX:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
 				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
@@ -377,35 +416,44 @@ err_pcie_bus:
 
 static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 {
-	if (imx6_pcie->variant == IMX6SX)
+	switch (imx6_pcie->variant) {
+	case IMX7D:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+		break;
+	case IMX6SX:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
 				   IMX6SX_GPR12_PCIE_RX_EQ_2);
+		/* FALLTHROUGH */
+	default:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
 
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-			IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+		/* configure constant input signal to the pcie ctrl and phy */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
+				   imx6_pcie->tx_deemph_gen1 << 0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+				   imx6_pcie->tx_deemph_gen2_6db << 12);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_FULL,
+				   imx6_pcie->tx_swing_full << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_LOW,
+				   imx6_pcie->tx_swing_low << 25);
+		break;
+	}
 
-	/* configure constant input signal to the pcie ctrl and phy */
 	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 			IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-			IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_DEEMPH_GEN1,
-			   imx6_pcie->tx_deemph_gen1 << 0);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
-			   imx6_pcie->tx_deemph_gen2_3p5db << 6);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
-			   imx6_pcie->tx_deemph_gen2_6db << 12);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_SWING_FULL,
-			   imx6_pcie->tx_swing_full << 18);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-			   IMX6Q_GPR8_TX_SWING_LOW,
-			   imx6_pcie->tx_swing_low << 25);
 }
 
 static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
@@ -469,8 +517,11 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
 	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
 
 	/* Start LTSSM. */
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-			IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+	if (imx6_pcie->variant == IMX7D)
+		reset_control_deassert(imx6_pcie->apps_reset);
+	else
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
 
 	ret = imx6_pcie_wait_for_link(imx6_pcie);
 	if (ret)
@@ -482,29 +533,40 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
 		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
 		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
 		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
-	} else {
-		dev_info(dev, "Link: Gen2 disabled\n");
-	}
-
-	/*
-	 * Start Directed Speed Change so the best possible speed both link
-	 * partners support can be negotiated.
-	 */
-	tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
-	tmp |= PORT_LOGIC_SPEED_CHANGE;
-	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
 
 
-	ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
-	if (ret) {
-		dev_err(dev, "Failed to bring link up!\n");
-		goto err_reset_phy;
-	}
+		/*
+		 * Start Directed Speed Change so the best possible
+		 * speed both link partners support can be negotiated.
+		 */
+		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+		tmp |= PORT_LOGIC_SPEED_CHANGE;
+		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+		if (imx6_pcie->variant != IMX7D) {
+			/*
+			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+			 * from i.MX6 family when no link speed transition
+			 * occurs and we go Gen1 -> yep, Gen1. The difference
+			 * is that, in such case, it will not be cleared by HW
+			 * which will cause the following code to report false
+			 * failure.
+			 */
+
+			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+			if (ret) {
+				dev_err(dev, "Failed to bring link up!\n");
+				goto err_reset_phy;
+			}
+		}
 
 
-	/* Make sure link training is finished as well! */
-	ret = imx6_pcie_wait_for_link(imx6_pcie);
-	if (ret) {
-		dev_err(dev, "Failed to bring link up!\n");
-		goto err_reset_phy;
+		/* Make sure link training is finished as well! */
+		ret = imx6_pcie_wait_for_link(imx6_pcie);
+		if (ret) {
+			dev_err(dev, "Failed to bring link up!\n");
+			goto err_reset_phy;
+		}
+	} else {
+		dev_info(dev, "Link: Gen2 disabled\n");
 	}
 	}
 
 	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
@@ -544,8 +606,8 @@ static struct dw_pcie_host_ops imx6_pcie_host_ops = {
 	.host_init = imx6_pcie_host_init,
 };
 
-				     struct platform_device *pdev)
+static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+			      struct platform_device *pdev)
 {
 {
 	struct dw_pcie *pci = imx6_pcie->pci;
 	struct pcie_port *pp = &pci->pp;
@@ -585,7 +647,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 	.link_up = imx6_pcie_link_up,
 };
 
+static int imx6_pcie_probe(struct platform_device *pdev)
 {
 {
 	struct device *dev = &pdev->dev;
 	struct dw_pcie *pci;
@@ -609,10 +671,6 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 	imx6_pcie->variant =
 		(enum imx6_pcie_variants)of_device_get_match_data(dev);
 
-	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
-		"imprecise external abort");
-
 	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
 	if (IS_ERR(pci->dbi_base))
@@ -632,6 +690,8 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 			dev_err(dev, "unable to get reset gpio\n");
 			return ret;
 		}
+	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+		return imx6_pcie->reset_gpio;
 	}
 
 	/* Fetch clocks */
@@ -653,13 +713,31 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 		return PTR_ERR(imx6_pcie->pcie);
 	}
 
-	if (imx6_pcie->variant == IMX6SX) {
+	switch (imx6_pcie->variant) {
+	case IMX6SX:
 		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
 							   "pcie_inbound_axi");
 		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
 			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
 			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
 		}
+		break;
+	case IMX7D:
+		imx6_pcie->pciephy_reset = devm_reset_control_get(dev,
+								  "pciephy");
+		if (IS_ERR(imx6_pcie->pciephy_reset)) {
+			dev_err(dev, "Failed to get PCIEPHY reset control\n");
+			return PTR_ERR(imx6_pcie->pciephy_reset);
+		}
+
+		imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps");
+		if (IS_ERR(imx6_pcie->apps_reset)) {
+			dev_err(dev, "Failed to get PCIE APPS reset control\n");
+			return PTR_ERR(imx6_pcie->apps_reset);
+		}
+		break;
+	default:
+		break;
 	}
 
 	/* Grab GPR config register range */
@@ -718,6 +796,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
 	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
 	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
 	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
+	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
 	{},
 };
 
@@ -725,12 +804,24 @@ static struct platform_driver imx6_pcie_driver = {
 	.driver = {
 		.name	= "imx6q-pcie",
 		.of_match_table = imx6_pcie_of_match,
+		.suppress_bind_attrs = true,
 	},
+	.probe    = imx6_pcie_probe,
 	.shutdown = imx6_pcie_shutdown,
 };
 
 static int __init imx6_pcie_init(void)
 {
-	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+	/*
+	 * Since probe() can be deferred we need to make sure that
+	 * hook_fault_code is not called after __init memory is freed
+	 * by kernel and since imx6q_pcie_abort_handler() is a no-op,
+	 * we can install the handler here without risking it
+	 * accessing some uninitialized driver state.
+	 */
+	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+			"imprecise external abort");
+
+	return platform_driver_register(&imx6_pcie_driver);
 }
 device_initcall(imx6_pcie_init);

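The i.MX7D PHY PLL wait in the hunk above follows a common bounded-poll idiom; a generic sketch of the same pattern, where the register, bit mask, and bounds are hypothetical:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/regmap.h>

/* Illustrative bounded poll of a regmap status bit; names and limits are made up. */
static int mydrv_wait_for_bit(struct regmap *map, unsigned int reg, unsigned int mask)
{
	unsigned int retries;
	unsigned int val;

	for (retries = 0; retries < 2000; retries++) {
		regmap_read(map, reg, &val);
		if (val & mask)
			return 0;

		usleep_range(50, 200);
	}

	return -ETIMEDOUT;
}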
+ 1 - 1
drivers/pci/dwc/pci-keystone-dw.c

@@ -543,7 +543,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
 
 	/* Index 0 is the config reg. space address */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pci->dbi_base = devm_ioremap_resource(dev, res);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
 	if (IS_ERR(pci->dbi_base))
 		return PTR_ERR(pci->dbi_base);
 

+ 2 - 1
drivers/pci/dwc/pci-layerscape.c

@@ -283,7 +283,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
 	pcie->pci = pci;
 
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
 	if (IS_ERR(pci->dbi_base))
 		return PTR_ERR(pci->dbi_base);
 
@@ -305,6 +305,7 @@ static struct platform_driver ls_pcie_driver = {
 	.driver = {
 		.name = "layerscape-pcie",
 		.of_match_table = ls_pcie_of_match,
+		.suppress_bind_attrs = true,
 	},
 };
 builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);

+ 2 - 1
drivers/pci/dwc/pcie-armada8k.c

@@ -230,7 +230,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
 
 	/* Get the dw-pcie unit configuration/control registers base. */
 	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
-	pci->dbi_base = devm_ioremap_resource(dev, base);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
 	if (IS_ERR(pci->dbi_base)) {
 		dev_err(dev, "couldn't remap regs base %p\n", base);
 		ret = PTR_ERR(pci->dbi_base);
@@ -262,6 +262,7 @@ static struct platform_driver armada8k_pcie_driver = {
 	.driver = {
 		.name	= "armada8k-pcie",
 		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
+		.suppress_bind_attrs = true,
 	},
 };
 builtin_platform_driver(armada8k_pcie_driver);

+ 7 - 5
drivers/pci/dwc/pcie-artpec6.c

@@ -78,6 +78,11 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
 	regmap_write(artpec6_pcie->regmap, offset, val);
 }
 
+static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+	return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR;
+}
+
 static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
 {
 	struct dw_pcie *pci = artpec6_pcie->pci;
@@ -142,11 +147,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
 	 */
 	dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
 
-	pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-	pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-	pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-	pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-
 	/* setup root complex */
 	dw_pcie_setup_rc(pp);
 
@@ -235,6 +235,7 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
 };
 
 static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -294,6 +295,7 @@ static struct platform_driver artpec6_pcie_driver = {
 	.driver = {
 		.name	= "artpec6-pcie",
 		.of_match_table = artpec6_pcie_of_match,
+		.suppress_bind_attrs = true,
 	},
 };
 builtin_platform_driver(artpec6_pcie_driver);

+ 342 - 0
drivers/pci/dwc/pcie-designware-ep.c

@@ -0,0 +1,342 @@
+/**
+ * Synopsys Designware PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_linkup(epc);
+}
+
+static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+	u32 reg;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+	dw_pcie_writel_dbi2(pci, reg, 0x0);
+	dw_pcie_writel_dbi(pci, reg, 0x0);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc,
+				   struct pci_epf_header *hdr)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
+	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
+	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
+	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
+	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+			   hdr->subclass_code | hdr->baseclass_code << 8);
+	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+			   hdr->cache_line_size);
+	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+			   hdr->subsys_vendor_id);
+	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+			   hdr->interrupt_pin);
+
+	return 0;
+}
+
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
+				  dma_addr_t cpu_addr,
+				  enum dw_pcie_as_type as_type)
+{
+	int ret;
+	u32 free_win;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	free_win = find_first_zero_bit(&ep->ib_window_map,
+				       sizeof(ep->ib_window_map));
+	if (free_win >= ep->num_ib_windows) {
+		dev_err(pci->dev, "no free inbound window\n");
+		return -EINVAL;
+	}
+
+	ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+				       as_type);
+	if (ret < 0) {
+		dev_err(pci->dev, "Failed to program IB window\n");
+		return ret;
+	}
+
+	ep->bar_to_atu[bar] = free_win;
+	set_bit(free_win, &ep->ib_window_map);
+
+	return 0;
+}
+
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+				   u64 pci_addr, size_t size)
+{
+	u32 free_win;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	free_win = find_first_zero_bit(&ep->ob_window_map,
+				       sizeof(ep->ob_window_map));
+	if (free_win >= ep->num_ob_windows) {
+		dev_err(pci->dev, "no free outbound window\n");
+		return -EINVAL;
+	}
+
+	dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
+				  phys_addr, pci_addr, size);
+
+	set_bit(free_win, &ep->ob_window_map);
+	ep->outbound_addr[free_win] = phys_addr;
+
+	return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 atu_index = ep->bar_to_atu[bar];
+
+	dw_pcie_ep_reset_bar(pci, bar);
+
+	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+	clear_bit(atu_index, &ep->ib_window_map);
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
+			      dma_addr_t bar_phys, size_t size, int flags)
+{
+	int ret;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum dw_pcie_as_type as_type;
+	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+	if (!(flags & PCI_BASE_ADDRESS_SPACE))
+		as_type = DW_PCIE_AS_MEM;
+	else
+		as_type = DW_PCIE_AS_IO;
+
+	ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
+	if (ret)
+		return ret;
+
+	dw_pcie_writel_dbi2(pci, reg, size - 1);
+	dw_pcie_writel_dbi(pci, reg, flags);
+
+	return 0;
+}
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+			      u32 *atu_index)
+{
+	u32 index;
+
+	for (index = 0; index < ep->num_ob_windows; index++) {
+		if (ep->outbound_addr[index] != addr)
+			continue;
+		*atu_index = index;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
+{
+	int ret;
+	u32 atu_index;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	ret = dw_pcie_find_index(ep, addr, &atu_index);
+	if (ret < 0)
+		return;
+
+	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+	clear_bit(atu_index, &ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
+			       u64 pci_addr, size_t size)
+{
+	int ret;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+	if (ret) {
+		dev_err(pci->dev, "failed to enable address\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dw_pcie_ep_get_msi(struct pci_epc *epc)
+{
+	int val;
+	u32 lower_addr;
+	u32 upper_addr;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL);
+	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+
+	lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+	upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
+
+	if (!(lower_addr || upper_addr))
+		return -EINVAL;
+
+	return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
+{
+	int val;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	val = (encode_int << MSI_CAP_MMC_SHIFT);
+	dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+
+	return 0;
+}
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
+				enum pci_epc_irq_type type, u8 interrupt_num)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+	if (!ep->ops->raise_irq)
+		return -EINVAL;
+
+	return ep->ops->raise_irq(ep, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	if (!pci->ops->stop_link)
+		return;
+
+	pci->ops->stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	if (!pci->ops->start_link)
+		return -EINVAL;
+
+	return pci->ops->start_link(pci);
+}
+
+static const struct pci_epc_ops epc_ops = {
+	.write_header		= dw_pcie_ep_write_header,
+	.set_bar		= dw_pcie_ep_set_bar,
+	.clear_bar		= dw_pcie_ep_clear_bar,
+	.map_addr		= dw_pcie_ep_map_addr,
+	.unmap_addr		= dw_pcie_ep_unmap_addr,
+	.set_msi		= dw_pcie_ep_set_msi,
+	.get_msi		= dw_pcie_ep_get_msi,
+	.raise_irq		= dw_pcie_ep_raise_irq,
+	.start			= dw_pcie_ep_start,
+	.stop			= dw_pcie_ep_stop,
+};
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_mem_exit(epc);
+}
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	int ret;
+	void *addr;
+	enum pci_barno bar;
+	struct pci_epc *epc;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+
+	if (!pci->dbi_base || !pci->dbi_base2) {
+		dev_err(dev, "dbi_base/deb_base2 is not populated\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
+	if (ret < 0) {
+		dev_err(dev, "unable to read *num-ib-windows* property\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
+	if (ret < 0) {
+		dev_err(dev, "unable to read *num-ob-windows* property\n");
+		return ret;
+	}
+
+	addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
+			    GFP_KERNEL);
+	if (!addr)
+		return -ENOMEM;
+	ep->outbound_addr = addr;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+
+	if (ep->ops->ep_init)
+		ep->ops->ep_init(ep);
+
+	epc = devm_pci_epc_create(dev, &epc_ops);
+	if (IS_ERR(epc)) {
+		dev_err(dev, "failed to create epc device\n");
+		return PTR_ERR(epc);
+	}
+
+	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+	if (ret < 0)
+		epc->max_functions = 1;
+
+	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size);
+	if (ret < 0) {
+		dev_err(dev, "Failed to initialize address space\n");
+		return ret;
+	}
+
+	ep->epc = epc;
+	epc_set_drvdata(epc, ep);
+	dw_pcie_setup(pci);
+
+	return 0;
+}

+ 21 - 18
drivers/pci/dwc/pcie-designware-host.c

@@ -56,24 +56,25 @@ static struct irq_chip dw_msi_irq_chip = {
 /* MSI int handler */
 irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
 {
-	unsigned long val;
+	u32 val;
 	int i, pos, irq;
 	irqreturn_t ret = IRQ_NONE;
 
 	for (i = 0; i < MAX_MSI_CTRLS; i++) {
 		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
-				    (u32 *)&val);
-		if (val) {
-			ret = IRQ_HANDLED;
-			pos = 0;
-			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
-				irq = irq_find_mapping(pp->irq_domain,
-						       i * 32 + pos);
-				dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
-						    i * 12, 4, 1 << pos);
-				generic_handle_irq(irq);
-				pos++;
-			}
+				    &val);
+		if (!val)
+			continue;
+
+		ret = IRQ_HANDLED;
+		pos = 0;
+		while ((pos = find_next_bit((unsigned long *) &val, 32,
+					    pos)) != 32) {
+			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+					    4, 1 << pos);
+			generic_handle_irq(irq);
+			pos++;
 		}
 	}
 
@@ -338,8 +339,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (!pci->dbi_base) {
-		pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
-					resource_size(pp->cfg));
+		pci->dbi_base = devm_pci_remap_cfgspace(dev,
+						pp->cfg->start,
+						resource_size(pp->cfg));
 		if (!pci->dbi_base) {
 			dev_err(dev, "error with ioremap\n");
 			ret = -ENOMEM;
@@ -350,8 +352,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	pp->mem_base = pp->mem->start;
 
 	if (!pp->va_cfg0_base) {
-		pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
-						pp->cfg0_size);
+		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
+					pp->cfg0_base, pp->cfg0_size);
 		if (!pp->va_cfg0_base) {
 			dev_err(dev, "error with ioremap in function\n");
 			ret = -ENOMEM;
@@ -360,7 +362,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	}
 
 	if (!pp->va_cfg1_base) {
-		pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
+		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
+						pp->cfg1_base,
 						pp->cfg1_size);
 		if (!pp->va_cfg1_base) {
 			dev_err(dev, "error with ioremap\n");

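The host-core hunks above, like the keystone/layerscape/armada8k ones earlier, switch config-space windows from devm_ioremap() to the new non-posted mapping helpers. A sketch of the resource-based variant in a hypothetical host driver probe path ("myhost" and the "dbi" resource name are illustrative):

#include <linux/pci.h>
#include <linux/platform_device.h>

/* Illustrative: map a controller's dbi/config window with the new helper. */
static void __iomem *myhost_map_dbi(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");

	/* returns an ERR_PTR() on failure, so callers check with IS_ERR() */
	return devm_pci_remap_cfg_resource(&pdev->dev, res);
}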
+ 1 - 0
drivers/pci/dwc/pcie-designware-plat.c

@@ -133,6 +133,7 @@ static struct platform_driver dw_plat_pcie_driver = {
 	.driver = {
 		.name	= "dw-pcie",
 		.of_match_table = dw_plat_pcie_of_match,
+		.suppress_bind_attrs = true,
 	},
 	.probe = dw_plat_pcie_probe,
 };

+ 210 - 48
drivers/pci/dwc/pcie-designware.c

@@ -61,91 +61,253 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val)
 	return PCIBIOS_SUCCESSFUL;
 }
 
-u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+		       size_t size)
 {
-	if (pci->ops->readl_dbi)
-		return pci->ops->readl_dbi(pci, reg);
+	int ret;
+	u32 val;
 
-	return readl(pci->dbi_base + reg);
+	if (pci->ops->read_dbi)
+		return pci->ops->read_dbi(pci, base, reg, size);
+
+	ret = dw_pcie_read(base + reg, size, &val);
+	if (ret)
+		dev_err(pci->dev, "read DBI address failed\n");
+
+	return val;
 }
 
-void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+			 size_t size, u32 val)
 {
-	if (pci->ops->writel_dbi)
-		pci->ops->writel_dbi(pci, reg, val);
-	else
-		writel(val, pci->dbi_base + reg);
+	int ret;
+
+	if (pci->ops->write_dbi) {
+		pci->ops->write_dbi(pci, base, reg, size, val);
+		return;
+	}
+
+	ret = dw_pcie_write(base + reg, size, val);
+	if (ret)
+		dev_err(pci->dev, "write DBI address failed\n");
 }
 }
 
-static u32 dw_pcie_readl_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
 {
 	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
 	return dw_pcie_readl_dbi(pci, offset + reg);
 }
 
-				  u32 val)
+static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+				     u32 val)
 {
 	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 
 	dw_pcie_writel_dbi(pci, offset + reg, val);
 }
 
+void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type,
+				      u64 cpu_addr, u64 pci_addr, u32 size)
+{
+	u32 retries, val;
+
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
+				 lower_32_bits(cpu_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
+				 upper_32_bits(cpu_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
+				 lower_32_bits(cpu_addr + size - 1));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+				 lower_32_bits(pci_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+				 upper_32_bits(pci_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
+				 type);
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+				 PCIE_ATU_ENABLE);
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_ob_unroll(pci, index,
+					      PCIE_ATU_UNR_REGION_CTRL2);
+		if (val & PCIE_ATU_ENABLE)
+			return;
+
+		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+	}
+	dev_err(pci->dev, "outbound iATU is not being enabled\n");
+}
+
 void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
 			       u64 cpu_addr, u64 pci_addr, u32 size)
 {
 	u32 retries, val;
 
+	if (pci->ops->cpu_addr_fixup)
+		cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);
+
 	if (pci->iatu_unroll_enabled) {
-		dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
-				      lower_32_bits(cpu_addr));
-		dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
-				      upper_32_bits(cpu_addr));
-		dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
-				      lower_32_bits(cpu_addr + size - 1));
-		dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
-				      lower_32_bits(pci_addr));
-		dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
-				      upper_32_bits(pci_addr));
-		dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
-				      type);
-		dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
-				      PCIE_ATU_ENABLE);
-	} else {
-		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
-				   PCIE_ATU_REGION_OUTBOUND | index);
-		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
-				   lower_32_bits(cpu_addr));
-		dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
-				   upper_32_bits(cpu_addr));
-		dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
-				   lower_32_bits(cpu_addr + size - 1));
-		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
-				   lower_32_bits(pci_addr));
-		dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
-				   upper_32_bits(pci_addr));
-		dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
-		dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
+						 pci_addr, size);
+		return;
 	}
 
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
+			   PCIE_ATU_REGION_OUTBOUND | index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
+			   lower_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
+			   upper_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
+			   lower_32_bits(cpu_addr + size - 1));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
+			   lower_32_bits(pci_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
+			   upper_32_bits(pci_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+
 	/*
 	 * Make sure ATU enable takes effect before any subsequent config
 	 * and I/O accesses.
 	 */
 	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
-		if (pci->iatu_unroll_enabled)
-			val = dw_pcie_readl_unroll(pci, index,
-						   PCIE_ATU_UNR_REGION_CTRL2);
-		else
-			val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
-
+		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
 		if (val == PCIE_ATU_ENABLE)
 			return;
 
 		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
 	}
-	dev_err(pci->dev, "iATU is not being enabled\n");
+	dev_err(pci->dev, "outbound iATU is not being enabled\n");
+}
+
+static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+{
+	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+	return dw_pcie_readl_dbi(pci, offset + reg);
+}
+
+static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+				     u32 val)
+{
+	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+	dw_pcie_writel_dbi(pci, offset + reg, val);
+}
+
+int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar,
+				    u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+	int type;
+	u32 retries, val;
+
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+				 lower_32_bits(cpu_addr));
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+				 upper_32_bits(cpu_addr));
+
+	switch (as_type) {
+	case DW_PCIE_AS_MEM:
+		type = PCIE_ATU_TYPE_MEM;
+		break;
+	case DW_PCIE_AS_IO:
+		type = PCIE_ATU_TYPE_IO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+				 PCIE_ATU_ENABLE |
+				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_ib_unroll(pci, index,
+					      PCIE_ATU_UNR_REGION_CTRL2);
+		if (val & PCIE_ATU_ENABLE)
+			return 0;
+
+		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+	}
+	dev_err(pci->dev, "inbound iATU is not being enabled\n");
+
+	return -EBUSY;
+}
+
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+			     u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+	int type;
+	u32 retries, val;
+
+	if (pci->iatu_unroll_enabled)
+		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+						       cpu_addr, as_type);
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
+			   index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
+
+	switch (as_type) {
+	case DW_PCIE_AS_MEM:
+		type = PCIE_ATU_TYPE_MEM;
+		break;
+	case DW_PCIE_AS_IO:
+		type = PCIE_ATU_TYPE_IO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
+			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+		if (val & PCIE_ATU_ENABLE)
+			return 0;
+
+		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+	}
+	dev_err(pci->dev, "inbound iATU is not being enabled\n");
+
+	return -EBUSY;
+}
+
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+			 enum dw_pcie_region_type type)
+{
+	int region;
+
+	switch (type) {
+	case DW_PCIE_REGION_INBOUND:
+		region = PCIE_ATU_REGION_INBOUND;
+		break;
+	case DW_PCIE_REGION_OUTBOUND:
+		region = PCIE_ATU_REGION_OUTBOUND;
+		break;
+	default:
+		return;
+	}
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
 }
 
 int dw_pcie_wait_for_link(struct dw_pcie *pci)
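For illustration, a minimal sketch of how a caller could use the new inbound iATU helpers added in this file; the window index, BAR number and buffer address are made-up example values, and the declarations are assumed to come from pcie-designware.h. This is a hedged sketch, not part of the patch set.

static int example_map_bar2_inbound(struct dw_pcie *pci, dma_addr_t buf_phys)
{
	int ret;

	/* Route host accesses hitting BAR2 into a local memory buffer. */
	ret = dw_pcie_prog_inbound_atu(pci, 0, 2, buf_phys, DW_PCIE_AS_MEM);
	if (ret)
		return ret;	/* -EINVAL (bad as_type) or -EBUSY (enable timed out) */

	/* ... BAR2 is now backed by buf_phys ... */

	dw_pcie_disable_atu(pci, 0, DW_PCIE_REGION_INBOUND);
	return 0;
}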

+ 131 - 4
drivers/pci/dwc/pcie-designware.h

@@ -18,6 +18,9 @@
 #include <linux/msi.h>
 #include <linux/pci.h>
 
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
 /* Parameters for the waiting for link up routine */
 #define LINK_WAIT_MAX_RETRIES		10
 #define LINK_WAIT_USLEEP_MIN		90000
@@ -89,6 +92,16 @@
 #define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region)	\
 			((0x3 << 20) | ((region) << 9))
 
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region)				\
+			((0x3 << 20) | ((region) << 9) | (0x1 << 8))
+
+#define MSI_MESSAGE_CONTROL		0x52
+#define MSI_CAP_MMC_SHIFT		1
+#define MSI_CAP_MME_SHIFT		4
+#define MSI_CAP_MME_MASK		(7 << MSI_CAP_MME_SHIFT)
+#define MSI_MESSAGE_ADDR_L32		0x54
+#define MSI_MESSAGE_ADDR_U32		0x58
+
 /*
  * Maximum number of MSI IRQs can be 256 per controller. But keep
  * it 32 as of now. Probably we will never need more than 32. If needed,
@@ -99,6 +112,20 @@
 
 struct pcie_port;
 struct dw_pcie;
+struct dw_pcie_ep;
+
+enum dw_pcie_region_type {
+	DW_PCIE_REGION_UNKNOWN,
+	DW_PCIE_REGION_INBOUND,
+	DW_PCIE_REGION_OUTBOUND,
+};
+
+enum dw_pcie_device_mode {
+	DW_PCIE_UNKNOWN_TYPE,
+	DW_PCIE_EP_TYPE,
+	DW_PCIE_LEG_EP_TYPE,
+	DW_PCIE_RC_TYPE,
+};
 
 struct dw_pcie_host_ops {
 	int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
@@ -142,35 +169,116 @@ struct pcie_port {
 	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
 };
 
+enum dw_pcie_as_type {
+	DW_PCIE_AS_UNKNOWN,
+	DW_PCIE_AS_MEM,
+	DW_PCIE_AS_IO,
+};
+
+struct dw_pcie_ep_ops {
+	void	(*ep_init)(struct dw_pcie_ep *ep);
+	int	(*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
+			     u8 interrupt_num);
+};
+
+struct dw_pcie_ep {
+	struct pci_epc		*epc;
+	struct dw_pcie_ep_ops	*ops;
+	phys_addr_t		phys_base;
+	size_t			addr_size;
+	u8			bar_to_atu[6];
+	phys_addr_t		*outbound_addr;
+	unsigned long		ib_window_map;
+	unsigned long		ob_window_map;
+	u32			num_ib_windows;
+	u32			num_ob_windows;
+};
+
 struct dw_pcie_ops {
-	u32	(*readl_dbi)(struct dw_pcie *pcie, u32 reg);
-	void	(*writel_dbi)(struct dw_pcie *pcie, u32 reg, u32 val);
+	u64	(*cpu_addr_fixup)(u64 cpu_addr);
+	u32	(*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			    size_t size);
+	void	(*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			     size_t size, u32 val);
 	int	(*link_up)(struct dw_pcie *pcie);
+	int	(*start_link)(struct dw_pcie *pcie);
+	void	(*stop_link)(struct dw_pcie *pcie);
 };
 
 struct dw_pcie {
 	struct device		*dev;
 	void __iomem		*dbi_base;
+	void __iomem		*dbi_base2;
 	u32			num_viewport;
 	u8			iatu_unroll_enabled;
 	struct pcie_port	pp;
+	struct dw_pcie_ep	ep;
 	const struct dw_pcie_ops *ops;
 };
 
 #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
 
+#define to_dw_pcie_from_ep(endpoint)   \
+		container_of((endpoint), struct dw_pcie, ep)
+
 int dw_pcie_read(void __iomem *addr, int size, u32 *val);
 int dw_pcie_write(void __iomem *addr, int size, u32 val);
 
-u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg);
-void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val);
+u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+		       size_t size);
+void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+			 size_t size, u32 val);
 int dw_pcie_link_up(struct dw_pcie *pci);
 int dw_pcie_wait_for_link(struct dw_pcie *pci);
 void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
 			       int type, u64 cpu_addr, u64 pci_addr,
 			       u32 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+			     u64 cpu_addr, enum dw_pcie_as_type as_type);
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+			 enum dw_pcie_region_type type);
 void dw_pcie_setup(struct dw_pcie *pci);
 
+static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	__dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);
+}
+
+static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
+{
+	__dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2);
+}
+
+static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
+{
+	__dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1);
+}
+
+static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	__dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
+{
+	return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
+}
+
 #ifdef CONFIG_PCIE_DW_HOST
 irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
 void dw_pcie_msi_init(struct pcie_port *pp);
@@ -195,4 +303,23 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_PCIE_DW_EP
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+#else
+static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	return 0;
+}
+
+static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+}
+#endif
 #endif /* _PCIE_DESIGNWARE_H */
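To show what the widened DBI hooks declared above enable, here is a hedged sketch of a platform glue driver supplying its own read_dbi/write_dbi callbacks; the example_* names are invented for illustration and simply fall back to the exported dw_pcie_read()/dw_pcie_write() helpers.

static u32 example_read_dbi(struct dw_pcie *pci, void __iomem *base,
			    u32 reg, size_t size)
{
	u32 val = 0;

	/* A real driver could add locking or address fixups here. */
	dw_pcie_read(base + reg, size, &val);
	return val;
}

static void example_write_dbi(struct dw_pcie *pci, void __iomem *base,
			      u32 reg, size_t size, u32 val)
{
	dw_pcie_write(base + reg, size, val);
}

static const struct dw_pcie_ops example_pcie_ops = {
	.read_dbi	= example_read_dbi,
	.write_dbi	= example_write_dbi,
};

With such ops installed, every dw_pcie_read{b,w,l}_dbi()/dw_pcie_write{b,w,l}_dbi() wrapper above funnels through these two callbacks regardless of access width.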

+ 5 - 4
drivers/pci/dwc/pcie-hisi.c

@@ -99,7 +99,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
 		return -ENOMEM;
 	}
 
-	reg_base = devm_ioremap(dev, res->start, resource_size(res));
+	reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
 	if (!reg_base)
 		return -ENOMEM;
 
@@ -296,10 +296,9 @@ static int hisi_pcie_probe(struct platform_device *pdev)
 	}
 
 	reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
-	pci->dbi_base = devm_ioremap_resource(dev, reg);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
 	if (IS_ERR(pci->dbi_base))
 		return PTR_ERR(pci->dbi_base);
-
 	platform_set_drvdata(pdev, hisi_pcie);
 
 	ret = hisi_add_pcie_port(hisi_pcie, pdev);
@@ -334,6 +333,7 @@ static struct platform_driver hisi_pcie_driver = {
 	.driver = {
 		   .name = "hisi-pcie",
 		   .of_match_table = hisi_pcie_of_match,
+		   .suppress_bind_attrs = true,
 	},
 };
 builtin_platform_driver(hisi_pcie_driver);
@@ -360,7 +360,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
 		return -EINVAL;
 	}
 
-	reg_base = devm_ioremap(dev, res->start, resource_size(res));
+	reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
 	if (!reg_base)
 		return -ENOMEM;
 
@@ -395,6 +395,7 @@ static struct platform_driver hisi_pcie_almost_ecam_driver = {
 	.driver = {
 		   .name = "hisi-pcie-almost-ecam",
 		   .of_match_table = hisi_pcie_almost_ecam_of_match,
+		   .suppress_bind_attrs = true,
 	},
 };
 builtin_platform_driver(hisi_pcie_almost_ecam_driver);

+ 1 - 1
drivers/pci/dwc/pcie-qcom.c

@@ -700,7 +700,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		return PTR_ERR(pcie->parf);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
-	pci->dbi_base = devm_ioremap_resource(dev, res);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
 	if (IS_ERR(pci->dbi_base))
 		return PTR_ERR(pci->dbi_base);
 

+ 2 - 1
drivers/pci/dwc/pcie-spear13xx.c

@@ -273,7 +273,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
 	}
 
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
-	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
 	if (IS_ERR(pci->dbi_base)) {
 		dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
 		ret = PTR_ERR(pci->dbi_base);
@@ -308,6 +308,7 @@ static struct platform_driver spear13xx_pcie_driver = {
 	.driver = {
 		.name	= "spear-pcie",
 		.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+		.suppress_bind_attrs = true,
 	},
 };
 

+ 4 - 2
drivers/pci/ecam.c

@@ -84,12 +84,14 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
 		if (!cfg->winp)
 			goto err_exit_malloc;
 		for (i = 0; i < bus_range; i++) {
-			cfg->winp[i] = ioremap(cfgres->start + i * bsz, bsz);
+			cfg->winp[i] =
+				pci_remap_cfgspace(cfgres->start + i * bsz,
+						   bsz);
 			if (!cfg->winp[i])
 				goto err_exit_iomap;
 		}
 	} else {
-		cfg->win = ioremap(cfgres->start, bus_range * bsz);
+		cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz);
 		if (!cfg->win)
 			goto err_exit_iomap;
 	}
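The conversions in the DWC drivers and the ECAM code above all follow the same pattern; a hedged sketch of a host controller probe using the non-postable config-space mapping helpers (the "dbi" resource name and the example_* identifiers are illustrative, not taken from any specific driver):

static int example_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *dbi;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	/* Non-postable mapping, suitable for PCI configuration accesses. */
	dbi = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(dbi))
		return PTR_ERR(dbi);

	/* ... continue with host bridge setup ... */
	return 0;
}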

+ 31 - 0
drivers/pci/endpoint/Kconfig

@@ -0,0 +1,31 @@
+#
+# PCI Endpoint Support
+#
+
+menu "PCI Endpoint"
+
+config PCI_ENDPOINT
+	bool "PCI Endpoint Support"
+	help
+	   Enable this configuration option to support a configurable PCI
+	   endpoint. This should be enabled if the platform has a PCI
+	   controller that can operate in endpoint mode.
+
+	   Enabling this option will build the endpoint library, which
+	   includes endpoint controller library and endpoint function
+	   library.
+
+	   If in doubt, say "N" to disable Endpoint support.
+
+config PCI_ENDPOINT_CONFIGFS
+	bool "PCI Endpoint Configfs Support"
+	depends on PCI_ENDPOINT
+	select CONFIGFS_FS
+	help
+	   This will enable the configfs entry that can be used to
+	   configure the endpoint function and to bind the
+	   function to an endpoint controller.
+
+source "drivers/pci/endpoint/functions/Kconfig"
+
+endmenu

+ 7 - 0
drivers/pci/endpoint/Makefile

@@ -0,0 +1,7 @@
+#
+# Makefile for PCI Endpoint Support
+#
+
+obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS)	+= pci-ep-cfs.o
+obj-$(CONFIG_PCI_ENDPOINT)		+= pci-epc-core.o pci-epf-core.o\
+					   pci-epc-mem.o functions/

+ 12 - 0
drivers/pci/endpoint/functions/Kconfig

@@ -0,0 +1,12 @@
+#
+# PCI Endpoint Functions
+#
+
+config PCI_EPF_TEST
+	tristate "PCI Endpoint Test driver"
+	depends on PCI_ENDPOINT
+	help
+	   Enable this configuration option to enable the test driver
+	   for PCI Endpoint.
+
+	   If in doubt, say "N" to disable Endpoint test driver.

+ 5 - 0
drivers/pci/endpoint/functions/Makefile

@@ -0,0 +1,5 @@
+#
+# Makefile for PCI Endpoint Functions
+#
+
+obj-$(CONFIG_PCI_EPF_TEST)		+= pci-epf-test.o

+ 510 - 0
drivers/pci/endpoint/functions/pci-epf-test.c

@@ -0,0 +1,510 @@
+/**
+ * Test driver to test endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci_ids.h>
+#include <linux/random.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci_regs.h>
+
+#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
+#define COMMAND_RAISE_MSI_IRQ		BIT(1)
+#define MSI_NUMBER_SHIFT		2
+#define MSI_NUMBER_MASK			(0x3f << MSI_NUMBER_SHIFT)
+#define COMMAND_READ			BIT(8)
+#define COMMAND_WRITE			BIT(9)
+#define COMMAND_COPY			BIT(10)
+
+#define STATUS_READ_SUCCESS		BIT(0)
+#define STATUS_READ_FAIL		BIT(1)
+#define STATUS_WRITE_SUCCESS		BIT(2)
+#define STATUS_WRITE_FAIL		BIT(3)
+#define STATUS_COPY_SUCCESS		BIT(4)
+#define STATUS_COPY_FAIL		BIT(5)
+#define STATUS_IRQ_RAISED		BIT(6)
+#define STATUS_SRC_ADDR_INVALID		BIT(7)
+#define STATUS_DST_ADDR_INVALID		BIT(8)
+
+#define TIMER_RESOLUTION		1
+
+static struct workqueue_struct *kpcitest_workqueue;
+
+struct pci_epf_test {
+	void			*reg[6];
+	struct pci_epf		*epf;
+	struct delayed_work	cmd_handler;
+};
+
+struct pci_epf_test_reg {
+	u32	magic;
+	u32	command;
+	u32	status;
+	u64	src_addr;
+	u64	dst_addr;
+	u32	size;
+	u32	checksum;
+} __packed;
+
+static struct pci_epf_header test_header = {
+	.vendorid	= PCI_ANY_ID,
+	.deviceid	= PCI_ANY_ID,
+	.baseclass_code = PCI_CLASS_OTHERS,
+	.interrupt_pin	= PCI_INTERRUPT_INTA,
+};
+
+static int bar_size[] = { 512, 1024, 16384, 131072, 1048576 };
+
+static int pci_epf_test_copy(struct pci_epf_test *epf_test)
+{
+	int ret;
+	void __iomem *src_addr;
+	void __iomem *dst_addr;
+	phys_addr_t src_phys_addr;
+	phys_addr_t dst_phys_addr;
+	struct pci_epf *epf = epf_test->epf;
+	struct device *dev = &epf->dev;
+	struct pci_epc *epc = epf->epc;
+	struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
+	if (!src_addr) {
+		dev_err(dev, "failed to allocate source address\n");
+		reg->status = STATUS_SRC_ADDR_INVALID;
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
+	if (ret) {
+		dev_err(dev, "failed to map source address\n");
+		reg->status = STATUS_SRC_ADDR_INVALID;
+		goto err_src_addr;
+	}
+
+	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
+	if (!dst_addr) {
+		dev_err(dev, "failed to allocate destination address\n");
+		reg->status = STATUS_DST_ADDR_INVALID;
+		ret = -ENOMEM;
+		goto err_src_map_addr;
+	}
+
+	ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
+	if (ret) {
+		dev_err(dev, "failed to map destination address\n");
+		reg->status = STATUS_DST_ADDR_INVALID;
+		goto err_dst_addr;
+	}
+
+	memcpy(dst_addr, src_addr, reg->size);
+
+	pci_epc_unmap_addr(epc, dst_phys_addr);
+
+err_dst_addr:
+	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+
+err_src_map_addr:
+	pci_epc_unmap_addr(epc, src_phys_addr);
+
+err_src_addr:
+	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+
+err:
+	return ret;
+}
+
+static int pci_epf_test_read(struct pci_epf_test *epf_test)
+{
+	int ret;
+	void __iomem *src_addr;
+	void *buf;
+	u32 crc32;
+	phys_addr_t phys_addr;
+	struct pci_epf *epf = epf_test->epf;
+	struct device *dev = &epf->dev;
+	struct pci_epc *epc = epf->epc;
+	struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+	if (!src_addr) {
+		dev_err(dev, "failed to allocate address\n");
+		reg->status = STATUS_SRC_ADDR_INVALID;
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
+	if (ret) {
+		dev_err(dev, "failed to map address\n");
+		reg->status = STATUS_SRC_ADDR_INVALID;
+		goto err_addr;
+	}
+
+	buf = kzalloc(reg->size, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_map_addr;
+	}
+
+	memcpy(buf, src_addr, reg->size);
+
+	crc32 = crc32_le(~0, buf, reg->size);
+	if (crc32 != reg->checksum)
+		ret = -EIO;
+
+	kfree(buf);
+
+err_map_addr:
+	pci_epc_unmap_addr(epc, phys_addr);
+
+err_addr:
+	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
+
+err:
+	return ret;
+}
+
+static int pci_epf_test_write(struct pci_epf_test *epf_test)
+{
+	int ret;
+	void __iomem *dst_addr;
+	void *buf;
+	phys_addr_t phys_addr;
+	struct pci_epf *epf = epf_test->epf;
+	struct device *dev = &epf->dev;
+	struct pci_epc *epc = epf->epc;
+	struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+	if (!dst_addr) {
+		dev_err(dev, "failed to allocate address\n");
+		reg->status = STATUS_DST_ADDR_INVALID;
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
+	if (ret) {
+		dev_err(dev, "failed to map address\n");
+		reg->status = STATUS_DST_ADDR_INVALID;
+		goto err_addr;
+	}
+
+	buf = kzalloc(reg->size, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_map_addr;
+	}
+
+	get_random_bytes(buf, reg->size);
+	reg->checksum = crc32_le(~0, buf, reg->size);
+
+	memcpy(dst_addr, buf, reg->size);
+
+	/*
+	 * Wait 1ms for the write to complete. Without this delay, an L3
+	 * error is observed in the host system.
+	 */
+	mdelay(1);
+
+	kfree(buf);
+
+err_map_addr:
+	pci_epc_unmap_addr(epc, phys_addr);
+
+err_addr:
+	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
+
+err:
+	return ret;
+}
+
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
+{
+	u8 irq;
+	u8 msi_count;
+	struct pci_epf *epf = epf_test->epf;
+	struct pci_epc *epc = epf->epc;
+	struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+	reg->status |= STATUS_IRQ_RAISED;
+	msi_count = pci_epc_get_msi(epc);
+	irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+	if (irq > msi_count || msi_count <= 0)
+		pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+	else
+		pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
+}
+
+static void pci_epf_test_cmd_handler(struct work_struct *work)
+{
+	int ret;
+	u8 irq;
+	u8 msi_count;
+	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
+						     cmd_handler.work);
+	struct pci_epf *epf = epf_test->epf;
+	struct pci_epc *epc = epf->epc;
+	struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+	if (!reg->command)
+		goto reset_handler;
+
+	if (reg->command & COMMAND_RAISE_LEGACY_IRQ) {
+		reg->status = STATUS_IRQ_RAISED;
+		pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+		goto reset_handler;
+	}
+
+	if (reg->command & COMMAND_WRITE) {
+		ret = pci_epf_test_write(epf_test);
+		if (ret)
+			reg->status |= STATUS_WRITE_FAIL;
+		else
+			reg->status |= STATUS_WRITE_SUCCESS;
+		pci_epf_test_raise_irq(epf_test);
+		goto reset_handler;
+	}
+
+	if (reg->command & COMMAND_READ) {
+		ret = pci_epf_test_read(epf_test);
+		if (!ret)
+			reg->status |= STATUS_READ_SUCCESS;
+		else
+			reg->status |= STATUS_READ_FAIL;
+		pci_epf_test_raise_irq(epf_test);
+		goto reset_handler;
+	}
+
+	if (reg->command & COMMAND_COPY) {
+		ret = pci_epf_test_copy(epf_test);
+		if (!ret)
+			reg->status |= STATUS_COPY_SUCCESS;
+		else
+			reg->status |= STATUS_COPY_FAIL;
+		pci_epf_test_raise_irq(epf_test);
+		goto reset_handler;
+	}
+
+	if (reg->command & COMMAND_RAISE_MSI_IRQ) {
+		msi_count = pci_epc_get_msi(epc);
+		irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+		if (irq > msi_count || msi_count <= 0)
+			goto reset_handler;
+		reg->status = STATUS_IRQ_RAISED;
+		pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
+		goto reset_handler;
+	}
+
+reset_handler:
+	reg->command = 0;
+
+	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+			   msecs_to_jiffies(1));
+}
+
+static void pci_epf_test_linkup(struct pci_epf *epf)
+{
+	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+			   msecs_to_jiffies(1));
+}
+
+static void pci_epf_test_unbind(struct pci_epf *epf)
+{
+	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+	struct pci_epc *epc = epf->epc;
+	int bar;
+
+	cancel_delayed_work(&epf_test->cmd_handler);
+	pci_epc_stop(epc);
+	for (bar = BAR_0; bar <= BAR_5; bar++) {
+		if (epf_test->reg[bar]) {
+			pci_epf_free_space(epf, epf_test->reg[bar], bar);
+			pci_epc_clear_bar(epc, bar);
+		}
+	}
+}
+
+static int pci_epf_test_set_bar(struct pci_epf *epf)
+{
+	int flags;
+	int bar;
+	int ret;
+	struct pci_epf_bar *epf_bar;
+	struct pci_epc *epc = epf->epc;
+	struct device *dev = &epf->dev;
+	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+	if (sizeof(dma_addr_t) == 0x8)
+		flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++) {
+		epf_bar = &epf->bar[bar];
+		ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
+				      epf_bar->size, flags);
+		if (ret) {
+			pci_epf_free_space(epf, epf_test->reg[bar], bar);
+			dev_err(dev, "failed to set BAR%d\n", bar);
+			if (bar == BAR_0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int pci_epf_test_alloc_space(struct pci_epf *epf)
+{
+	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+	struct device *dev = &epf->dev;
+	void *base;
+	int bar;
+
+	base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
+				   BAR_0);
+	if (!base) {
+		dev_err(dev, "failed to allocate register space\n");
+		return -ENOMEM;
+	}
+	epf_test->reg[0] = base;
+
+	for (bar = BAR_1; bar <= BAR_5; bar++) {
+		base = pci_epf_alloc_space(epf, bar_size[bar - 1], bar);
+		if (!base)
+			dev_err(dev, "failed to allocate space for BAR%d\n",
+				bar);
+		epf_test->reg[bar] = base;
+	}
+
+	return 0;
+}
+
+static int pci_epf_test_bind(struct pci_epf *epf)
+{
+	int ret;
+	struct pci_epf_header *header = epf->header;
+	struct pci_epc *epc = epf->epc;
+	struct device *dev = &epf->dev;
+
+	if (WARN_ON_ONCE(!epc))
+		return -EINVAL;
+
+	ret = pci_epc_write_header(epc, header);
+	if (ret) {
+		dev_err(dev, "configuration header write failed\n");
+		return ret;
+	}
+
+	ret = pci_epf_test_alloc_space(epf);
+	if (ret)
+		return ret;
+
+	ret = pci_epf_test_set_bar(epf);
+	if (ret)
+		return ret;
+
+	ret = pci_epc_set_msi(epc, epf->msi_interrupts);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int pci_epf_test_probe(struct pci_epf *epf)
+{
+	struct pci_epf_test *epf_test;
+	struct device *dev = &epf->dev;
+
+	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
+	if (!epf_test)
+		return -ENOMEM;
+
+	epf->header = &test_header;
+	epf_test->epf = epf;
+
+	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
+
+	epf_set_drvdata(epf, epf_test);
+	return 0;
+}
+
+static int pci_epf_test_remove(struct pci_epf *epf)
+{
+	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+	kfree(epf_test);
+	return 0;
+}
+
+static struct pci_epf_ops ops = {
+	.unbind	= pci_epf_test_unbind,
+	.bind	= pci_epf_test_bind,
+	.linkup = pci_epf_test_linkup,
+};
+
+static const struct pci_epf_device_id pci_epf_test_ids[] = {
+	{
+		.name = "pci_epf_test",
+	},
+	{},
+};
+
+static struct pci_epf_driver test_driver = {
+	.driver.name	= "pci_epf_test",
+	.probe		= pci_epf_test_probe,
+	.remove		= pci_epf_test_remove,
+	.id_table	= pci_epf_test_ids,
+	.ops		= &ops,
+	.owner		= THIS_MODULE,
+};
+
+static int __init pci_epf_test_init(void)
+{
+	int ret;
+
+	kpcitest_workqueue = alloc_workqueue("kpcitest",
+					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+	ret = pci_epf_register_driver(&test_driver);
+	if (ret) {
+		pr_err("failed to register pci epf test driver --> %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(pci_epf_test_init);
+
+static void __exit pci_epf_test_exit(void)
+{
+	pci_epf_unregister_driver(&test_driver);
+}
+module_exit(pci_epf_test_exit);
+
+MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
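BAR0 of the test function above exposes struct pci_epf_test_reg, so the register protocol can be driven from the host once that BAR is mapped. A hedged sketch of the host side follows; the EPF_TEST_REG_* offsets are derived from the __packed layout above, and the function and macro names are illustrative (this is not the in-tree host driver).

#define EPF_TEST_REG_COMMAND	0x04	/* struct pci_epf_test_reg.command */
#define EPF_TEST_REG_DST_ADDR	0x14	/* struct pci_epf_test_reg.dst_addr */
#define EPF_TEST_REG_SIZE	0x1c	/* struct pci_epf_test_reg.size */

static void example_start_write_test(void __iomem *bar0,
				     u64 dst_dma_addr, u32 size)
{
	writel(lower_32_bits(dst_dma_addr), bar0 + EPF_TEST_REG_DST_ADDR);
	writel(upper_32_bits(dst_dma_addr), bar0 + EPF_TEST_REG_DST_ADDR + 4);
	writel(size, bar0 + EPF_TEST_REG_SIZE);
	/* Request MSI vector 1 and kick off a write test. */
	writel((1 << MSI_NUMBER_SHIFT) | COMMAND_WRITE,
	       bar0 + EPF_TEST_REG_COMMAND);
	/*
	 * The endpoint then fills the buffer at dst_dma_addr, records the
	 * CRC in the checksum field, sets STATUS_WRITE_* and raises the IRQ.
	 */
}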

+ 509 - 0
drivers/pci/endpoint/pci-ep-cfs.c

@@ -0,0 +1,509 @@
+/**
+ * configfs to configure the PCI endpoint
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct config_group *functions_group;
+static struct config_group *controllers_group;
+
+struct pci_epf_group {
+	struct config_group group;
+	struct pci_epf *epf;
+};
+
+struct pci_epc_group {
+	struct config_group group;
+	struct pci_epc *epc;
+	bool start;
+	unsigned long function_num_map;
+};
+
+static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct pci_epf_group, group);
+}
+
+static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct pci_epc_group, group);
+}
+
+static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
+				   size_t len)
+{
+	int ret;
+	bool start;
+	struct pci_epc *epc;
+	struct pci_epc_group *epc_group = to_pci_epc_group(item);
+
+	epc = epc_group->epc;
+
+	ret = kstrtobool(page, &start);
+	if (ret)
+		return ret;
+
+	if (!start) {
+		pci_epc_stop(epc);
+		return len;
+	}
+
+	ret = pci_epc_start(epc);
+	if (ret) {
+		dev_err(&epc->dev, "failed to start endpoint controller\n");
+		return -EINVAL;
+	}
+
+	epc_group->start = start;
+
+	return len;
+}
+
+static ssize_t pci_epc_start_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%d\n",
+		       to_pci_epc_group(item)->start);
+}
+
+CONFIGFS_ATTR(pci_epc_, start);
+
+static struct configfs_attribute *pci_epc_attrs[] = {
+	&pci_epc_attr_start,
+	NULL,
+};
+
+static int pci_epc_epf_link(struct config_item *epc_item,
+			    struct config_item *epf_item)
+{
+	int ret;
+	u32 func_no = 0;
+	struct pci_epc *epc;
+	struct pci_epf *epf;
+	struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+	struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+
+	epc = epc_group->epc;
+	epf = epf_group->epf;
+	ret = pci_epc_add_epf(epc, epf);
+	if (ret)
+		goto err_add_epf;
+
+	func_no = find_first_zero_bit(&epc_group->function_num_map,
+				      sizeof(epc_group->function_num_map));
+	set_bit(func_no, &epc_group->function_num_map);
+	epf->func_no = func_no;
+
+	ret = pci_epf_bind(epf);
+	if (ret)
+		goto err_epf_bind;
+
+	return 0;
+
+err_epf_bind:
+	pci_epc_remove_epf(epc, epf);
+
+err_add_epf:
+	clear_bit(func_no, &epc_group->function_num_map);
+
+	return ret;
+}
+
+static void pci_epc_epf_unlink(struct config_item *epc_item,
+			       struct config_item *epf_item)
+{
+	struct pci_epc *epc;
+	struct pci_epf *epf;
+	struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+	struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+
+	WARN_ON_ONCE(epc_group->start);
+
+	epc = epc_group->epc;
+	epf = epf_group->epf;
+	clear_bit(epf->func_no, &epc_group->function_num_map);
+	pci_epf_unbind(epf);
+	pci_epc_remove_epf(epc, epf);
+}
+
+static struct configfs_item_operations pci_epc_item_ops = {
+	.allow_link	= pci_epc_epf_link,
+	.drop_link	= pci_epc_epf_unlink,
+};
+
+static struct config_item_type pci_epc_type = {
+	.ct_item_ops	= &pci_epc_item_ops,
+	.ct_attrs	= pci_epc_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+struct config_group *pci_ep_cfs_add_epc_group(const char *name)
+{
+	int ret;
+	struct pci_epc *epc;
+	struct config_group *group;
+	struct pci_epc_group *epc_group;
+
+	epc_group = kzalloc(sizeof(*epc_group), GFP_KERNEL);
+	if (!epc_group) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	group = &epc_group->group;
+
+	config_group_init_type_name(group, name, &pci_epc_type);
+	ret = configfs_register_group(controllers_group, group);
+	if (ret) {
+		pr_err("failed to register configfs group for %s\n", name);
+		goto err_register_group;
+	}
+
+	epc = pci_epc_get(name);
+	if (IS_ERR(epc)) {
+		ret = PTR_ERR(epc);
+		goto err_epc_get;
+	}
+
+	epc_group->epc = epc;
+
+	return group;
+
+err_epc_get:
+	configfs_unregister_group(group);
+
+err_register_group:
+	kfree(epc_group);
+
+err:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pci_ep_cfs_add_epc_group);
+
+void pci_ep_cfs_remove_epc_group(struct config_group *group)
+{
+	struct pci_epc_group *epc_group;
+
+	if (!group)
+		return;
+
+	epc_group = container_of(group, struct pci_epc_group, group);
+	pci_epc_put(epc_group->epc);
+	configfs_unregister_group(&epc_group->group);
+	kfree(epc_group);
+}
+EXPORT_SYMBOL(pci_ep_cfs_remove_epc_group);
+
+#define PCI_EPF_HEADER_R(_name)						       \
+static ssize_t pci_epf_##_name##_show(struct config_item *item,	char *page)    \
+{									       \
+	struct pci_epf *epf = to_pci_epf_group(item)->epf;		       \
+	if (WARN_ON_ONCE(!epf->header))					       \
+		return -EINVAL;						       \
+	return sprintf(page, "0x%04x\n", epf->header->_name);		       \
+}
+
+#define PCI_EPF_HEADER_W_u32(_name)					       \
+static ssize_t pci_epf_##_name##_store(struct config_item *item,	       \
+				       const char *page, size_t len)	       \
+{									       \
+	u32 val;							       \
+	int ret;							       \
+	struct pci_epf *epf = to_pci_epf_group(item)->epf;		       \
+	if (WARN_ON_ONCE(!epf->header))					       \
+		return -EINVAL;						       \
+	ret = kstrtou32(page, 0, &val);					       \
+	if (ret)							       \
+		return ret;						       \
+	epf->header->_name = val;					       \
+	return len;							       \
+}
+
+#define PCI_EPF_HEADER_W_u16(_name)					       \
+static ssize_t pci_epf_##_name##_store(struct config_item *item,	       \
+				       const char *page, size_t len)	       \
+{									       \
+	u16 val;							       \
+	int ret;							       \
+	struct pci_epf *epf = to_pci_epf_group(item)->epf;		       \
+	if (WARN_ON_ONCE(!epf->header))					       \
+		return -EINVAL;						       \
+	ret = kstrtou16(page, 0, &val);					       \
+	if (ret)							       \
+		return ret;						       \
+	epf->header->_name = val;					       \
+	return len;							       \
+}
+
+#define PCI_EPF_HEADER_W_u8(_name)					       \
+static ssize_t pci_epf_##_name##_store(struct config_item *item,	       \
+				       const char *page, size_t len)	       \
+{									       \
+	u8 val;								       \
+	int ret;							       \
+	struct pci_epf *epf = to_pci_epf_group(item)->epf;		       \
+	if (WARN_ON_ONCE(!epf->header))					       \
+		return -EINVAL;						       \
+	ret = kstrtou8(page, 0, &val);					       \
+	if (ret)							       \
+		return ret;						       \
+	epf->header->_name = val;					       \
+	return len;							       \
+}
+
+static ssize_t pci_epf_msi_interrupts_store(struct config_item *item,
+					    const char *page, size_t len)
+{
+	u8 val;
+	int ret;
+
+	ret = kstrtou8(page, 0, &val);
+	if (ret)
+		return ret;
+
+	to_pci_epf_group(item)->epf->msi_interrupts = val;
+
+	return len;
+}
+
+static ssize_t pci_epf_msi_interrupts_show(struct config_item *item,
+					   char *page)
+{
+	return sprintf(page, "%d\n",
+		       to_pci_epf_group(item)->epf->msi_interrupts);
+}
+
+PCI_EPF_HEADER_R(vendorid)
+PCI_EPF_HEADER_W_u16(vendorid)
+
+PCI_EPF_HEADER_R(deviceid)
+PCI_EPF_HEADER_W_u16(deviceid)
+
+PCI_EPF_HEADER_R(revid)
+PCI_EPF_HEADER_W_u8(revid)
+
+PCI_EPF_HEADER_R(progif_code)
+PCI_EPF_HEADER_W_u8(progif_code)
+
+PCI_EPF_HEADER_R(subclass_code)
+PCI_EPF_HEADER_W_u8(subclass_code)
+
+PCI_EPF_HEADER_R(baseclass_code)
+PCI_EPF_HEADER_W_u8(baseclass_code)
+
+PCI_EPF_HEADER_R(cache_line_size)
+PCI_EPF_HEADER_W_u8(cache_line_size)
+
+PCI_EPF_HEADER_R(subsys_vendor_id)
+PCI_EPF_HEADER_W_u16(subsys_vendor_id)
+
+PCI_EPF_HEADER_R(subsys_id)
+PCI_EPF_HEADER_W_u16(subsys_id)
+
+PCI_EPF_HEADER_R(interrupt_pin)
+PCI_EPF_HEADER_W_u8(interrupt_pin)
+
+CONFIGFS_ATTR(pci_epf_, vendorid);
+CONFIGFS_ATTR(pci_epf_, deviceid);
+CONFIGFS_ATTR(pci_epf_, revid);
+CONFIGFS_ATTR(pci_epf_, progif_code);
+CONFIGFS_ATTR(pci_epf_, subclass_code);
+CONFIGFS_ATTR(pci_epf_, baseclass_code);
+CONFIGFS_ATTR(pci_epf_, cache_line_size);
+CONFIGFS_ATTR(pci_epf_, subsys_vendor_id);
+CONFIGFS_ATTR(pci_epf_, subsys_id);
+CONFIGFS_ATTR(pci_epf_, interrupt_pin);
+CONFIGFS_ATTR(pci_epf_, msi_interrupts);
+
+static struct configfs_attribute *pci_epf_attrs[] = {
+	&pci_epf_attr_vendorid,
+	&pci_epf_attr_deviceid,
+	&pci_epf_attr_revid,
+	&pci_epf_attr_progif_code,
+	&pci_epf_attr_subclass_code,
+	&pci_epf_attr_baseclass_code,
+	&pci_epf_attr_cache_line_size,
+	&pci_epf_attr_subsys_vendor_id,
+	&pci_epf_attr_subsys_id,
+	&pci_epf_attr_interrupt_pin,
+	&pci_epf_attr_msi_interrupts,
+	NULL,
+};
+
+static void pci_epf_release(struct config_item *item)
+{
+	struct pci_epf_group *epf_group = to_pci_epf_group(item);
+
+	pci_epf_destroy(epf_group->epf);
+	kfree(epf_group);
+}
+
+static struct configfs_item_operations pci_epf_ops = {
+	.release		= pci_epf_release,
+};
+
+static struct config_item_type pci_epf_type = {
+	.ct_item_ops	= &pci_epf_ops,
+	.ct_attrs	= pci_epf_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct config_group *pci_epf_make(struct config_group *group,
+					 const char *name)
+{
+	struct pci_epf_group *epf_group;
+	struct pci_epf *epf;
+
+	epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
+	if (!epf_group)
+		return ERR_PTR(-ENOMEM);
+
+	config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
+
+	epf = pci_epf_create(group->cg_item.ci_name);
+	if (IS_ERR(epf)) {
+		pr_err("failed to create endpoint function device\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	epf_group->epf = epf;
+
+	return &epf_group->group;
+}
+
+static void pci_epf_drop(struct config_group *group, struct config_item *item)
+{
+	config_item_put(item);
+}
+
+static struct configfs_group_operations pci_epf_group_ops = {
+	.make_group     = &pci_epf_make,
+	.drop_item      = &pci_epf_drop,
+};
+
+static struct config_item_type pci_epf_group_type = {
+	.ct_group_ops	= &pci_epf_group_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+struct config_group *pci_ep_cfs_add_epf_group(const char *name)
+{
+	struct config_group *group;
+
+	group = configfs_register_default_group(functions_group, name,
+						&pci_epf_group_type);
+	if (IS_ERR(group))
+		pr_err("failed to register configfs group for %s function\n",
+		       name);
+
+	return group;
+}
+EXPORT_SYMBOL(pci_ep_cfs_add_epf_group);
+
+void pci_ep_cfs_remove_epf_group(struct config_group *group)
+{
+	if (IS_ERR_OR_NULL(group))
+		return;
+
+	configfs_unregister_default_group(group);
+}
+EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
+
+static struct config_item_type pci_functions_type = {
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct config_item_type pci_controllers_type = {
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct config_item_type pci_ep_type = {
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct configfs_subsystem pci_ep_cfs_subsys = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "pci_ep",
+			.ci_type = &pci_ep_type,
+		},
+	},
+	.su_mutex = __MUTEX_INITIALIZER(pci_ep_cfs_subsys.su_mutex),
+};
+
+static int __init pci_ep_cfs_init(void)
+{
+	int ret;
+	struct config_group *root = &pci_ep_cfs_subsys.su_group;
+
+	config_group_init(root);
+
+	ret = configfs_register_subsystem(&pci_ep_cfs_subsys);
+	if (ret) {
+		pr_err("Error %d while registering subsystem %s\n",
+		       ret, root->cg_item.ci_namebuf);
+		goto err;
+	}
+
+	functions_group = configfs_register_default_group(root, "functions",
+							  &pci_functions_type);
+	if (IS_ERR(functions_group)) {
+		ret = PTR_ERR(functions_group);
+		pr_err("Error %d while registering functions group\n",
+		       ret);
+		goto err_functions_group;
+	}
+
+	controllers_group =
+		configfs_register_default_group(root, "controllers",
+						&pci_controllers_type);
+	if (IS_ERR(controllers_group)) {
+		ret = PTR_ERR(controllers_group);
+		pr_err("Error %d while registering controllers group\n",
+		       ret);
+		goto err_controllers_group;
+	}
+
+	return 0;
+
+err_controllers_group:
+	configfs_unregister_default_group(functions_group);
+
+err_functions_group:
+	configfs_unregister_subsystem(&pci_ep_cfs_subsys);
+
+err:
+	return ret;
+}
+module_init(pci_ep_cfs_init);
+
+static void __exit pci_ep_cfs_exit(void)
+{
+	configfs_unregister_default_group(controllers_group);
+	configfs_unregister_default_group(functions_group);
+	configfs_unregister_subsystem(&pci_ep_cfs_subsys);
+}
+module_exit(pci_ep_cfs_exit);
+
+MODULE_DESCRIPTION("PCI EP CONFIGFS");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
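The configfs "link" and "start" operations above boil down to a handful of EPC/EPF library calls. A hedged sketch of the same pairing done directly from kernel code (example_bind_and_start is illustrative; normal use goes through the configfs interface):

static int example_bind_and_start(struct pci_epf *epf, const char *epc_name)
{
	struct pci_epc *epc;
	int ret;

	epc = pci_epc_get(epc_name);	/* like creating the controllers/<epc> group */
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	ret = pci_epc_add_epf(epc, epf);	/* first half of pci_epc_epf_link() */
	if (ret)
		goto err_put;

	ret = pci_epf_bind(epf);		/* second half of pci_epc_epf_link() */
	if (ret)
		goto err_remove;

	return pci_epc_start(epc);		/* the "start" attribute */

err_remove:
	pci_epc_remove_epf(epc, epf);
err_put:
	pci_epc_put(epc);
	return ret;
}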

+ 580 - 0
drivers/pci/endpoint/pci-epc-core.c

@@ -0,0 +1,580 @@
+/**
+ * PCI Endpoint *Controller* (EPC) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct class *pci_epc_class;
+
+static void devm_pci_epc_release(struct device *dev, void *res)
+{
+	struct pci_epc *epc = *(struct pci_epc **)res;
+
+	pci_epc_destroy(epc);
+}
+
+static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
+{
+	struct pci_epc **epc = res;
+
+	return *epc == match_data;
+}
+
+/**
+ * pci_epc_put() - release the PCI endpoint controller
+ * @epc: epc returned by pci_epc_get()
+ *
+ * release the refcount the caller obtained by invoking pci_epc_get()
+ */
+void pci_epc_put(struct pci_epc *epc)
+{
+	if (!epc || IS_ERR(epc))
+		return;
+
+	module_put(epc->ops->owner);
+	put_device(&epc->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epc_put);
+
+/**
+ * pci_epc_get() - get the PCI endpoint controller
+ * @epc_name: device name of the endpoint controller
+ *
+ * Invoke to get struct pci_epc * corresponding to the device name of the
+ * endpoint controller
+ */
+struct pci_epc *pci_epc_get(const char *epc_name)
+{
+	int ret = -EINVAL;
+	struct pci_epc *epc;
+	struct device *dev;
+	struct class_dev_iter iter;
+
+	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
+	while ((dev = class_dev_iter_next(&iter))) {
+		if (strcmp(epc_name, dev_name(dev)))
+			continue;
+
+		epc = to_pci_epc(dev);
+		if (!try_module_get(epc->ops->owner)) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		class_dev_iter_exit(&iter);
+		get_device(&epc->dev);
+		return epc;
+	}
+
+err:
+	class_dev_iter_exit(&iter);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get);
+
+/**
+ * pci_epc_stop() - stop the PCI link
+ * @epc: the link of the EPC device that has to be stopped
+ *
+ * Invoke to stop the PCI link
+ */
+void pci_epc_stop(struct pci_epc *epc)
+{
+	unsigned long flags;
+
+	if (IS_ERR(epc) || !epc->ops->stop)
+		return;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	epc->ops->stop(epc);
+	spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_stop);
+
+/**
+ * pci_epc_start() - start the PCI link
+ * @epc: the link of *this* EPC device has to be started
+ *
+ * Invoke to start the PCI link
+ */
+int pci_epc_start(struct pci_epc *epc)
+{
+	int ret;
+	unsigned long flags;
+
+	if (IS_ERR(epc))
+		return -EINVAL;
+
+	if (!epc->ops->start)
+		return 0;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	ret = epc->ops->start(epc);
+	spin_unlock_irqrestore(&epc->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_start);
+
+/**
+ * pci_epc_raise_irq() - interrupt the host system
+ * @epc: the EPC device which has to interrupt the host
+ * @type: specify the type of interrupt; legacy or MSI
+ * @interrupt_num: the MSI interrupt number
+ *
+ * Invoke to raise an MSI or legacy interrupt
+ */
+int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+		      u8 interrupt_num)
+{
+	int ret;
+	unsigned long flags;
+
+	if (IS_ERR(epc))
+		return -EINVAL;
+
+	if (!epc->ops->raise_irq)
+		return 0;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	ret = epc->ops->raise_irq(epc, type, interrupt_num);
+	spin_unlock_irqrestore(&epc->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
+
+/**
+ * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
+ * @epc: the EPC device to which MSI interrupts were requested
+ *
+ * Invoke to get the number of MSI interrupts allocated by the RC
+ */
+int pci_epc_get_msi(struct pci_epc *epc)
+{
+	int interrupt;
+	unsigned long flags;
+
+	if (IS_ERR(epc))
+		return 0;
+
+	if (!epc->ops->get_msi)
+		return 0;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	interrupt = epc->ops->get_msi(epc);
+	spin_unlock_irqrestore(&epc->lock, flags);
+
+	if (interrupt < 0)
+		return 0;
+
+	interrupt = 1 << interrupt;
+
+	return interrupt;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_msi);
+
+/**
+ * pci_epc_set_msi() - set the number of MSI interrupt numbers required
+ * @epc: the EPC device on which MSI has to be configured
+ * @interrupts: number of MSI interrupts required by the EPF
+ *
+ * Invoke to set the required number of MSI interrupts.
+ */
+int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
+{
+	int ret;
+	u8 encode_int;
+	unsigned long flags;
+
+	if (IS_ERR(epc))
+		return -EINVAL;
+
+	if (!epc->ops->set_msi)
+		return 0;
+
+	encode_int = order_base_2(interrupts);
+
+	spin_lock_irqsave(&epc->lock, flags);
+	ret = epc->ops->set_msi(epc, encode_int);
+	spin_unlock_irqrestore(&epc->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_msi);
+
+/**
+ * pci_epc_unmap_addr() - unmap CPU address from PCI address
+ * @epc: the EPC device on which address is allocated
+ * @phys_addr: physical address of the local system
+ *
+ * Invoke to unmap the CPU address from PCI address.
+ */
+void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
+{
+	unsigned long flags;
+
+	if (IS_ERR(epc))
+		return;
+
+	if (!epc->ops->unmap_addr)
+		return;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	epc->ops->unmap_addr(epc, phys_addr);
+	spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
+
+/**
+ * pci_epc_map_addr() - map CPU address to PCI address
+ * @epc: the EPC device on which address is allocated
+ * @phys_addr: physical address of the local system
+ * @pci_addr: PCI address to which the physical address should be mapped
+ * @size: the size of the allocation
+ *
+ * Invoke to map CPU address with PCI address.
+ */
+int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+		     u64 pci_addr, size_t size)
+{
+	int ret;
+	unsigned long flags;
+
+	if (IS_ERR(epc))
+		return -EINVAL;
+
+	if (!epc->ops->map_addr)
+		return 0;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
+	spin_unlock_irqrestore(&epc->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_addr);
+
+/**
+ * pci_epc_clear_bar() - reset the BAR
+ * @epc: the EPC device for which the BAR has to be cleared
+ * @bar: the BAR number that has to be reset
+ *
+ * Invoke to reset the BAR of the endpoint device.
+ */
+void pci_epc_clear_bar(struct pci_epc *epc, int bar)
+{
+	unsigned long flags;
+
+	if (IS_ERR(epc))
+		return;
+
+	if (!epc->ops->clear_bar)
+		return;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	epc->ops->clear_bar(epc, bar);
+	spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+
+/**
+ * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
+ * @epc: the EPC device on which BAR has to be configured
+ * @bar: the BAR number that has to be configured
+ * @size: the size of the addr space
+ * @flags: specify memory allocation/io allocation/32bit address/64 bit address
+ *
+ * Invoke to configure the BAR of the endpoint device.
+ */
+int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+		    dma_addr_t bar_phys, size_t size, int flags)
+{
+	int ret;
+	unsigned long irq_flags;
+
+	if (IS_ERR(epc))
+		return -EINVAL;
+
+	if (!epc->ops->set_bar)
+		return 0;
+
+	spin_lock_irqsave(&epc->lock, irq_flags);
+	ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
+	spin_unlock_irqrestore(&epc->lock, irq_flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_bar);
+
+/**
+ * pci_epc_write_header() - write standard configuration header
+ * @epc: the EPC device to which the configuration header should be written
+ * @header: standard configuration header fields
+ *
+ * Invoke to write the configuration header to the endpoint controller. Every
+ * endpoint controller will have a dedicated location to which the standard
+ * configuration header would be written. The callback function should write
+ * the header fields to this dedicated location.
+ */
+int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
+{
+	int ret;
+	unsigned long flags;
+
+	if (IS_ERR(epc))
+		return -EINVAL;
+
+	if (!epc->ops->write_header)
+		return 0;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	ret = epc->ops->write_header(epc, header);
+	spin_unlock_irqrestore(&epc->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_write_header);
+
+/**
+ * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
+ * @epc: the EPC device to which the endpoint function should be added
+ * @epf: the endpoint function to be added
+ *
+ * A PCI endpoint device can have one or more functions. In the case of PCIe,
+ * the specification allows up to 8 PCIe endpoint functions. Invoke
+ * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
+ */
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
+{
+	unsigned long flags;
+
+	if (epf->epc)
+		return -EBUSY;
+
+	if (IS_ERR(epc))
+		return -EINVAL;
+
+	if (epf->func_no > epc->max_functions - 1)
+		return -EINVAL;
+
+	epf->epc = epc;
+	dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask);
+	epf->dev.dma_mask = epc->dev.dma_mask;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	list_add_tail(&epf->list, &epc->pci_epf);
+	spin_unlock_irqrestore(&epc->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_add_epf);
+
+/**
+ * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
+ * @epc: the EPC device from which the endpoint function should be removed
+ * @epf: the endpoint function to be removed
+ *
+ * Invoke to remove PCI endpoint function from the endpoint controller.
+ */
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
+{
+	unsigned long flags;
+
+	if (!epc || IS_ERR(epc))
+		return;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	list_del(&epf->list);
+	spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
+
+/**
+ * pci_epc_linkup() - Notify the EPF device that EPC device has established a
+ *		      connection with the Root Complex.
+ * @epc: the EPC device which has established link with the host
+ *
+ * Invoke to notify the EPF device that the EPC device has established a
+ * connection with the Root Complex.
+ */
+void pci_epc_linkup(struct pci_epc *epc)
+{
+	unsigned long flags;
+	struct pci_epf *epf;
+
+	if (!epc || IS_ERR(epc))
+		return;
+
+	spin_lock_irqsave(&epc->lock, flags);
+	list_for_each_entry(epf, &epc->pci_epf, list)
+		pci_epf_linkup(epf);
+	spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkup);
+
+/**
+ * pci_epc_destroy() - destroy the EPC device
+ * @epc: the EPC device that has to be destroyed
+ *
+ * Invoke to destroy the PCI EPC device.
+ */
+void pci_epc_destroy(struct pci_epc *epc)
+{
+	pci_ep_cfs_remove_epc_group(epc->group);
+	device_unregister(&epc->dev);
+	kfree(epc);
+}
+EXPORT_SYMBOL_GPL(pci_epc_destroy);
+
+/**
+ * devm_pci_epc_destroy() - destroy the EPC device
+ * @dev: device that wants to destroy the EPC
+ * @epc: the EPC device that has to be destroyed
+ *
+ * Invoke to destroy the devres associated with this
+ * pci_epc and destroy the EPC device.
+ */
+void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
+{
+	int r;
+
+	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
+			   epc);
+	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
+}
+EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
+
+/**
+ * __pci_epc_create() - create a new endpoint controller (EPC) device
+ * @dev: device that is creating the new EPC
+ * @ops: function pointers for performing EPC operations
+ * @owner: the owner of the module that creates the EPC device
+ *
+ * Invoke to create a new EPC device and add it to the pci_epc class.
+ */
+struct pci_epc *
+__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+		 struct module *owner)
+{
+	int ret;
+	struct pci_epc *epc;
+
+	if (WARN_ON(!dev)) {
+		ret = -EINVAL;
+		goto err_ret;
+	}
+
+	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
+	if (!epc) {
+		ret = -ENOMEM;
+		goto err_ret;
+	}
+
+	spin_lock_init(&epc->lock);
+	INIT_LIST_HEAD(&epc->pci_epf);
+
+	device_initialize(&epc->dev);
+	dma_set_coherent_mask(&epc->dev, dev->coherent_dma_mask);
+	epc->dev.class = pci_epc_class;
+	epc->dev.dma_mask = dev->dma_mask;
+	epc->ops = ops;
+
+	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
+	if (ret)
+		goto put_dev;
+
+	ret = device_add(&epc->dev);
+	if (ret)
+		goto put_dev;
+
+	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));
+
+	return epc;
+
+put_dev:
+	put_device(&epc->dev);
+	kfree(epc);
+
+err_ret:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(__pci_epc_create);
+
+/**
+ * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
+ * @dev: device that is creating the new EPC
+ * @ops: function pointers for performing EPC operations
+ * @owner: the owner of the module that creates the EPC device
+ *
+ * Invoke to create a new EPC device and add it to the pci_epc class.
+ * It also associates the EPC device with the creating device using devres;
+ * on driver detach, the release function is invoked on the devres data and
+ * the devres data is then freed.
+ */
+struct pci_epc *
+__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+		      struct module *owner)
+{
+	struct pci_epc **ptr, *epc;
+
+	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	epc = __pci_epc_create(dev, ops, owner);
+	if (!IS_ERR(epc)) {
+		*ptr = epc;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return epc;
+}
+EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
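
A hedged sketch of the controller side, not part of the patch: an endpoint controller driver would typically create the EPC from its probe routine and then initialize the address-space allocator (pci_epc_mem_init() from the companion file below); the example_* names, the empty ops table, and the 64 KB window at 0x20000000 are assumptions.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/pci-epc.h>

/*
 * A real driver fills this with hardware-specific callbacks; the library
 * above treats any missing callback as a no-op.
 */
static const struct pci_epc_ops example_epc_ops = {
	/* .write_header, .set_bar, .map_addr, ... */
};

static int example_epc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_epc *epc;
	int ret;

	epc = __devm_pci_epc_create(dev, &example_epc_ops, THIS_MODULE);
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	/* Assumed outbound window: 64 KB of CPU address space at 0x20000000 */
	ret = pci_epc_mem_init(epc, 0x20000000, SZ_64K);
	if (ret)
		return ret;

	/* On a link-up interrupt the driver would call pci_epc_linkup(epc). */
	return 0;
}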
+
+static int __init pci_epc_init(void)
+{
+	pci_epc_class = class_create(THIS_MODULE, "pci_epc");
+	if (IS_ERR(pci_epc_class)) {
+		pr_err("failed to create pci epc class --> %ld\n",
+		       PTR_ERR(pci_epc_class));
+		return PTR_ERR(pci_epc_class);
+	}
+
+	return 0;
+}
+module_init(pci_epc_init);
+
+static void __exit pci_epc_exit(void)
+{
+	class_destroy(pci_epc_class);
+}
+module_exit(pci_epc_exit);
+
+MODULE_DESCRIPTION("PCI EPC Library");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");

+ 143 - 0
drivers/pci/endpoint/pci-epc-mem.c

@@ -0,0 +1,143 @@
+/**
+ * PCI Endpoint *Controller* Address Space Management
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+
+/**
+ * pci_epc_mem_init() - initialize the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_init
+ * @phys_base: the physical address of the base
+ * @size: the size of the address space
+ *
+ * Invoke to initialize the pci_epc_mem structure used by the endpoint
+ * functions to allocate regions of the EPC address space that can be
+ * mapped to remote (RC) PCI addresses.
+ */
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size)
+{
+	int ret;
+	struct pci_epc_mem *mem;
+	unsigned long *bitmap;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!bitmap) {
+		ret = -ENOMEM;
+		goto err_mem;
+	}
+
+	mem->bitmap = bitmap;
+	mem->phys_base = phys_base;
+	mem->pages = pages;
+	mem->size = size;
+
+	epc->mem = mem;
+
+	return 0;
+
+err_mem:
+	kfree(mem);
+
+err:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_init);
+
+/**
+ * pci_epc_mem_exit() - cleanup the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_exit
+ *
+ * Invoke to cleanup the pci_epc_mem structure allocated in
+ * pci_epc_mem_init().
+ */
+void pci_epc_mem_exit(struct pci_epc *epc)
+{
+	struct pci_epc_mem *mem = epc->mem;
+
+	epc->mem = NULL;
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
+
+/**
+ * pci_epc_mem_alloc_addr() - allocate memory address from EPC addr space
+ * @epc: the EPC device on which memory has to be allocated
+ * @phys_addr: populate the allocated physical address here
+ * @size: the size of the address space that has to be allocated
+ *
+ * Invoke to allocate memory address from the EPC address space. This
+ * is usually done to map the remote RC address into the local system.
+ */
+void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+				     phys_addr_t *phys_addr, size_t size)
+{
+	int pageno;
+	void __iomem *virt_addr;
+	struct pci_epc_mem *mem = epc->mem;
+	int order = get_order(size);
+
+	pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
+	if (pageno < 0)
+		return NULL;
+
+	*phys_addr = mem->phys_base + (pageno << PAGE_SHIFT);
+	virt_addr = ioremap(*phys_addr, size);
+	if (!virt_addr)
+		bitmap_release_region(mem->bitmap, pageno, order);
+
+	return virt_addr;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
+
+/**
+ * pci_epc_mem_free_addr() - free the allocated memory address
+ * @epc: the EPC device on which memory was allocated
+ * @phys_addr: the allocated physical address
+ * @virt_addr: virtual address of the allocated mem space
+ * @size: the size of the allocated address space
+ *
+ * Invoke to free the memory allocated using pci_epc_mem_alloc_addr.
+ */
+void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+			   void __iomem *virt_addr, size_t size)
+{
+	int pageno;
+	int order = get_order(size);
+	struct pci_epc_mem *mem = epc->mem;
+
+	iounmap(virt_addr);
+	pageno = (phys_addr - mem->phys_base) >> PAGE_SHIFT;
+	bitmap_release_region(mem->bitmap, pageno, order);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
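
To make the intended flow concrete, a sketch (not from this patch) of how a function driver could pair this allocator with pci_epc_map_addr() to reach host memory; the example_* name is assumed and the pci_addr value is taken to have been supplied by the host.

#include <linux/io.h>
#include <linux/pci-epc.h>

/* Illustrative only: copy a buffer to a host (RC) physical address. */
static int example_write_to_host(struct pci_epc *epc, u64 pci_addr,
				 const void *buf, size_t len)
{
	phys_addr_t phys;
	void __iomem *win;
	int ret;

	win = pci_epc_mem_alloc_addr(epc, &phys, len);
	if (!win)
		return -ENOMEM;

	/* Program the outbound window so 'phys' hits 'pci_addr' on the bus */
	ret = pci_epc_map_addr(epc, phys, pci_addr, len);
	if (ret)
		goto free;

	memcpy_toio(win, buf, len);

	pci_epc_unmap_addr(epc, phys);	/* counterpart provided by this library */
free:
	pci_epc_mem_free_addr(epc, phys, win, len);
	return ret;
}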
+
+MODULE_DESCRIPTION("PCI EPC Address Space Management");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");

+ 359 - 0
drivers/pci/endpoint/pci-epf-core.c

@@ -0,0 +1,359 @@
+/**
+ * PCI Endpoint *Function* (EPF) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct bus_type pci_epf_bus_type;
+static struct device_type pci_epf_type;
+
+/**
+ * pci_epf_linkup() - Notify the function driver that EPC device has
+ *		      established a connection with the Root Complex.
+ * @epf: the EPF device bound to the EPC device which has established
+ *	 the connection with the host
+ *
+ * Invoke to notify the function driver that EPC device has established
+ * a connection with the Root Complex.
+ */
+void pci_epf_linkup(struct pci_epf *epf)
+{
+	if (!epf->driver) {
+		dev_WARN(&epf->dev, "epf device not bound to driver\n");
+		return;
+	}
+
+	epf->driver->ops->linkup(epf);
+}
+EXPORT_SYMBOL_GPL(pci_epf_linkup);
+
+/**
+ * pci_epf_unbind() - Notify the function driver that the binding between the
+ *		      EPF device and EPC device has been lost
+ * @epf: the EPF device which has lost the binding with the EPC device
+ *
+ * Invoke to notify the function driver that the binding between the EPF device
+ * and EPC device has been lost.
+ */
+void pci_epf_unbind(struct pci_epf *epf)
+{
+	if (!epf->driver) {
+		dev_WARN(&epf->dev, "epf device not bound to driver\n");
+		return;
+	}
+
+	epf->driver->ops->unbind(epf);
+	module_put(epf->driver->owner);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unbind);
+
+/**
+ * pci_epf_bind() - Notify the function driver that the EPF device has been
+ *		    bound to an EPC device
+ * @epf: the EPF device which has been bound to the EPC device
+ *
+ * Invoke to notify the function driver that it has been bound to an EPC device.
+ */
+int pci_epf_bind(struct pci_epf *epf)
+{
+	if (!epf->driver) {
+		dev_WARN(&epf->dev, "epf device not bound to driver\n");
+		return -EINVAL;
+	}
+
+	if (!try_module_get(epf->driver->owner))
+		return -EAGAIN;
+
+	return epf->driver->ops->bind(epf);
+}
+EXPORT_SYMBOL_GPL(pci_epf_bind);
+
+/**
+ * pci_epf_free_space() - free the allocated PCI EPF register space
+ * @epf: the EPF device whose register space has to be freed
+ * @addr: the virtual address of the PCI EPF register space
+ * @bar: the BAR number corresponding to the register space
+ *
+ * Invoke to free the allocated PCI EPF register space.
+ */
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
+{
+	struct device *dev = &epf->dev;
+
+	if (!addr)
+		return;
+
+	dma_free_coherent(dev, epf->bar[bar].size, addr,
+			  epf->bar[bar].phys_addr);
+
+	epf->bar[bar].phys_addr = 0;
+	epf->bar[bar].size = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_free_space);
+
+/**
+ * pci_epf_alloc_space() - allocate memory for the PCI EPF register space
+ * @epf: the EPF device for which the register space has to be allocated
+ * @size: the size of the memory that has to be allocated
+ * @bar: the BAR number corresponding to the allocated register space
+ *
+ * Invoke to allocate memory for the PCI EPF register space.
+ */
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
+{
+	void *space;
+	struct device *dev = &epf->dev;
+	dma_addr_t phys_addr;
+
+	if (size < 128)
+		size = 128;
+	size = roundup_pow_of_two(size);
+
+	space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+	if (!space) {
+		dev_err(dev, "failed to allocate mem space\n");
+		return NULL;
+	}
+
+	epf->bar[bar].phys_addr = phys_addr;
+	epf->bar[bar].size = size;
+
+	return space;
+}
+EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+
+/**
+ * pci_epf_unregister_driver() - unregister the PCI EPF driver
+ * @driver: the PCI EPF driver that has to be unregistered
+ *
+ * Invoke to unregister the PCI EPF driver.
+ */
+void pci_epf_unregister_driver(struct pci_epf_driver *driver)
+{
+	pci_ep_cfs_remove_epf_group(driver->group);
+	driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+
+/**
+ * __pci_epf_register_driver() - register a new PCI EPF driver
+ * @driver: structure representing PCI EPF driver
+ * @owner: the owner of the module that registers the PCI EPF driver
+ *
+ * Invoke to register a new PCI EPF driver.
+ */
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+			      struct module *owner)
+{
+	int ret;
+
+	if (!driver->ops)
+		return -EINVAL;
+
+	if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup)
+		return -EINVAL;
+
+	driver->driver.bus = &pci_epf_bus_type;
+	driver->driver.owner = owner;
+
+	ret = driver_register(&driver->driver);
+	if (ret)
+		return ret;
+
+	driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__pci_epf_register_driver);
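
A compressed sketch of a function driver built on these hooks, loosely modelled on the test endpoint function added elsewhere in this series; it is illustrative only, and the example_* names, the 512-byte BAR 0, and the file-scope pointer are assumptions.

#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static void *example_reg;	/* virtual address of the BAR 0 backing memory */

static int example_bind(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;

	example_reg = pci_epf_alloc_space(epf, 512, BAR_0);
	if (!example_reg)
		return -ENOMEM;

	return pci_epc_set_bar(epc, BAR_0, epf->bar[BAR_0].phys_addr,
			       epf->bar[BAR_0].size,
			       PCI_BASE_ADDRESS_SPACE_MEMORY);
}

static void example_unbind(struct pci_epf *epf)
{
	pci_epc_clear_bar(epf->epc, BAR_0);
	pci_epf_free_space(epf, example_reg, BAR_0);
}

static void example_linkup(struct pci_epf *epf)
{
	/* Link to the Root Complex is up; the function may start working. */
}

static int example_probe(struct pci_epf *epf)
{
	return 0;
}

static int example_remove(struct pci_epf *epf)
{
	return 0;	/* ->remove is called unconditionally, so provide one */
}

static struct pci_epf_ops example_ops = {
	.bind	= example_bind,
	.unbind	= example_unbind,
	.linkup	= example_linkup,
};

static struct pci_epf_driver example_driver = {
	.driver.name	= "pci_epf_example",
	.probe		= example_probe,
	.remove		= example_remove,
	.ops		= &example_ops,
	.owner		= THIS_MODULE,
};

static int __init example_init(void)
{
	return __pci_epf_register_driver(&example_driver, THIS_MODULE);
}
module_init(example_init);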
+
+/**
+ * pci_epf_destroy() - destroy the created PCI EPF device
+ * @epf: the PCI EPF device that has to be destroyed.
+ *
+ * Invoke to destroy the PCI EPF device created by invoking pci_epf_create().
+ */
+void pci_epf_destroy(struct pci_epf *epf)
+{
+	device_unregister(&epf->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epf_destroy);
+
+/**
+ * pci_epf_create() - create a new PCI EPF device
+ * @name: the name of the PCI EPF device. This name will be used to bind the
+ *	  EPF device to an EPF driver
+ *
+ * Invoke to create a new PCI EPF device by providing the name of the function
+ * device.
+ */
+struct pci_epf *pci_epf_create(const char *name)
+{
+	int ret;
+	struct pci_epf *epf;
+	struct device *dev;
+	char *func_name;
+	char *buf;
+
+	epf = kzalloc(sizeof(*epf), GFP_KERNEL);
+	if (!epf) {
+		ret = -ENOMEM;
+		goto err_ret;
+	}
+
+	buf = kstrdup(name, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto free_epf;
+	}
+
+	func_name = buf;
+	buf = strchrnul(buf, '.');
+	*buf = '\0';
+
+	epf->name = kstrdup(func_name, GFP_KERNEL);
+	if (!epf->name) {
+		ret = -ENOMEM;
+		goto free_func_name;
+	}
+
+	dev = &epf->dev;
+	device_initialize(dev);
+	dev->bus = &pci_epf_bus_type;
+	dev->type = &pci_epf_type;
+
+	ret = dev_set_name(dev, "%s", name);
+	if (ret)
+		goto put_dev;
+
+	ret = device_add(dev);
+	if (ret)
+		goto put_dev;
+
+	kfree(func_name);
+	return epf;
+
+put_dev:
+	put_device(dev);
+	kfree(epf->name);
+
+free_func_name:
+	kfree(func_name);
+
+free_epf:
+	kfree(epf);
+
+err_ret:
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pci_epf_create);
+
+static void pci_epf_dev_release(struct device *dev)
+{
+	struct pci_epf *epf = to_pci_epf(dev);
+
+	kfree(epf->name);
+	kfree(epf);
+}
+
+static struct device_type pci_epf_type = {
+	.release	= pci_epf_dev_release,
+};
+
+static int
+pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
+{
+	while (id->name[0]) {
+		if (strcmp(epf->name, id->name) == 0)
+			return true;
+		id++;
+	}
+
+	return false;
+}
+
+static int pci_epf_device_match(struct device *dev, struct device_driver *drv)
+{
+	struct pci_epf *epf = to_pci_epf(dev);
+	struct pci_epf_driver *driver = to_pci_epf_driver(drv);
+
+	if (driver->id_table)
+		return pci_epf_match_id(driver->id_table, epf);
+
+	return !strcmp(epf->name, drv->name);
+}
+
+static int pci_epf_device_probe(struct device *dev)
+{
+	struct pci_epf *epf = to_pci_epf(dev);
+	struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+	if (!driver->probe)
+		return -ENODEV;
+
+	epf->driver = driver;
+
+	return driver->probe(epf);
+}
+
+static int pci_epf_device_remove(struct device *dev)
+{
+	int ret;
+	struct pci_epf *epf = to_pci_epf(dev);
+	struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+	ret = driver->remove(epf);
+	epf->driver = NULL;
+
+	return ret;
+}
+
+static struct bus_type pci_epf_bus_type = {
+	.name		= "pci-epf",
+	.match		= pci_epf_device_match,
+	.probe		= pci_epf_device_probe,
+	.remove		= pci_epf_device_remove,
+};
+
+static int __init pci_epf_init(void)
+{
+	int ret;
+
+	ret = bus_register(&pci_epf_bus_type);
+	if (ret) {
+		pr_err("failed to register pci epf bus --> %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(pci_epf_init);
+
+static void __exit pci_epf_exit(void)
+{
+	bus_unregister(&pci_epf_bus_type);
+}
+module_exit(pci_epf_exit);
+
+MODULE_DESCRIPTION("PCI EPF Library");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");

+ 8 - 2
drivers/pci/host/Kconfig

@@ -27,6 +27,12 @@ config PCIE_XILINX_NWL
 	 or End Point. The current option selection will only
 	 support root port enabling.
 
+config PCI_FTPCI100
+	bool "Faraday Technology FTPCI100 PCI controller"
+	depends on OF
+	depends on ARM
+	default ARCH_GEMINI
+
 config PCI_TEGRA
 	bool "NVIDIA Tegra PCIe controller"
 	depends on ARCH_TEGRA
@@ -95,6 +101,7 @@ config PCI_VERSATILE
 
 config PCIE_IPROC
 	tristate
+	select PCI_DOMAINS
 	help
 	  This enables the iProc PCIe core controller support for Broadcom's
 	  iProc family of SoCs. An appropriate bus interface driver needs
@@ -115,7 +122,6 @@ config PCIE_IPROC_BCMA
 	depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
 	select PCIE_IPROC
 	select BCMA
-	select PCI_DOMAINS
 	default ARCH_BCM_5301X
 	help
 	  Say Y here if you want to use the Broadcom iProc PCIe controller
@@ -164,7 +170,7 @@ config PCI_HOST_THUNDER_ECAM
 	  Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
 
 config PCIE_ROCKCHIP
-	bool "Rockchip PCIe controller"
+	tristate "Rockchip PCIe controller"
 	depends on ARCH_ROCKCHIP || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN

+ 1 - 0
drivers/pci/host/Makefile

@@ -1,3 +1,4 @@
+obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
 obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o

+ 67 - 106
drivers/pci/host/pci-aardvark.c

@@ -200,10 +200,12 @@ struct advk_pcie {
 	struct list_head resources;
 	struct irq_domain *irq_domain;
 	struct irq_chip irq_chip;
-	struct msi_controller msi;
 	struct irq_domain *msi_domain;
+	struct irq_domain *msi_inner_domain;
+	struct irq_chip msi_bottom_irq_chip;
 	struct irq_chip msi_irq_chip;
-	DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM);
+	struct msi_domain_info msi_domain_info;
+	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
 	struct mutex msi_used_lock;
 	u16 msi_msg;
 	int root_bus_nr;
@@ -545,94 +547,64 @@ static struct pci_ops advk_pcie_ops = {
 	.write = advk_pcie_wr_conf,
 };
 
-static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
+static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
+					 struct msi_msg *msg)
 {
-	int hwirq;
+	struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
+	phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
 
-	mutex_lock(&pcie->msi_used_lock);
-	hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM);
-	if (hwirq >= MSI_IRQ_NUM)
-		hwirq = -ENOSPC;
-	else
-		set_bit(hwirq, pcie->msi_irq_in_use);
-	mutex_unlock(&pcie->msi_used_lock);
-
-	return hwirq;
+	msg->address_lo = lower_32_bits(msi_msg);
+	msg->address_hi = upper_32_bits(msi_msg);
+	msg->data = data->irq;
 }
 
-static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
+static int advk_msi_set_affinity(struct irq_data *irq_data,
+				 const struct cpumask *mask, bool force)
 {
-	struct device *dev = &pcie->pdev->dev;
-
-	mutex_lock(&pcie->msi_used_lock);
-	if (!test_bit(hwirq, pcie->msi_irq_in_use))
-		dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
-	else
-		clear_bit(hwirq, pcie->msi_irq_in_use);
-	mutex_unlock(&pcie->msi_used_lock);
+	return -EINVAL;
 }
 
-static int advk_pcie_setup_msi_irq(struct msi_controller *chip,
-				   struct pci_dev *pdev,
-				   struct msi_desc *desc)
+static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
+				     unsigned int virq,
+				     unsigned int nr_irqs, void *args)
 {
-	struct advk_pcie *pcie = pdev->bus->sysdata;
-	struct msi_msg msg;
-	int virq, hwirq;
-	phys_addr_t msi_msg_phys;
-
-	/* We support MSI, but not MSI-X */
-	if (desc->msi_attrib.is_msix)
-		return -EINVAL;
-
-	hwirq = advk_pcie_alloc_msi(pcie);
-	if (hwirq < 0)
-		return hwirq;
+	struct advk_pcie *pcie = domain->host_data;
+	int hwirq, i;
 
-	virq = irq_create_mapping(pcie->msi_domain, hwirq);
-	if (!virq) {
-		advk_pcie_free_msi(pcie, hwirq);
-		return -EINVAL;
+	mutex_lock(&pcie->msi_used_lock);
+	hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
+					   0, nr_irqs, 0);
+	if (hwirq >= MSI_IRQ_NUM) {
+		mutex_unlock(&pcie->msi_used_lock);
+		return -ENOSPC;
 	}
 
-	irq_set_msi_desc(virq, desc);
-
-	msi_msg_phys = virt_to_phys(&pcie->msi_msg);
-
-	msg.address_lo = lower_32_bits(msi_msg_phys);
-	msg.address_hi = upper_32_bits(msi_msg_phys);
-	msg.data = virq;
-
-	pci_write_msi_msg(virq, &msg);
-
-	return 0;
-}
+	bitmap_set(pcie->msi_used, hwirq, nr_irqs);
+	mutex_unlock(&pcie->msi_used_lock);
 
-static void advk_pcie_teardown_msi_irq(struct msi_controller *chip,
-				       unsigned int irq)
-{
-	struct irq_data *d = irq_get_irq_data(irq);
-	struct msi_desc *msi = irq_data_get_msi_desc(d);
-	struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi);
-	unsigned long hwirq = d->hwirq;
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_info(domain, virq + i, hwirq + i,
+				    &pcie->msi_bottom_irq_chip,
+				    domain->host_data, handle_simple_irq,
+				    NULL, NULL);
 
-	irq_dispose_mapping(irq);
-	advk_pcie_free_msi(pcie, hwirq);
+	return hwirq;
 }
 
-static int advk_pcie_msi_map(struct irq_domain *domain,
-			     unsigned int virq, irq_hw_number_t hw)
+static void advk_msi_irq_domain_free(struct irq_domain *domain,
+				     unsigned int virq, unsigned int nr_irqs)
 {
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 	struct advk_pcie *pcie = domain->host_data;
 
-	irq_set_chip_and_handler(virq, &pcie->msi_irq_chip,
-				 handle_simple_irq);
-
-	return 0;
+	mutex_lock(&pcie->msi_used_lock);
+	bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
+	mutex_unlock(&pcie->msi_used_lock);
 }
 
-static const struct irq_domain_ops advk_pcie_msi_irq_ops = {
-	.map = advk_pcie_msi_map,
+static const struct irq_domain_ops advk_msi_domain_ops = {
+	.alloc = advk_msi_irq_domain_alloc,
+	.free = advk_msi_irq_domain_free,
 };
 
 static void advk_pcie_irq_mask(struct irq_data *d)
@@ -680,30 +652,25 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
 {
 	struct device *dev = &pcie->pdev->dev;
 	struct device_node *node = dev->of_node;
-	struct irq_chip *msi_irq_chip;
-	struct msi_controller *msi;
+	struct irq_chip *bottom_ic, *msi_ic;
+	struct msi_domain_info *msi_di;
 	phys_addr_t msi_msg_phys;
-	int ret;
 
-	msi_irq_chip = &pcie->msi_irq_chip;
+	mutex_init(&pcie->msi_used_lock);
 
-	msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi",
-					    dev_name(dev));
-	if (!msi_irq_chip->name)
-		return -ENOMEM;
+	bottom_ic = &pcie->msi_bottom_irq_chip;
 
-	msi_irq_chip->irq_enable = pci_msi_unmask_irq;
-	msi_irq_chip->irq_disable = pci_msi_mask_irq;
-	msi_irq_chip->irq_mask = pci_msi_mask_irq;
-	msi_irq_chip->irq_unmask = pci_msi_unmask_irq;
+	bottom_ic->name = "MSI";
+	bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
+	bottom_ic->irq_set_affinity = advk_msi_set_affinity;
 
-	msi = &pcie->msi;
+	msi_ic = &pcie->msi_irq_chip;
+	msi_ic->name = "advk-MSI";
 
-	msi->setup_irq = advk_pcie_setup_msi_irq;
-	msi->teardown_irq = advk_pcie_teardown_msi_irq;
-	msi->of_node = node;
-
-	mutex_init(&pcie->msi_used_lock);
+	msi_di = &pcie->msi_domain_info;
+	msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		MSI_FLAG_MULTI_PCI_MSI;
+	msi_di->chip = msi_ic;
 
 	msi_msg_phys = virt_to_phys(&pcie->msi_msg);
 
@@ -712,16 +679,18 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
 	advk_writel(pcie, upper_32_bits(msi_msg_phys),
 		    PCIE_MSI_ADDR_HIGH_REG);
 
-	pcie->msi_domain =
+	pcie->msi_inner_domain =
 		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
-				      &advk_pcie_msi_irq_ops, pcie);
-	if (!pcie->msi_domain)
+				      &advk_msi_domain_ops, pcie);
+	if (!pcie->msi_inner_domain)
 		return -ENOMEM;
 
-	ret = of_pci_msi_chip_add(msi);
-	if (ret < 0) {
-		irq_domain_remove(pcie->msi_domain);
-		return ret;
+	pcie->msi_domain =
+		pci_msi_create_irq_domain(of_node_to_fwnode(node),
+					  msi_di, pcie->msi_inner_domain);
+	if (!pcie->msi_domain) {
+		irq_domain_remove(pcie->msi_inner_domain);
+		return -ENOMEM;
 	}
 
 	return 0;
@@ -729,8 +698,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
 
 static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
 {
-	of_pci_msi_chip_remove(&pcie->msi);
 	irq_domain_remove(pcie->msi_domain);
+	irq_domain_remove(pcie->msi_inner_domain);
 }
 
 static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
@@ -917,8 +886,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
 	struct advk_pcie *pcie;
 	struct resource *res;
 	struct pci_bus *bus, *child;
-	struct msi_controller *msi;
-	struct device_node *msi_node;
 	int ret, irq;
 
 	pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
@@ -962,14 +929,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
-	if (msi_node)
-		msi = of_pci_find_msi_chip_by_node(msi_node);
-	else
-		msi = NULL;
-
-	bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
-				    pcie, &pcie->resources, &pcie->msi);
+	bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops,
+				pcie, &pcie->resources);
 	if (!bus) {
 		advk_pcie_remove_msi_irq_domain(pcie);
 		advk_pcie_remove_irq_domain(pcie);

+ 563 - 0
drivers/pci/host/pci-ftpci100.c

@@ -0,0 +1,563 @@
+/*
+ * Support for Faraday Technology FTPCI100 PCI Controller
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the out-of-tree OpenWRT patch for Cortina Gemini:
+ * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
+ * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Based on SL2312 PCI controller code
+ * Storlink (C) 2003
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+
+/*
+ * Special configuration registers directly in the first few words
+ * in I/O space.
+ */
+#define PCI_IOSIZE	0x00
+#define PCI_PROT	0x04 /* AHB protection */
+#define PCI_CTRL	0x08 /* PCI control signal */
+#define PCI_SOFTRST	0x10 /* Soft reset counter and response error enable */
+#define PCI_CONFIG	0x28 /* PCI configuration command register */
+#define PCI_DATA	0x2C
+
+#define FARADAY_PCI_PMC			0x40 /* Power management control */
+#define FARADAY_PCI_PMCSR		0x44 /* Power management status */
+#define FARADAY_PCI_CTRL1		0x48 /* Control register 1 */
+#define FARADAY_PCI_CTRL2		0x4C /* Control register 2 */
+#define FARADAY_PCI_MEM1_BASE_SIZE	0x50 /* Memory base and size #1 */
+#define FARADAY_PCI_MEM2_BASE_SIZE	0x54 /* Memory base and size #2 */
+#define FARADAY_PCI_MEM3_BASE_SIZE	0x58 /* Memory base and size #3 */
+
+/* Bits 31..28 gives INTD..INTA status */
+#define PCI_CTRL2_INTSTS_SHIFT		28
+#define PCI_CTRL2_INTMASK_CMDERR	BIT(27)
+#define PCI_CTRL2_INTMASK_PARERR	BIT(26)
+/* Bits 25..22 masks INTD..INTA */
+#define PCI_CTRL2_INTMASK_SHIFT		22
+#define PCI_CTRL2_INTMASK_MABRT_RX	BIT(21)
+#define PCI_CTRL2_INTMASK_TABRT_RX	BIT(20)
+#define PCI_CTRL2_INTMASK_TABRT_TX	BIT(19)
+#define PCI_CTRL2_INTMASK_RETRY4	BIT(18)
+#define PCI_CTRL2_INTMASK_SERR_RX	BIT(17)
+#define PCI_CTRL2_INTMASK_PERR_RX	BIT(16)
+/* Bit 15 reserved */
+#define PCI_CTRL2_MSTPRI_REQ6		BIT(14)
+#define PCI_CTRL2_MSTPRI_REQ5		BIT(13)
+#define PCI_CTRL2_MSTPRI_REQ4		BIT(12)
+#define PCI_CTRL2_MSTPRI_REQ3		BIT(11)
+#define PCI_CTRL2_MSTPRI_REQ2		BIT(10)
+#define PCI_CTRL2_MSTPRI_REQ1		BIT(9)
+#define PCI_CTRL2_MSTPRI_REQ0		BIT(8)
+/* Bits 7..4 reserved */
+/* Bits 3..0 TRDYW */
+
+/*
+ * Memory configs:
+ * Bit 31..20 defines the PCI side memory base
+ * Bit 19..16 (4 bits) defines the size per below
+ */
+#define FARADAY_PCI_MEMBASE_MASK	0xfff00000
+#define FARADAY_PCI_MEMSIZE_1MB		0x0
+#define FARADAY_PCI_MEMSIZE_2MB		0x1
+#define FARADAY_PCI_MEMSIZE_4MB		0x2
+#define FARADAY_PCI_MEMSIZE_8MB		0x3
+#define FARADAY_PCI_MEMSIZE_16MB	0x4
+#define FARADAY_PCI_MEMSIZE_32MB	0x5
+#define FARADAY_PCI_MEMSIZE_64MB	0x6
+#define FARADAY_PCI_MEMSIZE_128MB	0x7
+#define FARADAY_PCI_MEMSIZE_256MB	0x8
+#define FARADAY_PCI_MEMSIZE_512MB	0x9
+#define FARADAY_PCI_MEMSIZE_1GB		0xa
+#define FARADAY_PCI_MEMSIZE_2GB		0xb
+#define FARADAY_PCI_MEMSIZE_SHIFT	16
+
+/*
+ * The DMA base is set to 0x0 for all memory segments, it reflects the
+ * fact that the memory of the host system starts at 0x0.
+ */
+#define FARADAY_PCI_DMA_MEM1_BASE	0x00000000
+#define FARADAY_PCI_DMA_MEM2_BASE	0x00000000
+#define FARADAY_PCI_DMA_MEM3_BASE	0x00000000
+
+/* Defines for PCI configuration command register */
+#define PCI_CONF_ENABLE		BIT(31)
+#define PCI_CONF_WHERE(r)	((r) & 0xFC)
+#define PCI_CONF_BUS(b)		(((b) & 0xFF) << 16)
+#define PCI_CONF_DEVICE(d)	(((d) & 0x1F) << 11)
+#define PCI_CONF_FUNCTION(f)	(((f) & 0x07) << 8)
+
+/**
+ * struct faraday_pci_variant - encodes IP block differences
+ * @cascaded_irq: this host has cascaded IRQs from an interrupt controller
+ *	embedded in the host bridge.
+ */
+struct faraday_pci_variant {
+	bool cascaded_irq;
+};
+
+struct faraday_pci {
+	struct device *dev;
+	void __iomem *base;
+	struct irq_domain *irqdomain;
+	struct pci_bus *bus;
+};
+
+static int faraday_res_to_memcfg(resource_size_t mem_base,
+				 resource_size_t mem_size, u32 *val)
+{
+	u32 outval;
+
+	switch (mem_size) {
+	case SZ_1M:
+		outval = FARADAY_PCI_MEMSIZE_1MB;
+		break;
+	case SZ_2M:
+		outval = FARADAY_PCI_MEMSIZE_2MB;
+		break;
+	case SZ_4M:
+		outval = FARADAY_PCI_MEMSIZE_4MB;
+		break;
+	case SZ_8M:
+		outval = FARADAY_PCI_MEMSIZE_8MB;
+		break;
+	case SZ_16M:
+		outval = FARADAY_PCI_MEMSIZE_16MB;
+		break;
+	case SZ_32M:
+		outval = FARADAY_PCI_MEMSIZE_32MB;
+		break;
+	case SZ_64M:
+		outval = FARADAY_PCI_MEMSIZE_64MB;
+		break;
+	case SZ_128M:
+		outval = FARADAY_PCI_MEMSIZE_128MB;
+		break;
+	case SZ_256M:
+		outval = FARADAY_PCI_MEMSIZE_256MB;
+		break;
+	case SZ_512M:
+		outval = FARADAY_PCI_MEMSIZE_512MB;
+		break;
+	case SZ_1G:
+		outval = FARADAY_PCI_MEMSIZE_1GB;
+		break;
+	case SZ_2G:
+		outval = FARADAY_PCI_MEMSIZE_2GB;
+		break;
+	default:
+		return -EINVAL;
+	}
+	outval <<= FARADAY_PCI_MEMSIZE_SHIFT;
+
+	/* This is probably not good */
+	if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK))
+		pr_warn("truncated PCI memory base\n");
+	/* Translate to bridge side address space */
+	outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK);
+	pr_debug("Translated pci base @%pap, size %pap to config %08x\n",
+		 &mem_base, &mem_size, outval);
+
+	*val = outval;
+	return 0;
+}
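+
+As a worked example of the encoding described above (editorial note, not part of the patch): a 256 MB window based at bus address 0x50000000 selects FARADAY_PCI_MEMSIZE_256MB (0x8) in bits 19..16, so faraday_res_to_memcfg() returns 0x50000000 | (0x8 << 16) = 0x50080000 in *val.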
+
+static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
+				   int config, int size, u32 *value)
+{
+	struct faraday_pci *p = bus->sysdata;
+
+	writel(PCI_CONF_BUS(bus->number) |
+			PCI_CONF_DEVICE(PCI_SLOT(fn)) |
+			PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+			PCI_CONF_WHERE(config) |
+			PCI_CONF_ENABLE,
+			p->base + PCI_CONFIG);
+
+	*value = readl(p->base + PCI_DATA);
+
+	if (size == 1)
+		*value = (*value >> (8 * (config & 3))) & 0xFF;
+	else if (size == 2)
+		*value = (*value >> (8 * (config & 3))) & 0xFFFF;
+
+	dev_dbg(&bus->dev,
+		"[read]  slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
+				    int config, int size, u32 value)
+{
+	struct faraday_pci *p = bus->sysdata;
+	int ret = PCIBIOS_SUCCESSFUL;
+
+	dev_dbg(&bus->dev,
+		"[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+
+	writel(PCI_CONF_BUS(bus->number) |
+			PCI_CONF_DEVICE(PCI_SLOT(fn)) |
+			PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+			PCI_CONF_WHERE(config) |
+			PCI_CONF_ENABLE,
+			p->base + PCI_CONFIG);
+
+	switch (size) {
+	case 4:
+		writel(value, p->base + PCI_DATA);
+		break;
+	case 2:
+		writew(value, p->base + PCI_DATA + (config & 3));
+		break;
+	case 1:
+		writeb(value, p->base + PCI_DATA + (config & 3));
+		break;
+	default:
+		ret = PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	return ret;
+}
+
+static struct pci_ops faraday_pci_ops = {
+	.read	= faraday_pci_read_config,
+	.write	= faraday_pci_write_config,
+};
+
+static void faraday_pci_ack_irq(struct irq_data *d)
+{
+	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+	unsigned int reg;
+
+	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+	reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT);
+	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_mask_irq(struct irq_data *d)
+{
+	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+	unsigned int reg;
+
+	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT)
+		 | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT));
+	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_unmask_irq(struct irq_data *d)
+{
+	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+	unsigned int reg;
+
+	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+	reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT);
+	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_irq_handler(struct irq_desc *desc)
+{
+	struct faraday_pci *p = irq_desc_get_handler_data(desc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+	unsigned int irq_stat, reg, i;
+
+	faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT;
+
+	chained_irq_enter(irqchip, desc);
+
+	for (i = 0; i < 4; i++) {
+		if ((irq_stat & BIT(i)) == 0)
+			continue;
+		generic_handle_irq(irq_find_mapping(p->irqdomain, i));
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+
+static struct irq_chip faraday_pci_irq_chip = {
+	.name = "PCI",
+	.irq_ack = faraday_pci_ack_irq,
+	.irq_mask = faraday_pci_mask_irq,
+	.irq_unmask = faraday_pci_unmask_irq,
+};
+
+static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq,
+			       irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops faraday_pci_irqdomain_ops = {
+	.map = faraday_pci_irq_map,
+};
+
+static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
+{
+	struct device_node *intc = of_get_next_child(p->dev->of_node, NULL);
+	int irq;
+	int i;
+
+	if (!intc) {
+		dev_err(p->dev, "missing child interrupt-controller node\n");
+		return -EINVAL;
+	}
+
+	/* All PCI IRQs cascade off this one */
+	irq = of_irq_get(intc, 0);
+	if (!irq) {
+		dev_err(p->dev, "failed to get parent IRQ\n");
+		return -EINVAL;
+	}
+
+	p->irqdomain = irq_domain_add_linear(intc, 4,
+					     &faraday_pci_irqdomain_ops, p);
+	if (!p->irqdomain) {
+		dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
+		return -EINVAL;
+	}
+
+	irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p);
+
+	for (i = 0; i < 4; i++)
+		irq_create_mapping(p->irqdomain, i);
+
+	return 0;
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+				     struct device_node *node)
+{
+	const int na = 3, ns = 2;
+	int rlen;
+
+	parser->node = node;
+	parser->pna = of_n_addr_cells(node);
+	parser->np = parser->pna + na + ns;
+
+	parser->range = of_get_property(node, "dma-ranges", &rlen);
+	if (!parser->range)
+		return -ENOENT;
+	parser->end = parser->range + rlen / sizeof(__be32);
+
+	return 0;
+}
+
+static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
+					    struct device_node *np)
+{
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+	struct device *dev = p->dev;
+	u32 confreg[3] = {
+		FARADAY_PCI_MEM1_BASE_SIZE,
+		FARADAY_PCI_MEM2_BASE_SIZE,
+		FARADAY_PCI_MEM3_BASE_SIZE,
+	};
+	int i = 0;
+	u32 val;
+
+	if (pci_dma_range_parser_init(&parser, np)) {
+		dev_err(dev, "missing dma-ranges property\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Get the dma-ranges from the device tree
+	 */
+	for_each_of_pci_range(&parser, &range) {
+		u64 end = range.pci_addr + range.size - 1;
+		int ret;
+
+		ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val);
+		if (ret) {
+			dev_err(dev,
+				"DMA range %d: illegal MEM resource size\n", i);
+			return -EINVAL;
+		}
+
+		dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
+			 i + 1, range.pci_addr, end, val);
+		if (i <= 2) {
+			faraday_pci_write_config(p->bus, 0, confreg[i],
+						 4, val);
+		} else {
+			dev_err(dev, "ignore extraneous dma-range %d\n", i);
+			break;
+		}
+
+		i++;
+	}
+
+	return 0;
+}
+
+static int faraday_pci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct faraday_pci_variant *variant =
+		of_device_get_match_data(dev);
+	struct resource *regs;
+	resource_size_t io_base;
+	struct resource_entry *win;
+	struct faraday_pci *p;
+	struct resource *mem;
+	struct resource *io;
+	struct pci_host_bridge *host;
+	int ret;
+	u32 val;
+	LIST_HEAD(res);
+
+	host = pci_alloc_host_bridge(sizeof(*p));
+	if (!host)
+		return -ENOMEM;
+
+	host->dev.parent = dev;
+	host->ops = &faraday_pci_ops;
+	host->busnr = 0;
+	host->msi = NULL;
+	p = pci_host_bridge_priv(host);
+	host->sysdata = p;
+	p->dev = dev;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	p->base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(p->base))
+		return PTR_ERR(p->base);
+
+	ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
+					       &res, &io_base);
+	if (ret)
+		return ret;
+
+	ret = devm_request_pci_bus_resources(dev, &res);
+	if (ret)
+		return ret;
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry(win, &res) {
+		switch (resource_type(win->res)) {
+		case IORESOURCE_IO:
+			io = win->res;
+			io->name = "Gemini PCI I/O";
+			if (!faraday_res_to_memcfg(io->start - win->offset,
+						   resource_size(io), &val)) {
+				/* setup I/O space size */
+				writel(val, p->base + PCI_IOSIZE);
+			} else {
+				dev_err(dev, "illegal IO mem size\n");
+				return -EINVAL;
+			}
+			ret = pci_remap_iospace(io, io_base);
+			if (ret) {
+				dev_warn(dev, "error %d: failed to map resource %pR\n",
+					 ret, io);
+				continue;
+			}
+			break;
+		case IORESOURCE_MEM:
+			mem = win->res;
+			mem->name = "Gemini PCI MEM";
+			break;
+		case IORESOURCE_BUS:
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Setup hostbridge */
+	val = readl(p->base + PCI_CTRL);
+	val |= PCI_COMMAND_IO;
+	val |= PCI_COMMAND_MEMORY;
+	val |= PCI_COMMAND_MASTER;
+	writel(val, p->base + PCI_CTRL);
+
+	list_splice_init(&res, &host->windows);
+	ret = pci_register_host_bridge(host);
+	if (ret) {
+		dev_err(dev, "failed to register host: %d\n", ret);
+		return ret;
+	}
+	p->bus = host->bus;
+
+	/* Mask and clear all interrupts */
+	faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+	if (variant->cascaded_irq) {
+		ret = faraday_pci_setup_cascaded_irq(p);
+		if (ret) {
+			dev_err(dev, "failed to setup cascaded IRQ\n");
+			return ret;
+		}
+	}
+
+	ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
+	if (ret)
+		return ret;
+
+	pci_scan_child_bus(p->bus);
+	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+	pci_bus_assign_resources(p->bus);
+	pci_bus_add_devices(p->bus);
+	pci_free_resource_list(&res);
+
+	return 0;
+}
+
+/*
+ * We encode bridge variants here, we have at least two so it doesn't
+ * hurt to have infrastructure to encompass future variants as well.
+ */
+const struct faraday_pci_variant faraday_regular = {
+	.cascaded_irq = true,
+};
+
+const struct faraday_pci_variant faraday_dual = {
+	.cascaded_irq = false,
+};
+
+static const struct of_device_id faraday_pci_of_match[] = {
+	{
+		.compatible = "faraday,ftpci100",
+		.data = &faraday_regular,
+	},
+	{
+		.compatible = "faraday,ftpci100-dual",
+		.data = &faraday_dual,
+	},
+	{},
+};
+
+static struct platform_driver faraday_pci_driver = {
+	.driver = {
+		.name = "ftpci100",
+		.of_match_table = of_match_ptr(faraday_pci_of_match),
+		.suppress_bind_attrs = true,
+	},
+	.probe  = faraday_pci_probe,
+};
+builtin_platform_driver(faraday_pci_driver);

+ 1 - 0
drivers/pci/host/pci-host-generic.c

@@ -60,6 +60,7 @@ static struct platform_driver gen_pci_driver = {
 	.driver = {
 		.name = "pci-host-generic",
 		.of_match_table = gen_pci_of_match,
+		.suppress_bind_attrs = true,
 	},
 	.probe = gen_pci_probe,
 };

+ 35 - 11
drivers/pci/host/pci-hyperv.c

@@ -56,6 +56,7 @@
 #include <asm/apic.h>
 #include <linux/msi.h>
 #include <linux/hyperv.h>
+#include <linux/refcount.h>
 #include <asm/mshyperv.h>
 
 /*
@@ -72,6 +73,7 @@ enum {
 	PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
 };
 
+#define CPU_AFFINITY_ALL	-1ULL
 #define PCI_CONFIG_MMIO_LENGTH	0x2000
 #define CFG_PAGE_OFFSET 0x1000
 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -350,6 +352,7 @@ enum hv_pcibus_state {
 	hv_pcibus_init = 0,
 	hv_pcibus_probed,
 	hv_pcibus_installed,
+	hv_pcibus_removed,
 	hv_pcibus_maximum
 };
 
@@ -421,7 +424,7 @@ enum hv_pcidev_ref_reason {
 struct hv_pci_dev {
 	/* List protected by pci_rescan_remove_lock */
 	struct list_head list_entry;
-	atomic_t refs;
+	refcount_t refs;
 	enum hv_pcichild_state state;
 	struct pci_function_description desc;
 	bool reported_missing;
@@ -876,7 +879,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 		hv_int_desc_free(hpdev, int_desc);
 	}
 
-	int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL);
+	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
 	if (!int_desc)
 		goto drop_reference;
 
@@ -897,9 +900,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	 * processors because Hyper-V only supports 64 in a guest.
 	 */
 	affinity = irq_data_get_affinity_mask(data);
-	for_each_cpu_and(cpu, affinity, cpu_online_mask) {
-		int_pkt->int_desc.cpu_mask |=
-			(1ULL << vmbus_cpu_number_to_vp_number(cpu));
+	if (cpumask_weight(affinity) >= 32) {
+		int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+	} else {
+		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
+			int_pkt->int_desc.cpu_mask |=
+				(1ULL << vmbus_cpu_number_to_vp_number(cpu));
+		}
 	}
 
 	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
@@ -1208,9 +1215,11 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
 	hbus->pci_bus->msi = &hbus->msi_chip;
 	hbus->pci_bus->msi->dev = &hbus->hdev->device;
 
+	pci_lock_rescan_remove();
 	pci_scan_child_bus(hbus->pci_bus);
 	pci_bus_assign_resources(hbus->pci_bus);
 	pci_bus_add_devices(hbus->pci_bus);
+	pci_unlock_rescan_remove();
 	hbus->state = hv_pcibus_installed;
 	return 0;
 }
@@ -1254,13 +1263,13 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
 static void get_pcichild(struct hv_pci_dev *hpdev,
 			    enum hv_pcidev_ref_reason reason)
 {
-	atomic_inc(&hpdev->refs);
+	refcount_inc(&hpdev->refs);
 }
 
 static void put_pcichild(struct hv_pci_dev *hpdev,
 			    enum hv_pcidev_ref_reason reason)
 {
-	if (atomic_dec_and_test(&hpdev->refs))
+	if (refcount_dec_and_test(&hpdev->refs))
 		kfree(hpdev);
 }
 
@@ -1314,7 +1323,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
 	wait_for_completion(&comp_pkt.host_event);
 
 	hpdev->desc = *desc;
-	get_pcichild(hpdev, hv_pcidev_ref_initial);
+	refcount_set(&hpdev->refs, 1);
 	get_pcichild(hpdev, hv_pcidev_ref_childlist);
 	spin_lock_irqsave(&hbus->device_list_lock, flags);
 
@@ -1504,13 +1513,24 @@ static void pci_devices_present_work(struct work_struct *work)
 		put_pcichild(hpdev, hv_pcidev_ref_initial);
 	}
 
-	/* Tell the core to rescan bus because there may have been changes. */
-	if (hbus->state == hv_pcibus_installed) {
+	switch(hbus->state) {
+	case hv_pcibus_installed:
+		/*
+		* Tell the core to rescan bus
+		* because there may have been changes.
+		*/
 		pci_lock_rescan_remove();
 		pci_scan_child_bus(hbus->pci_bus);
 		pci_unlock_rescan_remove();
-	} else {
+		break;
+
+	case hv_pcibus_init:
+	case hv_pcibus_probed:
 		survey_child_resources(hbus);
+		break;
+
+	default:
+		break;
 	}
 
 	up(&hbus->enum_sem);
@@ -1600,8 +1620,10 @@ static void hv_eject_device_work(struct work_struct *work)
 	pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
 					   wslot);
 	if (pdev) {
+		pci_lock_rescan_remove();
 		pci_stop_and_remove_bus_device(pdev);
 		pci_dev_put(pdev);
+		pci_unlock_rescan_remove();
 	}
 
 	spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
@@ -2185,6 +2207,7 @@ static int hv_pci_probe(struct hv_device *hdev,
 	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
 	if (!hbus)
 		return -ENOMEM;
+	hbus->state = hv_pcibus_init;
 
 	/*
 	 * The PCI bus "domain" is what is called "segment" in ACPI and
@@ -2348,6 +2371,7 @@ static int hv_pci_remove(struct hv_device *hdev)
 		pci_stop_root_bus(hbus->pci_bus);
 		pci_remove_root_bus(hbus->pci_bus);
 		pci_unlock_rescan_remove();
+		hbus->state = hv_pcibus_removed;
 	}
 
 	hv_pci_bus_exit(hdev);

Some files were not shown because too many files changed in this diff