69 files changed, 1282 insertions, 701 deletions
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index 0f7afb2bb442..aef8cc5a677b 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt @@ -25,13 +25,18 @@ physical addresses. These are the addresses in /proc/iomem. The physical address is not directly useful to a driver; it must use ioremap() to map the space and produce a virtual address. -I/O devices use a third kind of address: a "bus address" or "DMA address". -If a device has registers at an MMIO address, or if it performs DMA to read -or write system memory, the addresses used by the device are bus addresses. -In some systems, bus addresses are identical to CPU physical addresses, but -in general they are not. IOMMUs and host bridges can produce arbitrary +I/O devices use a third kind of address: a "bus address". If a device has +registers at an MMIO address, or if it performs DMA to read or write system +memory, the addresses used by the device are bus addresses. In some +systems, bus addresses are identical to CPU physical addresses, but in +general they are not. IOMMUs and host bridges can produce arbitrary mappings between physical and bus addresses. +From a device's point of view, DMA uses the bus address space, but it may +be restricted to a subset of that space. For example, even if a system +supports 64-bit addresses for main memory and PCI BARs, it may use an IOMMU +so devices only need to use 32-bit DMA addresses. + Here's a picture and some examples: CPU CPU Bus @@ -72,11 +77,11 @@ can use virtual address X to access the buffer, but the device itself cannot because DMA doesn't go through the CPU virtual memory system. In some simple systems, the device can do DMA directly to physical address -Y. But in many others, there is IOMMU hardware that translates bus +Y. But in many others, there is IOMMU hardware that translates DMA addresses to physical addresses, e.g., it translates Z to Y. This is part of the reason for the DMA API: the driver can give a virtual address X to an interface like dma_map_single(), which sets up any required IOMMU -mapping and returns the bus address Z. The driver then tells the device to +mapping and returns the DMA address Z. The driver then tells the device to do DMA to Z, and the IOMMU maps it to the buffer at address Y in system RAM. @@ -98,7 +103,7 @@ First of all, you should make sure #include <linux/dma-mapping.h> is in your driver, which provides the definition of dma_addr_t. This type -can hold any valid DMA or bus address for the platform and should be used +can hold any valid DMA address for the platform and should be used everywhere you hold a DMA address returned from the DMA mapping functions. What memory is DMA'able? @@ -316,7 +321,7 @@ There are two types of DMA mappings: Think of "consistent" as "synchronous" or "coherent". The current default is to return consistent memory in the low 32 - bits of the bus space. However, for future compatibility you should + bits of the DMA space. However, for future compatibility you should set the consistent mask even if this default is fine for your driver. @@ -403,7 +408,7 @@ dma_alloc_coherent() returns two values: the virtual address which you can use to access it from the CPU and dma_handle which you pass to the card. -The CPU virtual address and the DMA bus address are both +The CPU virtual address and the DMA address are both guaranteed to be aligned to the smallest PAGE_SIZE order which is greater than or equal to the requested size. 
This invariant exists (for example) to guarantee that if you allocate a chunk @@ -645,8 +650,8 @@ PLEASE NOTE: The 'nents' argument to the dma_unmap_sg call must be dma_map_sg call. Every dma_map_{single,sg}() call should have its dma_unmap_{single,sg}() -counterpart, because the bus address space is a shared resource and -you could render the machine unusable by consuming all bus addresses. +counterpart, because the DMA address space is a shared resource and +you could render the machine unusable by consuming all DMA addresses. If you need to use the same streaming DMA region multiple times and touch the data in between the DMA transfers, the buffer needs to be synced diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 52088408668a..7eba542eff7c 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -18,10 +18,10 @@ Part I - dma_ API To get the dma_ API, you must #include <linux/dma-mapping.h>. This provides dma_addr_t and the interfaces described below. -A dma_addr_t can hold any valid DMA or bus address for the platform. It -can be given to a device to use as a DMA source or target. A CPU cannot -reference a dma_addr_t directly because there may be translation between -its physical address space and the bus address space. +A dma_addr_t can hold any valid DMA address for the platform. It can be +given to a device to use as a DMA source or target. A CPU cannot reference +a dma_addr_t directly because there may be translation between its physical +address space and the DMA address space. Part Ia - Using large DMA-coherent buffers ------------------------------------------ @@ -42,7 +42,7 @@ It returns a pointer to the allocated region (in the processor's virtual address space) or NULL if the allocation failed. It also returns a <dma_handle> which may be cast to an unsigned integer the -same width as the bus and given to the device as the bus address base of +same width as the bus and given to the device as the DMA address base of the region. Note: consistent memory can be expensive on some platforms, and the @@ -193,7 +193,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) Maps a piece of processor virtual memory so it can be accessed by the -device and returns the bus address of the memory. +device and returns the DMA address of the memory. The direction for both APIs may be converted freely by casting. However the dma_ API uses a strongly typed enumerator for its @@ -212,20 +212,20 @@ contiguous piece of memory. For this reason, memory to be mapped by this API should be obtained from sources which guarantee it to be physically contiguous (like kmalloc). -Further, the bus address of the memory must be within the +Further, the DMA address of the memory must be within the dma_mask of the device (the dma_mask is a bit mask of the -addressable region for the device, i.e., if the bus address of -the memory ANDed with the dma_mask is still equal to the bus +addressable region for the device, i.e., if the DMA address of +the memory ANDed with the dma_mask is still equal to the DMA address, then the device can perform DMA to the memory). 
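For background on the dma_mask requirement described in the DMA-API.txt hunk above, here is a minimal sketch of the usual streaming-mapping pattern; it is illustrative only (not part of this patch set) and assumes a generic struct device and a driver-chosen buffer size:

	/* Illustrative sketch only, not part of this diff.  Maps a
	 * physically contiguous kmalloc() buffer for device-bound DMA
	 * and checks the returned DMA address before using it. */
	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static int example_map_buffer(struct device *dev, size_t size)
	{
		void *cpu_addr;
		dma_addr_t dma_handle;

		cpu_addr = kmalloc(size, GFP_KERNEL);	/* physically contiguous */
		if (!cpu_addr)
			return -ENOMEM;

		dma_handle = dma_map_single(dev, cpu_addr, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_handle)) {
			kfree(cpu_addr);
			return -EIO;
		}

		/* ... program the device with dma_handle and start the transfer ... */

		dma_unmap_single(dev, dma_handle, size, DMA_TO_DEVICE);
		kfree(cpu_addr);
		return 0;
	}
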
To ensure that the memory allocated by kmalloc is within the dma_mask, the driver may specify various platform-dependent flags to restrict -the bus address range of the allocation (e.g., on x86, GFP_DMA -guarantees to be within the first 16MB of available bus addresses, +the DMA address range of the allocation (e.g., on x86, GFP_DMA +guarantees to be within the first 16MB of available DMA addresses, as required by ISA devices). Note also that the above constraints on physical contiguity and dma_mask may not apply if the platform has an IOMMU (a device which -maps an I/O bus address to a physical memory address). However, to be +maps an I/O DMA address to a physical memory address). However, to be portable, device driver writers may *not* assume that such an IOMMU exists. @@ -296,7 +296,7 @@ reduce current DMA mapping usage or delay and try again later). dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) -Returns: the number of bus address segments mapped (this may be shorter +Returns: the number of DMA address segments mapped (this may be shorter than <nents> passed in if some elements of the scatter/gather list are physically or virtually adjacent and an IOMMU maps them with a single entry). @@ -340,7 +340,7 @@ must be the same as those and passed in to the scatter/gather mapping API. Note: <nents> must be the number you passed in, *not* the number of -bus address entries returned. +DMA address entries returned. void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, @@ -507,7 +507,7 @@ it's asked for coherent memory for this device. phys_addr is the CPU physical address to which the memory is currently assigned (this will be ioremapped so the CPU can access the region). -device_addr is the bus address the device needs to be programmed +device_addr is the DMA address the device needs to be programmed with to actually address this memory (this will be handed out as the dma_addr_t in dma_alloc_coherent()). diff --git a/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt b/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt new file mode 100644 index 000000000000..36d881c8e6d4 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/xgene-pci-msi.txt @@ -0,0 +1,68 @@ +* AppliedMicro X-Gene v1 PCIe MSI controller + +Required properties: + +- compatible: should be "apm,xgene1-msi" to identify + X-Gene v1 PCIe MSI controller block. +- msi-controller: indicates that this is X-Gene v1 PCIe MSI controller node +- reg: physical base address (0x79000000) and length (0x900000) for controller + registers. These registers include the MSI termination address and data + registers as well as the MSI interrupt status registers. +- reg-names: not required +- interrupts: A list of 16 interrupt outputs of the controller, starting from + interrupt number 0x10 to 0x1f. 
+- interrupt-names: not required + +Each PCIe node needs to have property msi-parent that points to msi controller node + +Examples: + +SoC DTSI: + + + MSI node: + msi@79000000 { + compatible = "apm,xgene1-msi"; + msi-controller; + reg = <0x00 0x79000000 0x0 0x900000>; + interrupts = <0x0 0x10 0x4> + <0x0 0x11 0x4> + <0x0 0x12 0x4> + <0x0 0x13 0x4> + <0x0 0x14 0x4> + <0x0 0x15 0x4> + <0x0 0x16 0x4> + <0x0 0x17 0x4> + <0x0 0x18 0x4> + <0x0 0x19 0x4> + <0x0 0x1a 0x4> + <0x0 0x1b 0x4> + <0x0 0x1c 0x4> + <0x0 0x1d 0x4> + <0x0 0x1e 0x4> + <0x0 0x1f 0x4>; + }; + + + PCIe controller node with msi-parent property pointing to MSI node: + pcie0: pcie@1f2b0000 { + status = "disabled"; + device_type = "pci"; + compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */ + 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */ + reg-names = "csr", "cfg"; + ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */ + dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 + 0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1 + 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1 + 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1 + 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>; + dma-coherent; + clocks = <&pcie0clk 0>; + msi-parent= <&msi>; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 781e099495d3..59aa7b321c40 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7564,6 +7564,14 @@ L: linux-pci@vger.kernel.org S: Orphan F: drivers/pci/host/*spear* +PCI MSI DRIVER FOR APPLIEDMICRO XGENE +M: Duc Dang <dhdang@apm.com> +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org +S: Maintained +F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt +F: drivers/pci/host/pci-xgene-msi.c + PCMCIA SUBSYSTEM P: Linux PCMCIA Team L: linux-pcmcia@lists.infradead.org diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index f7f680f7457d..8b02afeb6319 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -71,22 +71,6 @@ extern void pcibios_set_master(struct pci_dev *dev); /* implement the pci_ DMA API in terms of the generic device dma_ one */ #include <asm-generic/pci-dma-compat.h> -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - unsigned long cacheline_size; - u8 byte; - - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - *strat = PCI_DMA_BURST_BOUNDARY; - *strategy_parameter = cacheline_size; -} #endif /* TODO: integrate with include/asm-generic/pci.h ? 
*/ diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c index 00096df0f6ad..83d0a359a1b2 100644 --- a/arch/alpha/kernel/core_irongate.c +++ b/arch/alpha/kernel/core_irongate.c @@ -22,7 +22,6 @@ #include <linux/bootmem.h> #include <asm/ptrace.h> -#include <asm/pci.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index 79d69d7f63f8..15f42083bdb3 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c @@ -22,7 +22,6 @@ #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> -#include <asm/pci.h> #include <asm/pgtable.h> #include <asm/core_tsunami.h> #include <asm/hwrpb.h> diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 700686d04869..2cfaa0e5c577 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -39,7 +39,6 @@ #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> -#include <asm/pci.h> #include <asm/pgtable.h> #include <asm/core_irongate.h> #include <asm/hwrpb.h> diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 585dc33a7a24..a5635444ca41 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -31,16 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) */ #define PCI_DMA_BUS_IS_PHYS (1) -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index c8d3e0e86678..d8f3a1c65ecd 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -374,6 +374,28 @@ }; }; + msi: msi@79000000 { + compatible = "apm,xgene1-msi"; + msi-controller; + reg = <0x00 0x79000000 0x0 0x900000>; + interrupts = < 0x0 0x10 0x4 + 0x0 0x11 0x4 + 0x0 0x12 0x4 + 0x0 0x13 0x4 + 0x0 0x14 0x4 + 0x0 0x15 0x4 + 0x0 0x16 0x4 + 0x0 0x17 0x4 + 0x0 0x18 0x4 + 0x0 0x19 0x4 + 0x0 0x1a 0x4 + 0x0 0x1b 0x4 + 0x0 0x1c 0x4 + 0x0 0x1d 0x4 + 0x0 0x1e 0x4 + 0x0 0x1f 0x4>; + }; + pcie0: pcie@1f2b0000 { status = "disabled"; device_type = "pci"; @@ -395,6 +417,7 @@ 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>; dma-coherent; clocks = <&pcie0clk 0>; + msi-parent = <&msi>; }; pcie1: pcie@1f2c0000 { @@ -418,6 +441,7 @@ 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>; dma-coherent; clocks = <&pcie1clk 0>; + msi-parent = <&msi>; }; pcie2: pcie@1f2d0000 { @@ -441,6 +465,7 @@ 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>; dma-coherent; clocks = <&pcie2clk 0>; + msi-parent = <&msi>; }; pcie3: pcie@1f500000 { @@ -464,6 +489,7 @@ 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>; dma-coherent; clocks = <&pcie3clk 0>; + msi-parent = <&msi>; }; pcie4: pcie@1f510000 { @@ -487,6 +513,7 @@ 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>; dma-coherent; clocks = <&pcie4clk 0>; + msi-parent = <&msi>; }; serial0: serial@1c020000 { diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h index 2035a4d3f9b9..a6d4ed042c70 100644 --- a/arch/frv/include/asm/pci.h +++ b/arch/frv/include/asm/pci.h @@ -41,16 +41,6 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, /* Return the index of the PCI controller for device PDEV. 
*/ #define pci_controller_num(PDEV) (0) -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - /* * These are pretty much arbitrary with the CoMEM implementation. * We have the whole address space to ourselves. diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 52af5ed9f60b..b897fae1f0ca 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -52,25 +52,6 @@ extern unsigned long ia64_max_iommu_merge_mask; #include <asm-generic/pci-dma-compat.h> -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - unsigned long cacheline_size; - u8 byte; - - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - *strat = PCI_DMA_BURST_MULTIPLE; - *strategy_parameter = cacheline_size; -} -#endif - #define HAVE_PCI_MMAP extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); @@ -108,19 +89,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) return (pci_domain_nr(bus) != 0); } -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - #define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 468aca8cec0d..fdf2e75d7033 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -44,16 +44,6 @@ struct pci_dev; */ #define pcibios_assign_all_busses() 0 -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - extern int pci_domain_nr(struct pci_bus *bus); /* Decide whether to display the domain number in /proc */ @@ -83,19 +73,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus, */ #define PCI_DMA_BUS_IS_PHYS (1) -static inline struct resource *pcibios_select_root(struct pci_dev *pdev, - struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - extern void pcibios_claim_one_bus(struct pci_bus *b); extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index d9692993fc83..70dcc5498128 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -113,16 +113,6 @@ struct pci_dev; */ extern unsigned int PCI_DMA_BUS_IS_PHYS; -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - #ifdef CONFIG_PCI_DOMAINS #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index diff --git a/arch/mips/pci/fixup-cobalt.c 
b/arch/mips/pci/fixup-cobalt.c index a138e8ee5cfc..b3ab59318d91 100644 --- a/arch/mips/pci/fixup-cobalt.c +++ b/arch/mips/pci/fixup-cobalt.c @@ -13,7 +13,6 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <asm/pci.h> #include <asm/io.h> #include <asm/gt64120.h> diff --git a/arch/mips/pci/ops-mace.c b/arch/mips/pci/ops-mace.c index 6b5821febc38..951d8070fb48 100644 --- a/arch/mips/pci/ops-mace.c +++ b/arch/mips/pci/ops-mace.c @@ -8,7 +8,6 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/types.h> -#include <asm/pci.h> #include <asm/ip32/mace.h> #if 0 diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index 8b117e638306..c5347d99cf3a 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c @@ -20,7 +20,6 @@ #include <linux/of_irq.h> #include <linux/of_pci.h> -#include <asm/pci.h> #include <asm/gpio.h> #include <asm/addrspace.h> diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index 5f70af25c7d0..c222d1792d5b 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h @@ -83,19 +83,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, /* implement the pci_ DMA API in terms of the generic device dma_ one */ #include <asm-generic/pci-dma-compat.h> -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { return channel ? 15 : 14; diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 20df2b04fc09..bf5e044281d6 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -196,25 +196,6 @@ static inline void pcibios_register_hba(struct pci_hba_data *x) /* export the pci_ DMA API in terms of the dma_ one */ #include <asm-generic/pci-dma-compat.h> -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - unsigned long cacheline_size; - u8 byte; - - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - *strat = PCI_DMA_BURST_MULTIPLE; - *strategy_parameter = cacheline_size; -} -#endif - static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { return channel ? 
15 : 14; diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 4aef8d660999..99dc432b256a 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -71,36 +71,6 @@ extern struct dma_map_ops *get_pci_dma_ops(void); */ #define PCI_DISABLE_MWI -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - unsigned long cacheline_size; - u8 byte; - - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - *strat = PCI_DMA_BURST_MULTIPLE; - *strategy_parameter = cacheline_size; -} -#endif - -#else /* 32-bit */ - -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif #endif /* CONFIG_PPC64 */ extern int pci_domain_nr(struct pci_bus *bus); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 308c5e15676b..00fdea24f854 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -46,7 +46,6 @@ #include <asm/mmu.h> #include <asm/paca.h> #include <asm/pgtable.h> -#include <asm/pci.h> #include <asm/iommu.h> #include <asm/btext.h> #include <asm/sections.h> diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index fd1fe4c37599..fcca8077e6a2 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -37,7 +37,6 @@ #include <asm/smp.h> #include <asm/mmu.h> #include <asm/pgtable.h> -#include <asm/pci.h> #include <asm/iommu.h> #include <asm/btext.h> #include <asm/sections.h> diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c index e2d401ad8fbb..6eb3b2abae90 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c @@ -12,7 +12,7 @@ #undef DEBUG -#include <asm/pci.h> +#include <linux/pci.h> #include <asm/mpc52xx.h> #include <asm/delay.h> #include <asm/machdep.h> diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index d3236c9e226b..39e2f41b6cf0 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -9,10 +9,10 @@ #include <linux/pfn.h> #include <linux/suspend.h> #include <linux/mm.h> +#include <linux/pci.h> #include <asm/ctl_reg.h> #include <asm/ipl.h> #include <asm/cio.h> -#include <asm/pci.h> #include <asm/sections.h> #include "entry.h" diff --git a/arch/sh/drivers/pci/ops-sh5.c b/arch/sh/drivers/pci/ops-sh5.c index 4ce95a001b80..45361946460f 100644 --- a/arch/sh/drivers/pci/ops-sh5.c +++ b/arch/sh/drivers/pci/ops-sh5.c @@ -18,7 +18,6 @@ #include <linux/delay.h> #include <linux/types.h> #include <linux/irq.h> -#include <asm/pci.h> #include <asm/io.h> #include "pci-sh5.h" diff --git a/arch/sh/drivers/pci/pci-sh5.c b/arch/sh/drivers/pci/pci-sh5.c index 16c1e721bf54..8229114c6a58 100644 --- a/arch/sh/drivers/pci/pci-sh5.c +++ b/arch/sh/drivers/pci/pci-sh5.c @@ -20,7 +20,6 @@ #include <linux/types.h> #include <linux/irq.h> #include <cpu/irq.h> -#include <asm/pci.h> #include <asm/io.h> #include "pci-sh5.h" diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 5b4511552998..e343dbd02e41 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -86,24 +86,6 @@ extern void pcibios_set_master(struct pci_dev *dev); * direct memory write. 
*/ #define PCI_DISABLE_MWI - -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - unsigned long cacheline_size; - u8 byte; - - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); - - if (byte == 0) - cacheline_size = L1_CACHE_BYTES; - else - cacheline_size = byte << 2; - - *strat = PCI_DMA_BURST_MULTIPLE; - *strategy_parameter = cacheline_size; -} #endif /* Board-specific fixup routines. */ diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index 53e9b4987db0..b7c092df3134 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h @@ -22,16 +22,6 @@ struct pci_dev; -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - #endif /* __KERNEL__ */ #ifndef CONFIG_LEON_PCI diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index bd00a6226169..022d16008a00 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -31,25 +31,6 @@ #define PCI64_REQUIRED_MASK (~(u64)0) #define PCI64_ADDR_BASE 0xfffc000000000000UL -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - unsigned long cacheline_size; - u8 byte; - - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - *strat = PCI_DMA_BURST_BOUNDARY; - *strategy_parameter = cacheline_size; -} -#endif - /* Return the index of the PCI controller for device PDEV. 
*/ int pci_domain_nr(struct pci_bus *bus); diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h index 654407e98619..38b3f3785c3c 100644 --- a/arch/unicore32/include/asm/pci.h +++ b/arch/unicore32/include/asm/pci.h @@ -18,16 +18,6 @@ #include <asm-generic/pci.h> #include <mach/hardware.h> /* for PCIBIOS_MIN_* */ -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 4e370a5d8117..e51c229a29a4 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -80,13 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, #ifdef CONFIG_PCI extern void early_quirks(void); -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} #else static inline void early_quirks(void) { } #endif diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 234b0722de53..eed5625b10e8 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -11,7 +11,6 @@ #include <asm/bios_ebda.h> #include <asm/paravirt.h> #include <asm/pci_x86.h> -#include <asm/pci.h> #include <asm/mpspec.h> #include <asm/setup.h> #include <asm/apic.h> diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index e4695985f9de..2ae7ce240e02 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -81,6 +81,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), }, }, + /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */ + /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */ + { + .callback = set_use_crs, + .ident = "Foxconn K8M890-8237A", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"), + DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"), + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), + }, + }, /* Now for the blacklist.. */ @@ -121,8 +132,10 @@ void __init pci_acpi_crs_quirks(void) { int year; - if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) - pci_use_crs = false; + if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) { + if (iomem_resource.end <= 0xffffffff) + pci_use_crs = false; + } dmi_check_system(pci_crs_quirks); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index b1def411c0b8..4db10b189104 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -163,7 +163,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev, { int segment = pci_domain_nr(dev->bus); int bus = dev->bus->number; - int device = PCI_SLOT(dev->devfn); + int device = pci_ari_enabled(dev->bus) ? 
0 : PCI_SLOT(dev->devfn); struct acpi_prt_entry *entry; if (((prt->address >> 16) & 0xffff) != device || diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 3dc1f68b322d..6ce973187225 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -3058,7 +3058,6 @@ static void cas_init_mac(struct cas *cp) /* setup core arbitration weight register */ writel(CAWR_RR_DIS, cp->regs + REG_CAWR); - /* XXX Use pci_dma_burst_advice() */ #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) /* set the infinite burst register for chips that don't have * pci issues. diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index cd29b1038c5e..8225cbcd6eb8 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c @@ -1313,8 +1313,6 @@ static int ntb_setup_intx(struct ntb_device *ndev) struct pci_dev *pdev = ndev->pdev; int rc; - pci_msi_off(pdev); - /* Verify intx is enabled */ pci_intx(pdev, 1); diff --git a/drivers/of/address.c b/drivers/of/address.c index 78a7dcbec7d8..6906a3f61bd8 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -765,7 +765,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address) spin_lock(&io_range_lock); list_for_each_entry(res, &io_range_list, list) { if (address >= res->start && address < res->start + res->size) { - addr = res->start - address + offset; + addr = address - res->start + offset; break; } offset += res->size; diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 7a8f1c5e65af..73de4efcbe6e 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -1,6 +1,10 @@ # # PCI configuration # +config PCI_BUS_ADDR_T_64BIT + def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) + depends on PCI + config PCI_MSI bool "Message Signaled Interrupts (MSI and MSI-X)" depends on PCI diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 90fa3a78fb7c..6fbd3f2b5992 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -92,11 +92,11 @@ void pci_bus_remove_resources(struct pci_bus *bus) } static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL}; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT static struct pci_bus_region pci_64_bit = {0, - (dma_addr_t) 0xffffffffffffffffULL}; -static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL, - (dma_addr_t) 0xffffffffffffffffULL}; + (pci_bus_addr_t) 0xffffffffffffffffULL}; +static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL, + (pci_bus_addr_t) 0xffffffffffffffffULL}; #endif /* @@ -200,7 +200,7 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, resource_size_t), void *alignf_data) { -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT int rc; if (res->flags & IORESOURCE_MEM_64) { diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 1dfb567b3522..c132bddc03f3 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -89,11 +89,20 @@ config PCI_XGENE depends on ARCH_XGENE depends on OF select PCIEPORTBUS + select PCI_MSI_IRQ_DOMAIN if PCI_MSI help Say Y here if you want internal PCI support on APM X-Gene SoC. There are 5 internal PCIe ports available. Each port is GEN3 capable and have varied lanes from x1 to x8. +config PCI_XGENE_MSI + bool "X-Gene v1 PCIe MSI feature" + depends on PCI_XGENE && PCI_MSI + default y + help + Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. + This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. 
+ config PCI_LAYERSCAPE bool "Freescale Layerscape PCIe controller" depends on OF && ARM @@ -125,4 +134,15 @@ config PCIE_IPROC_PLATFORM Say Y here if you want to use the Broadcom iProc PCIe controller through the generic platform bus interface +config PCIE_IPROC_BCMA + bool "Broadcom iProc PCIe BCMA bus driver" + depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST) + select PCIE_IPROC + select BCMA + select PCI_DOMAINS + default ARCH_BCM_5301X + help + Say Y here if you want to use the Broadcom iProc PCIe controller + through the BCMA bus interface + endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index f733b4e27642..140d66f796e4 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -11,7 +11,9 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o obj-$(CONFIG_PCI_XGENE) += pci-xgene.o +obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o +obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c index 2d57e19a2cd4..80db09e47800 100644 --- a/drivers/pci/host/pci-dra7xx.c +++ b/drivers/pci/host/pci-dra7xx.c @@ -93,9 +93,9 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp) static int dra7xx_pcie_establish_link(struct pcie_port *pp) { - u32 reg; - unsigned int retries = 1000; struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + u32 reg; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "link is already up\n"); @@ -106,19 +106,14 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp) reg |= LTSSM_EN; dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); - while (retries--) { - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); - if (reg & LINK_UP) - break; + for (retries = 0; retries < 1000; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(10, 20); } - if (retries == 0) { - dev_err(pp->dev, "link is not up\n"); - return -ETIMEDOUT; - } - - return 0; + dev_err(pp->dev, "link is not up\n"); + return -EINVAL; } static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c index c139237e0e52..f9f468d9a819 100644 --- a/drivers/pci/host/pci-exynos.c +++ b/drivers/pci/host/pci-exynos.c @@ -316,9 +316,9 @@ static void exynos_pcie_assert_reset(struct pcie_port *pp) static int exynos_pcie_establish_link(struct pcie_port *pp) { - u32 val; - int count = 0; struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + u32 val; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "Link already up\n"); @@ -357,27 +357,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp) PCIE_APP_LTSSM_ENABLE); /* check if the link is up or not */ - while (!dw_pcie_link_up(pp)) { - mdelay(100); - count++; - if (count == 10) { - while (exynos_phy_readl(exynos_pcie, - PCIE_PHY_PLL_LOCKED) == 0) { - val = exynos_blk_readl(exynos_pcie, - PCIE_PHY_PLL_LOCKED); - dev_info(pp->dev, "PLL Locked: 0x%x\n", val); - } - /* power off phy */ - exynos_pcie_power_off_phy(pp); - - dev_err(pp->dev, "PCIe Link Fail\n"); - return -EINVAL; + for (retries = 0; retries < 10; retries++) { + if (dw_pcie_link_up(pp)) { + dev_info(pp->dev, "Link up\n"); + return 0; } + mdelay(100); } - 
dev_info(pp->dev, "Link up\n"); + while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) { + val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED); + dev_info(pp->dev, "PLL Locked: 0x%x\n", val); + } + /* power off phy */ + exynos_pcie_power_off_phy(pp); - return 0; + dev_err(pp->dev, "PCIe Link Fail\n"); + return -EINVAL; } static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index fdb95367721e..233a196c6e66 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -47,6 +47,8 @@ struct imx6_pcie { #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf +#define PCIE_RC_LCSR 0x80 + /* PCIe Port Logic registers (memory-mapped) */ #define PL_OFFSET 0x700 #define PCIE_PL_PFLR (PL_OFFSET + 0x08) @@ -335,21 +337,36 @@ static void imx6_pcie_init_phy(struct pcie_port *pp) static int imx6_pcie_wait_for_link(struct pcie_port *pp) { - int count = 200; + unsigned int retries; - while (!dw_pcie_link_up(pp)) { + for (retries = 0; retries < 200; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(100, 1000); - if (--count) - continue; - - dev_err(pp->dev, "phy link never came up\n"); - dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", - readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), - readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); - return -EINVAL; } - return 0; + dev_err(pp->dev, "phy link never came up\n"); + dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", + readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), + readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); + return -EINVAL; +} + +static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp) +{ + u32 tmp; + unsigned int retries; + + for (retries = 0; retries < 200; retries++) { + tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + /* Test if the speed change finished. */ + if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) + return 0; + usleep_range(100, 1000); + } + + dev_err(pp->dev, "Speed change timeout\n"); + return -EINVAL; } static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) @@ -359,11 +376,11 @@ static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) return dw_handle_msi_irq(pp); } -static int imx6_pcie_start_link(struct pcie_port *pp) +static int imx6_pcie_establish_link(struct pcie_port *pp) { struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); - uint32_t tmp; - int ret, count; + u32 tmp; + int ret; /* * Force Gen1 operation when starting the link. In case the link is @@ -397,29 +414,22 @@ static int imx6_pcie_start_link(struct pcie_port *pp) tmp |= PORT_LOGIC_SPEED_CHANGE; writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); - count = 200; - while (count--) { - tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); - /* Test if the speed change finished. */ - if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) - break; - usleep_range(100, 1000); + ret = imx6_pcie_wait_for_speed_change(pp); + if (ret) { + dev_err(pp->dev, "Failed to bring link up!\n"); + return ret; } /* Make sure link training is finished as well! 
*/ - if (count) - ret = imx6_pcie_wait_for_link(pp); - else - ret = -EINVAL; - + ret = imx6_pcie_wait_for_link(pp); if (ret) { dev_err(pp->dev, "Failed to bring link up!\n"); - } else { - tmp = readl(pp->dbi_base + 0x80); - dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); + return ret; } - return ret; + tmp = readl(pp->dbi_base + PCIE_RC_LCSR); + dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); + return 0; } static void imx6_pcie_host_init(struct pcie_port *pp) @@ -432,7 +442,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp) dw_pcie_setup_rc(pp); - imx6_pcie_start_link(pp); + imx6_pcie_establish_link(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); @@ -440,19 +450,19 @@ static void imx6_pcie_host_init(struct pcie_port *pp) static void imx6_pcie_reset_phy(struct pcie_port *pp) { - uint32_t temp; + u32 tmp; - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); - temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | - PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); + pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); + tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | + PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); usleep_range(2000, 3000); - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); - temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | + pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); + tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); + pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); } static int imx6_pcie_link_up(struct pcie_port *pp) diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 75333b0c4f0a..b75d684aefcd 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -88,7 +88,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) { struct pcie_port *pp = &ks_pcie->pp; - int count = 200; + unsigned int retries; dw_pcie_setup_rc(pp); @@ -99,17 +99,15 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) ks_dw_pcie_initiate_link_train(ks_pcie); /* check if the link is up or not */ - while (!dw_pcie_link_up(pp)) { + for (retries = 0; retries < 200; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(100, 1000); - if (--count) { - ks_dw_pcie_initiate_link_train(ks_pcie); - continue; - } - dev_err(pp->dev, "phy link never came up\n"); - return -EINVAL; + ks_dw_pcie_initiate_link_train(ks_pcie); } - return 0; + dev_err(pp->dev, "phy link never came up\n"); + return -EINVAL; } static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc) diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index 4a6e62f67579..b2328ea13dcf 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c @@ -62,22 +62,27 @@ static int ls_pcie_link_up(struct pcie_port *pp) return 1; } +static int ls_pcie_establish_link(struct pcie_port *pp) +{ + unsigned int retries; + + for (retries = 0; retries < 200; retries++) { + if (dw_pcie_link_up(pp)) + return 0; + usleep_range(100, 1000); + } + + dev_err(pp->dev, "phy link never came up\n"); + return -EINVAL; +} + static void ls_pcie_host_init(struct pcie_port *pp) { struct ls_pcie *pcie = to_ls_pcie(pp); - int count = 0; u32 val; dw_pcie_setup_rc(pp); - - while (!ls_pcie_link_up(pp)) { - usleep_range(100, 1000); - count++; - if (count >= 200) { - 
dev_err(pp->dev, "phy link never came up\n"); - return; - } - } + ls_pcie_establish_link(pp); /* * LS1021A Workaround for internal TKT228622 diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 1ab863551920..70aa09556ec5 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -751,21 +751,6 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) return 1; } -static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys) -{ - struct mvebu_pcie *pcie = sys_to_pcie(sys); - struct pci_bus *bus; - - bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr, - &mvebu_pcie_ops, sys, &sys->resources); - if (!bus) - return NULL; - - pci_scan_child_bus(bus); - - return bus; -} - static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, const struct resource *res, resource_size_t start, @@ -809,12 +794,11 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie) hw.nr_controllers = 1; hw.private_data = (void **)&pcie; hw.setup = mvebu_pcie_setup; - hw.scan = mvebu_pcie_scan_bus; hw.map_irq = of_irq_parse_and_map_pci; hw.ops = &mvebu_pcie_ops; hw.align_resource = mvebu_pcie_align_resource; - pci_common_init(&hw); + pci_common_init_dev(&pcie->pdev->dev, &hw); } /* diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 00e92720d7f7..10c05718dbfd 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -630,21 +630,6 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) return irq; } -static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys) -{ - struct tegra_pcie *pcie = sys_to_pcie(sys); - struct pci_bus *bus; - - bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys, - &sys->resources); - if (!bus) - return NULL; - - pci_scan_child_bus(bus); - - return bus; -} - static irqreturn_t tegra_pcie_isr(int irq, void *arg) { const char *err_msg[] = { @@ -1831,7 +1816,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie) hw.private_data = (void **)&pcie; hw.setup = tegra_pcie_setup; hw.map_irq = tegra_pcie_map_irq; - hw.scan = tegra_pcie_scan_bus; hw.ops = &tegra_pcie_ops; pci_common_init_dev(pcie->dev, &hw); diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c new file mode 100644 index 000000000000..2d31d4d6fd08 --- /dev/null +++ b/drivers/pci/host/pci-xgene-msi.c @@ -0,0 +1,596 @@ +/* + * APM X-Gene MSI Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Tanmay Inamdar <tinamdar@apm.com> + * Duc Dang <dhdang@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/cpu.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/of_pci.h> + +#define MSI_IR0 0x000000 +#define MSI_INT0 0x800000 +#define IDX_PER_GROUP 8 +#define IRQS_PER_IDX 16 +#define NR_HW_IRQS 16 +#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) + +struct xgene_msi_group { + struct xgene_msi *msi; + int gic_irq; + u32 msi_grp; +}; + +struct xgene_msi { + struct device_node *node; + struct msi_controller mchip; + struct irq_domain *domain; + u64 msi_addr; + void __iomem *msi_regs; + unsigned long *bitmap; + struct mutex bitmap_lock; + struct xgene_msi_group *msi_groups; + int num_cpus; +}; + +/* Global data */ +static struct xgene_msi xgene_msi_ctrl; + +static struct irq_chip xgene_msi_top_irq_chip = { + .name = "X-Gene1 MSI", + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info xgene_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &xgene_msi_top_irq_chip, +}; + +/* + * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where + * n is group number (0..F), x is index of registers in each group (0..7) + * The register layout is as follows: + * MSI0IR0 base_addr + * MSI0IR1 base_addr + 0x10000 + * ... ... + * MSI0IR6 base_addr + 0x60000 + * MSI0IR7 base_addr + 0x70000 + * MSI1IR0 base_addr + 0x80000 + * MSI1IR1 base_addr + 0x90000 + * ... ... + * MSI1IR7 base_addr + 0xF0000 + * MSI2IR0 base_addr + 0x100000 + * ... ... + * MSIFIR0 base_addr + 0x780000 + * MSIFIR1 base_addr + 0x790000 + * ... ... + * MSIFIR7 base_addr + 0x7F0000 + * MSIINT0 base_addr + 0x800000 + * MSIINT1 base_addr + 0x810000 + * ... ... + * MSIINTF base_addr + 0x8F0000 + * + * Each index register supports 16 MSI vectors (0..15) to generate interrupt. + * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination + * registers. + * + * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate + * the MSI pending status caused by 1 of its 8 index registers. + */ + +/* MSInIRx read helper */ +static u32 xgene_msi_ir_read(struct xgene_msi *msi, + u32 msi_grp, u32 msir_idx) +{ + return readl_relaxed(msi->msi_regs + MSI_IR0 + + (msi_grp << 19) + (msir_idx << 16)); +} + +/* MSIINTn read helper */ +static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) +{ + return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); +} + +/* + * With 2048 MSI vectors supported, the MSI message can be constructed using + * following scheme: + * - Divide into 8 256-vector groups + * Group 0: 0-255 + * Group 1: 256-511 + * Group 2: 512-767 + * ... + * Group 7: 1792-2047 + * - Each 256-vector group is divided into 16 16-vector groups + * As an example: 16 16-vector groups for 256-vector group 0-255 is + * Group 0: 0-15 + * Group 1: 16-32 + * ... 
+ * Group 15: 240-255 + * - The termination address of MSI vector in 256-vector group n and 16-vector + * group x is the address of MSIxIRn + * - The data for MSI vector in 16-vector group x is x + */ +static u32 hwirq_to_reg_set(unsigned long hwirq) +{ + return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); +} + +static u32 hwirq_to_group(unsigned long hwirq) +{ + return (hwirq % NR_HW_IRQS); +} + +static u32 hwirq_to_msi_data(unsigned long hwirq) +{ + return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); +} + +static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct xgene_msi *msi = irq_data_get_irq_chip_data(data); + u32 reg_set = hwirq_to_reg_set(data->hwirq); + u32 group = hwirq_to_group(data->hwirq); + u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); + + msg->address_hi = upper_32_bits(target_addr); + msg->address_lo = lower_32_bits(target_addr); + msg->data = hwirq_to_msi_data(data->hwirq); +} + +/* + * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain + * the expected behaviour of .set_affinity for each MSI interrupt, the 16 + * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs + * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another + * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a + * consequence, the total MSI vectors that X-Gene v1 supports will be + * reduced to 256 (2048/8) vectors. + */ +static int hwirq_to_cpu(unsigned long hwirq) +{ + return (hwirq % xgene_msi_ctrl.num_cpus); +} + +static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) +{ + return (hwirq - hwirq_to_cpu(hwirq)); +} + +static int xgene_msi_set_affinity(struct irq_data *irqdata, + const struct cpumask *mask, bool force) +{ + int target_cpu = cpumask_first(mask); + int curr_cpu; + + curr_cpu = hwirq_to_cpu(irqdata->hwirq); + if (curr_cpu == target_cpu) + return IRQ_SET_MASK_OK_DONE; + + /* Update MSI number to target the new CPU */ + irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; + + return IRQ_SET_MASK_OK; +} + +static struct irq_chip xgene_msi_bottom_irq_chip = { + .name = "MSI", + .irq_set_affinity = xgene_msi_set_affinity, + .irq_compose_msi_msg = xgene_compose_msi_msg, +}; + +static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct xgene_msi *msi = domain->host_data; + int msi_irq; + + mutex_lock(&msi->bitmap_lock); + + msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, + msi->num_cpus, 0); + if (msi_irq < NR_MSI_VEC) + bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); + else + msi_irq = -ENOSPC; + + mutex_unlock(&msi->bitmap_lock); + + if (msi_irq < 0) + return msi_irq; + + irq_domain_set_info(domain, virq, msi_irq, + &xgene_msi_bottom_irq_chip, domain->host_data, + handle_simple_irq, NULL, NULL); + set_irq_flags(virq, IRQF_VALID); + + return 0; +} + +static void xgene_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct xgene_msi *msi = irq_data_get_irq_chip_data(d); + u32 hwirq; + + mutex_lock(&msi->bitmap_lock); + + hwirq = hwirq_to_canonical_hwirq(d->hwirq); + bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); + + mutex_unlock(&msi->bitmap_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = xgene_irq_domain_alloc, + .free = xgene_irq_domain_free, +}; + +static int 
xgene_allocate_domains(struct xgene_msi *msi) +{ + msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC, + &msi_domain_ops, msi); + if (!msi->domain) + return -ENOMEM; + + msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node, + &xgene_msi_domain_info, + msi->domain); + + if (!msi->mchip.domain) { + irq_domain_remove(msi->domain); + return -ENOMEM; + } + + return 0; +} + +static void xgene_free_domains(struct xgene_msi *msi) +{ + if (msi->mchip.domain) + irq_domain_remove(msi->mchip.domain); + if (msi->domain) + irq_domain_remove(msi->domain); +} + +static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) +{ + int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); + + xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); + if (!xgene_msi->bitmap) + return -ENOMEM; + + mutex_init(&xgene_msi->bitmap_lock); + + xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, + sizeof(struct xgene_msi_group), + GFP_KERNEL); + if (!xgene_msi->msi_groups) + return -ENOMEM; + + return 0; +} + +static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct xgene_msi_group *msi_groups; + struct xgene_msi *xgene_msi; + unsigned int virq; + int msir_index, msir_val, hw_irq; + u32 intr_index, grp_select, msi_grp; + + chained_irq_enter(chip, desc); + + msi_groups = irq_desc_get_handler_data(desc); + xgene_msi = msi_groups->msi; + msi_grp = msi_groups->msi_grp; + + /* + * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt + * If bit x of this register is set (x is 0..7), one or more interupts + * corresponding to MSInIRx is set. + */ + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); + while (grp_select) { + msir_index = ffs(grp_select) - 1; + /* + * Calculate MSInIRx address to read to check for interrupts + * (refer to termination address and data assignment + * described in xgene_compose_msi_msg() ) + */ + msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); + while (msir_val) { + intr_index = ffs(msir_val) - 1; + /* + * Calculate MSI vector number (refer to the termination + * address and data assignment described in + * xgene_compose_msi_msg function) + */ + hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * + NR_HW_IRQS) + msi_grp; + /* + * As we have multiple hw_irq that maps to single MSI, + * always look up the virq using the hw_irq as seen from + * CPU0 + */ + hw_irq = hwirq_to_canonical_hwirq(hw_irq); + virq = irq_find_mapping(xgene_msi->domain, hw_irq); + WARN_ON(!virq); + if (virq != 0) + generic_handle_irq(virq); + msir_val &= ~(1 << intr_index); + } + grp_select &= ~(1 << msir_index); + + if (!grp_select) { + /* + * We handled all interrupts happened in this group, + * resample this group MSI_INTx register in case + * something else has been made pending in the meantime + */ + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); + } + } + + chained_irq_exit(chip, desc); +} + +static int xgene_msi_remove(struct platform_device *pdev) +{ + int virq, i; + struct xgene_msi *msi = platform_get_drvdata(pdev); + + for (i = 0; i < NR_HW_IRQS; i++) { + virq = msi->msi_groups[i].gic_irq; + if (virq != 0) { + irq_set_chained_handler(virq, NULL); + irq_set_handler_data(virq, NULL); + } + } + kfree(msi->msi_groups); + + kfree(msi->bitmap); + msi->bitmap = NULL; + + xgene_free_domains(msi); + + return 0; +} + +static int xgene_msi_hwirq_alloc(unsigned int cpu) +{ + struct xgene_msi *msi = &xgene_msi_ctrl; + struct xgene_msi_group *msi_group; + cpumask_var_t mask; + int i; + int err; + + for (i = cpu; i < NR_HW_IRQS; 
i += msi->num_cpus) { + msi_group = &msi->msi_groups[i]; + if (!msi_group->gic_irq) + continue; + + irq_set_chained_handler(msi_group->gic_irq, + xgene_msi_isr); + err = irq_set_handler_data(msi_group->gic_irq, msi_group); + if (err) { + pr_err("failed to register GIC IRQ handler\n"); + return -EINVAL; + } + /* + * Statically allocate MSI GIC IRQs to each CPU core. + * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated + * to each core. + */ + if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + err = irq_set_affinity(msi_group->gic_irq, mask); + if (err) + pr_err("failed to set affinity for GIC IRQ"); + free_cpumask_var(mask); + } else { + pr_err("failed to alloc CPU mask for affinity\n"); + err = -EINVAL; + } + + if (err) { + irq_set_chained_handler(msi_group->gic_irq, NULL); + irq_set_handler_data(msi_group->gic_irq, NULL); + return err; + } + } + + return 0; +} + +static void xgene_msi_hwirq_free(unsigned int cpu) +{ + struct xgene_msi *msi = &xgene_msi_ctrl; + struct xgene_msi_group *msi_group; + int i; + + for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { + msi_group = &msi->msi_groups[i]; + if (!msi_group->gic_irq) + continue; + + irq_set_chained_handler(msi_group->gic_irq, NULL); + irq_set_handler_data(msi_group->gic_irq, NULL); + } +} + +static int xgene_msi_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + xgene_msi_hwirq_alloc(cpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + xgene_msi_hwirq_free(cpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block xgene_msi_cpu_notifier = { + .notifier_call = xgene_msi_cpu_callback, +}; + +static const struct of_device_id xgene_msi_match_table[] = { + {.compatible = "apm,xgene1-msi"}, + {}, +}; + +static int xgene_msi_probe(struct platform_device *pdev) +{ + struct resource *res; + int rc, irq_index; + struct xgene_msi *xgene_msi; + unsigned int cpu; + int virt_msir; + u32 msi_val, msi_idx; + + xgene_msi = &xgene_msi_ctrl; + + platform_set_drvdata(pdev, xgene_msi); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xgene_msi->msi_regs)) { + dev_err(&pdev->dev, "no reg space\n"); + rc = -EINVAL; + goto error; + } + xgene_msi->msi_addr = res->start; + + xgene_msi->num_cpus = num_possible_cpus(); + + rc = xgene_msi_init_allocator(xgene_msi); + if (rc) { + dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); + goto error; + } + + rc = xgene_allocate_domains(xgene_msi); + if (rc) { + dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); + goto error; + } + + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { + virt_msir = platform_get_irq(pdev, irq_index); + if (virt_msir < 0) { + dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", + irq_index); + rc = -EINVAL; + goto error; + } + xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; + xgene_msi->msi_groups[irq_index].msi_grp = irq_index; + xgene_msi->msi_groups[irq_index].msi = xgene_msi; + } + + /* + * MSInIRx registers are read-to-clear; before registering + * interrupt handlers, read all of them to clear spurious + * interrupts that may occur before the driver is probed. 
+ */ + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { + for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) + msi_val = xgene_msi_ir_read(xgene_msi, irq_index, + msi_idx); + /* Read MSIINTn to confirm */ + msi_val = xgene_msi_int_read(xgene_msi, irq_index); + if (msi_val) { + dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); + rc = -EINVAL; + goto error; + } + } + + cpu_notifier_register_begin(); + + for_each_online_cpu(cpu) + if (xgene_msi_hwirq_alloc(cpu)) { + dev_err(&pdev->dev, "failed to register MSI handlers\n"); + cpu_notifier_register_done(); + goto error; + } + + rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier); + if (rc) { + dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); + cpu_notifier_register_done(); + goto error; + } + + cpu_notifier_register_done(); + + xgene_msi->mchip.of_node = pdev->dev.of_node; + rc = of_pci_msi_chip_add(&xgene_msi->mchip); + if (rc) { + dev_err(&pdev->dev, "failed to add MSI controller chip\n"); + goto error_notifier; + } + + dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); + + return 0; + +error_notifier: + unregister_hotcpu_notifier(&xgene_msi_cpu_notifier); +error: + xgene_msi_remove(pdev); + return rc; +} + +static struct platform_driver xgene_msi_driver = { + .driver = { + .name = "xgene-msi", + .owner = THIS_MODULE, + .of_match_table = xgene_msi_match_table, + }, + .probe = xgene_msi_probe, + .remove = xgene_msi_remove, +}; + +static int __init xgene_pcie_msi_init(void) +{ + return platform_driver_register(&xgene_msi_driver); +} +subsys_initcall(xgene_pcie_msi_init); diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index ee082c0366ec..a9dfb70d623a 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -59,6 +59,12 @@ #define SZ_1T (SZ_1G*1024ULL) #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) +#define ROOT_CAP_AND_CTRL 0x5C + +/* PCIe IP version */ +#define XGENE_PCIE_IP_VER_UNKN 0 +#define XGENE_PCIE_IP_VER_1 1 + struct xgene_pcie_port { struct device_node *node; struct device *dev; @@ -67,6 +73,7 @@ struct xgene_pcie_port { void __iomem *cfg_base; unsigned long cfg_addr; bool link_up; + u32 version; }; static inline u32 pcie_bar_low_val(u32 addr, u32 flags) @@ -130,9 +137,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { - struct xgene_pcie_port *port = bus->sysdata; - - if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up || + if ((pci_is_root_bus(bus) && devfn != 0) || xgene_pcie_hide_rc_bars(bus, offset)) return NULL; @@ -140,9 +145,37 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, return xgene_pcie_get_cfg_base(bus) + offset; } +static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct xgene_pcie_port *port = bus->sysdata; + + if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != + PCIBIOS_SUCCESSFUL) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * The v1 controller has a bug in its Configuration Request + * Retry Status (CRS) logic: when CRS is enabled and we read the + * Vendor and Device ID of a non-existent device, the controller + * fabricates return data of 0xFFFF0001 ("device exists but is not + * ready") instead of 0xFFFFFFFF ("device does not exist"). This + * causes the PCI core to retry the read until it times out. + * Avoid this by not claiming to support CRS. 
+ */ + if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && + ((where & ~0x3) == ROOT_CAP_AND_CTRL)) + *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} + static struct pci_ops xgene_pcie_ops = { .map_bus = xgene_pcie_map_bus, - .read = pci_generic_config_read32, + .read = xgene_pcie_config_read32, .write = pci_generic_config_write32, }; @@ -468,6 +501,23 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, return 0; } +static int xgene_pcie_msi_enable(struct pci_bus *bus) +{ + struct device_node *msi_node; + + msi_node = of_parse_phandle(bus->dev.of_node, + "msi-parent", 0); + if (!msi_node) + return -ENODEV; + + bus->msi = of_pci_find_msi_chip_by_node(msi_node); + if (!bus->msi) + return -ENODEV; + + bus->msi->dev = &bus->dev; + return 0; +} + static int xgene_pcie_probe_bridge(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; @@ -483,6 +533,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) port->node = of_node_get(pdev->dev.of_node); port->dev = &pdev->dev; + port->version = XGENE_PCIE_IP_VER_UNKN; + if (of_device_is_compatible(port->node, "apm,xgene-pcie")) + port->version = XGENE_PCIE_IP_VER_1; + ret = xgene_pcie_map_reg(port, pdev); if (ret) return ret; @@ -504,6 +558,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (!bus) return -ENOMEM; + if (IS_ENABLED(CONFIG_PCI_MSI)) + if (xgene_pcie_msi_enable(bus)) + dev_info(port->dev, "failed to enable MSI\n"); + pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); pci_bus_add_devices(bus); diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 2e9f84fdd9ce..69486be7181e 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -31,6 +31,7 @@ #define PORT_LINK_MODE_1_LANES (0x1 << 16) #define PORT_LINK_MODE_2_LANES (0x3 << 16) #define PORT_LINK_MODE_4_LANES (0x7 << 16) +#define PORT_LINK_MODE_8_LANES (0xf << 16) #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) @@ -38,6 +39,7 @@ #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) +#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) #define PCIE_MSI_ADDR_LO 0x820 #define PCIE_MSI_ADDR_HI 0x824 @@ -150,6 +152,21 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, return ret; } +static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, + int type, u64 cpu_addr, u64 pci_addr, u32 size) +{ + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, + PCIE_ATU_VIEWPORT); + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), + PCIE_ATU_LIMIT); + dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET); + dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +} + static struct irq_chip dw_msi_irq_chip = { .name = "PCI-MSI", .irq_enable = pci_msi_unmask_irq, @@ -493,6 +510,11 @@ int dw_pcie_host_init(struct pcie_port *pp) if (pp->ops->host_init) pp->ops->host_init(pp); + if (!pp->ops->rd_other_conf) + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_MEM, 
pp->mem_mod_base, + pp->mem_bus_addr, pp->mem_size); + dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); /* program correct class for RC */ @@ -515,115 +537,73 @@ int dw_pcie_host_init(struct pcie_port *pp) return 0; } -static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) -{ - /* Program viewport 0 : OUTBOUND : CFG0 */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} - -static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) -{ - /* Program viewport 1 : OUTBOUND : CFG1 */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} - -static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) -{ - /* Program viewport 0 : OUTBOUND : MEM */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr), - PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} - -static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) -{ - /* Program viewport 1 : OUTBOUND : IO */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr), - PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} - static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { - int ret = PCIBIOS_SUCCESSFUL; - u32 address, busdev; + int ret, type; + u32 address, busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); address = where & ~0x3; if (bus->parent->number == pp->root_bus_nr) { - dw_pcie_prog_viewport_cfg0(pp, busdev); - ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, - val); - dw_pcie_prog_viewport_mem_outbound(pp); + type = 
PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_mod_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; } else { - dw_pcie_prog_viewport_cfg1(pp, busdev); - ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, - val); - dw_pcie_prog_viewport_io_outbound(pp); + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_mod_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; } + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_cfg_read(va_cfg_base + address, where, size, val); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_IO, pp->io_mod_base, + pp->io_bus_addr, pp->io_size); + return ret; } static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { - int ret = PCIBIOS_SUCCESSFUL; - u32 address, busdev; + int ret, type; + u32 address, busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); address = where & ~0x3; if (bus->parent->number == pp->root_bus_nr) { - dw_pcie_prog_viewport_cfg0(pp, busdev); - ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, - val); - dw_pcie_prog_viewport_mem_outbound(pp); + type = PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_mod_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; } else { - dw_pcie_prog_viewport_cfg1(pp, busdev); - ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, - val); - dw_pcie_prog_viewport_io_outbound(pp); + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_mod_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; } + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_cfg_write(va_cfg_base + address, where, size, val); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_IO, pp->io_mod_base, + pp->io_bus_addr, pp->io_size); + return ret; } @@ -728,13 +708,11 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) struct pcie_port *pp = sys_to_pcie(sys); pp->root_bus_nr = sys->busnr; - bus = pci_create_root_bus(pp->dev, sys->busnr, + bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops, sys, &sys->resources); if (!bus) return NULL; - pci_scan_child_bus(bus); - if (bus && pp->ops->scan_bus) pp->ops->scan_bus(pp); @@ -778,6 +756,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) case 4: val |= PORT_LINK_MODE_4_LANES; break; + case 8: + val |= PORT_LINK_MODE_8_LANES; + break; } dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); @@ -794,6 +775,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) case 4: val |= PORT_LOGIC_LINK_WIDTH_4_LANES; break; + case 8: + val |= PORT_LOGIC_LINK_WIDTH_8_LANES; + break; } dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL); diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c new file mode 100644 index 000000000000..96a7d999fd5e --- /dev/null +++ b/drivers/pci/host/pcie-iproc-bcma.c @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. 
+ * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/phy/phy.h> +#include <linux/bcma/bcma.h> +#include <linux/ioport.h> + +#include "pcie-iproc.h" + + +/* NS: CLASS field is R/O, and set to wrong 0x200 value */ +static void bcma_pcie2_fixup_class(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_PCI << 8; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); + +static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_sys_data *sys = dev->sysdata; + struct iproc_pcie *pcie = sys->private_data; + struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); + + return bcma_core_irq(bdev, 5); +} + +static int iproc_pcie_bcma_probe(struct bcma_device *bdev) +{ + struct iproc_pcie *pcie; + LIST_HEAD(res); + struct resource res_mem; + int ret; + + pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = &bdev->dev; + bcma_set_drvdata(bdev, pcie); + + pcie->base = bdev->io_addr; + + res_mem.start = bdev->addr_s[0]; + res_mem.end = bdev->addr_s[0] + SZ_128M - 1; + res_mem.name = "PCIe MEM space"; + res_mem.flags = IORESOURCE_MEM; + pci_add_resource(&res, &res_mem); + + pcie->map_irq = iproc_pcie_bcma_map_irq; + + ret = iproc_pcie_setup(pcie, &res); + if (ret) + dev_err(pcie->dev, "PCIe controller setup failed\n"); + + pci_free_resource_list(&res); + + return ret; +} + +static void iproc_pcie_bcma_remove(struct bcma_device *bdev) +{ + struct iproc_pcie *pcie = bcma_get_drvdata(bdev); + + iproc_pcie_remove(pcie); +} + +static const struct bcma_device_id iproc_pcie_bcma_table[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), + {}, +}; +MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); + +static struct bcma_driver iproc_pcie_bcma_driver = { + .name = KBUILD_MODNAME, + .id_table = iproc_pcie_bcma_table, + .probe = iproc_pcie_bcma_probe, + .remove = iproc_pcie_bcma_remove, +}; + +static int __init iproc_pcie_bcma_init(void) +{ + return bcma_driver_register(&iproc_pcie_bcma_driver); +} +module_init(iproc_pcie_bcma_init); + +static void __exit iproc_pcie_bcma_exit(void) +{ + bcma_driver_unregister(&iproc_pcie_bcma_driver); +} +module_exit(iproc_pcie_bcma_exit); + +MODULE_AUTHOR("Hauke Mehrtens"); +MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index afad6c21fcfa..9aedc8eb2c6e 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -69,15 +69,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) return ret; } - pcie->resources = &res; + pcie->map_irq = of_irq_parse_and_map_pci; - ret = iproc_pcie_setup(pcie); - if (ret) { + ret = iproc_pcie_setup(pcie, &res); + if (ret) dev_err(pcie->dev, "PCIe controller setup failed\n"); - return ret; - } - return 0; + pci_free_resource_list(&res); + + return ret; } static int iproc_pcie_pltfm_remove(struct platform_device *pdev) diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c 
index 329e1b54528b..d77481ea553e 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -183,7 +183,7 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie) writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN); } -int iproc_pcie_setup(struct iproc_pcie *pcie) +int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) { int ret; struct pci_bus *bus; @@ -211,7 +211,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie) pcie->sysdata.private_data = pcie; bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, - &pcie->sysdata, pcie->resources); + &pcie->sysdata, res); if (!bus) { dev_err(pcie->dev, "unable to create PCI root bus\n"); ret = -ENOMEM; @@ -229,7 +229,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie) pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); pci_bus_add_devices(bus); return 0; diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h index e28075ed1856..ba0a108309cc 100644 --- a/drivers/pci/host/pcie-iproc.h +++ b/drivers/pci/host/pcie-iproc.h @@ -29,14 +29,14 @@ struct iproc_pcie { struct device *dev; void __iomem *base; - struct list_head *resources; struct pci_sys_data sysdata; struct pci_bus *root_bus; struct phy *phy; int irqs[IPROC_PCIE_MAX_NUM_IRQS]; + int (*map_irq)(const struct pci_dev *, u8, u8); }; -int iproc_pcie_setup(struct iproc_pcie *pcie); +int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); int iproc_pcie_remove(struct iproc_pcie *pcie); #endif /* _PCIE_IPROC_H */ diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c index 020d78890719..dfec4281bd50 100644 --- a/drivers/pci/host/pcie-spear13xx.c +++ b/drivers/pci/host/pcie-spear13xx.c @@ -146,10 +146,10 @@ struct pcie_app_reg { static int spear13xx_pcie_establish_link(struct pcie_port *pp) { u32 val; - int count = 0; struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; u32 exp_cap_off = EXP_CAP_ID_OFFSET; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "link already up\n"); @@ -201,17 +201,16 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp) &app_reg->app_ctrl_0); /* check if the link is up or not */ - while (!dw_pcie_link_up(pp)) { - mdelay(100); - count++; - if (count == 10) { - dev_err(pp->dev, "link Fail\n"); - return -EINVAL; + for (retries = 0; retries < 10; retries++) { + if (dw_pcie_link_up(pp)) { + dev_info(pp->dev, "link up\n"); + return 0; } + mdelay(100); } - dev_info(pp->dev, "link up\n"); - return 0; + dev_err(pp->dev, "link Fail\n"); + return -EINVAL; } static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index c3e7dfcf9ff5..f66be868ad21 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -185,27 +185,6 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev) return default_restore_msi_irqs(dev); } -static void msi_set_enable(struct pci_dev *dev, int enable) -{ - u16 control; - - pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); - control &= ~PCI_MSI_FLAGS_ENABLE; - if (enable) - control |= PCI_MSI_FLAGS_ENABLE; - pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); -} - -static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) -{ - u16 ctrl; - - pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); - ctrl &= ~clear; - ctrl 
|= set; - pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); -} - static inline __attribute_const__ u32 msi_mask(unsigned x) { /* Don't shift by >= width of type */ @@ -452,7 +431,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) entry = irq_get_msi_desc(dev->irq); pci_intx_for_msi(dev, 0); - msi_set_enable(dev, 0); + pci_msi_set_enable(dev, 0); arch_restore_msi_irqs(dev); pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); @@ -473,14 +452,14 @@ static void __pci_restore_msix_state(struct pci_dev *dev) /* route the table */ pci_intx_for_msi(dev, 0); - msix_clear_and_set_ctrl(dev, 0, + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); arch_restore_msi_irqs(dev); list_for_each_entry(entry, &dev->msi_list, list) msix_mask_irq(entry, entry->masked); - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); } void pci_restore_msi_state(struct pci_dev *dev) @@ -647,7 +626,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) int ret; unsigned mask; - msi_set_enable(dev, 0); /* Disable MSI during set up */ + pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ entry = msi_setup_entry(dev, nvec); if (!entry) @@ -683,7 +662,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) /* Set MSI enabled bits */ pci_intx_for_msi(dev, 0); - msi_set_enable(dev, 1); + pci_msi_set_enable(dev, 1); dev->msi_enabled = 1; dev->irq = entry->irq; @@ -775,7 +754,7 @@ static int msix_capability_init(struct pci_dev *dev, void __iomem *base; /* Ensure MSI-X is disabled while it is set up */ - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); /* Request & Map MSI-X table region */ @@ -801,7 +780,7 @@ static int msix_capability_init(struct pci_dev *dev, * MSI-X registers. We need to mask all the vectors to prevent * interrupts coming in before they're fully set up. */ - msix_clear_and_set_ctrl(dev, 0, + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); msix_program_entries(dev, entries); @@ -814,7 +793,7 @@ static int msix_capability_init(struct pci_dev *dev, pci_intx_for_msi(dev, 0); dev->msix_enabled = 1; - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); return 0; @@ -919,7 +898,7 @@ void pci_msi_shutdown(struct pci_dev *dev) BUG_ON(list_empty(&dev->msi_list)); desc = list_first_entry(&dev->msi_list, struct msi_desc, list); - msi_set_enable(dev, 0); + pci_msi_set_enable(dev, 0); pci_intx_for_msi(dev, 1); dev->msi_enabled = 0; @@ -1027,7 +1006,7 @@ void pci_msix_shutdown(struct pci_dev *dev) __pci_msix_desc_mask_irq(entry, 1); } - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_intx_for_msi(dev, 1); dev->msix_enabled = 0; } @@ -1062,18 +1041,6 @@ EXPORT_SYMBOL(pci_msi_enabled); void pci_msi_init_pci_dev(struct pci_dev *dev) { INIT_LIST_HEAD(&dev->msi_list); - - /* Disable the msi hardware to avoid screaming interrupts - * during boot. This is the power on reset default so - * usually this should be a noop. 
- */ - dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); - if (dev->msi_cap) - msi_set_enable(dev, 0); - - dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); - if (dev->msix_cap) - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); } /** diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c44393f26fd3..0008c950452c 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3101,39 +3101,6 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); -/** - * pci_msi_off - disables any MSI or MSI-X capabilities - * @dev: the PCI device to operate on - * - * If you want to use MSI, see pci_enable_msi() and friends. - * This is a lower-level primitive that allows us to disable - * MSI operation at the device level. - */ -void pci_msi_off(struct pci_dev *dev) -{ - int pos; - u16 control; - - /* - * This looks like it could go in msi.c, but we need it even when - * CONFIG_PCI_MSI=n. For the same reason, we can't use - * dev->msi_cap or dev->msix_cap here. - */ - pos = pci_find_capability(dev, PCI_CAP_ID_MSI); - if (pos) { - pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); - control &= ~PCI_MSI_FLAGS_ENABLE; - pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); - } - pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); - control &= ~PCI_MSIX_FLAGS_ENABLE; - pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); - } -} -EXPORT_SYMBOL_GPL(pci_msi_off); - int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) { return dma_set_max_seg_size(&dev->dev, size); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 9bd762c237ab..4ff0ff1c4088 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -146,6 +146,27 @@ static inline void pci_no_msi(void) { } static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } #endif +static inline void pci_msi_set_enable(struct pci_dev *dev, int enable) +{ + u16 control; + + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); + control &= ~PCI_MSI_FLAGS_ENABLE; + if (enable) + control |= PCI_MSI_FLAGS_ENABLE; + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); +} + +static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) +{ + u16 ctrl; + + pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + ctrl &= ~clear; + ctrl |= set; + pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); +} + void pci_realloc_get_opt(char *); static inline int pci_no_d1d2(struct pci_dev *dev) @@ -216,17 +237,6 @@ void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *fail_head); bool pci_bus_clip_resource(struct pci_dev *dev, int idx); -/** - * pci_ari_enabled - query ARI forwarding status - * @bus: the PCI bus - * - * Returns 1 if ARI forwarding is enabled, or 0 if not enabled; - */ -static inline int pci_ari_enabled(struct pci_bus *bus) -{ - return bus->self && bus->self->ari_enabled; -} - void pci_reassigndev_resource_alignment(struct pci_dev *dev); void pci_disable_bridge_window(struct pci_dev *dev); diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 5653ea94547f..9803e3d039fe 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -425,8 +425,7 @@ static pci_ers_result_t reset_link(struct pci_dev *dev) if (driver && driver->reset_link) { status = driver->reset_link(udev); - } else if (pci_pcie_type(udev) == 
PCI_EXP_TYPE_DOWNSTREAM || - pci_pcie_type(udev) == PCI_EXP_TYPE_ROOT_PORT) { + } else if (udev->has_secondary_link) { status = default_reset_link(udev); } else { dev_printk(KERN_DEBUG, &dev->dev, diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 7d4fcdc512aa..317e3558a35e 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -127,15 +127,12 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) { struct pci_dev *child; struct pci_bus *linkbus = link->pdev->subordinate; + u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0; - list_for_each_entry(child, &linkbus->devices, bus_list) { - if (enable) - pcie_capability_set_word(child, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_CLKREQ_EN); - else - pcie_capability_clear_word(child, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_CLKREQ_EN); - } + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN, + val); link->clkpm_enabled = !!enable; } @@ -525,7 +522,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) INIT_LIST_HEAD(&link->children); INIT_LIST_HEAD(&link->link); link->pdev = pdev; - if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) { + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) { struct pcie_link_state *parent; parent = pdev->bus->parent->self->link_state; if (!parent) { @@ -559,10 +556,15 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) if (!aspm_support_enabled) return; - if (!pci_is_pcie(pdev) || pdev->link_state) + if (pdev->link_state) return; - if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && - pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) + + /* + * We allocate pcie_link_state for the component on the upstream + * end of a Link, so there's nothing to do unless this device has a + * Link on its secondary side. 
+ */ + if (!pdev->has_secondary_link) return; /* VIA has a strange chipset, root port is under a bridge */ @@ -675,10 +677,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; - if (aspm_disabled || !pci_is_pcie(pdev) || !link) - return; - if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && - (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) + if (aspm_disabled || !link) return; /* * Devices changed PM state, we should recheck if latency @@ -696,16 +695,12 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; - if (aspm_disabled || !pci_is_pcie(pdev) || !link) + if (aspm_disabled || !link) return; if (aspm_policy != POLICY_POWERSAVE) return; - if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && - (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) - return; - down_read(&pci_bus_sem); mutex_lock(&aspm_lock); pcie_config_aspm_path(link); @@ -714,8 +709,7 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev) up_read(&pci_bus_sem); } -static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, - bool force) +static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) { struct pci_dev *parent = pdev->bus->self; struct pcie_link_state *link; @@ -723,8 +717,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, if (!pci_is_pcie(pdev)) return; - if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) + if (pdev->has_secondary_link) parent = pdev; if (!parent || !parent->link_state) return; @@ -737,7 +730,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, * a similar mechanism using "PciASPMOptOut", which is also * ignored in this situation. 
*/ - if (aspm_disabled && !force) { + if (aspm_disabled) { dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n"); return; } @@ -763,7 +756,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, void pci_disable_link_state_locked(struct pci_dev *pdev, int state) { - __pci_disable_link_state(pdev, state, false, false); + __pci_disable_link_state(pdev, state, false); } EXPORT_SYMBOL(pci_disable_link_state_locked); @@ -778,7 +771,7 @@ EXPORT_SYMBOL(pci_disable_link_state_locked); */ void pci_disable_link_state(struct pci_dev *pdev, int state) { - __pci_disable_link_state(pdev, state, true, false); + __pci_disable_link_state(pdev, state, true); } EXPORT_SYMBOL(pci_disable_link_state); @@ -907,9 +900,7 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { struct pcie_link_state *link_state = pdev->link_state; - if (!pci_is_pcie(pdev) || - (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && - pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) + if (!link_state) return; if (link_state->aspm_support) @@ -924,9 +915,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { struct pcie_link_state *link_state = pdev->link_state; - if (!pci_is_pcie(pdev) || - (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && - pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) + if (!link_state) return; if (link_state->aspm_support) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 6675a7a1b9fc..cefd636681b6 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -254,8 +254,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, } if (res->flags & IORESOURCE_MEM_64) { - if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) && - sz64 > 0x100000000ULL) { + if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8) + && sz64 > 0x100000000ULL) { res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; res->start = 0; res->end = 0; @@ -264,7 +264,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, goto out; } - if ((sizeof(dma_addr_t) < 8) && l) { + if ((sizeof(pci_bus_addr_t) < 8) && l) { /* Above 32-bit boundary; try to reallocate */ res->flags |= IORESOURCE_UNSET; res->start = 0; @@ -399,7 +399,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; u64 base64, limit64; - dma_addr_t base, limit; + pci_bus_addr_t base, limit; struct pci_bus_region region; struct resource *res; @@ -426,8 +426,8 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) } } - base = (dma_addr_t) base64; - limit = (dma_addr_t) limit64; + base = (pci_bus_addr_t) base64; + limit = (pci_bus_addr_t) limit64; if (base != base64) { dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", @@ -973,6 +973,8 @@ void set_pcie_port_type(struct pci_dev *pdev) { int pos; u16 reg16; + int type; + struct pci_dev *parent; pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!pos) @@ -982,6 +984,22 @@ void set_pcie_port_type(struct pci_dev *pdev) pdev->pcie_flags_reg = reg16; pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; + + /* + * A Root Port is always the upstream end of a Link. No PCIe + * component has two Links. Two Links are connected by a Switch + * that has a Port on each Link and internal logic to connect the + * two Ports. 
+ */ + type = pci_pcie_type(pdev); + if (type == PCI_EXP_TYPE_ROOT_PORT) + pdev->has_secondary_link = 1; + else if (type == PCI_EXP_TYPE_UPSTREAM || + type == PCI_EXP_TYPE_DOWNSTREAM) { + parent = pci_upstream_bridge(pdev); + if (!parent->has_secondary_link) + pdev->has_secondary_link = 1; + } } void set_pcie_hotplug_bridge(struct pci_dev *pdev) @@ -1085,6 +1103,22 @@ int pci_cfg_space_size(struct pci_dev *dev) #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) +static void pci_msi_setup_pci_dev(struct pci_dev *dev) +{ + /* + * Disable the MSI hardware to avoid screaming interrupts + * during boot. This is the power on reset default so + * usually this should be a noop. + */ + dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); + if (dev->msi_cap) + pci_msi_set_enable(dev, 0); + + dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); + if (dev->msix_cap) + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); +} + /** * pci_setup_device - fill in class and map information of a device * @dev: the device structure to fill @@ -1140,6 +1174,8 @@ int pci_setup_device(struct pci_dev *dev) /* "Unknown power state" */ dev->current_state = PCI_UNKNOWN; + pci_msi_setup_pci_dev(dev); + /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); /* device class may be changed after fixup */ @@ -1611,7 +1647,7 @@ static int only_one_child(struct pci_bus *bus) return 0; if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT) return 1; - if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM && + if (parent->has_secondary_link && !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) return 1; return 0; @@ -2094,25 +2130,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, } EXPORT_SYMBOL(pci_scan_root_bus); -/* Deprecated; use pci_scan_root_bus() instead */ -struct pci_bus *pci_scan_bus_parented(struct device *parent, - int bus, struct pci_ops *ops, void *sysdata) -{ - LIST_HEAD(resources); - struct pci_bus *b; - - pci_add_resource(&resources, &ioport_resource); - pci_add_resource(&resources, &iomem_resource); - pci_add_resource(&resources, &busn_resource); - b = pci_create_root_bus(parent, bus, ops, sysdata, &resources); - if (b) - pci_scan_child_bus(b); - else - pci_free_resource_list(&resources); - return b; -} -EXPORT_SYMBOL(pci_scan_bus_parented); - struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata) { diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c6dc1dfd25d5..c77dfe5aaed6 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1600,7 +1600,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a static void quirk_pcie_mch(struct pci_dev *pdev) { - pci_msi_off(pdev); pdev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); @@ -1614,7 +1613,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir */ static void quirk_pcie_pxh(struct pci_dev *dev) { - pci_msi_off(dev); dev->no_msi = 1; dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n"); } @@ -3572,6 +3570,8 @@ static void quirk_dma_func1_alias(struct pci_dev *dev) * SKUs this function is not present, making this a ghost requester. 
* https://bugzilla.kernel.org/show_bug.cgi?id=42679 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, + quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ @@ -3740,6 +3740,8 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = { /* Wellsburg (X99) PCH */ 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17, 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e, + /* Lynx Point (9 series) PCH */ + 0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e, }; static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c index 7e1304d2e389..dfbab61a1b47 100644 --- a/drivers/pci/vc.c +++ b/drivers/pci/vc.c @@ -108,8 +108,7 @@ static void pci_vc_enable(struct pci_dev *dev, int pos, int res) struct pci_dev *link = NULL; /* Enable VCs from the downstream device */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) + if (!dev->has_secondary_link) return; ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 7cfd2db02deb..240f38872085 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -446,9 +446,15 @@ static int pcifront_scan_root(struct pcifront_device *pdev, unsigned int domain, unsigned int bus) { struct pci_bus *b; + LIST_HEAD(resources); struct pcifront_sd *sd = NULL; struct pci_bus_entry *bus_entry = NULL; int err = 0; + static struct resource busn_res = { + .start = 0, + .end = 255, + .flags = IORESOURCE_BUS, + }; #ifndef CONFIG_PCI_DOMAINS if (domain != 0) { @@ -470,17 +476,21 @@ static int pcifront_scan_root(struct pcifront_device *pdev, err = -ENOMEM; goto err_out; } + pci_add_resource(&resources, &ioport_resource); + pci_add_resource(&resources, &iomem_resource); + pci_add_resource(&resources, &busn_res); pcifront_init_sd(sd, domain, bus, pdev); pci_lock_rescan_remove(); - b = pci_scan_bus_parented(&pdev->xdev->dev, bus, - &pcifront_bus_ops, sd); + b = pci_scan_root_bus(&pdev->xdev->dev, bus, + &pcifront_bus_ops, sd, &resources); if (!b) { dev_err(&pdev->xdev->dev, "Error creating PCI Frontend Bus!\n"); err = -ENOMEM; pci_unlock_rescan_remove(); + pci_free_resource_list(&resources); goto err_out; } @@ -488,7 +498,7 @@ static int pcifront_scan_root(struct pcifront_device *pdev, list_add(&bus_entry->list, &pdev->root_buses); - /* pci_scan_bus_parented skips devices which do not have a have + /* pci_scan_root_bus skips devices which do not have a * devfn==0. The pcifront_scan_bus enumerates all devfn. */ err = pcifront_scan_bus(pdev, domain, bus, b); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index e894eb278d83..806bb2c2e382 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -501,9 +501,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, INIT_LIST_HEAD(&vp_dev->virtqueues); spin_lock_init(&vp_dev->lock); - /* Disable MSI/MSIX to bring device to a known good state. 
*/ - pci_msi_off(pci_dev); - /* enable the device */ rc = pci_enable_device(pci_dev); if (rc) diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h index e80a0495e5b0..f24bc519bf31 100644 --- a/include/asm-generic/pci.h +++ b/include/asm-generic/pci.h @@ -6,19 +6,6 @@ #ifndef _ASM_GENERIC_PCI_H #define _ASM_GENERIC_PCI_H -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { diff --git a/include/linux/pci.h b/include/linux/pci.h index ef45ffe9ca88..8a0321a8fb59 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -355,6 +355,7 @@ struct pci_dev { unsigned int broken_intx_masking:1; unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ unsigned int irq_managed:1; + unsigned int has_secondary_link:1; pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -577,9 +578,15 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 val); +#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT +typedef u64 pci_bus_addr_t; +#else +typedef u32 pci_bus_addr_t; +#endif + struct pci_bus_region { - dma_addr_t start; - dma_addr_t end; + pci_bus_addr_t start; + pci_bus_addr_t end; }; struct pci_dynids { @@ -773,8 +780,6 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res, void pcibios_scan_specific_bus(int busn); struct pci_bus *pci_find_bus(int domain, int busnr); void pci_bus_add_devices(const struct pci_bus *bus); -struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata); struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata); struct pci_bus *pci_create_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, @@ -974,7 +979,6 @@ void pci_intx(struct pci_dev *dev, int enable); bool pci_intx_mask_supported(struct pci_dev *dev); bool pci_check_and_mask_intx(struct pci_dev *dev); bool pci_check_and_unmask_intx(struct pci_dev *dev); -void pci_msi_off(struct pci_dev *dev); int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); @@ -1124,7 +1128,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); -static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) +static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) { struct pci_bus_region region; @@ -1193,15 +1197,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) -enum pci_dma_burst_strategy { - PCI_DMA_BURST_INFINITY, /* make bursts as large as possible, - strategy_parameter is N/A */ - PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter - byte boundaries */ - PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of - strategy_parameter byte boundaries */ -}; - struct msix_entry { u32 vector; /* kernel 
uses to write allocated vector */ u16 entry; /* driver uses to specify entry, OS writes */ @@ -1426,8 +1421,6 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } -#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) - static inline void pci_block_cfg_access(struct pci_dev *dev) { } static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) { return 0; } @@ -1901,4 +1894,15 @@ static inline bool pci_is_dev_assigned(struct pci_dev *pdev) { return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; } + +/** + * pci_ari_enabled - query ARI forwarding status + * @bus: the PCI bus + * + * Returns true if ARI forwarding is enabled. + */ +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} #endif /* LINUX_PCI_H */ diff --git a/include/linux/types.h b/include/linux/types.h index 59698be03490..8715287c3b1f 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -139,12 +139,20 @@ typedef unsigned long blkcnt_t; */ #define pgoff_t unsigned long -/* A dma_addr_t can hold any valid DMA or bus address for the platform */ +/* + * A dma_addr_t can hold any valid DMA address, i.e., any address returned + * by the DMA API. + * + * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32 + * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits, + * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses, + * so they don't care about the size of the actual bus addresses. + */ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT typedef u64 dma_addr_t; #else typedef u32 dma_addr_t; -#endif /* dma_addr_t */ +#endif typedef unsigned __bitwise__ gfp_t; typedef unsigned __bitwise__ fmode_t; |
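The last two hunks above (include/linux/pci.h and include/linux/types.h) split bus addresses (pci_bus_addr_t) from DMA addresses (dma_addr_t) and let their widths be configured independently. A minimal sketch of how a driver sees the two types follows; it is illustrative only and not part of the patch. The helper name example_show_addr_types and the 4 KB scratch buffer are assumptions; pci_bus_address(), dma_map_single(), dma_mapping_error() and dma_unmap_single() are existing kernel interfaces.

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: contrast a BAR's bus address with a DMA address. */
static int example_show_addr_types(struct pci_dev *pdev)
{
	pci_bus_addr_t bar0_bus;	/* address of BAR 0 as the device sees it */
	dma_addr_t dma;			/* address handed out by the DMA API */
	void *buf;

	bar0_bus = pci_bus_address(pdev, 0);

	buf = kmalloc(SZ_4K, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * A streaming mapping returns a dma_addr_t; it only needs to be
	 * wide enough for addresses the DMA API actually hands out, which
	 * may be narrower than the bus address space (e.g. a 32-bit IOMMU
	 * window on a system with 64-bit BARs).
	 */
	dma = dma_map_single(&pdev->dev, buf, SZ_4K, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	dev_info(&pdev->dev, "BAR0 bus address %#llx, DMA address %#llx\n",
		 (unsigned long long)bar0_bus, (unsigned long long)dma);

	dma_unmap_single(&pdev->dev, dma, SZ_4K, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}

The point of the new typedef is that the two widths are selected by separate options (CONFIG_PCI_BUS_ADDR_T_64BIT for pci_bus_addr_t, CONFIG_ARCH_DMA_ADDR_T_64BIT for dma_addr_t), as the types.h comment in the hunk above explains.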