248 files changed, 9683 insertions, 2128 deletions
diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.txt b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
index 8a0097a029d3..0ba81f79266f 100644
--- a/Documentation/devicetree/bindings/dma/arm-pl08x.txt
+++ b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
@@ -3,6 +3,11 @@ Required properties:
 - compatible: "arm,pl080", "arm,primecell";
 	"arm,pl081", "arm,primecell";
+	"faraday,ftdmac020", "arm,primecell"
+- arm,primecell-periphid: on the FTDMAC020 the primecell ID is not hard-coded
+  in the hardware and must be specified here as <0x0003b080>. This number
+  follows the PrimeCell standard numbering using the JEP106 vendor code 0x38
+  for Faraday Technology.
 - reg: Address range of the PL08x registers
 - interrupt: The PL08x interrupt number
 - clocks: The clock running the IP core clock
@@ -20,8 +25,8 @@ Optional properties:
 - dma-requests: contains the total number of DMA requests supported by the DMAC
 - memcpy-burst-size: the size of the bursts for memcpy: 1, 4, 8, 16, 32,
   64, 128 or 256 bytes are legal values
-- memcpy-bus-width: the bus width used for memcpy: 8, 16 or 32 are legal
-  values
+- memcpy-bus-width: the bus width used for memcpy in bits: 8, 16 or 32 are legal
+  values, the Faraday FTDMAC020 can also accept 64 bits

 Clients
 Required properties:
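As context for the periphid value above: a PrimeCell peripheral ID packs several fields into one 32-bit word, and the low 12 bits of <0x0003b080> carry the part number 0x080, which is why the Faraday part can be claimed by the PL08x driver at all. A minimal user-space C sketch of the conventional field split (the helper is ours for illustration, not kernel code; the designer bits are derived from the JEP106 identity as the binding text describes):

```c
#include <stdio.h>
#include <stdint.h>

/* Conventional AMBA PrimeCell ID layout: bits [11:0] part number,
 * bits [19:12] designer identity, [23:20] revision, [31:24] config.
 */
static void decode_periphid(uint32_t id)
{
	printf("part 0x%03x, designer 0x%02x, rev %u, cfg 0x%02x\n",
	       id & 0xfff, (id >> 12) & 0xff,
	       (id >> 20) & 0xf, (id >> 24) & 0xff);
}

int main(void)
{
	decode_periphid(0x0003b080);	/* value from the binding above */
	return 0;
}
```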
diff --git a/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt
new file mode 100644
index 000000000000..092913a28457
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt
@@ -0,0 +1,29 @@
+* Broadcom SBA RAID engine
+
+Required properties:
+- compatible: Should be one of the following
+	"brcm,iproc-sba"
+	"brcm,iproc-sba-v2"
+  The "brcm,iproc-sba" has support for only 6 PQ coefficients
+  The "brcm,iproc-sba-v2" has support for only 30 PQ coefficients
+- mboxes: List of phandle and mailbox channel specifiers
+
+Example:
+
+raid_mbox: mbox@67400000 {
+	...
+	#mbox-cells = <3>;
+	...
+};
+
+raid0 {
+	compatible = "brcm,iproc-sba-v2";
+	mboxes = <&raid_mbox 0 0x1 0xffff>,
+		 <&raid_mbox 1 0x1 0xffff>,
+		 <&raid_mbox 2 0x1 0xffff>,
+		 <&raid_mbox 3 0x1 0xffff>,
+		 <&raid_mbox 4 0x1 0xffff>,
+		 <&raid_mbox 5 0x1 0xffff>,
+		 <&raid_mbox 6 0x1 0xffff>,
+		 <&raid_mbox 7 0x1 0xffff>;
+};
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 3316a9c2e638..79a204d50234 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -30,8 +30,9 @@ Required Properties:
 - interrupts: interrupt specifiers for the DMAC, one for each entry in
   interrupt-names.
-- interrupt-names: one entry per channel, named "ch%u", where %u is the
-  channel number ranging from zero to the number of channels minus one.
+- interrupt-names: one entry for the error interrupt, named "error", plus one
+  entry per channel, named "ch%u", where %u is the channel number ranging from
+  zero to the number of channels minus one.

 - clock-names: "fck" for the functional clock
 - clocks: a list of phandle + clock-specifier pairs, one for each entry
diff --git a/Documentation/devicetree/bindings/dma/shdma.txt b/Documentation/devicetree/bindings/dma/shdma.txt
index 2a3f3b8946b9..a91920a49433 100644
--- a/Documentation/devicetree/bindings/dma/shdma.txt
+++ b/Documentation/devicetree/bindings/dma/shdma.txt
@@ -1,6 +1,6 @@
 * SHDMA Device Tree bindings

-Sh-/r-mobile and r-car systems often have multiple identical DMA controller
+Sh-/r-mobile and R-Car systems often have multiple identical DMA controller
 instances, capable of serving any of a common set of DMA slave devices, using
 the same configuration. To describe this topology we require all compatible
 SHDMA DT nodes to be placed under a DMA multiplexer node. All such compatible
diff --git a/Documentation/devicetree/bindings/input/dlink,dir685-touchkeys.txt b/Documentation/devicetree/bindings/input/dlink,dir685-touchkeys.txt
new file mode 100644
index 000000000000..10dec1c57abf
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/dlink,dir685-touchkeys.txt
@@ -0,0 +1,21 @@
+* D-Link DIR-685 Touchkeys
+
+This is an I2C one-off touchkey controller based on the Cypress Semiconductor
+CY8C214 MCU with some firmware in its internal 8KB flash. The circuit
+board inside the router is named E119921.
+
+The touchkey device node should be placed inside an I2C bus node.
+
+Required properties:
+- compatible: must be "dlink,dir685-touchkeys"
+- reg: the I2C address of the touchkeys
+- interrupts: reference to the interrupt number
+
+Example:
+
+touchkeys@26 {
+	compatible = "dlink,dir685-touchkeys";
+	reg = <0x26>;
+	interrupt-parent = <&gpio0>;
+	interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt b/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
new file mode 100644
index 000000000000..9683595cd0f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
@@ -0,0 +1,43 @@
+* ST-Microelectronics FingerTip touchscreen controller
+
+The ST-Microelectronics FingerTip device provides basic touchscreen
+functionality. Along with it the user can enable the touchkey, which can work
+as basic HOME and BACK keys for phones.
+
+The driver also supports hovering as an absolute single touch event with x, y, z
+coordinates.
+
+Required properties:
+- compatible : must be "st,stmfts"
+- reg : I2C slave address, (e.g. 0x49)
+- interrupt-parent : the phandle to the interrupt controller which provides
+  the interrupt
+- interrupts : interrupt specification
+- avdd-supply : analog power supply
+- vdd-supply : power supply
+- touchscreen-size-x : see touchscreen.txt
+- touchscreen-size-y : see touchscreen.txt
+
+Optional properties:
+- touch-key-connected : specifies whether the touchkey feature is connected
+- ledvdd-supply : power supply to the touch key leds
+
+Example:
+
+i2c@00000000 {
+
+	/* ... */
+
+	touchscreen@49 {
+		compatible = "st,stmfts";
+		reg = <0x49>;
+		interrupt-parent = <&gpa1>;
+		interrupts = <1 IRQ_TYPE_NONE>;
+		touchscreen-size-x = <1599>;
+		touchscreen-size-y = <2559>;
+		touch-key-connected;
+		avdd-supply = <&ldo30_reg>;
+		vdd-supply = <&ldo31_reg>;
+		ledvdd-supply = <&ldo33_reg>;
+	};
+};
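Both input bindings above describe ordinary I2C clients, so the driver side is a stock I2C driver that matches on the compatible string. A minimal sketch of that shape, using the DIR-685 compatible from the binding (this is illustrative, not the actual kernel driver; the driver name string is an assumption):

```c
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

static int dir685_tk_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	/* client->addr and client->irq come from reg/interrupts above */
	dev_info(&client->dev, "touchkeys at 0x%02x, irq %d\n",
		 client->addr, client->irq);
	return 0;
}

static const struct of_device_id dir685_tk_of_match[] = {
	{ .compatible = "dlink,dir685-touchkeys" },
	{ }
};
MODULE_DEVICE_TABLE(of, dir685_tk_of_match);

static struct i2c_driver dir685_tk_driver = {
	.driver = {
		.name = "dlink-dir685-touchkeys",	/* assumed name */
		.of_match_table = dir685_tk_of_match,
	},
	.probe = dir685_tk_probe,
};
module_i2c_driver(dir685_tk_driver);
MODULE_LICENSE("GPL");
```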
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
index 35d4a979bb7b..89a84f8aa621 100644
--- a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
+++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
@@ -30,6 +30,13 @@ Mandatory properties:
   128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as
   pre-fetchable.

+Optional properties:
+- clocks: when present, this should contain the peripheral clock (PCLK) and the
+  PCI clock (PCICLK). If these are not present, they are assumed to be
+  hard-wired enabled and always on. The PCI clock will be 33 or 66 MHz.
+- clock-names: when present, this should contain "PCLK" for the peripheral
+  clock and "PCICLK" for the PCI-side clock.
+
 Mandatory subnodes:
 - For "faraday,ftpci100" a node representing the interrupt-controller inside the
   host bridge is mandatory. It has the following mandatory properties:
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index e3d5680875b1..cf92d3ba5a26 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -33,6 +33,10 @@ Optional properties:
 - reset-gpio-active-high: If present then the reset sequence using the GPIO
   specified in the "reset-gpio" property is reversed (H=reset state,
   L=operation state).
+- vpcie-supply: Should specify the regulator in charge of PCIe port power.
+  The regulator will be enabled when initializing the PCIe host and
+  disabled either as part of the init process or when shutting down the
+  host.

 Additional required properties for imx6sx-pcie:
 - clock names: Must include the following additional entries:
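The vpcie-supply behaviour described above maps onto the standard regulator consumer API. A minimal sketch of what a host driver typically does with an optional supply (function names here are illustrative, not the actual imx6 driver code):

```c
#include <linux/regulator/consumer.h>

/* Illustrative: grab the optional "vpcie" supply and gate it around
 * host bring-up, as the binding text describes.
 */
static int example_pcie_power_on(struct device *dev)
{
	struct regulator *vpcie;
	int ret;

	vpcie = devm_regulator_get_optional(dev, "vpcie");
	if (IS_ERR(vpcie)) {
		if (PTR_ERR(vpcie) != -ENODEV)
			return PTR_ERR(vpcie);	/* e.g. -EPROBE_DEFER */
		vpcie = NULL;	/* no regulator described in the DT */
	}

	if (vpcie) {
		ret = regulator_enable(vpcie);
		if (ret)
			return ret;
	}
	/* ... bring up PHY and link; regulator_disable() on teardown ... */
	return 0;
}
```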
diff --git a/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt
new file mode 100644
index 000000000000..fe80dda9bf73
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt
@@ -0,0 +1,130 @@
+MediaTek Gen2 PCIe controller which is available on MT7623 series SoCs
+
+The PCIe subsys supports a single root complex (RC) with 3 root ports. Each
+root port supports a Gen2 1-lane link and has a PIPE interface to the PHY.
+
+Required properties:
+- compatible: Should contain "mediatek,mt7623-pcie".
+- device_type: Must be "pci"
+- reg: Base addresses and lengths of the PCIe controller.
+- #address-cells: Address representation for root ports (must be 3)
+- #size-cells: Size representation for root ports (must be 2)
+- #interrupt-cells: Size representation for interrupts (must be 1)
+- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
+  Please refer to the standard PCI bus binding document for a more detailed
+  explanation.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - free_ck :for reference clock of PCIe subsys
+  - sys_ck0 :for clock of Port0
+  - sys_ck1 :for clock of Port1
+  - sys_ck2 :for clock of Port2
+- resets: Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+  - pcie-rst0 :port0 reset
+  - pcie-rst1 :port1 reset
+  - pcie-rst2 :port2 reset
+- phys: List of PHY specifiers (used by generic PHY framework).
+- phy-names : Must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
+  number of PHYs as specified in *phys* property.
+- power-domains: A phandle and power domain specifier pair to the power domain
+  which is responsible for collapsing and restoring power to the peripheral.
+- bus-range: Range of bus numbers associated with this controller.
+- ranges: Ranges for the PCI memory and I/O regions.
+
+In addition, the device tree node must have sub-nodes describing each
+PCIe port interface, having the following mandatory properties:
+
+Required properties:
+- device_type: Must be "pci"
+- reg: Only the first four bytes are used to refer to the correct bus number
+  and device number.
+- #address-cells: Must be 3
+- #size-cells: Must be 2
+- #interrupt-cells: Must be 1
+- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
+  Please refer to the standard PCI bus binding document for a more detailed
+  explanation.
+- ranges: Sub-ranges distributed from the PCIe controller node. An empty
+  property is sufficient.
+- num-lanes: Number of lanes to use for this port.
+
+Examples:
+
+	hifsys: syscon@1a000000 {
+		compatible = "mediatek,mt7623-hifsys",
+			     "mediatek,mt2701-hifsys",
+			     "syscon";
+		reg = <0 0x1a000000 0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	pcie: pcie-controller@1a140000 {
+		compatible = "mediatek,mt7623-pcie";
+		device_type = "pci";
+		reg = <0 0x1a140000 0 0x1000>, /* PCIe shared registers */
+		      <0 0x1a142000 0 0x1000>, /* Port0 registers */
+		      <0 0x1a143000 0 0x1000>, /* Port1 registers */
+		      <0 0x1a144000 0 0x1000>; /* Port2 registers */
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xf800 0 0 0>;
+		interrupt-map = <0x0000 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>,
+				<0x0800 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>,
+				<0x1000 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&topckgen CLK_TOP_ETHIF_SEL>,
+			 <&hifsys CLK_HIFSYS_PCIE0>,
+			 <&hifsys CLK_HIFSYS_PCIE1>,
+			 <&hifsys CLK_HIFSYS_PCIE2>;
+		clock-names = "free_ck", "sys_ck0", "sys_ck1", "sys_ck2";
+		resets = <&hifsys MT2701_HIFSYS_PCIE0_RST>,
+			 <&hifsys MT2701_HIFSYS_PCIE1_RST>,
+			 <&hifsys MT2701_HIFSYS_PCIE2_RST>;
+		reset-names = "pcie-rst0", "pcie-rst1", "pcie-rst2";
+		phys = <&pcie0_phy>, <&pcie1_phy>, <&pcie2_phy>;
+		phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>;
+		bus-range = <0x00 0xff>;
+		ranges = <0x81000000 0 0x1a160000 0 0x1a160000 0 0x00010000	/* I/O space */
+			  0x83000000 0 0x60000000 0 0x60000000 0 0x10000000>;	/* memory space */
+
+		pcie@0,0 {
+			device_type = "pci";
+			reg = <0x0000 0 0 0 0>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
+			ranges;
+			num-lanes = <1>;
+		};
+
+		pcie@1,0 {
+			device_type = "pci";
+			reg = <0x0800 0 0 0 0>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
+			ranges;
+			num-lanes = <1>;
+		};
+
+		pcie@2,0 {
+			device_type = "pci";
+			reg = <0x1000 0 0 0 0>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
+			ranges;
+			num-lanes = <1>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
index e15f9b19901f..9d418b71774f 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
@@ -8,6 +8,7 @@
 			- "qcom,pcie-apq8064" for apq8064
 			- "qcom,pcie-apq8084" for apq8084
 			- "qcom,pcie-msm8996" for msm8996 or apq8096
+			- "qcom,pcie-ipq4019" for ipq4019

 - reg:
 	Usage: required
@@ -87,7 +88,7 @@
 			- "core"	Clocks the pcie hw block
 			- "phy"		Clocks the pcie PHY block
 - clock-names:
-	Usage: required for apq8084
+	Usage: required for apq8084/ipq4019
 	Value type: <stringlist>
 	Definition: Should contain the following entries
 			- "aux"		Auxiliary (AUX) clock
@@ -126,6 +127,23 @@
 	Definition: Should contain the following entries
 			- "core" Core reset

+- reset-names:
+	Usage: required for ipq/apq8064
+	Value type: <stringlist>
+	Definition: Should contain the following entries
+			- "axi_m"		AXI master reset
+			- "axi_s"		AXI slave reset
+			- "pipe"		PIPE reset
+			- "axi_m_vmid"		VMID reset
+			- "axi_s_xpu"		XPU reset
+			- "parf"		PARF reset
+			- "phy"			PHY reset
+			- "axi_m_sticky"	AXI sticky reset
+			- "pipe_sticky"		PIPE sticky reset
+			- "pwr"			PWR reset
+			- "ahb"			AHB reset
+			- "phy_ahb"		PHY AHB reset
+
 - power-domains:
 	Usage: required for apq8084 and msm8996/apq8096
 	Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 34712d6fd253..bd27428dda61 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -1,4 +1,4 @@
-* Renesas RCar PCIe interface
+* Renesas R-Car PCIe interface

 Required properties:
 compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
diff --git a/Documentation/devicetree/bindings/pci/tango-pcie.txt b/Documentation/devicetree/bindings/pci/tango-pcie.txt
new file mode 100644
index 000000000000..244683836a79
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/tango-pcie.txt
@@ -0,0 +1,29 @@
+Sigma Designs Tango PCIe controller
+
+Required properties:
+
+- compatible: "sigma,smp8759-pcie"
+- reg: address/size of PCI configuration space, address/size of register area
+- bus-range: defined by size of PCI configuration space
+- device_type: "pci"
+- #size-cells: <2>
+- #address-cells: <3>
+- msi-controller
+- ranges: translation from system to bus addresses
+- interrupts: spec for misc interrupts, spec for MSI
+
+Example:
+
+	pcie@2e000 {
+		compatible = "sigma,smp8759-pcie";
+		reg = <0x50000000 0x400000>, <0x2e000 0x100>;
+		bus-range = <0 3>;
+		device_type = "pci";
+		#size-cells = <2>;
+		#address-cells = <3>;
+		msi-controller;
+		ranges = <0x02000000 0x0 0x00400000 0x50400000 0x0 0x3c00000>;
+		interrupts =
+			<54 IRQ_TYPE_LEVEL_HIGH>, /* misc interrupts */
+			<55 IRQ_TYPE_LEVEL_HIGH>; /* MSI */
+	};
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 4231b45b2f8f..30e04f7a690d 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -348,6 +348,7 @@ PER-CPU MEM
   devm_free_percpu()

 PCI
+  devm_pci_alloc_host_bridge()  : managed PCI host bridge allocation
   devm_pci_remap_cfgspace()	: ioremap PCI configuration space
   devm_pci_remap_cfg_resource()	: ioremap PCI configuration space resource
   pcim_enable_device()		: after success, all PCI ops become managed
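The new devres entry above pairs naturally with the bridge-based scan API used throughout this series. A hedged sketch of the probe-side idiom (driver and struct names are ours; the devm_pci_alloc_host_bridge()/pci_host_bridge_priv()/pci_scan_root_bus_bridge() calls are the ones this series documents):

```c
#include <linux/pci.h>
#include <linux/platform_device.h>

/* Private host state stored inside the bridge allocation itself. */
struct example_pcie {
	void __iomem *base;
};

static int example_pcie_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;
	struct example_pcie *priv;

	/* Freed automatically on probe failure or driver detach. */
	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*priv));
	if (!bridge)
		return -ENOMEM;

	priv = pci_host_bridge_priv(bridge);
	(void)priv;	/* ... fill in windows, ops, sysdata ... */

	return pci_scan_root_bus_bridge(bridge);
}
```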
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
index f200467ade38..2f09455a993a 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -55,7 +55,7 @@ IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
 In addition to the offloads described above it is possible for a frame to
 contain additional headers such as an outer tunnel. In order to account for
 such instances an additional set of segmentation offload types were
-introduced including SKB_GSO_IPIP, SKB_GSO_SIT, SKB_GSO_GRE, and
+introduced including SKB_GSO_IPXIP4, SKB_GSO_IPXIP6, SKB_GSO_GRE, and
 SKB_GSO_UDP_TUNNEL. These extra segmentation types are used to identify
 cases where there are more than just 1 set of headers. For example in the
 case of IPIP and SIT we should have the network and transport headers moved
diff --git a/MAINTAINERS b/MAINTAINERS
index 71d438072d57..4f4057c8ffca 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3846,6 +3846,12 @@ S:	Supported
 F:	drivers/input/touchscreen/cyttsp*
 F:	include/linux/input/cyttsp.h

+D-LINK DIR-685 TOUCHKEYS DRIVER
+M:	Linus Walleij <linus.walleij@linaro.org>
+L:	linux-input@vger.kernel.org
+S:	Supported
+F:	drivers/input/dlink-dir685-touchkeys.c
+
 DALLAS/MAXIM DS1685-FAMILY REAL TIME CLOCK
 M:	Joshua Kinard <kumba@gentoo.org>
 S:	Maintained
@@ -6689,8 +6695,10 @@ S:	Maintained
 F:	drivers/input/
 F:	include/linux/input.h
 F:	include/uapi/linux/input.h
+F:	include/uapi/linux/input-event-codes.h
 F:	include/linux/input/
 F:	Documentation/devicetree/bindings/input/
+F:	Documentation/input/

 INPUT MULTITOUCH (MT) PROTOCOL
 M:	Henrik Rydberg <rydberg@bitmath.org>
@@ -9129,9 +9137,7 @@ F:	lib/random32.c
 NETWORKING [IPv4/IPv6]
 M:	"David S. Miller" <davem@davemloft.net>
 M:	Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
-M:	James Morris <jmorris@namei.org>
 M:	Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
-M:	Patrick McHardy <kaber@trash.net>
 L:	netdev@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 S:	Maintained
@@ -10154,9 +10160,16 @@ S:	Maintained
 F:	Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
 F:	drivers/pci/dwc/pcie-hisi.c

+PCIE DRIVER FOR HISILICON KIRIN
+M:	Xiaowei Song <songxiaowei@hisilicon.com>
+M:	Binghui Wang <wangbinghui@hisilicon.com>
+L:	linux-pci@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/pci/pcie-kirin.txt
+F:	drivers/pci/dwc/pcie-kirin.c
+
 PCIE DRIVER FOR ROCKCHIP
 M:	Shawn Lin <shawn.lin@rock-chips.com>
-M:	Wenrui Li <wenrui.li@rock-chips.com>
 L:	linux-pci@vger.kernel.org
 L:	linux-rockchip@lists.infradead.org
 S:	Maintained
@@ -10178,6 +10191,14 @@ S:	Supported
 F:	Documentation/devicetree/bindings/pci/pci-thunder-*
 F:	drivers/pci/host/pci-thunder-*

+PCIE DRIVER FOR MEDIATEK
+M:	Ryder Lee <ryder.lee@mediatek.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-mediatek@lists.infradead.org
+S:	Supported
+F:	Documentation/devicetree/bindings/pci/mediatek*
+F:	drivers/pci/host/*mediatek*
+
 PCMCIA SUBSYSTEM
 P:	Linux PCMCIA Team
 L:	linux-pcmcia@lists.infradead.org
Miller" <davem@davemloft.net> M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> -M: James Morris <jmorris@namei.org> M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org> -M: Patrick McHardy <kaber@trash.net> L: netdev@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git S: Maintained @@ -10154,9 +10160,16 @@ S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt F: drivers/pci/dwc/pcie-hisi.c +PCIE DRIVER FOR HISILICON KIRIN +M: Xiaowei Song <songxiaowei@hisilicon.com> +M: Binghui Wang <wangbinghui@hisilicon.com> +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/pcie-kirin.txt +F: drivers/pci/dwc/pcie-kirin.c + PCIE DRIVER FOR ROCKCHIP M: Shawn Lin <shawn.lin@rock-chips.com> -M: Wenrui Li <wenrui.li@rock-chips.com> L: linux-pci@vger.kernel.org L: linux-rockchip@lists.infradead.org S: Maintained @@ -10178,6 +10191,14 @@ S: Supported F: Documentation/devicetree/bindings/pci/pci-thunder-* F: drivers/pci/host/pci-thunder-* +PCIE DRIVER FOR MEDIATEK +M: Ryder Lee <ryder.lee@mediatek.com> +L: linux-pci@vger.kernel.org +L: linux-mediatek@lists.infradead.org +S: Supported +F: Documentation/devicetree/bindings/pci/mediatek* +F: drivers/pci/host/*mediatek* + PCMCIA SUBSYSTEM P: Linux PCMCIA Team L: linux-pcmcia@lists.infradead.org diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0b731e8ab17e..a208bfe367b5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -58,6 +58,7 @@ config ARM select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU + select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU select HAVE_EXIT_THREAD select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL) diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index e943e6cee254..f308c8c40cb9 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -159,16 +159,16 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p); /* * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. */ -extern int _find_first_zero_bit_le(const void * p, unsigned size); -extern int _find_next_zero_bit_le(const void * p, int size, int offset); +extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size); +extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset); extern int _find_first_bit_le(const unsigned long *p, unsigned size); extern int _find_next_bit_le(const unsigned long *p, int size, int offset); /* * Big endian assembly bitops. nr = 0 -> byte 3 bit 0. 
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 22b73112b75f..f379881d5cc3 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -1,6 +1,10 @@
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((unsigned long)(__gnu_mcount_nc))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 2d88af5be45f..233b4b50eff3 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -16,6 +16,7 @@
 struct pci_sys_data;
 struct pci_ops;
 struct pci_bus;
+struct pci_host_bridge;
 struct device;

 struct hw_pci {
@@ -25,7 +26,7 @@ struct hw_pci {
 	unsigned int	io_optional:1;
 	void		**private_data;
 	int		(*setup)(int nr, struct pci_sys_data *);
-	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
+	int		(*scan)(int nr, struct pci_host_bridge *);
 	void		(*preinit)(void);
 	void		(*postinit)(void);
 	u8		(*swizzle)(struct pci_dev *dev, u8 *pin);
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 503f488053de..8f2c47bec375 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -11,12 +11,6 @@
 #ifndef _ASMARM_PAGE_NOMMU_H
 #define _ASMARM_PAGE_NOMMU_H

-#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
-#define KTHREAD_SIZE (8192)
-#else
-#define KTHREAD_SIZE PAGE_SIZE
-#endif
-
 #define clear_page(page)	memset((page), 0, PAGE_SIZE)
 #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index b259956365a0..56dc1a3a33b4 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -458,10 +458,14 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 	int nr, busnr;

 	for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
-		sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
-		if (WARN(!sys, "PCI: unable to allocate sys data!"))
+		struct pci_host_bridge *bridge;
+
+		bridge = pci_alloc_host_bridge(sizeof(struct pci_sys_data));
+		if (WARN(!bridge, "PCI: unable to allocate bridge!"))
 			break;

+		sys = pci_host_bridge_priv(bridge);
+
 		sys->busnr   = busnr;
 		sys->swizzle = hw->swizzle;
 		sys->map_irq = hw->map_irq;
@@ -473,7 +477,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 		ret = hw->setup(nr, sys);

 		if (ret > 0) {
-			struct pci_host_bridge *host_bridge;

 			ret = pcibios_init_resource(nr, sys, hw->io_optional);
 			if (ret)  {
@@ -481,26 +484,37 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 				break;
 			}

+			bridge->map_irq = pcibios_map_irq;
+			bridge->swizzle_irq = pcibios_swizzle;
+
 			if (hw->scan)
-				sys->bus = hw->scan(nr, sys);
-			else
-				sys->bus = pci_scan_root_bus_msi(parent,
-					sys->busnr, hw->ops, sys,
-					&sys->resources, hw->msi_ctrl);
+				ret = hw->scan(nr, bridge);
+			else {
+				list_splice_init(&sys->resources,
+						 &bridge->windows);
+				bridge->dev.parent = parent;
+				bridge->sysdata = sys;
+				bridge->busnr = sys->busnr;
+				bridge->ops = hw->ops;
+				bridge->msi = hw->msi_ctrl;
+				bridge->align_resource =
+						hw->align_resource;
+
+				ret = pci_scan_root_bus_bridge(bridge);
+			}

-			if (WARN(!sys->bus, "PCI: unable to scan bus!")) {
-				kfree(sys);
+			if (WARN(ret < 0, "PCI: unable to scan bus!")) {
+				pci_free_host_bridge(bridge);
 				break;
 			}

+			sys->bus = bridge->bus;
+
 			busnr = sys->bus->busn_res.end + 1;

 			list_add(&sys->node, head);
-
-			host_bridge = pci_find_host_bridge(sys->bus);
-			host_bridge->align_resource = hw->align_resource;
 		} else {
-			kfree(sys);
+			pci_free_host_bridge(bridge);
 			if (ret < 0)
 				break;
 		}
@@ -519,8 +533,6 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
 	if (hw->postinit)
 		hw->postinit();

-	pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
-
 	list_for_each_entry(sys, &head, node) {
 		struct pci_bus *bus = sys->bus;
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index c73c4030ca5d..efcd9f25a14b 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -92,12 +92,95 @@
 2:	mcount_exit
 .endm

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+.macro __ftrace_regs_caller
+
+	sub	sp, sp, #8	@ space for PC and CPSR OLD_R0,
+				@ OLD_R0 will overwrite previous LR
+
+	add	ip, sp, #12	@ move in IP the value of SP as it was
+				@ before the push {lr} of the mcount mechanism
+
+	str	lr, [sp, #0]	@ store LR instead of PC
+
+	ldr	lr, [sp, #8]	@ get previous LR
+
+	str	r0, [sp, #8]	@ write r0 as OLD_R0 over previous LR
+
+	stmdb	sp!, {ip, lr}
+	stmdb	sp!, {r0-r11, lr}
+
+	@ stack content at this point:
+	@ 0  4          48   52       56            60  64    68       72
+	@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |

+	mov	r3, sp				@ struct pt_regs*
+
+	ldr	r2, =function_trace_op
+	ldr	r2, [r2]			@ pointer to the current
+						@ function tracing op
+
+	ldr	r1, [sp, #S_LR]			@ lr of instrumented func
+
+	ldr	lr, [sp, #S_PC]			@ get LR
+
+	mcount_adjust_addr	r0, lr		@ instrumented function
+
+	.globl ftrace_regs_call
+ftrace_regs_call:
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_regs_call
+ftrace_graph_regs_call:
+	mov	r0, r0
+#endif
+
+	@ pop saved regs
+	ldmia	sp!, {r0-r12}			@ restore r0 through r12
+	ldr	ip, [sp, #8]			@ restore PC
+	ldr	lr, [sp, #4]			@ restore LR
+	ldr	sp, [sp, #0]			@ restore SP
+	mov	pc, ip				@ return
+.endm
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.macro __ftrace_graph_regs_caller
+
+	sub	r0, fp, #4		@ lr of instrumented routine (parent)
+
+	@ called from __ftrace_regs_caller
+	ldr	r1, [sp, #S_PC]		@ instrumented routine (func)
+	mcount_adjust_addr	r1, r1
+
+	mov	r2, fp			@ frame pointer
+	bl	prepare_ftrace_return
+
+	@ pop registers saved in ftrace_regs_caller
+	ldmia	sp!, {r0-r12}			@ restore r0 through r12
+	ldr	ip, [sp, #8]			@ restore PC
+	ldr	lr, [sp, #4]			@ restore LR
+	ldr	sp, [sp, #0]			@ restore SP
+	mov	pc, ip				@ return
+
+.endm
+#endif
+#endif
+
 .macro __ftrace_caller suffix
 	mcount_enter

 	mcount_get_lr	r1			@ lr of instrumented func
 	mcount_adjust_addr	r0, lr		@ instrumented function

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	ldr	r2, =function_trace_op
+	ldr	r2, [r2]			@ pointer to the current
+						@ function tracing op
+	mov	r3, #0				@ regs is NULL
+#endif
+
 	.globl ftrace_call\suffix
 ftrace_call\suffix:
 	bl	ftrace_stub
@@ -212,6 +295,15 @@ UNWIND(.fnstart)
 	__ftrace_caller
 UNWIND(.fnend)
 ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+UNWIND(.fnstart)
+	__ftrace_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_regs_caller)
+#endif
+
 #endif

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -220,6 +312,14 @@ UNWIND(.fnstart)
 	__ftrace_graph_caller
 UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_graph_regs_caller)
+UNWIND(.fnstart)
+	__ftrace_graph_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_regs_caller)
+#endif
 #endif

 .purgem mcount_enter
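With DYNAMIC_FTRACE_WITH_REGS wired up as above, a tracer can ask for the full pt_regs at every patched call site. A minimal module-side sketch of a consumer (the callback signature and FTRACE_OPS_FL_SAVE_REGS flag are the kernel API of this era; the handler body is purely illustrative):

```c
#include <linux/ftrace.h>
#include <linux/module.h>

static void example_trace(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* regs is non-NULL because of FTRACE_OPS_FL_SAVE_REGS below */
}

static struct ftrace_ops example_ops = {
	.func	= example_trace,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

static int __init example_init(void)
{
	return register_ftrace_function(&example_ops);
}

static void __exit example_exit(void)
{
	unregister_ftrace_function(&example_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```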
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 833c991075a1..5617932a83df 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -141,6 +141,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)

 	ret = ftrace_modify_code(pc, 0, new, false);

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret) {
+		pc = (unsigned long)&ftrace_regs_call;
+		new = ftrace_call_replace(pc, (unsigned long)func);
+
+		ret = ftrace_modify_code(pc, 0, new, false);
+	}
+#endif
+
 #ifdef CONFIG_OLD_MCOUNT
 	if (!ret) {
 		pc = (unsigned long)&ftrace_call_old;
@@ -159,11 +168,29 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned long ip = rec->ip;

 	old = ftrace_nop_replace(rec);
+
+	new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+	return ftrace_modify_code(rec->ip, old, new, true);
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+		       unsigned long addr)
+{
+	unsigned long new, old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+
 	new = ftrace_call_replace(ip, adjust_address(rec, addr));

 	return ftrace_modify_code(rec->ip, old, new, true);
 }

+#endif
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -231,6 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 extern unsigned long ftrace_graph_call;
 extern unsigned long ftrace_graph_call_old;
 extern void ftrace_graph_caller_old(void);
+extern unsigned long ftrace_graph_regs_call;
+extern void ftrace_graph_regs_caller(void);

 static int __ftrace_modify_caller(unsigned long *callsite,
 				  void (*func) (void), bool enable)
@@ -253,6 +282,14 @@ static int ftrace_modify_graph_caller(bool enable)
 				      ftrace_graph_caller,
 				      enable);

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret)
+		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
+				      ftrace_graph_regs_caller,
+				      enable);
+#endif
+
+
 #ifdef CONFIG_OLD_MCOUNT
 	if (!ret)
 		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 939e8b58c59d..d96714e1858c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -123,10 +123,10 @@ void __show_regs(struct pt_regs *regs)
 	print_symbol("PC is at %s\n", instruction_pointer(regs));
 	print_symbol("LR is at %s\n", regs->ARM_lr);
-	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
-	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
-		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
+	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n",
+	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
+	printk("sp : %08lx  ip : %08lx  fp : %08lx\n",
+	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
 	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
 		regs->ARM_r10, regs->ARM_r9,
 		regs->ARM_r8);
@@ -404,9 +404,17 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
 static struct page *signal_page;
 extern struct page *get_signal_page(void);

+static int sigpage_mremap(const struct vm_special_mapping *sm,
+		struct vm_area_struct *new_vma)
+{
+	current->mm->context.sigpage = new_vma->vm_start;
+	return 0;
+}
+
 static const struct vm_special_mapping sigpage_mapping = {
 	.name = "[sigpage]",
 	.pages = &signal_page,
+	.mremap = sigpage_mremap,
 };

 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 53cf86cf2d1a..a4d6dc0f2427 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -54,8 +54,26 @@ static const struct vm_special_mapping vdso_data_mapping = {
 	.pages = &vdso_data_page,
 };

+static int vdso_mremap(const struct vm_special_mapping *sm,
+		struct vm_area_struct *new_vma)
+{
+	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+	unsigned long vdso_size;
+
+	/* without VVAR page */
+	vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
+
+	if (vdso_size != new_size)
+		return -EINVAL;
+
+	current->mm->context.vdso = new_vma->vm_start;
+
+	return 0;
+}
+
 static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
 	.name = "[vdso]",
+	.mremap = vdso_mremap,
 };

 struct elfinfo {
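The mremap hooks above exist so that user space (checkpoint/restore tooling in particular) can legally move the sigpage and vDSO while the kernel keeps its per-mm pointers consistent. A rough user-space illustration of the operation the kernel must now track (simplified sketch; a real user first parses /proc/self/maps to find the [vdso] range, which is assumed here):

```c
#define _GNU_SOURCE
#include <sys/mman.h>

/* vdso_start/vdso_len are assumed to have been read from
 * /proc/self/maps; this only shows the mremap() call itself.
 */
static void *move_vdso(void *vdso_start, size_t vdso_len, void *target)
{
	return mremap(vdso_start, vdso_len, vdso_len,
		      MREMAP_FIXED | MREMAP_MAYMOVE, target);
}
```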
diff --git a/arch/arm/mach-dove/pcie.c b/arch/arm/mach-dove/pcie.c
index 91fe97144570..dfb62f3f5dcf 100644
--- a/arch/arm/mach-dove/pcie.c
+++ b/arch/arm/mach-dove/pcie.c
@@ -152,16 +152,23 @@ static void rc_pci_fixup(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);

-static struct pci_bus __init *
-dove_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static int __init
+dove_pcie_scan_bus(int nr, struct pci_host_bridge *bridge)
 {
+	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
+
 	if (nr >= num_pcie_ports) {
 		BUG();
-		return NULL;
+		return -EINVAL;
 	}

-	return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
-				 &sys->resources);
+	list_splice_init(&sys->resources, &bridge->windows);
+	bridge->dev.parent = NULL;
+	bridge->sysdata = sys;
+	bridge->busnr = sys->busnr;
+	bridge->ops = &pcie_ops;
+
+	return pci_scan_root_bus_bridge(bridge);
 }

 static int __init dove_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/arm/mach-iop13xx/pci.c b/arch/arm/mach-iop13xx/pci.c
index 204eb4460271..070d92ae1b6f 100644
--- a/arch/arm/mach-iop13xx/pci.c
+++ b/arch/arm/mach-iop13xx/pci.c
@@ -504,10 +504,10 @@ iop13xx_pci_abort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 /* Scan an IOP13XX PCI bus.  nr selects which ATU we use.
  */
-struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *sys)
+int iop13xx_scan_bus(int nr, struct pci_host_bridge *bridge)
 {
-	int which_atu;
-	struct pci_bus *bus = NULL;
+	int which_atu, ret;
+	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);

 	switch (init_atu) {
 	case IOP13XX_INIT_ATU_ATUX:
@@ -525,9 +525,14 @@ struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *sys)

 	if (!which_atu) {
 		BUG();
-		return NULL;
+		return -ENODEV;
 	}

+	list_splice_init(&sys->resources, &bridge->windows);
+	bridge->dev.parent = NULL;
+	bridge->sysdata = sys;
+	bridge->busnr = sys->busnr;
+
 	switch (which_atu) {
 	case IOP13XX_INIT_ATU_ATUX:
 		if (time_after_eq(jiffies + msecs_to_jiffies(1000),
 				  atux_trhfa_timeout))
 			while(time_before(jiffies, atux_trhfa_timeout))
 				udelay(100);

-		bus = pci_bus_atux = pci_scan_root_bus(NULL, sys->busnr,
-						       &iop13xx_atux_ops,
-						       sys, &sys->resources);
+		bridge->ops = &iop13xx_atux_ops;
+		ret = pci_scan_root_bus_bridge(bridge);
+		if (!ret)
+			pci_bus_atux = bridge->bus;
 		break;
 	case IOP13XX_INIT_ATU_ATUE:
-		bus = pci_bus_atue = pci_scan_root_bus(NULL, sys->busnr,
-						       &iop13xx_atue_ops,
-						       sys, &sys->resources);
+		bridge->ops = &iop13xx_atue_ops;
+		ret = pci_scan_root_bus_bridge(bridge);
+		if (!ret)
+			pci_bus_atue = bridge->bus;
 		break;
+	default:
+		ret = -EINVAL;
 	}

-	return bus;
+	return ret;
 }

 /* This function is called from iop13xx_pci_init() after assigning valid
diff --git a/arch/arm/mach-iop13xx/pci.h b/arch/arm/mach-iop13xx/pci.h
index 71b9c57e1fde..8dc343cb887a 100644
--- a/arch/arm/mach-iop13xx/pci.h
+++ b/arch/arm/mach-iop13xx/pci.h
@@ -11,9 +11,10 @@ extern size_t iop13xx_atue_mem_size;
 extern size_t iop13xx_atux_mem_size;

 struct pci_sys_data;
+struct pci_host_bridge;
 struct hw_pci;
 int iop13xx_pci_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *);
+int iop13xx_scan_bus(int nr, struct pci_host_bridge *bridge);
 void iop13xx_atu_select(struct hw_pci *plat_pci);
 void iop13xx_pci_init(void);
 void iop13xx_map_pci_memory(void);
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 6c52bd32610e..e48cc06c2aec 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -137,6 +137,9 @@ static void pl08x_put_signal(const struct pl08x_channel_data *cd, int ch)
 }

 static struct pl08x_platform_data pl08x_pd = {
+	/* Some reasonable memcpy defaults */
+	.memcpy_burst_size = PL08X_BURST_SZ_256,
+	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
 	.slave_channels = &pl08x_slave_channels[0],
 	.num_slave_channels = ARRAY_SIZE(pl08x_slave_channels),
 	.get_xfer_signal = pl08x_get_signal,
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index 81ff4327a962..636d84b40466 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -194,16 +194,22 @@ static void rc_pci_fixup(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);

-static struct pci_bus __init *
-mv78xx0_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static int __init mv78xx0_pcie_scan_bus(int nr, struct pci_host_bridge *bridge)
 {
+	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
+
 	if (nr >= num_pcie_ports) {
 		BUG();
-		return NULL;
+		return -EINVAL;
 	}

-	return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
-				 &sys->resources);
+	list_splice_init(&sys->resources, &bridge->windows);
+	bridge->dev.parent = NULL;
+	bridge->sysdata = sys;
+	bridge->busnr = sys->busnr;
+	bridge->ops = &pcie_ops;
+
+	return pci_scan_root_bus_bridge(bridge);
 }

 static int __init mv78xx0_pcie_map_irq(const struct pci_dev *dev, u8 slot,
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index efeffc6b4ebb..4c0c7de665c3 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -54,6 +54,7 @@ void orion5x_restart(enum reboot_mode, const char *);
  * PCIe/PCI functions.
  */
 struct pci_bus;
+struct pci_host_bridge;
 struct pci_sys_data;
 struct pci_dev;

@@ -61,7 +62,7 @@ void orion5x_pcie_id(u32 *dev, u32 *rev);
 void orion5x_pci_disable(void);
 void orion5x_pci_set_cardbus_mode(void);
 int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys);
+int orion5x_pci_sys_scan_bus(int nr, struct pci_host_bridge *bridge);
 int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);

 struct tag;
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index ecb998e7f8dc..76951bfbacf5 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -555,18 +555,27 @@ int __init orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys)
 	return 0;
 }

-struct pci_bus __init *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys)
+int __init orion5x_pci_sys_scan_bus(int nr, struct pci_host_bridge *bridge)
 {
-	if (nr == 0)
-		return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
-					 &sys->resources);
+	struct pci_sys_data *sys = pci_host_bridge_priv(bridge);

-	if (nr == 1 && !orion5x_pci_disabled)
-		return pci_scan_root_bus(NULL, sys->busnr, &pci_ops, sys,
-					 &sys->resources);
+	list_splice_init(&sys->resources, &bridge->windows);
+	bridge->dev.parent = NULL;
+	bridge->sysdata = sys;
+	bridge->busnr = sys->busnr;
+
+	if (nr == 0) {
+		bridge->ops = &pcie_ops;
+		return pci_scan_root_bus_bridge(bridge);
+	}
+
+	if (nr == 1 && !orion5x_pci_disabled) {
+		bridge->ops = &pci_ops;
+		return pci_scan_root_bus_bridge(bridge);
+	}

 	BUG();
-	return NULL;
+	return -ENODEV;
 }

 int __init orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/arm/mach-s3c64xx/pl080.c b/arch/arm/mach-s3c64xx/pl080.c
index 261820a855ec..66fc774b70ec 100644
--- a/arch/arm/mach-s3c64xx/pl080.c
+++ b/arch/arm/mach-s3c64xx/pl080.c
@@ -137,16 +137,10 @@ static const struct dma_slave_map s3c64xx_dma0_slave_map[] = {
 };

 struct pl08x_platform_data s3c64xx_dma0_plat_data = {
-	.memcpy_channel = {
-		.bus_id = "memcpy",
-		.cctl_memcpy =
-			(PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
-			PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT |
-			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
-			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
-			PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
-			PL080_CONTROL_PROT_SYS),
-	},
+	.memcpy_burst_size = PL08X_BURST_SZ_4,
+	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
+	.memcpy_prot_buff = true,
+	.memcpy_prot_cache = true,
 	.lli_buses = PL08X_AHB1,
 	.mem_buses = PL08X_AHB1,
 	.get_xfer_signal = pl08x_get_xfer_signal,
@@ -238,16 +232,10 @@ static const struct dma_slave_map s3c64xx_dma1_slave_map[] = {
 };

 struct pl08x_platform_data s3c64xx_dma1_plat_data = {
-	.memcpy_channel = {
-		.bus_id = "memcpy",
-		.cctl_memcpy =
-			(PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
-			PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT |
-			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
-			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
-			PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
-			PL080_CONTROL_PROT_SYS),
-	},
+	.memcpy_burst_size = PL08X_BURST_SZ_4,
+	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
+	.memcpy_prot_buff = true,
+	.memcpy_prot_cache = true,
 	.lli_buses = PL08X_AHB1,
 	.mem_buses = PL08X_AHB1,
 	.get_xfer_signal = pl08x_get_xfer_signal,
diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c
index 23394ac76cf2..8537fcffe5a8 100644
--- a/arch/arm/mach-spear/spear3xx.c
+++ b/arch/arm/mach-spear/spear3xx.c
@@ -44,16 +44,10 @@ struct pl022_ssp_controller pl022_plat_data = {
 /* dmac device registration */
 struct pl08x_platform_data pl080_plat_data = {
-	.memcpy_channel = {
-		.bus_id = "memcpy",
-		.cctl_memcpy =
-			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
-			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
-			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
-			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
-			PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE | \
-			PL080_CONTROL_PROT_SYS),
-	},
+	.memcpy_burst_size = PL08X_BURST_SZ_16,
+	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
+	.memcpy_prot_buff = true,
+	.memcpy_prot_cache = true,
 	.lli_buses = PL08X_AHB1,
 	.mem_buses = PL08X_AHB1,
 	.get_xfer_signal = pl080_get_signal,
diff --git a/arch/arm/mach-spear/spear6xx.c b/arch/arm/mach-spear/spear6xx.c
index ccf3573b831c..c5fc110134ba 100644
--- a/arch/arm/mach-spear/spear6xx.c
+++ b/arch/arm/mach-spear/spear6xx.c
@@ -322,16 +322,10 @@ static struct pl08x_channel_data spear600_dma_info[] = {
 };

 static struct pl08x_platform_data spear6xx_pl080_plat_data = {
-	.memcpy_channel = {
-		.bus_id = "memcpy",
-		.cctl_memcpy =
-			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
-			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
-			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
-			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
-			PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE | \
-			PL080_CONTROL_PROT_SYS),
-	},
+	.memcpy_burst_size = PL08X_BURST_SZ_16,
+	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
+	.memcpy_prot_buff = true,
+	.memcpy_prot_cache = true,
 	.lli_buses = PL08X_AHB1,
 	.mem_buses = PL08X_AHB1,
 	.get_xfer_signal = pl080_get_signal,
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 877a0e3fd17d..60cdfdc151aa 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -679,7 +679,7 @@ config ARCH_DMA_ADDR_T_64BIT
 	bool

 config ARM_THUMB
-	bool "Support Thumb user binaries" if !CPU_THUMBONLY
+	bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT
 	depends on CPU_THUMB_CAPABLE
 	default y
 	help
@@ -690,6 +690,10 @@ config ARM_THUMB
 	  instruction set resulting in smaller binaries at the expense of
 	  slightly less efficient code.

+	  If this option is disabled, and you run userspace that switches to
+	  Thumb mode, signal handling will not work correctly, resulting in
+	  segmentation faults or illegal instruction aborts.
+
 	  If you don't know what this all is, saying Y is a safe choice.

 config ARM_THUMBEE
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index a7f6c01c13b9..e2b7e4f9cc31 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -39,20 +39,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 	return res->start;
 }

+#ifdef CONFIG_ACPI
 /*
  * Try to assign the IRQ number when probing a new device
  */
 int pcibios_alloc_irq(struct pci_dev *dev)
 {
-	if (acpi_disabled)
-		dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
-#ifdef CONFIG_ACPI
-	else
-		return acpi_pci_irq_enable(dev);
-#endif
+	if (!acpi_disabled)
+		acpi_pci_irq_enable(dev);

 	return 0;
 }
+#endif

 /*
  * raw_pci_read/write - Platform-specific PCI config space access.
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
index 8a7ecb4d5c64..bf9dd9eb4ceb 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
+++ b/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
@@ -80,7 +80,6 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
 #define PCI_BAR3_REG	0x1c
 #define PCI_BAR4_REG	0x20
 #define PCI_BAR5_REG	0x24
-#define PCI_BAR_COUNT	6
 #define PCI_BAR_RANGE_MASK	0xFFFFFFFF

 /* CARDBUS CIS POINTER */
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 1000c1b4c875..52f551ee492d 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -39,7 +39,6 @@ struct pci_controller {
 	unsigned long io_offset;
 	unsigned long io_map_base;
 	struct resource *busn_resource;
-	unsigned long busn_offset;

 #ifndef CONFIG_PCI_DOMAINS_GENERIC
 	unsigned int index;
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 3a84f6c0c840..174575a9a112 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -86,8 +86,7 @@ static void pcibios_scanbus(struct pci_controller *hose)
 				hose->mem_resource, hose->mem_offset);
 	pci_add_resource_offset(&resources,
 				hose->io_resource, hose->io_offset);
-	pci_add_resource_offset(&resources,
-				hose->busn_resource, hose->busn_offset);
+	pci_add_resource(&resources, hose->busn_resource);
 	bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
 				&resources);
 	hose->bus = bus;
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 6d612792f6b8..1faf6cb93dcb 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -24,7 +24,7 @@
 #include <linux/usb/r8a66597.h>
 #include <linux/usb/renesas_usbhs.h>
 #include <linux/i2c.h>
-#include <linux/i2c/tsc2007.h>
+#include <linux/platform_data/tsc2007.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/sh_msiof.h>
 #include <linux/spi/mmc_spi.h>
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 5639c9fe5b55..a4a626199c47 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -83,6 +83,8 @@ config SPARC64
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select HAVE_NMI
 	select HAVE_REGS_AND_STACK_ACCESS_API
+	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS

 config ARCH_DEFCONFIG
 	string
@@ -92,6 +94,9 @@ config ARCH_DEFCONFIG
 config ARCH_PROC_KCORE_TEXT
 	def_bool y

+config CPU_BIG_ENDIAN
+	def_bool y
+
 config ARCH_ATU
 	bool
 	default y if SPARC64
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index faa2f61058c2..4028f4f1e561 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -6,6 +6,17 @@
 #ifndef __ARCH_SPARC64_CMPXCHG__
 #define __ARCH_SPARC64_CMPXCHG__

+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+	__asm__ __volatile__("cas [%2], %3, %0"
+			     : "=&r" (new)
+			     : "0" (new), "r" (m), "r" (old)
+			     : "memory");
+
+	return new;
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
@@ -44,10 +55,38 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long

 void __xchg_called_with_bad_pointer(void);

+/*
+ * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order.
+ */
+static inline unsigned long
+xchg16(__volatile__ unsigned short *m, unsigned short val)
+{
+	unsigned long maddr = (unsigned long)m;
+	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
+	unsigned int mask = 0xffff << bit_shift;
+	unsigned int *ptr = (unsigned int *) (maddr & ~2);
+	unsigned int old32, new32, load32;
+
+	/* Read the old value */
+	load32 = *ptr;
+
+	do {
+		old32 = load32;
+		new32 = (load32 & (~mask)) | val << bit_shift;
+		load32 = __cmpxchg_u32(ptr, old32, new32);
+	} while (load32 != old32);
+
+	return (load32 & mask) >> bit_shift;
+}
+
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 				   int size)
 {
 	switch (size) {
+	case 2:
+		return xchg16(ptr, x);
 	case 4:
 		return xchg32(ptr, x);
 	case 8:
@@ -65,10 +104,11 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,

 #include <asm-generic/cmpxchg-local.h>

+
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
+__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
-	__asm__ __volatile__("cas [%2], %3, %0"
+	__asm__ __volatile__("casx [%2], %3, %0"
 			     : "=&r" (new)
 			     : "0" (new), "r" (m), "r" (old)
 			     : "memory");
@@ -76,15 +116,31 @@ __cmpxchg_u32(volatile int *m, int old, int new)
 	return new;
 }

+/*
+ * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order
+ */
 static inline unsigned long
-__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
 {
-	__asm__ __volatile__("casx [%2], %3, %0"
-			     : "=&r" (new)
-			     : "0" (new), "r" (m), "r" (old)
-			     : "memory");
-
-	return new;
+	unsigned long maddr = (unsigned long)m;
+	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
+	unsigned int mask = 0xff << bit_shift;
+	unsigned int *ptr = (unsigned int *) (maddr & ~3);
+	unsigned int old32, new32, load;
+	unsigned int load32 = *ptr;
+
+	do {
+		new32 = (load32 & ~mask) | (new << bit_shift);
+		old32 = (load32 & ~mask) | (old << bit_shift);
+		load32 = __cmpxchg_u32(ptr, old32, new32);
+		if (load32 == old32)
+			return old;
+		load = (load32 & mask) >> bit_shift;
+	} while (load == old);
+
+	return load;
 }

 /* This function doesn't exist, so you'll get a linker error
@@ -95,6 +151,8 @@ static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
 	switch (size) {
+	case 1:
+		return __cmpxchg_u8(ptr, old, new);
 	case 4:
 		return __cmpxchg_u32(ptr, old, new);
 	case 8:
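The comments in the hunk above describe how a 4-byte cas emulates 1- and 2-byte operations: compute the bit offset of the sub-word inside its aligned 32-bit container, using an XOR to flip the offset for SPARC's big-endian layout. A standalone C sketch of just that index arithmetic (illustrative only, not kernel code):

```c
#include <stdio.h>

/* Bit shift of a 2-byte halfword inside its aligned 32-bit word on a
 * big-endian machine: the halfword at offset 0 lives in the high bits.
 */
static int halfword_shift(unsigned long addr)
{
	return ((addr & 2) ^ 2) << 3;
}

int main(void)
{
	printf("offset 0 -> shift %d\n", halfword_shift(0)); /* 16 */
	printf("offset 2 -> shift %d\n", halfword_shift(2)); /*  0 */
	return 0;
}
```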
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index 6e9004aa6f25..698738a10414 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -48,6 +48,8 @@ struct ldc_channel_config {
 #define LDC_STATE_READY		0x03
 #define LDC_STATE_CONNECTED	0x04

+#define	LDC_PACKET_SIZE		64
+
 struct ldc_channel;

 /* Allocate state for a channel. */
@@ -72,6 +74,12 @@ int ldc_connect(struct ldc_channel *lp);
 int ldc_disconnect(struct ldc_channel *lp);

 int ldc_state(struct ldc_channel *lp);
+void ldc_set_state(struct ldc_channel *lp, u8 state);
+int ldc_mode(struct ldc_channel *lp);
+void __ldc_print(struct ldc_channel *lp, const char *caller);
+int ldc_rx_reset(struct ldc_channel *lp);
+
+#define	ldc_print(chan)	__ldc_print(chan, __func__)

 /* Read and write operations.  Only valid when the link is up. */
 int ldc_write(struct ldc_channel *lp, const void *buf,
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index aebeb88f70db..e8a4c413a1c7 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -16,6 +16,7 @@ struct mdesc_handle *mdesc_grab(void);
 void mdesc_release(struct mdesc_handle *);

 #define MDESC_NODE_NULL		(~(u64)0)
+#define MDESC_MAX_STR_LEN	256

 u64 mdesc_node_by_name(struct mdesc_handle *handle,
 		       u64 from_node, const char *name);
@@ -62,15 +63,32 @@ u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
 void mdesc_update(void);

 struct mdesc_notifier_client {
-	void (*add)(struct mdesc_handle *handle, u64 node);
-	void (*remove)(struct mdesc_handle *handle, u64 node);
-
+	void (*add)(struct mdesc_handle *handle, u64 node,
+		    const char *node_name);
+	void (*remove)(struct mdesc_handle *handle, u64 node,
+		       const char *node_name);
 	const char			*node_name;
 	struct mdesc_notifier_client	*next;
 };

 void mdesc_register_notifier(struct mdesc_notifier_client *client);

+union md_node_info {
+	struct vdev_port {
+		u64 id;				/* id */
+		u64 parent_cfg_hdl;		/* parent config handle */
+		const char *name;		/* name (property) */
+	} vdev_port;
+	struct ds_port {
+		u64 id;				/* id */
+	} ds_port;
+};
+
+u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
+		   union md_node_info *node_info);
+int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
+			const char *node_name, union md_node_info *node_info);
+
 void mdesc_fill_in_cpu_data(cpumask_t *mask);
 void mdesc_populate_present_mask(cpumask_t *mask);
 void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
diff --git a/arch/sparc/include/asm/qrwlock.h b/arch/sparc/include/asm/qrwlock.h
new file mode 100644
index 000000000000..d68a4b102100
--- /dev/null
+++ b/arch/sparc/include/asm/qrwlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QRWLOCK_H
+#define _ASM_SPARC_QRWLOCK_H
+
+#include <asm-generic/qrwlock_types.h>
+#include <asm-generic/qrwlock.h>
+
+#endif /* _ASM_SPARC_QRWLOCK_H */
diff --git a/arch/sparc/include/asm/qspinlock.h b/arch/sparc/include/asm/qspinlock.h
new file mode 100644
index 000000000000..5ae9a2802846
--- /dev/null
+++ b/arch/sparc/include/asm/qspinlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QSPINLOCK_H
+#define _ASM_SPARC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_SPARC_QSPINLOCK_H */
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 3fae200dd251..8b32538084f7 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -1,5 +1,5 @@
 /*
- *	Just a place holder. 
+ *	Just a place holder.
  */
 #ifndef _SPARC_SETUP_H
 #define _SPARC_SETUP_H
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 07c9f2e9bf57..f7028f5e1a5a 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -10,216 +10,12 @@

 #include <asm/processor.h>
 #include <asm/barrier.h>
-
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code.  They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants.  The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp)	((lp)->lock != 0)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-"1:	ldstub		[%1], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	ldub		[%1], %0\n"
-"	brnz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 1b\n"
-"	.previous"
-	: "=&r" (tmp)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned long result;
-
-	__asm__ __volatile__(
-"	ldstub		[%1], %0\n"
-	: "=r" (result)
-	: "r" (lock)
-	: "memory");
-
-	return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__asm__ __volatile__(
-"	stb		%%g0, [%0]"
-	: /* No outputs */
-	: "r" (lock)
-	: "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-"1:	ldstub		[%2], %0\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	rdpr		%%pil, %1\n"
-"	wrpr		%3, %%pil\n"
-"3:	ldub		[%2], %0\n"
-"	brnz,pt		%0, 3b\n"
-"	 nop\n"
-"	ba,pt		%%xcc, 1b\n"
-"	 wrpr		%1, %%pil\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r"(lock), "r"(flags)
-	: "memory");
-}
-
-/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__ (
-"1:	ldsw		[%2], %0\n"
-"	brlz,pn		%0, 2f\n"
-"4:	 add		%0, 1, %1\n"
-"	cas		[%2], %0, %1\n"
-"	cmp		%0, %1\n"
-"	bne,pn		%%icc, 1b\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	ldsw		[%2], %0\n"
-"	brlz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 4b\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
-	int tmp1, tmp2;
-
-	__asm__ __volatile__ (
-"1:	ldsw		[%2], %0\n"
-"	brlz,a,pn	%0, 2f\n"
-"	 mov		0, %0\n"
-"	add		%0, 1, %1\n"
-"	cas		[%2], %0, %1\n"
-"	cmp		%0, %1\n"
-"	bne,pn		%%icc, 1b\n"
-"	 mov		1, %0\n"
-"2:"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-
-	return tmp1;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-"1:	lduw	[%2], %0\n"
-"	sub	%0, 1, %1\n"
-"	cas	[%2], %0, %1\n"
-"	cmp	%0, %1\n"
-"	bne,pn	%%xcc, 1b\n"
-"	 nop"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
-	unsigned long mask, tmp1, tmp2;
-
-	mask = 0x80000000UL;
-
-	__asm__ __volatile__(
-"1:	lduw		[%2], %0\n"
-"	brnz,pn		%0, 2f\n"
-"4:	 or		%0, %3, %1\n"
-"	cas		[%2], %0, %1\n"
-"	cmp		%0, %1\n"
-"	bne,pn		%%icc, 1b\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	lduw		[%2], %0\n"
-"	brnz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 4b\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock), "r" (mask)
-	: "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
-	__asm__ __volatile__(
-"	stw		%%g0, [%0]"
-	: /* no outputs */
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
-	unsigned long mask, tmp1, tmp2, result;
-
-	mask = 0x80000000UL;
-
-	__asm__ __volatile__(
-"	mov	0, %2\n"
-"1:	lduw	[%3], %0\n"
-"	brnz,pn	%0, 2f\n"
-"	 or	%0, %4, %1\n"
-"	cas	[%3], %0, %1\n"
-"	cmp	%0, %1\n"
-"	bne,pn	%%icc, 1b\n"
-"	 nop\n"
-"	mov	1, %2\n"
-"2:"
-	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
-	: "r" (lock), "r" (mask)
-	: "memory");
-
-	return result;
-}
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>

 #define arch_read_lock_flags(p, f) arch_read_lock(p)
 #define arch_write_lock_flags(p, f) arch_write_lock(p)

-#define arch_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
-#define arch_write_can_lock(rw)	(!(rw)->lock)
-
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
result; -} +#include <asm/qrwlock.h> +#include <asm/qspinlock.h> #define arch_read_lock_flags(p, f) arch_read_lock(p) #define arch_write_lock_flags(p, f) arch_write_lock(p) -#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) -#define arch_write_can_lock(rw) (!(rw)->lock) - #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 9c454fdeaad8..bce8ef44dfa9 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -1,20 +1,24 @@ #ifndef __SPARC_SPINLOCK_TYPES_H #define __SPARC_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif +#ifdef CONFIG_QUEUED_SPINLOCKS +#include <asm-generic/qspinlock_types.h> +#else typedef struct { volatile unsigned char lock; } arch_spinlock_t; #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } +#endif /* CONFIG_QUEUED_SPINLOCKS */ +#ifdef CONFIG_QUEUED_RWLOCKS +#include <asm-generic/qrwlock_types.h> +#else typedef struct { volatile unsigned int lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { 0 } - +#endif /* CONFIG_QUEUED_RWLOCKS */ #endif diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h index fce415034000..51bc3bc54bfe 100644 --- a/arch/sparc/include/asm/timer_64.h +++ b/arch/sparc/include/asm/timer_64.h @@ -9,7 +9,12 @@ #include <linux/types.h> #include <linux/init.h> +/* The most frequently accessed fields should be first, + * to fit into the same cacheline. + */ struct sparc64_tick_ops { + unsigned long ticks_per_nsec_quotient; + unsigned long offset; unsigned long long (*get_tick)(void); int (*add_compare)(unsigned long); unsigned long softint_mask; @@ -17,6 +22,8 @@ struct sparc64_tick_ops { void (*init_tick)(void); unsigned long (*add_tick)(unsigned long); + unsigned long (*get_frequency)(void); + unsigned long frequency; char *name; }; @@ -27,4 +34,64 @@ unsigned long sparc64_get_clock_tick(unsigned int cpu); void setup_sparc64_timer(void); void __init time_init(void); +#define TICK_PRIV_BIT BIT(63) +#define TICKCMP_IRQ_BIT BIT(63) + +#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL +#define HBIRD_STICK_ADDR 0x1fe0000f070UL + +#define GET_TICK_NINSTR 13 +struct get_tick_patch { + unsigned int addr; + unsigned int tick[GET_TICK_NINSTR]; + unsigned int stick[GET_TICK_NINSTR]; +}; + +extern struct get_tick_patch __get_tick_patch; +extern struct get_tick_patch __get_tick_patch_end; + +static inline unsigned long get_tick(void) +{ + unsigned long tick, tmp1, tmp2; + + __asm__ __volatile__( + /* read hbtick 13 instructions */ + "661:\n" + " mov 0x1fe, %1\n" + " sllx %1, 0x20, %1\n" + " sethi %%hi(0xf000), %2\n" + " or %2, 0x70, %2\n" + " or %1, %2, %1\n" /* %1 = HBIRD_STICK_ADDR */ + " add %1, 8, %2\n" + " ldxa [%2]%3, %0\n" + " ldxa [%1]%3, %1\n" + " ldxa [%2]%3, %2\n" + " sub %2, %0, %0\n" /* don't modify %xcc */ + " brnz,pn %0, 661b\n" /* restart to save one register */ + " sllx %2, 32, %2\n" + " or %2, %1, %0\n" + /* Common/not patched code */ + " sllx %0, 1, %0\n" + " srlx %0, 1, %0\n" /* Clear TICK_PRIV_BIT */ + /* Beginning of patch section */ + " .section .get_tick_patch, \"ax\"\n" + " .word 661b\n" + /* read tick 2 instructions and 11 skipped */ + " ba 1f\n" + " rd %%tick, %0\n" + " .skip 4 * (%4 - 2)\n" + "1:\n" + /* read stick 2 instructions and 11 skipped */ + " ba 1f\n" + " rd %%asr24, %0\n" + " .skip 4 * (%4 - 2)\n" + "1:\n" + /* End of patch 
section */ + " .previous\n" + : "=&r" (tick), "=&r" (tmp1), "=&r" (tmp2) + : "i" (ASI_PHYS_BYPASS_EC_E), "i" (GET_TICK_NINSTR)); + + return tick; +} + #endif /* _SPARC64_TIMER_H */ diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index 9dca7a892978..d1c47e9f0090 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -316,24 +316,33 @@ static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index) } #define VIO_MAX_TYPE_LEN 32 +#define VIO_MAX_NAME_LEN 32 #define VIO_MAX_COMPAT_LEN 64 struct vio_dev { u64 mp; struct device_node *dp; + char node_name[VIO_MAX_NAME_LEN]; char type[VIO_MAX_TYPE_LEN]; char compat[VIO_MAX_COMPAT_LEN]; int compat_len; u64 dev_no; - u64 id; + unsigned long port_id; unsigned long channel_id; unsigned int tx_irq; unsigned int rx_irq; u64 rx_ino; + u64 tx_ino; + + /* Handle to the root of "channel-devices" sub-tree in MDESC */ + u64 cdev_handle; + + /* MD specific data used to identify the vdev in MD */ + union md_node_info md_node_info; struct device dev; }; @@ -347,6 +356,7 @@ struct vio_driver { void (*shutdown)(struct vio_dev *dev); unsigned long driver_data; struct device_driver driver; + bool no_irq; }; struct vio_version { @@ -490,5 +500,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, void vio_port_up(struct vio_driver_state *vio); int vio_set_intr(unsigned long dev_ino, int state); +u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev); #endif /* _SPARC64_VIO_H */ diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index 9ebc37e7d64c..c988e7fa069b 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c @@ -167,7 +167,7 @@ static int apc_probe(struct platform_device *op) return 0; } -static struct of_device_id apc_match[] = { +static const struct of_device_id apc_match[] = { { .name = APC_OBPNAME, }, diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h index 6ae1e77be0bf..b625db4cfb78 100644 --- a/arch/sparc/kernel/kernel.h +++ b/arch/sparc/kernel/kernel.h @@ -52,6 +52,9 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs); void do_signal32(struct pt_regs * regs); asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp); +/* time_64.c */ +void __init time_init_early(void); + /* compat_audit.c */ extern unsigned int sparc32_dir_class[]; extern unsigned int sparc32_chattr_class[]; diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 59d503866431..840e0b21bfe3 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -34,7 +34,6 @@ static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; -#define LDC_PACKET_SIZE 64 /* Packet header layout for unreliable and reliable mode frames. 
* When in RAW mode, packets are simply straight 64-byte payloads @@ -178,6 +177,8 @@ do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \ printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \ } while (0) +#define LDC_ABORT(lp) ldc_abort((lp), __func__) + static const char *state_to_str(u8 state) { switch (state) { @@ -196,15 +197,6 @@ static const char *state_to_str(u8 state) } } -static void ldc_set_state(struct ldc_channel *lp, u8 state) -{ - ldcdbg(STATE, "STATE (%s) --> (%s)\n", - state_to_str(lp->state), - state_to_str(state)); - - lp->state = state; -} - static unsigned long __advance(unsigned long off, unsigned long num_entries) { off += LDC_PACKET_SIZE; @@ -516,11 +508,12 @@ static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt) return err; } -static int ldc_abort(struct ldc_channel *lp) +static int ldc_abort(struct ldc_channel *lp, const char *msg) { unsigned long hv_err; - ldcdbg(STATE, "ABORT\n"); + ldcdbg(STATE, "ABORT[%s]\n", msg); + ldc_print(lp); /* We report but do not act upon the hypervisor errors because * there really isn't much we can do if they fail at this point. @@ -605,7 +598,7 @@ static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp) } } if (err) - return ldc_abort(lp); + return LDC_ABORT(lp); return 0; } @@ -618,13 +611,13 @@ static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp) if (lp->hs_state == LDC_HS_GOTVERS) { if (lp->ver.major != vp->major || lp->ver.minor != vp->minor) - return ldc_abort(lp); + return LDC_ABORT(lp); } else { lp->ver = *vp; lp->hs_state = LDC_HS_GOTVERS; } if (send_rts(lp)) - return ldc_abort(lp); + return LDC_ABORT(lp); return 0; } @@ -635,17 +628,17 @@ static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp) unsigned long new_tail; if (vp->major == 0 && vp->minor == 0) - return ldc_abort(lp); + return LDC_ABORT(lp); vap = find_by_major(vp->major); if (!vap) - return ldc_abort(lp); + return LDC_ABORT(lp); p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS, vap, sizeof(*vap), &new_tail); if (!p) - return ldc_abort(lp); + return LDC_ABORT(lp); return send_tx_packet(lp, p, new_tail); } @@ -668,7 +661,7 @@ static int process_version(struct ldc_channel *lp, return process_ver_nack(lp, vp); default: - return ldc_abort(lp); + return LDC_ABORT(lp); } } @@ -681,13 +674,13 @@ static int process_rts(struct ldc_channel *lp, if (p->stype != LDC_INFO || lp->hs_state != LDC_HS_GOTVERS || p->env != lp->cfg.mode) - return ldc_abort(lp); + return LDC_ABORT(lp); lp->snd_nxt = p->seqid; lp->rcv_nxt = p->seqid; lp->hs_state = LDC_HS_SENTRTR; if (send_rtr(lp)) - return ldc_abort(lp); + return LDC_ABORT(lp); return 0; } @@ -700,7 +693,7 @@ static int process_rtr(struct ldc_channel *lp, if (p->stype != LDC_INFO || p->env != lp->cfg.mode) - return ldc_abort(lp); + return LDC_ABORT(lp); lp->snd_nxt = p->seqid; lp->hs_state = LDC_HS_COMPLETE; @@ -723,7 +716,7 @@ static int process_rdx(struct ldc_channel *lp, if (p->stype != LDC_INFO || !(rx_seq_ok(lp, p->seqid))) - return ldc_abort(lp); + return LDC_ABORT(lp); lp->rcv_nxt = p->seqid; @@ -750,14 +743,14 @@ static int process_control_frame(struct ldc_channel *lp, return process_rdx(lp, p); default: - return ldc_abort(lp); + return LDC_ABORT(lp); } } static int process_error_frame(struct ldc_channel *lp, struct ldc_packet *p) { - return ldc_abort(lp); + return LDC_ABORT(lp); } static int process_data_ack(struct ldc_channel *lp, @@ -776,7 +769,7 @@ static int process_data_ack(struct ldc_channel *lp, return 0; } if (head == lp->tx_tail) - return 
ldc_abort(lp); + return LDC_ABORT(lp); } return 0; @@ -820,16 +813,21 @@ static irqreturn_t ldc_rx(int irq, void *dev_id) lp->hs_state = LDC_HS_COMPLETE; ldc_set_state(lp, LDC_STATE_CONNECTED); - event_mask |= LDC_EVENT_UP; - - orig_state = lp->chan_state; + /* + * Generate an LDC_EVENT_UP event if the channel + * was not already up. + */ + if (orig_state != LDC_CHANNEL_UP) { + event_mask |= LDC_EVENT_UP; + orig_state = lp->chan_state; + } } /* If we are in reset state, flush the RX queue and ignore * everything. */ if (lp->flags & LDC_FLAG_RESET) { - (void) __set_rx_head(lp, lp->rx_tail); + (void) ldc_rx_reset(lp); goto out; } @@ -880,7 +878,7 @@ handshake_complete: break; default: - err = ldc_abort(lp); + err = LDC_ABORT(lp); break; } @@ -895,7 +893,7 @@ handshake_complete: err = __set_rx_head(lp, new); if (err < 0) { - (void) ldc_abort(lp); + (void) LDC_ABORT(lp); break; } if (lp->hs_state == LDC_HS_COMPLETE) @@ -936,7 +934,14 @@ static irqreturn_t ldc_tx(int irq, void *dev_id) lp->hs_state = LDC_HS_COMPLETE; ldc_set_state(lp, LDC_STATE_CONNECTED); - event_mask |= LDC_EVENT_UP; + /* + * Generate an LDC_EVENT_UP event if the channel + * was not already up. + */ + if (orig_state != LDC_CHANNEL_UP) { + event_mask |= LDC_EVENT_UP; + orig_state = lp->chan_state; + } } spin_unlock_irqrestore(&lp->lock, flags); @@ -1342,6 +1347,14 @@ int ldc_bind(struct ldc_channel *lp) lp->hs_state = LDC_HS_OPEN; ldc_set_state(lp, LDC_STATE_BOUND); + if (lp->cfg.mode == LDC_MODE_RAW) { + /* + * There is no handshake in RAW mode, so handshake + * is completed. + */ + lp->hs_state = LDC_HS_COMPLETE; + } + spin_unlock_irqrestore(&lp->lock, flags); return 0; @@ -1447,12 +1460,54 @@ int ldc_state(struct ldc_channel *lp) } EXPORT_SYMBOL(ldc_state); +void ldc_set_state(struct ldc_channel *lp, u8 state) +{ + ldcdbg(STATE, "STATE (%s) --> (%s)\n", + state_to_str(lp->state), + state_to_str(state)); + + lp->state = state; +} +EXPORT_SYMBOL(ldc_set_state); + +int ldc_mode(struct ldc_channel *lp) +{ + return lp->cfg.mode; +} +EXPORT_SYMBOL(ldc_mode); + +int ldc_rx_reset(struct ldc_channel *lp) +{ + return __set_rx_head(lp, lp->rx_tail); +} + +void __ldc_print(struct ldc_channel *lp, const char *caller) +{ + pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n" + "\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n" + "\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n" + "\trcv_nxt=%u snd_nxt=%u\n", + caller, lp->id, lp->flags, state_to_str(lp->state), + lp->chan_state, lp->hs_state, + lp->rx_head, lp->rx_tail, lp->rx_num_entries, + lp->tx_head, lp->tx_tail, lp->tx_num_entries, + lp->rcv_nxt, lp->snd_nxt); +} + static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size) { struct ldc_packet *p; - unsigned long new_tail; + unsigned long new_tail, hv_err; int err; + hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail, + &lp->chan_state); + if (unlikely(hv_err)) + return -EBUSY; + + if (unlikely(lp->chan_state != LDC_CHANNEL_UP)) + return LDC_ABORT(lp); + if (size > LDC_PACKET_SIZE) return -EMSGSIZE; @@ -1483,7 +1538,7 @@ static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size) &lp->rx_tail, &lp->chan_state); if (hv_err) - return ldc_abort(lp); + return LDC_ABORT(lp); if (lp->chan_state == LDC_CHANNEL_DOWN || lp->chan_state == LDC_CHANNEL_RESETTING) @@ -1526,7 +1581,7 @@ static int write_nonraw(struct ldc_channel *lp, const void *buf, return -EBUSY; if (unlikely(lp->chan_state != LDC_CHANNEL_UP)) - return ldc_abort(lp); + return LDC_ABORT(lp); if (!tx_has_space_for(lp, size)) return 
-EAGAIN; @@ -1592,9 +1647,9 @@ static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p, if (err) return err; - err = __set_rx_head(lp, lp->rx_tail); + err = ldc_rx_reset(lp); if (err < 0) - return ldc_abort(lp); + return LDC_ABORT(lp); return 0; } @@ -1607,7 +1662,7 @@ static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p) return err; } if (p->stype & LDC_NACK) - return ldc_abort(lp); + return LDC_ABORT(lp); return 0; } @@ -1627,7 +1682,7 @@ static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head) &lp->rx_tail, &lp->chan_state); if (hv_err) - return ldc_abort(lp); + return LDC_ABORT(lp); if (lp->chan_state == LDC_CHANNEL_DOWN || lp->chan_state == LDC_CHANNEL_RESETTING) @@ -1650,7 +1705,7 @@ static int rx_set_head(struct ldc_channel *lp, unsigned long head) int err = __set_rx_head(lp, head); if (err < 0) - return ldc_abort(lp); + return LDC_ABORT(lp); lp->rx_head = head; return 0; @@ -1689,7 +1744,7 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size) &lp->rx_tail, &lp->chan_state); if (hv_err) - return ldc_abort(lp); + return LDC_ABORT(lp); if (lp->chan_state == LDC_CHANNEL_DOWN || lp->chan_state == LDC_CHANNEL_RESETTING) @@ -1733,9 +1788,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size) lp->rcv_nxt = p->seqid; + /* + * If this is a control-only packet, there is nothing + * else to do but advance the rx queue since the packet + * was already processed above. + */ if (!(p->type & LDC_DATA)) { new = rx_advance(lp, new); - goto no_data; + break; } if (p->stype & (LDC_ACK | LDC_NACK)) { err = data_ack_nack(lp, p); @@ -1900,6 +1960,8 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size) unsigned long flags; int err; + ldcdbg(RX, "%s: entered size=%d\n", __func__, size); + if (!buf) return -EINVAL; @@ -1915,6 +1977,9 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size) spin_unlock_irqrestore(&lp->lock, flags); + ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__, + lp->cfg.mode, lp->rx_head, lp->rx_tail, err); + return err; } EXPORT_SYMBOL(ldc_read); diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index c0765bbf60ea..e4b4e790bf89 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -75,6 +75,74 @@ struct mdesc_handle { struct mdesc_hdr mdesc; }; +typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64, + union md_node_info *); +typedef void (*mdesc_node_info_rel_f)(union md_node_info *); +typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *); + +struct md_node_ops { + char *name; + mdesc_node_info_get_f get_info; + mdesc_node_info_rel_f rel_info; + mdesc_node_match_f node_match; +}; + +static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node, + union md_node_info *node_info); +static void rel_vdev_port_node_info(union md_node_info *node_info); +static bool vdev_port_node_match(union md_node_info *a_node_info, + union md_node_info *b_node_info); + +static int get_ds_port_node_info(struct mdesc_handle *md, u64 node, + union md_node_info *node_info); +static void rel_ds_port_node_info(union md_node_info *node_info); +static bool ds_port_node_match(union md_node_info *a_node_info, + union md_node_info *b_node_info); + +/* supported node types which can be registered */ +static struct md_node_ops md_node_ops_table[] = { + {"virtual-device-port", get_vdev_port_node_info, + rel_vdev_port_node_info, vdev_port_node_match}, + {"domain-services-port", get_ds_port_node_info, + 
rel_ds_port_node_info, ds_port_node_match}, + {NULL, NULL, NULL, NULL} +}; + +static void mdesc_get_node_ops(const char *node_name, + mdesc_node_info_get_f *get_info_f, + mdesc_node_info_rel_f *rel_info_f, + mdesc_node_match_f *match_f) +{ + int i; + + if (get_info_f) + *get_info_f = NULL; + + if (rel_info_f) + *rel_info_f = NULL; + + if (match_f) + *match_f = NULL; + + if (!node_name) + return; + + for (i = 0; md_node_ops_table[i].name != NULL; i++) { + if (strcmp(md_node_ops_table[i].name, node_name) == 0) { + if (get_info_f) + *get_info_f = md_node_ops_table[i].get_info; + + if (rel_info_f) + *rel_info_f = md_node_ops_table[i].rel_info; + + if (match_f) + *match_f = md_node_ops_table[i].node_match; + + break; + } + } +} + static void mdesc_handle_init(struct mdesc_handle *hp, unsigned int handle_size, void *base) @@ -137,12 +205,10 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) handle_size = (sizeof(struct mdesc_handle) - sizeof(struct mdesc_hdr) + mdesc_size); + base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_REPEAT); + if (!base) + return NULL; - /* - * Allocation has to succeed because mdesc update would be missed - * and such events are not retransmitted. - */ - base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL); addr = (unsigned long)base; addr = (addr + 15UL) & ~15UL; hp = (struct mdesc_handle *) addr; @@ -218,14 +284,31 @@ static struct mdesc_notifier_client *client_list; void mdesc_register_notifier(struct mdesc_notifier_client *client) { + bool supported = false; u64 node; + int i; mutex_lock(&mdesc_mutex); + + /* check to see if the node is supported for registration */ + for (i = 0; md_node_ops_table[i].name != NULL; i++) { + if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) { + supported = true; + break; + } + } + + if (!supported) { + pr_err("MD: %s node not supported\n", client->node_name); + mutex_unlock(&mdesc_mutex); + return; + } + client->next = client_list; client_list = client; mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name) - client->add(cur_mdesc, node); + client->add(cur_mdesc, node, client->node_name); mutex_unlock(&mdesc_mutex); } @@ -249,59 +332,145 @@ static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node) return id; } +static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node, + union md_node_info *node_info) +{ + const u64 *parent_cfg_hdlp; + const char *name; + const u64 *idp; + + /* + * Virtual device nodes are distinguished by: + * 1. "id" property + * 2. "name" property + * 3. 
parent node "cfg-handle" property + */ + idp = mdesc_get_property(md, node, "id", NULL); + name = mdesc_get_property(md, node, "name", NULL); + parent_cfg_hdlp = parent_cfg_handle(md, node); + + if (!idp || !name || !parent_cfg_hdlp) + return -1; + + node_info->vdev_port.id = *idp; + node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL); + node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp; + + return 0; +} + +static void rel_vdev_port_node_info(union md_node_info *node_info) +{ + if (node_info && node_info->vdev_port.name) { + kfree_const(node_info->vdev_port.name); + node_info->vdev_port.name = NULL; + } +} + +static bool vdev_port_node_match(union md_node_info *a_node_info, + union md_node_info *b_node_info) +{ + if (a_node_info->vdev_port.id != b_node_info->vdev_port.id) + return false; + + if (a_node_info->vdev_port.parent_cfg_hdl != + b_node_info->vdev_port.parent_cfg_hdl) + return false; + + if (strncmp(a_node_info->vdev_port.name, + b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0) + return false; + + return true; +} + +static int get_ds_port_node_info(struct mdesc_handle *md, u64 node, + union md_node_info *node_info) +{ + const u64 *idp; + + /* DS port nodes use the "id" property to distinguish them */ + idp = mdesc_get_property(md, node, "id", NULL); + if (!idp) + return -1; + + node_info->ds_port.id = *idp; + + return 0; +} + +static void rel_ds_port_node_info(union md_node_info *node_info) +{ +} + +static bool ds_port_node_match(union md_node_info *a_node_info, + union md_node_info *b_node_info) +{ + if (a_node_info->ds_port.id != b_node_info->ds_port.id) + return false; + + return true; +} + /* Run 'func' on nodes which are in A but not in B. */ static void invoke_on_missing(const char *name, struct mdesc_handle *a, struct mdesc_handle *b, - void (*func)(struct mdesc_handle *, u64)) + void (*func)(struct mdesc_handle *, u64, + const char *node_name)) { - u64 node; + mdesc_node_info_get_f get_info_func; + mdesc_node_info_rel_f rel_info_func; + mdesc_node_match_f node_match_func; + union md_node_info a_node_info; + union md_node_info b_node_info; + bool found; + u64 a_node; + u64 b_node; + int rv; - mdesc_for_each_node_by_name(a, node, name) { - int found = 0, is_vdc_port = 0; - const char *name_prop; - const u64 *id; - u64 fnode; - - name_prop = mdesc_get_property(a, node, "name", NULL); - if (name_prop && !strcmp(name_prop, "vdc-port")) { - is_vdc_port = 1; - id = parent_cfg_handle(a, node); - } else - id = mdesc_get_property(a, node, "id", NULL); - - if (!id) { - printk(KERN_ERR "MD: Cannot find ID for %s node.\n", - (name_prop ? 
name_prop : name)); + /* + * Find the get_info, rel_info and node_match ops for the given + * node name + */ + mdesc_get_node_ops(name, &get_info_func, &rel_info_func, + &node_match_func); + + /* If we didn't find a match, the node type is not supported */ + if (!get_info_func || !rel_info_func || !node_match_func) { + pr_err("MD: %s node type is not supported\n", name); + return; + } + + mdesc_for_each_node_by_name(a, a_node, name) { + found = false; + + rv = get_info_func(a, a_node, &a_node_info); + if (rv != 0) { + pr_err("MD: Cannot find 1 or more required match properties for %s node.\n", + name); continue; } - mdesc_for_each_node_by_name(b, fnode, name) { - const u64 *fid; - - if (is_vdc_port) { - name_prop = mdesc_get_property(b, fnode, - "name", NULL); - if (!name_prop || - strcmp(name_prop, "vdc-port")) - continue; - fid = parent_cfg_handle(b, fnode); - if (!fid) { - printk(KERN_ERR "MD: Cannot find ID " - "for vdc-port node.\n"); - continue; - } - } else - fid = mdesc_get_property(b, fnode, - "id", NULL); - - if (*id == *fid) { - found = 1; + /* Check each node in B for node matching a_node */ + mdesc_for_each_node_by_name(b, b_node, name) { + rv = get_info_func(b, b_node, &b_node_info); + if (rv != 0) + continue; + + if (node_match_func(&a_node_info, &b_node_info)) { + found = true; + rel_info_func(&b_node_info); break; } + + rel_info_func(&b_node_info); } + + rel_info_func(&a_node_info); + if (!found) - func(a, node); + func(a, a_node, name); } } @@ -367,6 +536,76 @@ out: mutex_unlock(&mdesc_mutex); } +u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name, + union md_node_info *node_info) +{ + mdesc_node_info_get_f get_info_func; + mdesc_node_info_rel_f rel_info_func; + mdesc_node_match_f node_match_func; + union md_node_info hp_node_info; + u64 hp_node; + int rv; + + if (hp == NULL || node_name == NULL || node_info == NULL) + return MDESC_NODE_NULL; + + /* Find the ops for the given node name */ + mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func, + &node_match_func); + + /* If we didn't find ops for the given node name, it is not supported */ + if (!get_info_func || !rel_info_func || !node_match_func) { + pr_err("MD: %s node is not supported\n", node_name); + return -EINVAL; + } + + mdesc_for_each_node_by_name(hp, hp_node, node_name) { + rv = get_info_func(hp, hp_node, &hp_node_info); + if (rv != 0) + continue; + + if (node_match_func(node_info, &hp_node_info)) + break; + + rel_info_func(&hp_node_info); + } + + rel_info_func(&hp_node_info); + + return hp_node; +} +EXPORT_SYMBOL(mdesc_get_node); + +int mdesc_get_node_info(struct mdesc_handle *hp, u64 node, + const char *node_name, union md_node_info *node_info) +{ + mdesc_node_info_get_f get_info_func; + int rv; + + if (hp == NULL || node == MDESC_NODE_NULL || + node_name == NULL || node_info == NULL) + return -EINVAL; + + /* Find the get_info op for the given node name */ + mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL); + + /* If we didn't find a get_info_func, the node name is not supported */ + if (get_info_func == NULL) { + pr_err("MD: %s node is not supported\n", node_name); + return -EINVAL; + } + + rv = get_info_func(hp, node, node_info); + if (rv != 0) { + pr_err("MD: Cannot find 1 or more required match properties for %s node.\n", + node_name); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(mdesc_get_node_info); + static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) { return (struct mdesc_elem *) (mdesc + 1); diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c 
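The rewritten invoke_on_missing() in the mdesc.c hunk above is a plain set difference: capture the identity of each node in A and invoke the callback for every node with no match in B. The same algorithm reduced to integer identities (mock data rather than MD handles):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static void on_missing(const unsigned long *a, size_t na,
		       const unsigned long *b, size_t nb,
		       void (*func)(unsigned long))
{
	for (size_t i = 0; i < na; i++) {
		bool found = false;

		for (size_t j = 0; j < nb; j++) {
			if (a[i] == b[j]) {
				found = true;
				break;
			}
		}
		if (!found)
			func(a[i]);	/* present in A, absent from B */
	}
}

static void show(unsigned long id) { printf("missing: %lu\n", id); }

int main(void)
{
	unsigned long cur[] = { 1, 2, 3 }, upd[] = { 2, 4 };

	on_missing(upd, 2, cur, 3, show);	/* added by the update: 4 */
	on_missing(cur, 3, upd, 2, show);	/* removed by the update: 1, 3 */
	return 0;
}

The MD update path runs the comparison in both directions, which is how the add and remove notifier callbacks are derived.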
index f12b23f7b515..3b26cf62df6d 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c @@ -71,7 +71,7 @@ static int pmc_probe(struct platform_device *op) return 0; } -static struct of_device_id pmc_match[] = { +static const struct of_device_id pmc_match[] = { { .name = PMC_OBPNAME, }, diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index 20cc5d80a471..baeaeed64993 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c @@ -381,7 +381,7 @@ bool arch_find_n_match_cpu_physical_id(struct device_node *cpun, int this_cpu_id; /* On hypervisor based platforms we interrogate the 'reg' - * property. On everything else we look for a 'upa-portis', + * property. On everything else we look for a 'upa-portid', * 'portid', or 'cpuid' property. */ diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 422b17880955..4d9c3e13c150 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -95,7 +95,7 @@ static struct console prom_early_console = { .index = -1, }; -/* +/* * Process kernel command line switches that are specific to the * SPARC or that require special low-level processing. */ @@ -365,6 +365,7 @@ void __init start_early_boot(void) } current_thread_info()->cpu = cpu; + time_init_early(); prom_init_report(); start_kernel(); } @@ -639,7 +640,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_BLK_DEV_RAM rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); - rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); + rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); #endif task_thread_info(&init_task)->kregs = &fake_swapper_regs; @@ -648,7 +649,7 @@ void __init setup_arch(char **cmdline_p) if (!ic_set_manually) { phandle chosen = prom_finddevice("/chosen"); u32 cl, sv, gw; - + cl = prom_getintdefault (chosen, "client-ip", 0); sv = prom_getintdefault (chosen, "server-ip", 0); gw = prom_getintdefault (chosen, "gateway-ip", 0); diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 9f575dfc2e41..2ce2e7b2abbb 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -298,7 +298,7 @@ static int clock_probe(struct platform_device *op) return 0; } -static struct of_device_id clock_match[] = { +static const struct of_device_id clock_match[] = { { .name = "eeprom", }, diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 98d05de8da66..564f0e46ffd4 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -32,7 +32,6 @@ #include <linux/kernel_stat.h> #include <linux/clockchips.h> #include <linux/clocksource.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/ftrace.h> @@ -47,14 +46,13 @@ #include <asm/cpudata.h> #include <linux/uaccess.h> #include <asm/irq_regs.h> +#include <asm/cacheflush.h> #include "entry.h" +#include "kernel.h" DEFINE_SPINLOCK(rtc_lock); -#define TICK_PRIV_BIT (1UL << 63) -#define TICKCMP_IRQ_BIT (1UL << 63) - #ifdef CONFIG_SMP unsigned long profile_pc(struct pt_regs *regs) { @@ -164,13 +162,44 @@ static unsigned long tick_add_tick(unsigned long adj) return new_tick; } -static struct sparc64_tick_ops tick_operations __read_mostly = { +/* Searches for cpu clock frequency with given cpuid in OpenBoot tree */ +static unsigned long cpuid_to_freq(phandle node, int cpuid) +{ + bool is_cpu_node = false; + unsigned long freq = 0; + char type[128]; + + if (!node) + return freq; + + if (prom_getproperty(node, "device_type", 
type, sizeof(type)) != -1) + is_cpu_node = (strcmp(type, "cpu") == 0); + + /* try upa-portid then cpuid to get cpuid, see prom_64.c */ + if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid || + prom_getint(node, "cpuid") == cpuid)) + freq = prom_getintdefault(node, "clock-frequency", 0); + if (!freq) + freq = cpuid_to_freq(prom_getchild(node), cpuid); + if (!freq) + freq = cpuid_to_freq(prom_getsibling(node), cpuid); + + return freq; +} + +static unsigned long tick_get_frequency(void) +{ + return cpuid_to_freq(prom_root_node, hard_smp_processor_id()); +} + +static struct sparc64_tick_ops tick_operations __cacheline_aligned = { .name = "tick", .init_tick = tick_init_tick, .disable_irq = tick_disable_irq, .get_tick = tick_get_tick, .add_tick = tick_add_tick, .add_compare = tick_add_compare, + .get_frequency = tick_get_frequency, .softint_mask = 1UL << 0, }; @@ -250,6 +279,11 @@ static int stick_add_compare(unsigned long adj) return ((long)(new_tick - (orig_tick+adj))) > 0L; } +static unsigned long stick_get_frequency(void) +{ + return prom_getintdefault(prom_root_node, "stick-frequency", 0); +} + static struct sparc64_tick_ops stick_operations __read_mostly = { .name = "stick", .init_tick = stick_init_tick, @@ -257,6 +291,7 @@ static struct sparc64_tick_ops stick_operations __read_mostly = { .get_tick = stick_get_tick, .add_tick = stick_add_tick, .add_compare = stick_add_compare, + .get_frequency = stick_get_frequency, .softint_mask = 1UL << 16, }; @@ -277,9 +312,6 @@ static struct sparc64_tick_ops stick_operations __read_mostly = { * 2) write high * 3) write low */ -#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL -#define HBIRD_STICK_ADDR 0x1fe0000f070UL - static unsigned long __hbird_read_stick(void) { unsigned long ret, tmp1, tmp2, tmp3; @@ -381,6 +413,11 @@ static int hbtick_add_compare(unsigned long adj) return ((long)(val2 - val)) > 0L; } +static unsigned long hbtick_get_frequency(void) +{ + return prom_getintdefault(prom_root_node, "stick-frequency", 0); +} + static struct sparc64_tick_ops hbtick_operations __read_mostly = { .name = "hbtick", .init_tick = hbtick_init_tick, @@ -388,11 +425,10 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = { .get_tick = hbtick_get_tick, .add_tick = hbtick_add_tick, .add_compare = hbtick_add_compare, + .get_frequency = hbtick_get_frequency, .softint_mask = 1UL << 0, }; -static unsigned long timer_ticks_per_nsec_quotient __read_mostly; - unsigned long cmos_regs; EXPORT_SYMBOL(cmos_regs); @@ -582,34 +618,17 @@ static int __init clock_init(void) */ fs_initcall(clock_init); -/* This is gets the master TICK_INT timer going. 
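cpuid_to_freq() above resolves a CPU's clock rate by a depth-first walk of the OpenBoot tree: test the node itself, then recurse into its children, then its siblings, stopping at the first nonzero frequency. The recursion pattern in isolation, over a mock tree instead of the PROM calls:

struct pnode {
	int cpuid;			/* from "upa-portid" or "cpuid" */
	unsigned long clock_frequency;	/* from "clock-frequency" */
	const struct pnode *child;
	const struct pnode *sibling;
};

static unsigned long node_to_freq(const struct pnode *n, int cpuid)
{
	unsigned long freq = 0;

	if (!n)
		return 0;
	if (n->cpuid == cpuid)
		freq = n->clock_frequency;		/* match on this node */
	if (!freq)
		freq = node_to_freq(n->child, cpuid);	/* descend first */
	if (!freq)
		freq = node_to_freq(n->sibling, cpuid);	/* then the peers */
	return freq;
}

As in the kernel function, a frequency of zero doubles as the not-found value, which keeps the recursion compact.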
*/ -static unsigned long sparc64_init_timers(void) +/* Return true if this is Hummingbird, aka Ultra-IIe */ +static bool is_hummingbird(void) { - struct device_node *dp; - unsigned long freq; + unsigned long ver, manuf, impl; - dp = of_find_node_by_path("/"); - if (tlb_type == spitfire) { - unsigned long ver, manuf, impl; - - __asm__ __volatile__ ("rdpr %%ver, %0" - : "=&r" (ver)); - manuf = ((ver >> 48) & 0xffff); - impl = ((ver >> 32) & 0xffff); - if (manuf == 0x17 && impl == 0x13) { - /* Hummingbird, aka Ultra-IIe */ - tick_ops = &hbtick_operations; - freq = of_getintprop_default(dp, "stick-frequency", 0); - } else { - tick_ops = &tick_operations; - freq = local_cpu_data().clock_tick; - } - } else { - tick_ops = &stick_operations; - freq = of_getintprop_default(dp, "stick-frequency", 0); - } + __asm__ __volatile__ ("rdpr %%ver, %0" + : "=&r" (ver)); + manuf = ((ver >> 48) & 0xffff); + impl = ((ver >> 32) & 0xffff); - return freq; + return (manuf == 0x17 && impl == 0x13); } struct freq_table { @@ -671,12 +690,12 @@ core_initcall(register_sparc64_cpufreq_notifier); static int sparc64_next_event(unsigned long delta, struct clock_event_device *evt) { - return tick_ops->add_compare(delta) ? -ETIME : 0; + return tick_operations.add_compare(delta) ? -ETIME : 0; } static int sparc64_timer_shutdown(struct clock_event_device *evt) { - tick_ops->disable_irq(); + tick_operations.disable_irq(); return 0; } @@ -693,7 +712,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); void __irq_entry timer_interrupt(int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); - unsigned long tick_mask = tick_ops->softint_mask; + unsigned long tick_mask = tick_operations.softint_mask; int cpu = smp_processor_id(); struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); @@ -728,7 +747,7 @@ void setup_sparc64_timer(void) : "=r" (pstate) : "i" (PSTATE_IE)); - tick_ops->init_tick(); + tick_operations.init_tick(); /* Restore PSTATE_IE. */ __asm__ __volatile__("wrpr %0, 0x0, %%pstate" @@ -755,12 +774,10 @@ static unsigned long tb_ticks_per_usec __read_mostly; void __delay(unsigned long loops) { - unsigned long bclock, now; + unsigned long bclock = get_tick(); - bclock = tick_ops->get_tick(); - do { - now = tick_ops->get_tick(); - } while ((now-bclock) < loops); + while ((get_tick() - bclock) < loops) + ; } EXPORT_SYMBOL(__delay); @@ -772,26 +789,71 @@ EXPORT_SYMBOL(udelay); static u64 clocksource_tick_read(struct clocksource *cs) { - return tick_ops->get_tick(); + return get_tick(); +} + +static void __init get_tick_patch(void) +{ + unsigned int *addr, *instr, i; + struct get_tick_patch *p; + + if (tlb_type == spitfire && is_hummingbird()) + return; + + for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) { + instr = (tlb_type == spitfire) ? 
p->tick : p->stick; + addr = (unsigned int *)(unsigned long)p->addr; + for (i = 0; i < GET_TICK_NINSTR; i++) { + addr[i] = instr[i]; + /* ensure that address is modified before flush */ + wmb(); + flushi(&addr[i]); + } + } +} + +static void init_tick_ops(struct sparc64_tick_ops *ops) +{ + unsigned long freq, quotient, tick; + + freq = ops->get_frequency(); + quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); + tick = ops->get_tick(); + + ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT; + ops->ticks_per_nsec_quotient = quotient; + ops->frequency = freq; + tick_operations = *ops; + get_tick_patch(); +} + +void __init time_init_early(void) +{ + if (tlb_type == spitfire) { + if (is_hummingbird()) + init_tick_ops(&hbtick_operations); + else + init_tick_ops(&tick_operations); + } else { + init_tick_ops(&stick_operations); + } } void __init time_init(void) { - unsigned long freq = sparc64_init_timers(); + unsigned long freq; + freq = tick_operations.frequency; tb_ticks_per_usec = freq / USEC_PER_SEC; - timer_ticks_per_nsec_quotient = - clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); - - clocksource_tick.name = tick_ops->name; + clocksource_tick.name = tick_operations.name; clocksource_tick.read = clocksource_tick_read; clocksource_register_hz(&clocksource_tick, freq); printk("clocksource: mult[%x] shift[%d]\n", clocksource_tick.mult, clocksource_tick.shift); - sparc64_clockevent.name = tick_ops->name; + sparc64_clockevent.name = tick_operations.name; clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4); sparc64_clockevent.max_delta_ns = @@ -809,14 +871,21 @@ void __init time_init(void) unsigned long long sched_clock(void) { - unsigned long ticks = tick_ops->get_tick(); + unsigned long quotient = tick_operations.ticks_per_nsec_quotient; + unsigned long offset = tick_operations.offset; + + /* Use barrier so the compiler emits the loads first and overlaps load + * latency with reading tick, because reading %tick/%stick is a + * post-sync instruction that will flush and restart subsequent + * instructions after it commits. 
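init_tick_ops() above precomputes a fixed-point multiplier so that sched_clock() reduces to a multiply, a shift and a subtract. The arithmetic as a standalone sketch; the shift width is illustrative and hz2mult() only approximates what clocksource_hz2mult() computes:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL
#define SHIFT		10	/* stand-in for SPARC64_NSEC_PER_CYC_SHIFT */

static uint64_t hz2mult(uint64_t hz)
{
	/* quotient such that ns ~= (ticks * quotient) >> SHIFT */
	return ((uint64_t)NSEC_PER_SEC << SHIFT) / hz;
}

int main(void)
{
	uint64_t freq = 1500000000ULL;		/* 1.5 GHz tick source */
	uint64_t quotient = hz2mult(freq);
	uint64_t boot_tick = 987654321ULL;	/* counter value at boot */

	/* the offset zeroes the clock at boot, as init_tick_ops() does */
	uint64_t offset = (boot_tick * quotient) >> SHIFT;
	uint64_t now = boot_tick + 3000000ULL;	/* 3M ticks later, i.e. 2 ms */

	printf("ns since boot: %llu\n",
	       (unsigned long long)(((now * quotient) >> SHIFT) - offset));
	return 0;
}

Caching quotient and offset at the head of sparc64_tick_ops keeps both loads in one cache line, and the barrier() in sched_clock() only constrains the compiler so those loads are issued before the tick register read.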
+ */ + barrier(); - return (ticks * timer_ticks_per_nsec_quotient) - >> SPARC64_NSEC_PER_CYC_SHIFT; + return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset; } int read_current_timer(unsigned long *timer_val) { - *timer_val = tick_ops->get_tick(); + *timer_val = get_tick(); return 0; } diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 7dd9b57f3a61..1c8763c9c52b 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -70,15 +70,26 @@ static int vio_device_probe(struct device *dev) struct vio_dev *vdev = to_vio_dev(dev); struct vio_driver *drv = to_vio_driver(dev->driver); const struct vio_device_id *id; - int error = -ENODEV; - if (drv->probe) { - id = vio_match_device(drv->id_table, vdev); - if (id) - error = drv->probe(vdev, id); + if (!drv->probe) + return -ENODEV; + + id = vio_match_device(drv->id_table, vdev); + if (!id) + return -ENODEV; + + /* alloc irqs (unless the driver specified not to) */ + if (!drv->no_irq) { + if (vdev->tx_irq == 0 && vdev->tx_ino != ~0UL) + vdev->tx_irq = sun4v_build_virq(vdev->cdev_handle, + vdev->tx_ino); + + if (vdev->rx_irq == 0 && vdev->rx_ino != ~0UL) + vdev->rx_irq = sun4v_build_virq(vdev->cdev_handle, + vdev->rx_ino); } - return error; + return drv->probe(vdev, id); } static int vio_device_remove(struct device *dev) @@ -86,8 +97,15 @@ static int vio_device_remove(struct device *dev) struct vio_dev *vdev = to_vio_dev(dev); struct vio_driver *drv = to_vio_driver(dev->driver); - if (drv->remove) + if (drv->remove) { + /* + * Ideally, we would remove/deallocate tx/rx virqs + * here - however, there are currently no support + * routines to do so at the moment. TBD + */ + return drv->remove(vdev); + } return 1; } @@ -185,11 +203,58 @@ static struct device_node *cdev_node; static struct vio_dev *root_vdev; static u64 cdev_cfg_handle; +static const u64 *vio_cfg_handle(struct mdesc_handle *hp, u64 node) +{ + const u64 *cfg_handle = NULL; + u64 a; + + mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { + u64 target; + + target = mdesc_arc_target(hp, a); + cfg_handle = mdesc_get_property(hp, target, + "cfg-handle", NULL); + if (cfg_handle) + break; + } + + return cfg_handle; +} + +/** + * vio_vdev_node() - Find VDEV node in MD + * @hp: Handle to the MD + * @vdev: Pointer to VDEV + * + * Find the node in the current MD which matches the given vio_dev. This + * must be done dynamically since the node value can change if the MD + * is updated. 
+ * + * NOTE: the MD must be locked, using mdesc_grab(), when calling this routine + * + * Return: The VDEV node in MDESC + */ +u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev) +{ + u64 node; + + if (vdev == NULL) + return MDESC_NODE_NULL; + + node = mdesc_get_node(hp, (const char *)vdev->node_name, + &vdev->md_node_info); + + return node; +} + static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp, struct vio_dev *vdev) { u64 a; + vdev->tx_ino = ~0UL; + vdev->rx_ino = ~0UL; + vdev->channel_id = ~0UL; mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { const u64 *chan_id; const u64 *irq; @@ -199,18 +264,18 @@ static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp, irq = mdesc_get_property(hp, target, "tx-ino", NULL); if (irq) - vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq); + vdev->tx_ino = *irq; irq = mdesc_get_property(hp, target, "rx-ino", NULL); - if (irq) { - vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq); + if (irq) vdev->rx_ino = *irq; - } chan_id = mdesc_get_property(hp, target, "id", NULL); if (chan_id) vdev->channel_id = *chan_id; } + + vdev->cdev_handle = cdev_cfg_handle; } int vio_set_intr(unsigned long dev_ino, int state) @@ -223,14 +288,14 @@ int vio_set_intr(unsigned long dev_ino, int state) EXPORT_SYMBOL(vio_set_intr); static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, + const char *node_name, struct device *parent) { - const char *type, *compat, *bus_id_name; + const char *type, *compat; struct device_node *dp; struct vio_dev *vdev; int err, tlen, clen; const u64 *id, *cfg_handle; - u64 a; type = mdesc_get_property(hp, mp, "device-type", &tlen); if (!type) { @@ -240,7 +305,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, tlen = strlen(type) + 1; } } - if (tlen > VIO_MAX_TYPE_LEN) { + if (tlen > VIO_MAX_TYPE_LEN || strlen(type) >= VIO_MAX_TYPE_LEN) { printk(KERN_ERR "VIO: Type string [%s] is too long.\n", type); return NULL; @@ -248,31 +313,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, id = mdesc_get_property(hp, mp, "id", NULL); - cfg_handle = NULL; - mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { - u64 target; - - target = mdesc_arc_target(hp, a); - cfg_handle = mdesc_get_property(hp, target, - "cfg-handle", NULL); - if (cfg_handle) - break; - } - - bus_id_name = type; - if (!strcmp(type, "domain-services-port")) - bus_id_name = "ds"; - - /* - * 20 char is the old driver-core name size limit, which is no more. - * This check can probably be removed after review and possible - * adaption of the vio users name length handling. 
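vio_vdev_node() above re-resolves the MD node from the saved md_node_info on every call, because raw node handles are invalidated whenever the MD is updated. A toy model of this match-by-identity, not by-handle idea (mock types, not the MDESC API):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Identity captured at device-create time; it survives MD updates */
struct vdev_ident {
	unsigned long id;
	unsigned long parent_cfg_hdl;
	char name[32];
};

static bool ident_match(const struct vdev_ident *a,
			const struct vdev_ident *b)
{
	return a->id == b->id &&
	       a->parent_cfg_hdl == b->parent_cfg_hdl &&
	       strncmp(a->name, b->name, sizeof(a->name)) == 0;
}

/* Stand-in for walking mdesc_for_each_node_by_name() in a new snapshot */
static long resolve_node(const struct vdev_ident *want,
			 const struct vdev_ident *snapshot, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (ident_match(want, &snapshot[i]))
			return (long)i;		/* fresh node handle */
	return -1;				/* device disappeared */
}

The reworked vio_remove() later in this patch leans on the same comparison, via device_find_child(), to pair an MD remove event with the struct device created earlier.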
- */ - if (strlen(bus_id_name) >= 20 - 4) { - printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n", - bus_id_name); - return NULL; - } + cfg_handle = vio_cfg_handle(hp, mp); compat = mdesc_get_property(hp, mp, "device-type", &clen); if (!compat) { @@ -297,25 +338,23 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, memset(vdev->compat, 0, sizeof(vdev->compat)); vdev->compat_len = clen; - vdev->channel_id = ~0UL; - vdev->tx_irq = ~0; - vdev->rx_irq = ~0; + vdev->port_id = ~0UL; + vdev->tx_irq = 0; + vdev->rx_irq = 0; vio_fill_channel_info(hp, mp, vdev); if (!id) { - dev_set_name(&vdev->dev, "%s", bus_id_name); + dev_set_name(&vdev->dev, "%s", type); vdev->dev_no = ~(u64)0; - vdev->id = ~(u64)0; } else if (!cfg_handle) { - dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); + dev_set_name(&vdev->dev, "%s-%llu", type, *id); vdev->dev_no = *id; - vdev->id = ~(u64)0; } else { - dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, + dev_set_name(&vdev->dev, "%s-%llu-%llu", type, *cfg_handle, *id); vdev->dev_no = *cfg_handle; - vdev->id = *id; + vdev->port_id = *id; } vdev->dev.parent = parent; @@ -337,7 +376,26 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, } vdev->dp = dp; - printk(KERN_INFO "VIO: Adding device %s\n", dev_name(&vdev->dev)); + /* + * node_name is NULL for the parent/channel-devices node and + * the parent doesn't require the MD node info. + */ + if (node_name != NULL) { + (void) snprintf(vdev->node_name, VIO_MAX_NAME_LEN, "%s", + node_name); + + err = mdesc_get_node_info(hp, mp, node_name, + &vdev->md_node_info); + if (err) { + pr_err("VIO: Could not get MD node info %s, err=%d\n", + dev_name(&vdev->dev), err); + kfree(vdev); + return NULL; + } + } + + pr_info("VIO: Adding device %s (tx_ino = %llx, rx_ino = %llx)\n", + dev_name(&vdev->dev), vdev->tx_ino, vdev->rx_ino); err = device_register(&vdev->dev); if (err) { @@ -353,73 +411,42 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, return vdev; } -static void vio_add(struct mdesc_handle *hp, u64 node) +static void vio_add(struct mdesc_handle *hp, u64 node, + const char *node_name) { - (void) vio_create_one(hp, node, &root_vdev->dev); + (void) vio_create_one(hp, node, node_name, &root_vdev->dev); } -struct vio_md_node_query { - const char *type; - u64 dev_no; - u64 id; +struct vio_remove_node_data { + struct mdesc_handle *hp; + u64 node; }; static int vio_md_node_match(struct device *dev, void *arg) { - struct vio_md_node_query *query = (struct vio_md_node_query *) arg; struct vio_dev *vdev = to_vio_dev(dev); + struct vio_remove_node_data *node_data; + u64 node; - if (vdev->dev_no != query->dev_no) - return 0; - if (vdev->id != query->id) - return 0; - if (strcmp(vdev->type, query->type)) - return 0; + node_data = (struct vio_remove_node_data *)arg; - return 1; + node = vio_vdev_node(node_data->hp, vdev); + + if (node == node_data->node) + return 1; + else + return 0; } -static void vio_remove(struct mdesc_handle *hp, u64 node) +static void vio_remove(struct mdesc_handle *hp, u64 node, const char *node_name) { - const char *type; - const u64 *id, *cfg_handle; - u64 a; - struct vio_md_node_query query; + struct vio_remove_node_data node_data; struct device *dev; - type = mdesc_get_property(hp, node, "device-type", NULL); - if (!type) { - type = mdesc_get_property(hp, node, "name", NULL); - if (!type) - type = mdesc_node_name(hp, node); - } - - query.type = type; - - id = mdesc_get_property(hp, node, "id", NULL); - cfg_handle = NULL; - 
mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { - u64 target; - - target = mdesc_arc_target(hp, a); - cfg_handle = mdesc_get_property(hp, target, - "cfg-handle", NULL); - if (cfg_handle) - break; - } - - if (!id) { - query.dev_no = ~(u64)0; - query.id = ~(u64)0; - } else if (!cfg_handle) { - query.dev_no = *id; - query.id = ~(u64)0; - } else { - query.dev_no = *cfg_handle; - query.id = *id; - } + node_data.hp = hp; + node_data.node = node; - dev = device_find_child(&root_vdev->dev, &query, + dev = device_find_child(&root_vdev->dev, (void *)&node_data, vio_md_node_match); if (dev) { printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); @@ -427,15 +454,7 @@ static void vio_remove(struct mdesc_handle *hp, u64 node) device_unregister(dev); put_device(dev); } else { - if (!id) - printk(KERN_ERR "VIO: Removed unknown %s node.\n", - type); - else if (!cfg_handle) - printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n", - type, *id); - else - printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n", - type, *cfg_handle, *id); + pr_err("VIO: %s node not found in MDESC\n", node_name); } } @@ -450,7 +469,8 @@ static struct mdesc_notifier_client vio_device_notifier = { * under "openboot" that we should not mess with as aparently that is * reserved exclusively for OBP use. */ -static void vio_add_ds(struct mdesc_handle *hp, u64 node) +static void vio_add_ds(struct mdesc_handle *hp, u64 node, + const char *node_name) { int found; u64 a; @@ -467,7 +487,7 @@ static void vio_add_ds(struct mdesc_handle *hp, u64 node) } if (found) - (void) vio_create_one(hp, node, &root_vdev->dev); + (void) vio_create_one(hp, node, node_name, &root_vdev->dev); } static struct mdesc_notifier_client vio_ds_notifier = { @@ -534,7 +554,7 @@ static int __init vio_init(void) cdev_cfg_handle = *cfg_handle; - root_vdev = vio_create_one(hp, root, NULL); + root_vdev = vio_create_one(hp, root, NULL, NULL); err = -ENODEV; if (!root_vdev) { printk(KERN_ERR "VIO: Could not create root device.\n"); diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index b30b30ab3ddd..d4f13c037a40 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -223,6 +223,9 @@ static int send_rdx(struct vio_driver_state *vio) static int send_attr(struct vio_driver_state *vio) { + if (!vio->ops) + return -EINVAL; + return vio->ops->send_attr(vio); } @@ -283,6 +286,7 @@ static int process_ver_info(struct vio_driver_state *vio, ver.minor = vap->minor; pkt->minor = ver.minor; pkt->tag.stype = VIO_SUBTYPE_ACK; + pkt->dev_class = vio->dev_class; viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n", pkt->major, pkt->minor); err = send_ctrl(vio, &pkt->tag, sizeof(*pkt)); @@ -374,6 +378,9 @@ static int process_attr(struct vio_driver_state *vio, void *pkt) if (!(vio->hs_state & VIO_HS_GOTVERS)) return handshake_failure(vio); + if (!vio->ops) + return 0; + err = vio->ops->handle_attr(vio, pkt); if (err < 0) { return handshake_failure(vio); @@ -388,6 +395,7 @@ static int process_attr(struct vio_driver_state *vio, void *pkt) vio->hs_state |= VIO_HS_SENT_DREG; } } + return 0; } @@ -647,10 +655,13 @@ int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt) err = process_unknown(vio, pkt); break; } + if (!err && vio->hs_state != prev_state && - (vio->hs_state & VIO_HS_COMPLETE)) - vio->ops->handshake_complete(vio); + (vio->hs_state & VIO_HS_COMPLETE)) { + if (vio->ops) + vio->ops->handshake_complete(vio); + } return err; } @@ -765,7 +776,11 @@ void vio_port_up(struct vio_driver_state *vio) } if (!err) { - err = 
ldc_connect(vio->lp); + if (ldc_mode(vio->lp) == LDC_MODE_RAW) + ldc_set_state(vio->lp, LDC_STATE_CONNECTED); + else + err = ldc_connect(vio->lp); + if (err) printk(KERN_WARNING "%s: Port %lu connect failed, " "err=%d\n", @@ -805,8 +820,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, return -EINVAL; } - if (!ops->send_attr || - !ops->handle_attr || + if (!ops || !ops->send_attr || !ops->handle_attr || !ops->handshake_complete) return -EINVAL; diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 572db686f845..03b3d65d1266 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -149,6 +149,11 @@ SECTIONS *(.sun_m7_2insn_patch) __sun_m7_2insn_patch_end = .; } + .get_tick_patch : { + __get_tick_patch = .; + *(.get_tick_patch) + __get_tick_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) #ifdef CONFIG_JUMP_LABEL diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 139ad7726e10..726355ce8497 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -78,9 +78,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm, if (image->size != new_size) return -EINVAL; - if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) - return -EFAULT; - vdso_fix_landing(image, new_vma); current->mm->context.vdso = (void __user *)new_vma->vm_start; diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index f4fef5a24ebd..127ddadee1a5 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -150,6 +150,12 @@ #define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9) /* + * HV_VP_SET available + */ +#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11) + + +/* * Crash notification flag. */ #define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63) diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index cfd1a89fd04e..dbe2132b0ed4 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -24,7 +24,6 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | unsigned int pci_early_dump_regs; static int pci_bf_sort; -static int smbios_type_b1_flag; int pci_routeirq; int noioapicquirk; #ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS @@ -197,34 +196,18 @@ static int __init set_bf_sort(const struct dmi_system_id *d) static void __init read_dmi_type_b1(const struct dmi_header *dm, void *private_data) { - u8 *d = (u8 *)dm + 4; + u8 *data = (u8 *)dm + 4; if (dm->type != 0xB1) return; - switch (((*(u32 *)d) >> 9) & 0x03) { - case 0x00: - printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n"); - break; - case 0x01: /* set pci=bfsort */ - smbios_type_b1_flag = 1; - break; - case 0x02: /* do not set pci=bfsort */ - smbios_type_b1_flag = 2; - break; - default: - break; - } + if ((((*(u32 *)data) >> 9) & 0x03) == 0x01) + set_bf_sort((const struct dmi_system_id *)private_data); } static int __init find_sort_method(const struct dmi_system_id *d) { - dmi_walk(read_dmi_type_b1, NULL); - - if (smbios_type_b1_flag == 1) { - set_bf_sort(d); - return 0; - } - return -1; + dmi_walk(read_dmi_type_b1, (void *)d); + return 0; } /* diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 6d52b94f4bb9..11e407489db0 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -571,3 +571,50 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); 
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); + +/* + * Device [1022:7808] + * 23. USB Wake on Connect/Disconnect with Low Speed Devices + * https://support.amd.com/TechDocs/46837.pdf + * Appendix A2 + * https://support.amd.com/TechDocs/42413.pdf + */ +static void pci_fixup_amd_ehci_pme(struct pci_dev *dev) +{ + dev_info(&dev->dev, "PME# does not work under D3, disabling it\n"); + dev->pme_support &= ~((PCI_PM_CAP_PME_D3 | PCI_PM_CAP_PME_D3cold) + >> PCI_PM_CAP_PME_SHIFT); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme); + +/* + * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff] + * + * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to + * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used + * for soft poweroff and suspend-to-RAM. + * + * As far as we know, this is related to the address space, not to the Root + * Port itself. Attaching the quirk to the Root Port is a convenience, but + * it could probably also be a standalone DMI quirk. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=103211 + */ +static void quirk_apple_mbp_poweroff(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + + if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") && + !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) || + pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0)) + return; + + res = request_mem_region(0x7fa00000, 0x200000, + "MacBook Pro poweroff workaround"); + if (res) + dev_info(dev, "claimed %s %pR\n", res->name, res); + else + dev_info(dev, "can't work around MacBook Pro poweroff issue\n"); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff); diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index c1bdb9edcae7..76595408ff53 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c @@ -46,7 +46,7 @@ static inline void set_bios_x(void) pcibios_enabled = 1; set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT); if (__supported_pte_mask & _PAGE_NX) - printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n"); + printk(KERN_INFO "PCI: PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n"); } /* diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index f83de99d7d71..56bd612927ab 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan, dma_addr_t dma_dest[2]; int src_off = 0; - if (submit->flags & ASYNC_TX_FENCE) - dma_flags |= DMA_PREP_FENCE; - while (src_cnt > 0) { submit->flags = flags_orig; pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); @@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan, if (cb_fn_orig) dma_flags |= DMA_PREP_INTERRUPT; } + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; /* Drivers force forward progress in case they can not provide * a descriptor diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 24e8597b2c3e..fa8f9c07ce73 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -62,8 +62,10 @@ config AMBA_PL08X select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help - Platform has a PL08x DMAC device - which can provide DMA engine support + Say yes if your platform has a PL08x DMAC device which can + provide DMA engine support. This includes the original ARM + PL080 and PL081, Samsungs PL080 derivative and Faraday + Technology's FTDMAC020 PL080 derivative. 
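pci_fixup_amd_ehci_pme() in the fixup.c hunk above strips the D3 wake states out of the pme_support mask that the PCI core caches from the device's PMC register. The bit arithmetic on its own, using the pci_regs.h constant values (quoted from memory, so verify against the header):

#include <stdint.h>
#include <stdio.h>

#define PCI_PM_CAP_PME_D3	0x4000	/* PME# assertable from D3hot */
#define PCI_PM_CAP_PME_D3cold	0x8000	/* PME# assertable from D3cold */
#define PCI_PM_CAP_PME_SHIFT	11	/* pme_support = PMC field >> 11 */

int main(void)
{
	/* pme_support as cached by the core: bit n = PME# works in Dn */
	unsigned int pme_support = 0x1f;	/* claims D0..D3cold */

	pme_support &= ~((PCI_PM_CAP_PME_D3 | PCI_PM_CAP_PME_D3cold)
			 >> PCI_PM_CAP_PME_SHIFT);

	printf("pme_support = 0x%02x\n", pme_support);	/* 0x07: D0..D2 only */
	return 0;
}

After the quirk runs, the PM core simply never tries to arm PME# for the D3 states on this device.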
config AMCC_PPC440SPE_ADMA tristate "AMCC PPC440SPe ADMA support" @@ -99,6 +101,21 @@ config AXI_DMAC controller is often used in Analog Device's reference designs for FPGA platforms. +config BCM_SBA_RAID + tristate "Broadcom SBA RAID engine support" + depends on ARM64 || COMPILE_TEST + depends on MAILBOX && RAID6_PQ + select DMA_ENGINE + select DMA_ENGINE_RAID + select ASYNC_TX_DISABLE_XOR_VAL_DMA + select ASYNC_TX_DISABLE_PQ_VAL_DMA + default ARCH_BCM_IPROC + help + Enable support for Broadcom SBA RAID Engine. The SBA RAID + engine is available on most of the Broadcom iProc SoCs. It + has the capability to offload memcpy, xor and pq computation + for raid5/6. + config COH901318 bool "ST-Ericsson COH901318 DMA support" select DMA_ENGINE @@ -354,13 +371,12 @@ config MV_XOR_V2 config MXS_DMA bool "MXS DMA support" - depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL + depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST select STMP_DEVICE select DMA_ENGINE help Support the MXS DMA engine. This engine including APBH-DMA - and APBX-DMA is integrated into Freescale - i.MX23/28/MX6Q/MX6DL/MX6UL chips. + and APBX-DMA is integrated into some Freescale chips. config MX3_IPU bool "MX3x Image Processing Unit support" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 0b723e94d9e6..d12ab2985ed1 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_AT_XDMAC) += at_xdmac.o obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o +obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 6bb8813ca275..13cc95c0474c 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1,9 +1,10 @@ /* * Copyright (c) 2006 ARM Ltd. * Copyright (c) 2010 ST-Ericsson SA + * Copyright (c) 2017 Linaro Ltd. * * Author: Peter Pearse <peter.pearse@arm.com> - * Author: Linus Walleij <linus.walleij@stericsson.com> + * Author: Linus Walleij <linus.walleij@linaro.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -110,11 +111,12 @@ struct pl08x_driver_data; * @channels: the number of channels available in this variant * @signals: the number of request signals available from the hardware * @dualmaster: whether this version supports dual AHB masters or not. - * @nomadik: whether the channels have Nomadik security extension bits - * that need to be checked for permission before use and some registers are - * missing - * @pl080s: whether this version is a PL080S, which has separate register and - * LLI word for transfer size. + * @nomadik: whether this variant is an ST Microelectronics Nomadik, where the + * channels have Nomadik security extension bits that need to be checked + * for permission before use and some registers are missing + * @pl080s: whether this variant is a Samsung PL080S, which has separate + * register and LLI word for transfer size. + * @ftdmac020: whether this variant is a Faraday Technology FTDMAC020 * @max_transfer_size: the maximum single element transfer size for this * PL08x variant.
*/ @@ -125,6 +127,7 @@ struct vendor_data { bool dualmaster; bool nomadik; bool pl080s; + bool ftdmac020; u32 max_transfer_size; }; @@ -148,19 +151,34 @@ struct pl08x_bus_data { * @id: physical index to this channel * @base: memory base address for this physical channel * @reg_config: configuration address for this physical channel + * @reg_control: control address for this physical channel + * @reg_src: transfer source address register + * @reg_dst: transfer destination address register + * @reg_lli: transfer LLI address register + * @reg_busy: if the variant has a special per-channel busy register, + * this contains a pointer to it * @lock: a lock to use when altering an instance of this struct * @serving: the virtual channel currently being served by this physical * channel * @locked: channel unavailable for the system, e.g. dedicated to secure * world + * @ftdmac020: channel is on an FTDMAC020 + * @pl080s: channel is on a PL080S */ struct pl08x_phy_chan { unsigned int id; void __iomem *base; void __iomem *reg_config; + void __iomem *reg_control; + void __iomem *reg_src; + void __iomem *reg_dst; + void __iomem *reg_lli; + void __iomem *reg_busy; spinlock_t lock; struct pl08x_dma_chan *serving; bool locked; + bool ftdmac020; + bool pl080s; }; /** @@ -253,8 +271,9 @@ struct pl08x_dma_chan { /** * struct pl08x_driver_data - the local state holder for the PL08x - * @slave: slave engine for this instance + * @slave: optional slave engine for this instance * @memcpy: memcpy engine for this instance + * @has_slave: the PL08x has a slave engine (routed signals) * @base: virtual memory base (remapped) for the PL08x * @adev: the corresponding AMBA (PrimeCell) bus entry * @vd: vendor data for this PL08x variant @@ -269,6 +288,7 @@ struct pl08x_dma_chan { struct pl08x_driver_data { struct dma_device slave; struct dma_device memcpy; + bool has_slave; void __iomem *base; struct amba_device *adev; const struct vendor_data *vd; @@ -360,10 +380,24 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) { unsigned int val; + /* If we have a special busy register, take a shortcut */ + if (ch->reg_busy) { + val = readl(ch->reg_busy); + return !!(val & BIT(ch->id)); + } val = readl(ch->reg_config); return val & PL080_CONFIG_ACTIVE; } +/* + * pl08x_write_lli() - Write an LLI into the DMA controller. + * + * The PL08x derivatives support linked lists, but the first item of the + * list containing the source, destination, control word and next LLI is + * ignored. Instead the driver has to write those values directly into the + * SRC, DST, LLI and control registers. On the FTDMAC020 the SIZE + * register also needs to be set up for the first transfer.
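+ * + * As an illustrative sketch of what the code below does (the values are + * hypothetical, not from a datasheet): an FTDMAC020 LLI CCTL word that + * encodes a transfer count of 8 with 32-bit source and destination widths + * is split for the first transfer, so that the count 8 is written to the + * SIZE register while the width, TC mask and bus select bits are shifted + * down into the CSR register.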
+ */ static void pl08x_write_lli(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg) { @@ -381,11 +415,112 @@ static void pl08x_write_lli(struct pl08x_driver_data *pl08x, phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg); - writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR); - writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR); - writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI); - writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL); + writel_relaxed(lli[PL080_LLI_SRC], phychan->reg_src); + writel_relaxed(lli[PL080_LLI_DST], phychan->reg_dst); + writel_relaxed(lli[PL080_LLI_LLI], phychan->reg_lli); + + /* + * The FTDMAC020 has a different layout in the CCTL word of the LLI + * and the CCTL register, which is split into CSR and SIZE registers. + * Convert the LLI item CCTL into the proper values to write into + * the CSR and SIZE registers. + */ + if (phychan->ftdmac020) { + u32 llictl = lli[PL080_LLI_CCTL]; + u32 val = 0; + + /* Write the transfer size (12 bits) to the size register */ + writel_relaxed(llictl & FTDMAC020_LLI_TRANSFER_SIZE_MASK, + phychan->base + FTDMAC020_CH_SIZE); + /* + * Then write the control bits 28..16 to the control register + * by shuffling the bits around to where they are in the + * main register. The mapping is as follows: + * Bit 28: TC_MSK - mask on all except last LLI + * Bit 27..25: SRC_WIDTH + * Bit 24..22: DST_WIDTH + * Bit 21..20: SRCAD_CTRL + * Bit 19..18: DSTAD_CTRL + * Bit 17: SRC_SEL + * Bit 16: DST_SEL + */ + if (llictl & FTDMAC020_LLI_TC_MSK) + val |= FTDMAC020_CH_CSR_TC_MSK; + val |= ((llictl & FTDMAC020_LLI_SRC_WIDTH_MSK) >> + (FTDMAC020_LLI_SRC_WIDTH_SHIFT - + FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT)); + val |= ((llictl & FTDMAC020_LLI_DST_WIDTH_MSK) >> + (FTDMAC020_LLI_DST_WIDTH_SHIFT - + FTDMAC020_CH_CSR_DST_WIDTH_SHIFT)); + val |= ((llictl & FTDMAC020_LLI_SRCAD_CTL_MSK) >> + (FTDMAC020_LLI_SRCAD_CTL_SHIFT - + FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT)); + val |= ((llictl & FTDMAC020_LLI_DSTAD_CTL_MSK) >> + (FTDMAC020_LLI_DSTAD_CTL_SHIFT - + FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT)); + if (llictl & FTDMAC020_LLI_SRC_SEL) + val |= FTDMAC020_CH_CSR_SRC_SEL; + if (llictl & FTDMAC020_LLI_DST_SEL) + val |= FTDMAC020_CH_CSR_DST_SEL; + + /* + * Set up the bits that exist in the CSR but are not + * part of the LLI, i.e. they only get written to the + * control register right here. + * + * FIXME: do not just handle memcpy, also handle slave DMA.
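+ * + * Note that the burst size used below is taken from the platform + * data (memcpy_burst_size) rather than from the LLI, since the + * FTDMAC020 appears to keep the source burst size only in the CSR.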
+ */ + switch (pl08x->pd->memcpy_burst_size) { + default: + case PL08X_BURST_SZ_1: + val |= PL080_BSIZE_1 << + FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_4: + val |= PL080_BSIZE_4 << + FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_8: + val |= PL080_BSIZE_8 << + FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_16: + val |= PL080_BSIZE_16 << + FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_32: + val |= PL080_BSIZE_32 << + FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_64: + val |= PL080_BSIZE_64 << + FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_128: + val |= PL080_BSIZE_128 << + FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_256: + val |= PL080_BSIZE_256 << + FTDMAC020_CH_CSR_SRC_SIZE_SHIFT; + break; + } + + /* Protection flags */ + if (pl08x->pd->memcpy_prot_buff) + val |= FTDMAC020_CH_CSR_PROT2; + if (pl08x->pd->memcpy_prot_cache) + val |= FTDMAC020_CH_CSR_PROT3; + /* We are the kernel, so we are in privileged mode */ + val |= FTDMAC020_CH_CSR_PROT1; + + writel_relaxed(val, phychan->reg_control); + } else { + /* Bits are just identical */ + writel_relaxed(lli[PL080_LLI_CCTL], phychan->reg_control); + } + /* Second control word on the PL080s */ if (pl08x->vd->pl080s) writel_relaxed(lli[PL080S_LLI_CCTL2], phychan->base + PL080S_CH_CONTROL2); @@ -423,11 +558,25 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) cpu_relax(); /* Do not access config register until channel shows as inactive */ - val = readl(phychan->reg_config); - while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) + if (phychan->ftdmac020) { + val = readl(phychan->reg_config); + while (val & FTDMAC020_CH_CFG_BUSY) + val = readl(phychan->reg_config); + + val = readl(phychan->reg_control); + while (val & FTDMAC020_CH_CSR_EN) + val = readl(phychan->reg_control); + + writel(val | FTDMAC020_CH_CSR_EN, + phychan->reg_control); + } else { val = readl(phychan->reg_config); + while ((val & PL080_CONFIG_ACTIVE) || + (val & PL080_CONFIG_ENABLE)) + val = readl(phychan->reg_config); - writel(val | PL080_CONFIG_ENABLE, phychan->reg_config); + writel(val | PL080_CONFIG_ENABLE, phychan->reg_config); + } } /* @@ -445,6 +594,14 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) u32 val; int timeout; + if (ch->ftdmac020) { + /* Use the enable bit on the FTDMAC020 */ + val = readl(ch->reg_control); + val &= ~FTDMAC020_CH_CSR_EN; + writel(val, ch->reg_control); + return; + } + /* Set the HALT bit and wait for the FIFO to drain */ val = readl(ch->reg_config); val |= PL080_CONFIG_HALT; @@ -464,6 +621,14 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) { u32 val; + /* Use the enable bit on the FTDMAC020 */ + if (ch->ftdmac020) { + val = readl(ch->reg_control); + val |= FTDMAC020_CH_CSR_EN; + writel(val, ch->reg_control); + return; + } + /* Clear the HALT bit */ val = readl(ch->reg_config); val &= ~PL080_CONFIG_HALT; @@ -479,25 +644,68 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) { - u32 val = readl(ch->reg_config); + u32 val; + /* The layout for the FTDMAC020 is different */ + if (ch->ftdmac020) { + /* Disable all interrupts */ + val = readl(ch->reg_config); + val |= (FTDMAC020_CH_CFG_INT_ABT_MASK | + FTDMAC020_CH_CFG_INT_ERR_MASK | + FTDMAC020_CH_CFG_INT_TC_MASK); + writel(val, ch->reg_config); + + /* Abort and disable channel */ + val = 
readl(ch->reg_control); + val &= ~FTDMAC020_CH_CSR_EN; + val |= FTDMAC020_CH_CSR_ABT; + writel(val, ch->reg_control); + + /* Clear ABT and ERR interrupt flags */ + writel(BIT(ch->id) | BIT(ch->id + 16), + pl08x->base + PL080_ERR_CLEAR); + writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR); + + return; + } + + val = readl(ch->reg_config); val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | PL080_CONFIG_TC_IRQ_MASK); - writel(val, ch->reg_config); writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR); writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR); } -static inline u32 get_bytes_in_cctl(u32 cctl) +static u32 get_bytes_in_phy_channel(struct pl08x_phy_chan *ch) { - /* The source width defines the number of bytes */ - u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; + u32 val; + u32 bytes; + + if (ch->ftdmac020) { + bytes = readl(ch->base + FTDMAC020_CH_SIZE); - cctl &= PL080_CONTROL_SWIDTH_MASK; + val = readl(ch->reg_control); + val &= FTDMAC020_CH_CSR_SRC_WIDTH_MSK; + val >>= FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT; + } else if (ch->pl080s) { + val = readl(ch->base + PL080S_CH_CONTROL2); + bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK; - switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { + val = readl(ch->reg_control); + val &= PL080_CONTROL_SWIDTH_MASK; + val >>= PL080_CONTROL_SWIDTH_SHIFT; + } else { + /* Plain PL08x */ + val = readl(ch->reg_control); + bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK; + + val &= PL080_CONTROL_SWIDTH_MASK; + val >>= PL080_CONTROL_SWIDTH_SHIFT; + } + + switch (val) { case PL080_WIDTH_8BIT: break; case PL080_WIDTH_16BIT: @@ -510,14 +718,35 @@ static inline u32 get_bytes_in_cctl(u32 cctl) return bytes; } -static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1) +static u32 get_bytes_in_lli(struct pl08x_phy_chan *ch, const u32 *llis_va) { - /* The source width defines the number of bytes */ - u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK; + u32 val; + u32 bytes; + + if (ch->ftdmac020) { + val = llis_va[PL080_LLI_CCTL]; + bytes = val & FTDMAC020_LLI_TRANSFER_SIZE_MASK; + + val = llis_va[PL080_LLI_CCTL]; + val &= FTDMAC020_LLI_SRC_WIDTH_MSK; + val >>= FTDMAC020_LLI_SRC_WIDTH_SHIFT; + } else if (ch->pl080s) { + val = llis_va[PL080S_LLI_CCTL2]; + bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK; + + val = llis_va[PL080_LLI_CCTL]; + val &= PL080_CONTROL_SWIDTH_MASK; + val >>= PL080_CONTROL_SWIDTH_SHIFT; + } else { + /* Plain PL08x */ + val = llis_va[PL080_LLI_CCTL]; + bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK; - cctl &= PL080_CONTROL_SWIDTH_MASK; + val &= PL080_CONTROL_SWIDTH_MASK; + val >>= PL080_CONTROL_SWIDTH_SHIFT; + } - switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { + switch (val) { case PL080_WIDTH_8BIT: break; case PL080_WIDTH_16BIT: @@ -552,15 +781,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) * Follow the LLIs to get the number of remaining * bytes in the currently active transaction. 
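* For example: a remaining transfer count of 16 with a 32-bit source * width corresponds to 64 bytes left, since the count is scaled by * the source bus width on all variants.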
*/ - clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; + clli = readl(ch->reg_lli) & ~PL080_LLI_LM_AHB2; /* First get the remaining bytes in the active transfer */ - if (pl08x->vd->pl080s) - bytes = get_bytes_in_cctl_pl080s( - readl(ch->base + PL080_CH_CONTROL), - readl(ch->base + PL080S_CH_CONTROL2)); - else - bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); + bytes = get_bytes_in_phy_channel(ch); if (!clli) return bytes; @@ -581,12 +805,7 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) llis_va_limit = llis_va + llis_max_words; for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) { - if (pl08x->vd->pl080s) - bytes += get_bytes_in_cctl_pl080s( - llis_va[PL080_LLI_CCTL], - llis_va[PL080S_LLI_CCTL2]); - else - bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]); + bytes += get_bytes_in_lli(ch, llis_va); /* * An LLI pointer going backward terminates the LLI list @@ -705,7 +924,7 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan) break; } - if (!next) { + if (!next && pl08x->has_slave) { list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) if (p->state == PL08X_CHAN_WAITING) { next = p; @@ -746,9 +965,30 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan) * LLI handling */ -static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) +static inline unsigned int +pl08x_get_bytes_for_lli(struct pl08x_driver_data *pl08x, + u32 cctl, + bool source) { - switch (coded) { + u32 val; + + if (pl08x->vd->ftdmac020) { + if (source) + val = (cctl & FTDMAC020_LLI_SRC_WIDTH_MSK) >> + FTDMAC020_LLI_SRC_WIDTH_SHIFT; + else + val = (cctl & FTDMAC020_LLI_DST_WIDTH_MSK) >> + FTDMAC020_LLI_DST_WIDTH_SHIFT; + } else { + if (source) + val = (cctl & PL080_CONTROL_SWIDTH_MASK) >> + PL080_CONTROL_SWIDTH_SHIFT; + else + val = (cctl & PL080_CONTROL_DWIDTH_MASK) >> + PL080_CONTROL_DWIDTH_SHIFT; + } + + switch (val) { case PL080_WIDTH_8BIT: return 1; case PL080_WIDTH_16BIT: @@ -762,49 +1002,106 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) return 0; } -static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, - size_t tsize) +static inline u32 pl08x_lli_control_bits(struct pl08x_driver_data *pl08x, + u32 cctl, + u8 srcwidth, u8 dstwidth, + size_t tsize) { u32 retbits = cctl; - /* Remove all src, dst and transfer size bits */ - retbits &= ~PL080_CONTROL_DWIDTH_MASK; - retbits &= ~PL080_CONTROL_SWIDTH_MASK; - retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; + /* + * Remove all src, dst and transfer size bits, then set the + * width and size according to the parameters. The bit offsets + * are different in the FTDMAC020 so we need to account for this.
+ */ + if (pl08x->vd->ftdmac020) { + retbits &= ~FTDMAC020_LLI_DST_WIDTH_MSK; + retbits &= ~FTDMAC020_LLI_SRC_WIDTH_MSK; + retbits &= ~FTDMAC020_LLI_TRANSFER_SIZE_MASK; + + switch (srcwidth) { + case 1: + retbits |= PL080_WIDTH_8BIT << + FTDMAC020_LLI_SRC_WIDTH_SHIFT; + break; + case 2: + retbits |= PL080_WIDTH_16BIT << + FTDMAC020_LLI_SRC_WIDTH_SHIFT; + break; + case 4: + retbits |= PL080_WIDTH_32BIT << + FTDMAC020_LLI_SRC_WIDTH_SHIFT; + break; + default: + BUG(); + break; + } - /* Then set the bits according to the parameters */ - switch (srcwidth) { - case 1: - retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; - break; - case 2: - retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; - break; - case 4: - retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; - break; - default: - BUG(); - break; - } + switch (dstwidth) { + case 1: + retbits |= PL080_WIDTH_8BIT << + FTDMAC020_LLI_DST_WIDTH_SHIFT; + break; + case 2: + retbits |= PL080_WIDTH_16BIT << + FTDMAC020_LLI_DST_WIDTH_SHIFT; + break; + case 4: + retbits |= PL080_WIDTH_32BIT << + FTDMAC020_LLI_DST_WIDTH_SHIFT; + break; + default: + BUG(); + break; + } - switch (dstwidth) { - case 1: - retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; - break; - case 2: - retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; - break; - case 4: - retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; - break; - default: - BUG(); - break; + tsize &= FTDMAC020_LLI_TRANSFER_SIZE_MASK; + retbits |= tsize << FTDMAC020_LLI_TRANSFER_SIZE_SHIFT; + } else { + retbits &= ~PL080_CONTROL_DWIDTH_MASK; + retbits &= ~PL080_CONTROL_SWIDTH_MASK; + retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; + + switch (srcwidth) { + case 1: + retbits |= PL080_WIDTH_8BIT << + PL080_CONTROL_SWIDTH_SHIFT; + break; + case 2: + retbits |= PL080_WIDTH_16BIT << + PL080_CONTROL_SWIDTH_SHIFT; + break; + case 4: + retbits |= PL080_WIDTH_32BIT << + PL080_CONTROL_SWIDTH_SHIFT; + break; + default: + BUG(); + break; + } + + switch (dstwidth) { + case 1: + retbits |= PL080_WIDTH_8BIT << + PL080_CONTROL_DWIDTH_SHIFT; + break; + case 2: + retbits |= PL080_WIDTH_16BIT << + PL080_CONTROL_DWIDTH_SHIFT; + break; + case 4: + retbits |= PL080_WIDTH_32BIT << + PL080_CONTROL_DWIDTH_SHIFT; + break; + default: + BUG(); + break; + } + + tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK; + retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; } - tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK; - retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; return retbits; } @@ -825,13 +1122,35 @@ struct pl08x_lli_build_data { * - prefers the destination bus if both available * - prefers bus with fixed address (i.e. peripheral) */ -static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, - struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) +static void pl08x_choose_master_bus(struct pl08x_driver_data *pl08x, + struct pl08x_lli_build_data *bd, + struct pl08x_bus_data **mbus, + struct pl08x_bus_data **sbus, + u32 cctl) { - if (!(cctl & PL080_CONTROL_DST_INCR)) { + bool dst_incr; + bool src_incr; + + /* + * The FTDMAC020 only supports memory-to-memory transfer, so + * source and destination always increase. + */ + if (pl08x->vd->ftdmac020) { + dst_incr = true; + src_incr = true; + } else { + dst_incr = !!(cctl & PL080_CONTROL_DST_INCR); + src_incr = !!(cctl & PL080_CONTROL_SRC_INCR); + } + + /* + * If either bus is not advancing, i.e. 
it is a peripheral, that + * one becomes master + */ + if (!dst_incr) { *mbus = &bd->dstbus; *sbus = &bd->srcbus; - } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { + } else if (!src_incr) { *mbus = &bd->srcbus; *sbus = &bd->dstbus; } else { @@ -869,10 +1188,16 @@ static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, if (pl08x->vd->pl080s) llis_va[PL080S_LLI_CCTL2] = cctl2; - if (cctl & PL080_CONTROL_SRC_INCR) + if (pl08x->vd->ftdmac020) { + /* FIXME: only memcpy so far so both increase */ bd->srcbus.addr += len; - if (cctl & PL080_CONTROL_DST_INCR) bd->dstbus.addr += len; + } else { + if (cctl & PL080_CONTROL_SRC_INCR) + bd->srcbus.addr += len; + if (cctl & PL080_CONTROL_DST_INCR) + bd->dstbus.addr += len; + } BUG_ON(bd->remainder < len); @@ -883,12 +1208,12 @@ static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x, struct pl08x_lli_build_data *bd, u32 *cctl, u32 len, int num_llis, size_t *total_bytes) { - *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); + *cctl = pl08x_lli_control_bits(pl08x, *cctl, 1, 1, len); pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len); (*total_bytes) += len; } #ifdef VERBOSE_DEBUG static void pl08x_dump_lli(struct pl08x_driver_data *pl08x, const u32 *llis_va, int num_llis) { @@ -953,14 +1278,10 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, cctl = txd->cctl; /* Find maximum width of the source bus */ - bd.srcbus.maxwidth = - pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> - PL080_CONTROL_SWIDTH_SHIFT); + bd.srcbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, true); /* Find maximum width of the destination bus */ - bd.dstbus.maxwidth = - pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> - PL080_CONTROL_DWIDTH_SHIFT); + bd.dstbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, false); list_for_each_entry(dsg, &txd->dsg_list, node) { total_bytes = 0; @@ -972,7 +1293,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, bd.srcbus.buswidth = bd.srcbus.maxwidth; bd.dstbus.buswidth = bd.dstbus.maxwidth; - pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); + pl08x_choose_master_bus(pl08x, &bd, &mbus, &sbus, cctl); dev_vdbg(&pl08x->adev->dev, "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n", @@ -1009,8 +1330,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, * supported. Thus, we can't have scattered addresses.
*/ if (!bd.remainder) { - u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> - PL080_CONFIG_FLOW_CONTROL_SHIFT; + u32 fc; + + /* FTDMAC020 only does memory-to-memory */ + if (pl08x->vd->ftdmac020) + fc = PL080_FLOW_MEM2MEM; + else + fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> + PL080_CONFIG_FLOW_CONTROL_SHIFT; if (!((fc >= PL080_FLOW_SRC2DST_DST) && (fc <= PL080_FLOW_SRC2DST_SRC))) { dev_err(&pl08x->adev->dev, "%s sg len can't be zero", @@ -1027,8 +1354,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return 0; } - cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, - bd.dstbus.buswidth, 0); + cctl = pl08x_lli_control_bits(pl08x, cctl, + bd.srcbus.buswidth, bd.dstbus.buswidth, + 0); pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 0, cctl, 0); break; @@ -1107,8 +1435,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, "size 0x%08zx (remainder 0x%08zx)\n", __func__, lli_len, bd.remainder); - cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, - bd.dstbus.buswidth, tsize); + cctl = pl08x_lli_control_bits(pl08x, cctl, + bd.srcbus.buswidth, bd.dstbus.buswidth, + tsize); pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, lli_len, cctl, tsize); total_bytes += lli_len; @@ -1151,7 +1480,10 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, /* The final LLI terminates the LLI. */ last_lli[PL080_LLI_LLI] = 0; /* The final LLI element shall also fire an interrupt. */ - last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN; + if (pl08x->vd->ftdmac020) + last_lli[PL080_LLI_CCTL] &= ~FTDMAC020_LLI_TC_MSK; + else + last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN; } pl08x_dump_lli(pl08x, llis_va, num_llis); @@ -1317,14 +1649,25 @@ static const struct burst_table burst_sizes[] = { * will be routed to each port. We try to have source and destination * on separate ports, but always respect the allowable settings. 
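* For example: if src = PL08X_AHB1 | PL08X_AHB2 and dst = PL08X_AHB2 * only, the destination is routed to AHB2 while the source stays on * AHB1, so the two accesses end up on separate masters.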
*/ -static u32 pl08x_select_bus(u8 src, u8 dst) +static u32 pl08x_select_bus(bool ftdmac020, u8 src, u8 dst) { u32 cctl = 0; + u32 dst_ahb2; + u32 src_ahb2; + + /* The FTDMAC020 uses different bits to indicate src/dst bus */ + if (ftdmac020) { + dst_ahb2 = FTDMAC020_LLI_DST_SEL; + src_ahb2 = FTDMAC020_LLI_SRC_SEL; + } else { + dst_ahb2 = PL080_CONTROL_DST_AHB2; + src_ahb2 = PL080_CONTROL_SRC_AHB2; + } if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) - cctl |= PL080_CONTROL_DST_AHB2; + cctl |= dst_ahb2; if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) - cctl |= PL080_CONTROL_SRC_AHB2; + cctl |= src_ahb2; return cctl; } @@ -1412,14 +1755,134 @@ static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) { struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); - if (txd) { + if (txd) INIT_LIST_HEAD(&txd->dsg_list); + return txd; +} - /* Always enable error and terminal interrupts */ - txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | - PL080_CONFIG_TC_IRQ_MASK; +static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x) +{ + u32 cctl = 0; + + /* Conjure cctl */ + switch (pl08x->pd->memcpy_burst_size) { + default: + dev_err(&pl08x->adev->dev, + "illegal burst size for memcpy, set to 1\n"); + /* Fall through */ + case PL08X_BURST_SZ_1: + cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_4: + cctl |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_8: + cctl |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_16: + cctl |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_32: + cctl |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_64: + cctl |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_128: + cctl |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT; + break; + case PL08X_BURST_SZ_256: + cctl |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT | + PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT; + break; } - return txd; + + switch (pl08x->pd->memcpy_bus_width) { + default: + dev_err(&pl08x->adev->dev, + "illegal bus width for memcpy, set to 8 bits\n"); + /* Fall through */ + case PL08X_BUS_WIDTH_8_BITS: + cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT | + PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; + break; + case PL08X_BUS_WIDTH_16_BITS: + cctl |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT | + PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; + break; + case PL08X_BUS_WIDTH_32_BITS: + cctl |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | + PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; + break; + } + + /* Protection flags */ + if (pl08x->pd->memcpy_prot_buff) + cctl |= PL080_CONTROL_PROT_BUFF; + if (pl08x->pd->memcpy_prot_cache) + cctl |= PL080_CONTROL_PROT_CACHE; + + /* We are the kernel, so we are in privileged mode */ + cctl |= PL080_CONTROL_PROT_SYS; + + /* Both to be incremented or the code will break */ + cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; + + if (pl08x->vd->dualmaster) + cctl |= pl08x_select_bus(false, + pl08x->mem_buses, + pl08x->mem_buses); + + return cctl; +} + +static u32
pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data *pl08x) +{ + u32 cctl = 0; + + /* Conjure cctl */ + switch (pl08x->pd->memcpy_bus_width) { + default: + dev_err(&pl08x->adev->dev, + "illegal bus width for memcpy, set to 8 bits\n"); + /* Fall through */ + case PL08X_BUS_WIDTH_8_BITS: + cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT | + PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT; + break; + case PL08X_BUS_WIDTH_16_BITS: + cctl |= PL080_WIDTH_16BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT | + PL080_WIDTH_16BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT; + break; + case PL08X_BUS_WIDTH_32_BITS: + cctl |= PL080_WIDTH_32BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT | + PL080_WIDTH_32BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT; + break; + } + + /* + * By default, mask the TC IRQ on all LLIs; it will be unmasked on + * the last LLI item by other code. + */ + cctl |= FTDMAC020_LLI_TC_MSK; + + /* + * Both are to be incremented, so leave bits FTDMAC020_LLI_SRCAD_CTL + * and FTDMAC020_LLI_DSTAD_CTL as zero + */ + if (pl08x->vd->dualmaster) + cctl |= pl08x_select_bus(true, + pl08x->mem_buses, + pl08x->mem_buses); + + return cctl; } /* @@ -1452,18 +1915,16 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( dsg->src_addr = src; dsg->dst_addr = dest; dsg->len = len; - - /* Set platform data for m2m */ - txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & - ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); - - /* Both to be incremented or the code will break */ - txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; - - if (pl08x->vd->dualmaster) - txd->cctl |= pl08x_select_bus(pl08x->mem_buses, - pl08x->mem_buses); + if (pl08x->vd->ftdmac020) { + /* Writing CCFG zero ENABLES all interrupts */ + txd->ccfg = 0; + txd->cctl = pl08x_ftdmac020_memcpy_cctl(pl08x); + } else { + txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | + PL080_CONFIG_TC_IRQ_MASK | + PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; + txd->cctl = pl08x_memcpy_cctl(pl08x); + } ret = pl08x_fill_llis_for_desc(plchan->host, txd); if (!ret) { @@ -1527,7 +1988,7 @@ static struct pl08x_txd *pl08x_init_txd( return NULL; } - txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); + txd->cctl = cctl | pl08x_select_bus(false, src_buses, dst_buses); if (plchan->cfg.device_fc) tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : @@ -1536,7 +1997,9 @@ static struct pl08x_txd *pl08x_init_txd( tmp = (direction == DMA_MEM_TO_DEV) ?
PL080_FLOW_MEM2PER : PL080_FLOW_PER2MEM; - txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; + txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | + PL080_CONFIG_TC_IRQ_MASK | + tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; ret = pl08x_request_mux(plchan); if (ret < 0) { @@ -1813,6 +2276,11 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) /* The Nomadik variant does not have the config register */ if (pl08x->vd->nomadik) return; + /* The FTDMAC020 variant does this in another register */ + if (pl08x->vd->ftdmac020) { + writel(PL080_CONFIG_ENABLE, pl08x->base + FTDMAC020_CSR); + return; + } writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } @@ -1925,9 +2393,16 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->signal = i; pl08x_dma_slave_init(chan); } else { - chan->cd = &pl08x->pd->memcpy_channel; + chan->cd = kzalloc(sizeof(*chan->cd), GFP_KERNEL); + if (!chan->cd) { + kfree(chan); + return -ENOMEM; + } + chan->cd->bus_id = "memcpy"; + chan->cd->periph_buses = pl08x->pd->mem_buses; chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); if (!chan->name) { + kfree(chan->cd); kfree(chan); return -ENOMEM; } @@ -2009,12 +2484,15 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) pl08x_state_str(chan->state)); } - seq_printf(s, "\nPL08x virtual slave channels:\n"); - seq_printf(s, "CHANNEL:\tSTATE:\n"); - seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { - seq_printf(s, "%s\t\t%s\n", chan->name, - pl08x_state_str(chan->state)); + if (pl08x->has_slave) { + seq_printf(s, "\nPL08x virtual slave channels:\n"); + seq_printf(s, "CHANNEL:\tSTATE:\n"); + seq_printf(s, "--------\t------\n"); + list_for_each_entry(chan, &pl08x->slave.channels, + vc.chan.device_node) { + seq_printf(s, "%s\t\t%s\n", chan->name, + pl08x_state_str(chan->state)); + } } return 0; @@ -2052,6 +2530,10 @@ static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x, { struct pl08x_dma_chan *chan; + /* Trying to get a slave channel from something with no slave support */ + if (!pl08x->has_slave) + return NULL; + list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { if (chan->signal == id) return &chan->vc.chan; @@ -2099,7 +2581,6 @@ static int pl08x_of_probe(struct amba_device *adev, { struct pl08x_platform_data *pd; struct pl08x_channel_data *chanp = NULL; - u32 cctl_memcpy = 0; u32 val; int ret; int i; @@ -2139,36 +2620,28 @@ static int pl08x_of_probe(struct amba_device *adev, dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n"); /* Fall through */ case 1: - cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT | - PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT; + pd->memcpy_burst_size = PL08X_BURST_SZ_1; break; case 4: - cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT | - PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT; + pd->memcpy_burst_size = PL08X_BURST_SZ_4; break; case 8: - cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT | - PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT; + pd->memcpy_burst_size = PL08X_BURST_SZ_8; break; case 16: - cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | - PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT; + pd->memcpy_burst_size = PL08X_BURST_SZ_16; break; case 32: - cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT | - PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT; + pd->memcpy_burst_size = PL08X_BURST_SZ_32; break; case 64: - cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT | - 
PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT; + pd->memcpy_burst_size = PL08X_BURST_SZ_64; break; case 128: - cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT | - PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT; + pd->memcpy_burst_size = PL08X_BURST_SZ_128; break; case 256: - cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT | - PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT; + pd->memcpy_burst_size = PL08X_BURST_SZ_256; break; } @@ -2182,48 +2655,40 @@ static int pl08x_of_probe(struct amba_device *adev, dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); /* Fall through */ case 8: - cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT | - PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; + pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS; break; case 16: - cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT | - PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; + pd->memcpy_bus_width = PL08X_BUS_WIDTH_16_BITS; break; case 32: - cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | - PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; + pd->memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS; break; } - /* This is currently the only thing making sense */ - cctl_memcpy |= PL080_CONTROL_PROT_SYS; - - /* Set up memcpy channel */ - pd->memcpy_channel.bus_id = "memcpy"; - pd->memcpy_channel.cctl_memcpy = cctl_memcpy; - /* Use the buses that can access memory, obviously */ - pd->memcpy_channel.periph_buses = pd->mem_buses; - /* * Allocate channel data for all possible slave channels (one * for each possible signal), channels will then be allocated * for a device and have its AHB interfaces set up at * translation time. */ - chanp = devm_kcalloc(&adev->dev, - pl08x->vd->signals, - sizeof(struct pl08x_channel_data), - GFP_KERNEL); - if (!chanp) - return -ENOMEM; + if (pl08x->vd->signals) { + chanp = devm_kcalloc(&adev->dev, + pl08x->vd->signals, + sizeof(struct pl08x_channel_data), + GFP_KERNEL); + if (!chanp) + return -ENOMEM; - pd->slave_channels = chanp; - for (i = 0; i < pl08x->vd->signals; i++) { - /* chanp->periph_buses will be assigned at translation */ - chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i); - chanp++; + pd->slave_channels = chanp; + for (i = 0; i < pl08x->vd->signals; i++) { + /* + * chanp->periph_buses will be assigned at translation + */ + chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i); + chanp++; + } + pd->num_slave_channels = pl08x->vd->signals; } - pd->num_slave_channels = pl08x->vd->signals; pl08x->pd = pd; @@ -2242,7 +2707,7 @@ static inline int pl08x_of_probe(struct amba_device *adev, static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) { struct pl08x_driver_data *pl08x; - const struct vendor_data *vd = id->data; + struct vendor_data *vd = id->data; struct device_node *np = adev->dev.of_node; u32 tsfr_size; int ret = 0; @@ -2268,6 +2733,34 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->adev = adev; pl08x->vd = vd; + pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); + if (!pl08x->base) { + ret = -ENOMEM; + goto out_no_ioremap; + } + + if (vd->ftdmac020) { + u32 val; + + val = readl(pl08x->base + FTDMAC020_REVISION); + dev_info(&pl08x->adev->dev, "FTDMAC020 %d.%d rel %d\n", + (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff); + val = readl(pl08x->base + FTDMAC020_FEATURE); + dev_info(&pl08x->adev->dev, "FTDMAC020 %d channels, " + "%s built-in bridge, %s, %s linked lists\n", + (val >> 12) & 0x0f, + (val & BIT(10)) ?
"no" : "has", + (val & BIT(9)) ? "AHB0 and AHB1" : "AHB0", + (val & BIT(8)) ? "supports" : "does not support"); + + /* Vendor data from feature register */ + if (!(val & BIT(8))) + dev_warn(&pl08x->adev->dev, + "linked lists not supported, required\n"); + vd->channels = (val >> 12) & 0x0f; + vd->dualmaster = !!(val & BIT(9)); + } + /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; @@ -2284,25 +2777,38 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + if (vd->ftdmac020) + pl08x->memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES; - /* Initialize slave engine */ - dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); - dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask); - pl08x->slave.dev = &adev->dev; - pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; - pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; - pl08x->slave.device_tx_status = pl08x_dma_tx_status; - pl08x->slave.device_issue_pending = pl08x_issue_pending; - pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; - pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; - pl08x->slave.device_config = pl08x_config; - pl08x->slave.device_pause = pl08x_pause; - pl08x->slave.device_resume = pl08x_resume; - pl08x->slave.device_terminate_all = pl08x_terminate_all; - pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; - pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; - pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + + /* + * Initialize slave engine, if the block has no signals, that means + * we have no slave support. 
+ */ + if (vd->signals) { + pl08x->has_slave = true; + dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask); + pl08x->slave.dev = &adev->dev; + pl08x->slave.device_free_chan_resources = + pl08x_free_chan_resources; + pl08x->slave.device_prep_dma_interrupt = + pl08x_prep_dma_interrupt; + pl08x->slave.device_tx_status = pl08x_dma_tx_status; + pl08x->slave.device_issue_pending = pl08x_issue_pending; + pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; + pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; + pl08x->slave.device_config = pl08x_config; + pl08x->slave.device_pause = pl08x_pause; + pl08x->slave.device_resume = pl08x_resume; + pl08x->slave.device_terminate_all = pl08x_terminate_all; + pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; + pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; + pl08x->slave.directions = + BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + pl08x->slave.residue_granularity = + DMA_RESIDUE_GRANULARITY_SEGMENT; + } /* Get the platform data */ pl08x->pd = dev_get_platdata(&adev->dev); @@ -2344,19 +2850,18 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_lli_pool; } - pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); - if (!pl08x->base) { - ret = -ENOMEM; - goto out_no_ioremap; - } - /* Turn on the PL08x */ pl08x_ensure_on(pl08x); - /* Attach the interrupt handler */ - writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); + /* Clear any pending interrupts */ + if (vd->ftdmac020) + /* This variant has error IRQs in bits 16-19 */ + writel(0x0000FFFF, pl08x->base + PL080_ERR_CLEAR); + else + writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); + /* Attach the interrupt handler */ ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); if (ret) { dev_err(&adev->dev, "%s failed to request interrupt %d\n", @@ -2377,7 +2882,25 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); - ch->reg_config = ch->base + vd->config_offset; + if (vd->ftdmac020) { + /* The FTDMAC020 has a special channel busy register */ + ch->reg_busy = ch->base + FTDMAC020_CH_BUSY; + ch->reg_config = ch->base + FTDMAC020_CH_CFG; + ch->reg_control = ch->base + FTDMAC020_CH_CSR; + ch->reg_src = ch->base + FTDMAC020_CH_SRC_ADDR; + ch->reg_dst = ch->base + FTDMAC020_CH_DST_ADDR; + ch->reg_lli = ch->base + FTDMAC020_CH_LLP; + ch->ftdmac020 = true; + } else { + ch->reg_config = ch->base + vd->config_offset; + ch->reg_control = ch->base + PL080_CH_CONTROL; + ch->reg_src = ch->base + PL080_CH_SRC_ADDR; + ch->reg_dst = ch->base + PL080_CH_DST_ADDR; + ch->reg_lli = ch->base + PL080_CH_LLI; + } + if (vd->pl080s) + ch->pl080s = true; + spin_lock_init(&ch->lock); /* @@ -2410,13 +2933,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) } /* Register slave channels */ - ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, - pl08x->pd->num_slave_channels, true); - if (ret < 0) { - dev_warn(&pl08x->adev->dev, - "%s failed to enumerate slave channels - %d\n", - __func__, ret); - goto out_no_slave; + if (pl08x->has_slave) { + ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, + pl08x->pd->num_slave_channels, true); + if (ret < 0) { + dev_warn(&pl08x->adev->dev, + "%s failed to enumerate slave channels - %d\n", + __func__, ret); + goto out_no_slave; + } } ret = dma_async_device_register(&pl08x->memcpy); if (ret) { dev_warn(&pl08x->adev->dev, @@ -2427,12 +2952,14 @@ static
int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_memcpy_reg; } - ret = dma_async_device_register(&pl08x->slave); - if (ret) { - dev_warn(&pl08x->adev->dev, + if (pl08x->has_slave) { + ret = dma_async_device_register(&pl08x->slave); + if (ret) { + dev_warn(&pl08x->adev->dev, "%s failed to register slave as an async device - %d\n", __func__, ret); - goto out_no_slave_reg; + goto out_no_slave_reg; + } } amba_set_drvdata(adev, pl08x); @@ -2446,7 +2973,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) out_no_slave_reg: dma_async_device_unregister(&pl08x->memcpy); out_no_memcpy_reg: - pl08x_free_virtual_channels(&pl08x->slave); + if (pl08x->has_slave) + pl08x_free_virtual_channels(&pl08x->slave); out_no_slave: pl08x_free_virtual_channels(&pl08x->memcpy); out_no_memcpy: @@ -2454,11 +2982,11 @@ out_no_memcpy: out_no_phychans: free_irq(adev->irq[0], pl08x); out_no_irq: - iounmap(pl08x->base); -out_no_ioremap: dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: + iounmap(pl08x->base); +out_no_ioremap: kfree(pl08x); out_no_pl08x: amba_release_regions(adev); @@ -2499,6 +3027,12 @@ static struct vendor_data vendor_pl081 = { .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, }; +static struct vendor_data vendor_ftdmac020 = { + .config_offset = PL080_CH_CONFIG, + .ftdmac020 = true, + .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, +}; + static struct amba_id pl08x_ids[] = { /* Samsung PL080S variant */ { @@ -2524,6 +3058,12 @@ static struct amba_id pl08x_ids[] = { .mask = 0x00ffffff, .data = &vendor_nomadik, }, + /* Faraday Technology FTDMAC020 */ + { + .id = 0x0003b080, + .mask = 0x000fffff, + .data = &vendor_ftdmac020, + }, { 0, 0 }, }; diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c new file mode 100644 index 000000000000..e41bbc7cb094 --- /dev/null +++ b/drivers/dma/bcm-sba-raid.c @@ -0,0 +1,1785 @@ +/* + * Copyright (C) 2017 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * Broadcom SBA RAID Driver + * + * The Broadcom stream buffer accelerator (SBA) provides offloading + * capabilities for RAID operations. The SBA offload engine is accessible + * via a Broadcom SoC specific ring manager. Two or more offload engines + * can share the same Broadcom SoC specific ring manager; because of this, + * the Broadcom SoC specific ring manager driver is implemented as a + * mailbox controller driver and the offload engine drivers are implemented + * as mailbox clients. + * + * Typically, the Broadcom SoC specific ring manager will implement a large + * number of hardware rings over one or more SBA hardware devices. By + * design, the internal buffer size of the SBA hardware device is limited, + * but all offload operations supported by the SBA can be broken down into + * multiple small-size requests and executed in parallel on multiple SBA + * hardware devices to achieve high throughput. + * + * The Broadcom SBA RAID driver does not require any register programming + * except submitting requests to the SBA hardware device via mailbox + * channels. This driver implements a DMA device with one DMA channel + * using a set of mailbox channels provided by the Broadcom SoC specific + * ring manager driver.
To exploit parallelism (as described above), all DMA requests + * coming to the SBA RAID DMA channel are broken down into smaller requests + * and submitted to multiple mailbox channels in a round-robin fashion. + * To get more SBA DMA channels, we can create more SBA device nodes + * in the Broadcom SoC specific DTS, based on the number of hardware rings + * supported by the Broadcom SoC ring manager. + */ + +#include <linux/bitops.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/list.h> +#include <linux/mailbox_client.h> +#include <linux/mailbox/brcm-message.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/slab.h> +#include <linux/raid/pq.h> + +#include "dmaengine.h" + +/* SBA command related defines */ +#define SBA_TYPE_SHIFT 48 +#define SBA_TYPE_MASK GENMASK(1, 0) +#define SBA_TYPE_A 0x0 +#define SBA_TYPE_B 0x2 +#define SBA_TYPE_C 0x3 +#define SBA_USER_DEF_SHIFT 32 +#define SBA_USER_DEF_MASK GENMASK(15, 0) +#define SBA_R_MDATA_SHIFT 24 +#define SBA_R_MDATA_MASK GENMASK(7, 0) +#define SBA_C_MDATA_MS_SHIFT 18 +#define SBA_C_MDATA_MS_MASK GENMASK(1, 0) +#define SBA_INT_SHIFT 17 +#define SBA_INT_MASK BIT(0) +#define SBA_RESP_SHIFT 16 +#define SBA_RESP_MASK BIT(0) +#define SBA_C_MDATA_SHIFT 8 +#define SBA_C_MDATA_MASK GENMASK(7, 0) +#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum)) +#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0) +#define SBA_C_MDATA_DNUM_SHIFT 5 +#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0) +#define SBA_C_MDATA_LS(__v) ((__v) & 0xff) +#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3) +#define SBA_CMD_SHIFT 0 +#define SBA_CMD_MASK GENMASK(3, 0) +#define SBA_CMD_ZERO_BUFFER 0x4 +#define SBA_CMD_ZERO_ALL_BUFFERS 0x8 +#define SBA_CMD_LOAD_BUFFER 0x9 +#define SBA_CMD_XOR 0xa +#define SBA_CMD_GALOIS_XOR 0xb +#define SBA_CMD_WRITE_BUFFER 0xc +#define SBA_CMD_GALOIS 0xe + +/* Driver helper macros */ +#define to_sba_request(tx) \ + container_of(tx, struct sba_request, tx) +#define to_sba_device(dchan) \ + container_of(dchan, struct sba_device, dma_chan) + +enum sba_request_state { + SBA_REQUEST_STATE_FREE = 1, + SBA_REQUEST_STATE_ALLOCED = 2, + SBA_REQUEST_STATE_PENDING = 3, + SBA_REQUEST_STATE_ACTIVE = 4, + SBA_REQUEST_STATE_RECEIVED = 5, + SBA_REQUEST_STATE_COMPLETED = 6, + SBA_REQUEST_STATE_ABORTED = 7, +}; + +struct sba_request { + /* Global state */ + struct list_head node; + struct sba_device *sba; + enum sba_request_state state; + bool fence; + /* Chained requests management */ + struct sba_request *first; + struct list_head next; + unsigned int next_count; + atomic_t next_pending_count; + /* BRCM message data */ + void *resp; + dma_addr_t resp_dma; + struct brcm_sba_command *cmds; + struct brcm_message msg; + struct dma_async_tx_descriptor tx; +}; + +enum sba_version { + SBA_VER_1 = 0, + SBA_VER_2 +}; + +struct sba_device { + /* Underlying device */ + struct device *dev; + /* DT configuration parameters */ + enum sba_version ver; + /* Derived configuration parameters */ + u32 max_req; + u32 hw_buf_size; + u32 hw_resp_size; + u32 max_pq_coefs; + u32 max_pq_srcs; + u32 max_cmd_per_req; + u32 max_xor_srcs; + u32 max_resp_pool_size; + u32 max_cmds_pool_size; + /* Mailbox client and mailbox channels */ + struct mbox_client client; + int mchans_count; + atomic_t mchans_current; + struct mbox_chan **mchans; + struct device *mbox_dev; + /* DMA device and DMA channel */ + struct dma_device dma_dev; + struct dma_chan dma_chan; + /* DMA channel resources */ + void *resp_base; + dma_addr_t resp_dma_base; + void *cmds_base; + dma_addr_t
cmds_dma_base; + spinlock_t reqs_lock; + struct sba_request *reqs; + bool reqs_fence; + struct list_head reqs_alloc_list; + struct list_head reqs_pending_list; + struct list_head reqs_active_list; + struct list_head reqs_received_list; + struct list_head reqs_completed_list; + struct list_head reqs_aborted_list; + struct list_head reqs_free_list; + int reqs_free_count; +}; + +/* ====== SBA command helper routines ===== */ + +static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask) +{ + cmd &= ~((u64)mask << shift); + cmd |= ((u64)(val & mask) << shift); + return cmd; +} + +static inline u32 __pure sba_cmd_load_c_mdata(u32 b0) +{ + return b0 & SBA_C_MDATA_BNUMx_MASK; +} + +static inline u32 __pure sba_cmd_write_c_mdata(u32 b0) +{ + return b0 & SBA_C_MDATA_BNUMx_MASK; +} + +static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0) +{ + return (b0 & SBA_C_MDATA_BNUMx_MASK) | + ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)); +} + +static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0) +{ + return (b0 & SBA_C_MDATA_BNUMx_MASK) | + ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) | + ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT); +} + +/* ====== Channel resource management routines ===== */ + +static struct sba_request *sba_alloc_request(struct sba_device *sba) +{ + unsigned long flags; + struct sba_request *req = NULL; + + spin_lock_irqsave(&sba->reqs_lock, flags); + + req = list_first_entry_or_null(&sba->reqs_free_list, + struct sba_request, node); + if (req) { + list_move_tail(&req->node, &sba->reqs_alloc_list); + req->state = SBA_REQUEST_STATE_ALLOCED; + req->fence = false; + req->first = req; + INIT_LIST_HEAD(&req->next); + req->next_count = 1; + atomic_set(&req->next_pending_count, 1); + + sba->reqs_free_count--; + + dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); + } + + spin_unlock_irqrestore(&sba->reqs_lock, flags); + + return req; +} + +/* Note: Must be called with sba->reqs_lock held */ +static void _sba_pending_request(struct sba_device *sba, + struct sba_request *req) +{ + lockdep_assert_held(&sba->reqs_lock); + req->state = SBA_REQUEST_STATE_PENDING; + list_move_tail(&req->node, &sba->reqs_pending_list); + if (list_empty(&sba->reqs_active_list)) + sba->reqs_fence = false; +} + +/* Note: Must be called with sba->reqs_lock held */ +static bool _sba_active_request(struct sba_device *sba, + struct sba_request *req) +{ + lockdep_assert_held(&sba->reqs_lock); + if (list_empty(&sba->reqs_active_list)) + sba->reqs_fence = false; + if (sba->reqs_fence) + return false; + req->state = SBA_REQUEST_STATE_ACTIVE; + list_move_tail(&req->node, &sba->reqs_active_list); + if (req->fence) + sba->reqs_fence = true; + return true; +} + +/* Note: Must be called with sba->reqs_lock held */ +static void _sba_abort_request(struct sba_device *sba, + struct sba_request *req) +{ + lockdep_assert_held(&sba->reqs_lock); + req->state = SBA_REQUEST_STATE_ABORTED; + list_move_tail(&req->node, &sba->reqs_aborted_list); + if (list_empty(&sba->reqs_active_list)) + sba->reqs_fence = false; +} + +/* Note: Must be called with sba->reqs_lock held */ +static void _sba_free_request(struct sba_device *sba, + struct sba_request *req) +{ + lockdep_assert_held(&sba->reqs_lock); + req->state = SBA_REQUEST_STATE_FREE; + list_move_tail(&req->node, &sba->reqs_free_list); + if (list_empty(&sba->reqs_active_list)) + sba->reqs_fence = false; + sba->reqs_free_count++; +} + +static void sba_received_request(struct sba_request *req) +{ + unsigned long flags; 
+ struct sba_device *sba = req->sba; + + spin_lock_irqsave(&sba->reqs_lock, flags); + req->state = SBA_REQUEST_STATE_RECEIVED; + list_move_tail(&req->node, &sba->reqs_received_list); + spin_unlock_irqrestore(&sba->reqs_lock, flags); +} + +static void sba_complete_chained_requests(struct sba_request *req) +{ + unsigned long flags; + struct sba_request *nreq; + struct sba_device *sba = req->sba; + + spin_lock_irqsave(&sba->reqs_lock, flags); + + req->state = SBA_REQUEST_STATE_COMPLETED; + list_move_tail(&req->node, &sba->reqs_completed_list); + list_for_each_entry(nreq, &req->next, next) { + nreq->state = SBA_REQUEST_STATE_COMPLETED; + list_move_tail(&nreq->node, &sba->reqs_completed_list); + } + if (list_empty(&sba->reqs_active_list)) + sba->reqs_fence = false; + + spin_unlock_irqrestore(&sba->reqs_lock, flags); +} + +static void sba_free_chained_requests(struct sba_request *req) +{ + unsigned long flags; + struct sba_request *nreq; + struct sba_device *sba = req->sba; + + spin_lock_irqsave(&sba->reqs_lock, flags); + + _sba_free_request(sba, req); + list_for_each_entry(nreq, &req->next, next) + _sba_free_request(sba, nreq); + + spin_unlock_irqrestore(&sba->reqs_lock, flags); +} + +static void sba_chain_request(struct sba_request *first, + struct sba_request *req) +{ + unsigned long flags; + struct sba_device *sba = req->sba; + + spin_lock_irqsave(&sba->reqs_lock, flags); + + list_add_tail(&req->next, &first->next); + req->first = first; + first->next_count++; + atomic_set(&first->next_pending_count, first->next_count); + + spin_unlock_irqrestore(&sba->reqs_lock, flags); +} + +static void sba_cleanup_nonpending_requests(struct sba_device *sba) +{ + unsigned long flags; + struct sba_request *req, *req1; + + spin_lock_irqsave(&sba->reqs_lock, flags); + + /* Free up all allocated requests */ + list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) + _sba_free_request(sba, req); + + /* Free up all received requests */ + list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node) + _sba_free_request(sba, req); + + /* Free up all completed requests */ + list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) + _sba_free_request(sba, req); + + /* Set all active requests as aborted */ + list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) + _sba_abort_request(sba, req); + + /* + * Note: We expect that aborted requests will eventually be + * freed by sba_receive_message() + */ + + spin_unlock_irqrestore(&sba->reqs_lock, flags); +} + +static void sba_cleanup_pending_requests(struct sba_device *sba) +{ + unsigned long flags; + struct sba_request *req, *req1; + + spin_lock_irqsave(&sba->reqs_lock, flags); + + /* Free up all pending requests */ + list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) + _sba_free_request(sba, req); + + spin_unlock_irqrestore(&sba->reqs_lock, flags); +} + +/* ====== DMAENGINE callbacks ===== */ + +static void sba_free_chan_resources(struct dma_chan *dchan) +{ + /* + * Channel resources are pre-allocated, so we just free up + * whatever we can so that the pre-allocated channel + * resources can be re-used next time.
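+ * + * Pending requests are deliberately left untouched here; they + * are only dropped by sba_device_terminate_all().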
*/ + sba_cleanup_nonpending_requests(to_sba_device(dchan)); +} + +static int sba_device_terminate_all(struct dma_chan *dchan) +{ + /* Cleanup all pending requests */ + sba_cleanup_pending_requests(to_sba_device(dchan)); + + return 0; +} + +static int sba_send_mbox_request(struct sba_device *sba, + struct sba_request *req) +{ + int mchans_idx, ret = 0; + + /* Select mailbox channel in round-robin fashion */ + mchans_idx = atomic_inc_return(&sba->mchans_current); + mchans_idx = mchans_idx % sba->mchans_count; + + /* Send message for the request */ + req->msg.error = 0; + ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg); + if (ret < 0) { + dev_err(sba->dev, "send message failed with error %d", ret); + return ret; + } + ret = req->msg.error; + if (ret < 0) { + dev_err(sba->dev, "message error %d", ret); + return ret; + } + + return 0; +} + +static void sba_issue_pending(struct dma_chan *dchan) +{ + int ret; + unsigned long flags; + struct sba_request *req, *req1; + struct sba_device *sba = to_sba_device(dchan); + + spin_lock_irqsave(&sba->reqs_lock, flags); + + /* Process all pending requests */ + list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) { + /* Try to make request active */ + if (!_sba_active_request(sba, req)) + break; + + /* Send request to mailbox channel */ + spin_unlock_irqrestore(&sba->reqs_lock, flags); + ret = sba_send_mbox_request(sba, req); + spin_lock_irqsave(&sba->reqs_lock, flags); + + /* If something went wrong then keep the request pending */ + if (ret < 0) { + _sba_pending_request(sba, req); + break; + } + } + + spin_unlock_irqrestore(&sba->reqs_lock, flags); +} + +static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx) +{ + unsigned long flags; + dma_cookie_t cookie; + struct sba_device *sba; + struct sba_request *req, *nreq; + + if (unlikely(!tx)) + return -EINVAL; + + sba = to_sba_device(tx->chan); + req = to_sba_request(tx); + + /* Assign cookie and mark all chained requests pending */ + spin_lock_irqsave(&sba->reqs_lock, flags); + cookie = dma_cookie_assign(tx); + _sba_pending_request(sba, req); + list_for_each_entry(nreq, &req->next, next) + _sba_pending_request(sba, nreq); + spin_unlock_irqrestore(&sba->reqs_lock, flags); + + return cookie; +} + +static enum dma_status sba_tx_status(struct dma_chan *dchan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + int mchan_idx; + struct sba_device *sba = to_sba_device(dchan); + + for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++) + mbox_client_peek_data(sba->mchans[mchan_idx]); + + return dma_cookie_status(dchan, cookie, txstate); +} + +static void sba_fillup_interrupt_msg(struct sba_request *req, + struct brcm_sba_command *cmds, + struct brcm_message *msg) +{ + u64 cmd; + u32 c_mdata; + struct brcm_sba_command *cmdsp = cmds; + + /* Type-B command to load dummy data into buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_load_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = req->resp_dma; + cmdsp->data_len = req->sba->hw_resp_size; + cmdsp++; + + /* Type-A command to
write buf0 to dummy location */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, 0x1, + SBA_RESP_SHIFT, SBA_RESP_MASK); + c_mdata = sba_cmd_write_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + if (req->sba->hw_resp_size) { + cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; + cmdsp->resp = req->resp_dma; + cmdsp->resp_len = req->sba->hw_resp_size; + } + cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; + cmdsp->data = req->resp_dma; + cmdsp->data_len = req->sba->hw_resp_size; + cmdsp++; + + /* Fillup brcm_message */ + msg->type = BRCM_MESSAGE_SBA; + msg->sba.cmds = cmds; + msg->sba.cmds_count = cmdsp - cmds; + msg->ctx = req; + msg->error = 0; +} + +static struct dma_async_tx_descriptor * +sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags) +{ + struct sba_request *req = NULL; + struct sba_device *sba = to_sba_device(dchan); + + /* Alloc new request */ + req = sba_alloc_request(sba); + if (!req) + return NULL; + + /* + * Force fence so that no requests are submitted + * until DMA callback for this request is invoked. + */ + req->fence = true; + + /* Fillup request message */ + sba_fillup_interrupt_msg(req, req->cmds, &req->msg); + + /* Init async_tx descriptor */ + req->tx.flags = flags; + req->tx.cookie = -EBUSY; + + return &req->tx; +} + +static void sba_fillup_memcpy_msg(struct sba_request *req, + struct brcm_sba_command *cmds, + struct brcm_message *msg, + dma_addr_t msg_offset, size_t msg_len, + dma_addr_t dst, dma_addr_t src) +{ + u64 cmd; + u32 c_mdata; + struct brcm_sba_command *cmdsp = cmds; + + /* Type-B command to load data into buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_load_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = src + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + + /* Type-A command to write buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, 0x1, + SBA_RESP_SHIFT, SBA_RESP_MASK); + c_mdata = sba_cmd_write_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + if (req->sba->hw_resp_size) { + cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; + cmdsp->resp = req->resp_dma; + cmdsp->resp_len = req->sba->hw_resp_size; + } + cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; + cmdsp->data = dst + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + + /* Fillup brcm_message */ + msg->type = BRCM_MESSAGE_SBA; + msg->sba.cmds = cmds; + msg->sba.cmds_count = cmdsp - cmds; + msg->ctx = req; + msg->error = 0; +} + +static struct sba_request * +sba_prep_dma_memcpy_req(struct sba_device *sba, + dma_addr_t off, 
dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct sba_request *req = NULL; + + /* Alloc new request */ + req = sba_alloc_request(sba); + if (!req) + return NULL; + req->fence = (flags & DMA_PREP_FENCE) ? true : false; + + /* Fillup request message */ + sba_fillup_memcpy_msg(req, req->cmds, &req->msg, + off, len, dst, src); + + /* Init async_tx descriptor */ + req->tx.flags = flags; + req->tx.cookie = -EBUSY; + + return req; +} + +static struct dma_async_tx_descriptor * +sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + size_t req_len; + dma_addr_t off = 0; + struct sba_device *sba = to_sba_device(dchan); + struct sba_request *first = NULL, *req; + + /* Create chained requests where each request is upto hw_buf_size */ + while (len) { + req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; + + req = sba_prep_dma_memcpy_req(sba, off, dst, src, + req_len, flags); + if (!req) { + if (first) + sba_free_chained_requests(first); + return NULL; + } + + if (first) + sba_chain_request(first, req); + else + first = req; + + off += req_len; + len -= req_len; + } + + return (first) ? &first->tx : NULL; +} + +static void sba_fillup_xor_msg(struct sba_request *req, + struct brcm_sba_command *cmds, + struct brcm_message *msg, + dma_addr_t msg_offset, size_t msg_len, + dma_addr_t dst, dma_addr_t *src, u32 src_cnt) +{ + u64 cmd; + u32 c_mdata; + unsigned int i; + struct brcm_sba_command *cmdsp = cmds; + + /* Type-B command to load data into buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_load_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = src[0] + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + + /* Type-B commands to xor data with buf0 and put it back in buf0 */ + for (i = 1; i < src_cnt; i++) { + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_xor_c_mdata(0, 0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = src[i] + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } + + /* Type-A command to write buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, 0x1, + SBA_RESP_SHIFT, SBA_RESP_MASK); + c_mdata = sba_cmd_write_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + if (req->sba->hw_resp_size) { + cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; + cmdsp->resp = req->resp_dma; + cmdsp->resp_len = req->sba->hw_resp_size; + } + cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; + cmdsp->data = dst + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + + /* Fillup 
brcm_message */ + msg->type = BRCM_MESSAGE_SBA; + msg->sba.cmds = cmds; + msg->sba.cmds_count = cmdsp - cmds; + msg->ctx = req; + msg->error = 0; +} + +struct sba_request * +sba_prep_dma_xor_req(struct sba_device *sba, + dma_addr_t off, dma_addr_t dst, dma_addr_t *src, + u32 src_cnt, size_t len, unsigned long flags) +{ + struct sba_request *req = NULL; + + /* Alloc new request */ + req = sba_alloc_request(sba); + if (!req) + return NULL; + req->fence = (flags & DMA_PREP_FENCE) ? true : false; + + /* Fillup request message */ + sba_fillup_xor_msg(req, req->cmds, &req->msg, + off, len, dst, src, src_cnt); + + /* Init async_tx descriptor */ + req->tx.flags = flags; + req->tx.cookie = -EBUSY; + + return req; +} + +static struct dma_async_tx_descriptor * +sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src, + u32 src_cnt, size_t len, unsigned long flags) +{ + size_t req_len; + dma_addr_t off = 0; + struct sba_device *sba = to_sba_device(dchan); + struct sba_request *first = NULL, *req; + + /* Sanity checks */ + if (unlikely(src_cnt > sba->max_xor_srcs)) + return NULL; + + /* Create chained requests where each request is upto hw_buf_size */ + while (len) { + req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; + + req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt, + req_len, flags); + if (!req) { + if (first) + sba_free_chained_requests(first); + return NULL; + } + + if (first) + sba_chain_request(first, req); + else + first = req; + + off += req_len; + len -= req_len; + } + + return (first) ? &first->tx : NULL; +} + +static void sba_fillup_pq_msg(struct sba_request *req, + bool pq_continue, + struct brcm_sba_command *cmds, + struct brcm_message *msg, + dma_addr_t msg_offset, size_t msg_len, + dma_addr_t *dst_p, dma_addr_t *dst_q, + const u8 *scf, dma_addr_t *src, u32 src_cnt) +{ + u64 cmd; + u32 c_mdata; + unsigned int i; + struct brcm_sba_command *cmdsp = cmds; + + if (pq_continue) { + /* Type-B command to load old P into buf0 */ + if (dst_p) { + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_load_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = *dst_p + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } + + /* Type-B command to load old Q into buf1 */ + if (dst_q) { + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_load_c_mdata(1); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = *dst_q + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } + } else { + /* Type-A command to zero all buffers */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + cmdsp++; + } + + /* Type-B commands for 
generate P onto buf0 and Q onto buf1 */ + for (i = 0; i < src_cnt; i++) { + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), + SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = src[i] + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } + + /* Type-A command to write buf0 */ + if (dst_p) { + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, 0x1, + SBA_RESP_SHIFT, SBA_RESP_MASK); + c_mdata = sba_cmd_write_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + if (req->sba->hw_resp_size) { + cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; + cmdsp->resp = req->resp_dma; + cmdsp->resp_len = req->sba->hw_resp_size; + } + cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; + cmdsp->data = *dst_p + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } + + /* Type-A command to write buf1 */ + if (dst_q) { + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, 0x1, + SBA_RESP_SHIFT, SBA_RESP_MASK); + c_mdata = sba_cmd_write_c_mdata(1); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + if (req->sba->hw_resp_size) { + cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; + cmdsp->resp = req->resp_dma; + cmdsp->resp_len = req->sba->hw_resp_size; + } + cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; + cmdsp->data = *dst_q + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } + + /* Fillup brcm_message */ + msg->type = BRCM_MESSAGE_SBA; + msg->sba.cmds = cmds; + msg->sba.cmds_count = cmdsp - cmds; + msg->ctx = req; + msg->error = 0; +} + +struct sba_request * +sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, + dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src, + u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) +{ + struct sba_request *req = NULL; + + /* Alloc new request */ + req = sba_alloc_request(sba); + if (!req) + return NULL; + req->fence = (flags & DMA_PREP_FENCE) ? 
true : false; + + /* Fillup request messages */ + sba_fillup_pq_msg(req, dmaf_continue(flags), + req->cmds, &req->msg, + off, len, dst_p, dst_q, scf, src, src_cnt); + + /* Init async_tx descriptor */ + req->tx.flags = flags; + req->tx.cookie = -EBUSY; + + return req; +} + +static void sba_fillup_pq_single_msg(struct sba_request *req, + bool pq_continue, + struct brcm_sba_command *cmds, + struct brcm_message *msg, + dma_addr_t msg_offset, size_t msg_len, + dma_addr_t *dst_p, dma_addr_t *dst_q, + dma_addr_t src, u8 scf) +{ + u64 cmd; + u32 c_mdata; + u8 pos, dpos = raid6_gflog[scf]; + struct brcm_sba_command *cmdsp = cmds; + + if (!dst_p) + goto skip_p; + + if (pq_continue) { + /* Type-B command to load old P into buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_load_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = *dst_p + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + + /* + * Type-B commands to xor data with buf0 and put it + * back in buf0 + */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_xor_c_mdata(0, 0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = src + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } else { + /* Type-B command to load old P into buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_load_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = src + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } + + /* Type-A command to write buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, 0x1, + SBA_RESP_SHIFT, SBA_RESP_MASK); + c_mdata = sba_cmd_write_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + if (req->sba->hw_resp_size) { + cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; + cmdsp->resp = req->resp_dma; + cmdsp->resp_len = req->sba->hw_resp_size; + } + cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; + cmdsp->data = *dst_p + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + +skip_p: + if (!dst_q) + goto skip_q; + + /* Type-A command to zero all buffers */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS, + 
SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + cmdsp++; + + if (dpos == 255) + goto skip_q_computation; + pos = (dpos < req->sba->max_pq_coefs) ? + dpos : (req->sba->max_pq_coefs - 1); + + /* + * Type-B command to generate initial Q from data + * and store output into buf0 + */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), + SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = src + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + + dpos -= pos; + + /* Multiple Type-A command to generate final Q */ + while (dpos) { + pos = (dpos < req->sba->max_pq_coefs) ? + dpos : (req->sba->max_pq_coefs - 1); + + /* + * Type-A command to generate Q with buf0 and + * buf1 store result in buf0 + */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), + SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + cmdsp++; + + dpos -= pos; + } + +skip_q_computation: + if (pq_continue) { + /* + * Type-B command to XOR previous output with + * buf0 and write it into buf0 + */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_B, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + c_mdata = sba_cmd_xor_c_mdata(0, 0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_B; + cmdsp->data = *dst_q + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + } + + /* Type-A command to write buf0 */ + cmd = sba_cmd_enc(0x0, SBA_TYPE_A, + SBA_TYPE_SHIFT, SBA_TYPE_MASK); + cmd = sba_cmd_enc(cmd, msg_len, + SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); + cmd = sba_cmd_enc(cmd, 0x1, + SBA_RESP_SHIFT, SBA_RESP_MASK); + c_mdata = sba_cmd_write_c_mdata(0); + cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), + SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); + cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, + SBA_CMD_SHIFT, SBA_CMD_MASK); + cmdsp->cmd = cmd; + *cmdsp->cmd_dma = cpu_to_le64(cmd); + cmdsp->flags = BRCM_SBA_CMD_TYPE_A; + if (req->sba->hw_resp_size) { + cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; + cmdsp->resp = req->resp_dma; + cmdsp->resp_len = req->sba->hw_resp_size; + } + cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; + cmdsp->data = *dst_q + msg_offset; + cmdsp->data_len = msg_len; + cmdsp++; + +skip_q: + /* Fillup brcm_message */ + msg->type = BRCM_MESSAGE_SBA; + msg->sba.cmds = cmds; + msg->sba.cmds_count = cmdsp - cmds; + msg->ctx = req; + msg->error = 0; +} + +struct sba_request * +sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t 
off, + dma_addr_t *dst_p, dma_addr_t *dst_q, + dma_addr_t src, u8 scf, size_t len, + unsigned long flags) +{ + struct sba_request *req = NULL; + + /* Alloc new request */ + req = sba_alloc_request(sba); + if (!req) + return NULL; + req->fence = (flags & DMA_PREP_FENCE) ? true : false; + + /* Fillup request messages */ + sba_fillup_pq_single_msg(req, dmaf_continue(flags), + req->cmds, &req->msg, off, len, + dst_p, dst_q, src, scf); + + /* Init async_tx descriptor */ + req->tx.flags = flags; + req->tx.cookie = -EBUSY; + + return req; +} + +static struct dma_async_tx_descriptor * +sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src, + u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) +{ + u32 i, dst_q_index; + size_t req_len; + bool slow = false; + dma_addr_t off = 0; + dma_addr_t *dst_p = NULL, *dst_q = NULL; + struct sba_device *sba = to_sba_device(dchan); + struct sba_request *first = NULL, *req; + + /* Sanity checks */ + if (unlikely(src_cnt > sba->max_pq_srcs)) + return NULL; + for (i = 0; i < src_cnt; i++) + if (sba->max_pq_coefs <= raid6_gflog[scf[i]]) + slow = true; + + /* Figure-out P and Q destination addresses */ + if (!(flags & DMA_PREP_PQ_DISABLE_P)) + dst_p = &dst[0]; + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) + dst_q = &dst[1]; + + /* Create chained requests where each request is upto hw_buf_size */ + while (len) { + req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; + + if (slow) { + dst_q_index = src_cnt; + + if (dst_q) { + for (i = 0; i < src_cnt; i++) { + if (*dst_q == src[i]) { + dst_q_index = i; + break; + } + } + } + + if (dst_q_index < src_cnt) { + i = dst_q_index; + req = sba_prep_dma_pq_single_req(sba, + off, dst_p, dst_q, src[i], scf[i], + req_len, flags | DMA_PREP_FENCE); + if (!req) + goto fail; + + if (first) + sba_chain_request(first, req); + else + first = req; + + flags |= DMA_PREP_CONTINUE; + } + + for (i = 0; i < src_cnt; i++) { + if (dst_q_index == i) + continue; + + req = sba_prep_dma_pq_single_req(sba, + off, dst_p, dst_q, src[i], scf[i], + req_len, flags | DMA_PREP_FENCE); + if (!req) + goto fail; + + if (first) + sba_chain_request(first, req); + else + first = req; + + flags |= DMA_PREP_CONTINUE; + } + } else { + req = sba_prep_dma_pq_req(sba, off, + dst_p, dst_q, src, src_cnt, + scf, req_len, flags); + if (!req) + goto fail; + + if (first) + sba_chain_request(first, req); + else + first = req; + } + + off += req_len; + len -= req_len; + } + + return (first) ? 
&first->tx : NULL; + +fail: + if (first) + sba_free_chained_requests(first); + return NULL; +} + +/* ====== Mailbox callbacks ===== */ + +static void sba_dma_tx_actions(struct sba_request *req) +{ + struct dma_async_tx_descriptor *tx = &req->tx; + + WARN_ON(tx->cookie < 0); + + if (tx->cookie > 0) { + dma_cookie_complete(tx); + + /* + * Call the callback (must not sleep or submit new + * operations to this channel) + */ + if (tx->callback) + tx->callback(tx->callback_param); + + dma_descriptor_unmap(tx); + } + + /* Run dependent operations */ + dma_run_dependencies(tx); + + /* If waiting for 'ack' then move to completed list */ + if (!async_tx_test_ack(&req->tx)) + sba_complete_chained_requests(req); + else + sba_free_chained_requests(req); +} + +static void sba_receive_message(struct mbox_client *cl, void *msg) +{ + unsigned long flags; + struct brcm_message *m = msg; + struct sba_request *req = m->ctx, *req1; + struct sba_device *sba = req->sba; + + /* Error count if message has error */ + if (m->error < 0) + dev_err(sba->dev, "%s got message with error %d", + dma_chan_name(&sba->dma_chan), m->error); + + /* Mark request as received */ + sba_received_request(req); + + /* Wait for all chained requests to be completed */ + if (atomic_dec_return(&req->first->next_pending_count)) + goto done; + + /* Point to first request */ + req = req->first; + + /* Update request */ + if (req->state == SBA_REQUEST_STATE_RECEIVED) + sba_dma_tx_actions(req); + else + sba_free_chained_requests(req); + + spin_lock_irqsave(&sba->reqs_lock, flags); + + /* Re-check all completed request waiting for 'ack' */ + list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) { + spin_unlock_irqrestore(&sba->reqs_lock, flags); + sba_dma_tx_actions(req); + spin_lock_irqsave(&sba->reqs_lock, flags); + } + + spin_unlock_irqrestore(&sba->reqs_lock, flags); + +done: + /* Try to submit pending request */ + sba_issue_pending(&sba->dma_chan); +} + +/* ====== Platform driver routines ===== */ + +static int sba_prealloc_channel_resources(struct sba_device *sba) +{ + int i, j, p, ret = 0; + struct sba_request *req = NULL; + + sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev, + sba->max_resp_pool_size, + &sba->resp_dma_base, GFP_KERNEL); + if (!sba->resp_base) + return -ENOMEM; + + sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev, + sba->max_cmds_pool_size, + &sba->cmds_dma_base, GFP_KERNEL); + if (!sba->cmds_base) { + ret = -ENOMEM; + goto fail_free_resp_pool; + } + + spin_lock_init(&sba->reqs_lock); + sba->reqs_fence = false; + INIT_LIST_HEAD(&sba->reqs_alloc_list); + INIT_LIST_HEAD(&sba->reqs_pending_list); + INIT_LIST_HEAD(&sba->reqs_active_list); + INIT_LIST_HEAD(&sba->reqs_received_list); + INIT_LIST_HEAD(&sba->reqs_completed_list); + INIT_LIST_HEAD(&sba->reqs_aborted_list); + INIT_LIST_HEAD(&sba->reqs_free_list); + + sba->reqs = devm_kcalloc(sba->dev, sba->max_req, + sizeof(*req), GFP_KERNEL); + if (!sba->reqs) { + ret = -ENOMEM; + goto fail_free_cmds_pool; + } + + for (i = 0, p = 0; i < sba->max_req; i++) { + req = &sba->reqs[i]; + INIT_LIST_HEAD(&req->node); + req->sba = sba; + req->state = SBA_REQUEST_STATE_FREE; + INIT_LIST_HEAD(&req->next); + req->next_count = 1; + atomic_set(&req->next_pending_count, 0); + req->fence = false; + req->resp = sba->resp_base + p; + req->resp_dma = sba->resp_dma_base + p; + p += sba->hw_resp_size; + req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req, + sizeof(*req->cmds), GFP_KERNEL); + if (!req->cmds) { + ret = -ENOMEM; + goto fail_free_cmds_pool; + } + for (j = 0; j 
< sba->max_cmd_per_req; j++) { + req->cmds[j].cmd = 0; + req->cmds[j].cmd_dma = sba->cmds_base + + (i * sba->max_cmd_per_req + j) * sizeof(u64); + req->cmds[j].cmd_dma_addr = sba->cmds_dma_base + + (i * sba->max_cmd_per_req + j) * sizeof(u64); + req->cmds[j].flags = 0; + } + memset(&req->msg, 0, sizeof(req->msg)); + dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); + req->tx.tx_submit = sba_tx_submit; + req->tx.phys = req->resp_dma; + list_add_tail(&req->node, &sba->reqs_free_list); + } + + sba->reqs_free_count = sba->max_req; + + return 0; + +fail_free_cmds_pool: + dma_free_coherent(sba->dma_dev.dev, + sba->max_cmds_pool_size, + sba->cmds_base, sba->cmds_dma_base); +fail_free_resp_pool: + dma_free_coherent(sba->dma_dev.dev, + sba->max_resp_pool_size, + sba->resp_base, sba->resp_dma_base); + return ret; +} + +static void sba_freeup_channel_resources(struct sba_device *sba) +{ + dmaengine_terminate_all(&sba->dma_chan); + dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size, + sba->cmds_base, sba->cmds_dma_base); + dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size, + sba->resp_base, sba->resp_dma_base); + sba->resp_base = NULL; + sba->resp_dma_base = 0; +} + +static int sba_async_register(struct sba_device *sba) +{ + int ret; + struct dma_device *dma_dev = &sba->dma_dev; + + /* Initialize DMA channel cookie */ + sba->dma_chan.device = dma_dev; + dma_cookie_init(&sba->dma_chan); + + /* Initialize DMA device capability mask */ + dma_cap_zero(dma_dev->cap_mask); + dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_cap_set(DMA_XOR, dma_dev->cap_mask); + dma_cap_set(DMA_PQ, dma_dev->cap_mask); + + /* + * Set mailbox channel device as the base device of + * our dma_device because the actual memory accesses + * will be done by mailbox controller + */ + dma_dev->dev = sba->mbox_dev; + + /* Set base prep routines */ + dma_dev->device_free_chan_resources = sba_free_chan_resources; + dma_dev->device_terminate_all = sba_device_terminate_all; + dma_dev->device_issue_pending = sba_issue_pending; + dma_dev->device_tx_status = sba_tx_status; + + /* Set interrupt routine */ + if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) + dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt; + + /* Set memcpy routine */ + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) + dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy; + + /* Set xor routine and capability */ + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { + dma_dev->device_prep_dma_xor = sba_prep_dma_xor; + dma_dev->max_xor = sba->max_xor_srcs; + } + + /* Set pq routine and capability */ + if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { + dma_dev->device_prep_dma_pq = sba_prep_dma_pq; + dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0); + } + + /* Initialize DMA device channel list */ + INIT_LIST_HEAD(&dma_dev->channels); + list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels); + + /* Register with Linux async DMA framework*/ + ret = dma_async_device_register(dma_dev); + if (ret) { + dev_err(sba->dev, "async device register error %d", ret); + return ret; + } + + dev_info(sba->dev, "%s capabilities: %s%s%s%s\n", + dma_chan_name(&sba->dma_chan), + dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "", + dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "", + dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", + dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? 
"pq " : ""); + + return 0; +} + +static int sba_probe(struct platform_device *pdev) +{ + int i, ret = 0, mchans_count; + struct sba_device *sba; + struct platform_device *mbox_pdev; + struct of_phandle_args args; + + /* Allocate main SBA struct */ + sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL); + if (!sba) + return -ENOMEM; + + sba->dev = &pdev->dev; + platform_set_drvdata(pdev, sba); + + /* Determine SBA version from DT compatible string */ + if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) + sba->ver = SBA_VER_1; + else if (of_device_is_compatible(sba->dev->of_node, + "brcm,iproc-sba-v2")) + sba->ver = SBA_VER_2; + else + return -ENODEV; + + /* Derived Configuration parameters */ + switch (sba->ver) { + case SBA_VER_1: + sba->max_req = 1024; + sba->hw_buf_size = 4096; + sba->hw_resp_size = 8; + sba->max_pq_coefs = 6; + sba->max_pq_srcs = 6; + break; + case SBA_VER_2: + sba->max_req = 1024; + sba->hw_buf_size = 4096; + sba->hw_resp_size = 8; + sba->max_pq_coefs = 30; + /* + * We can support max_pq_srcs == max_pq_coefs because + * we are limited by number of SBA commands that we can + * fit in one message for underlying ring manager HW. + */ + sba->max_pq_srcs = 12; + break; + default: + return -EINVAL; + } + sba->max_cmd_per_req = sba->max_pq_srcs + 3; + sba->max_xor_srcs = sba->max_cmd_per_req - 1; + sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; + sba->max_cmds_pool_size = sba->max_req * + sba->max_cmd_per_req * sizeof(u64); + + /* Setup mailbox client */ + sba->client.dev = &pdev->dev; + sba->client.rx_callback = sba_receive_message; + sba->client.tx_block = false; + sba->client.knows_txdone = false; + sba->client.tx_tout = 0; + + /* Number of channels equals number of mailbox channels */ + ret = of_count_phandle_with_args(pdev->dev.of_node, + "mboxes", "#mbox-cells"); + if (ret <= 0) + return -ENODEV; + mchans_count = ret; + sba->mchans_count = 0; + atomic_set(&sba->mchans_current, 0); + + /* Allocate mailbox channel array */ + sba->mchans = devm_kcalloc(&pdev->dev, sba->mchans_count, + sizeof(*sba->mchans), GFP_KERNEL); + if (!sba->mchans) + return -ENOMEM; + + /* Request mailbox channels */ + for (i = 0; i < mchans_count; i++) { + sba->mchans[i] = mbox_request_channel(&sba->client, i); + if (IS_ERR(sba->mchans[i])) { + ret = PTR_ERR(sba->mchans[i]); + goto fail_free_mchans; + } + sba->mchans_count++; + } + + /* Find-out underlying mailbox device */ + ret = of_parse_phandle_with_args(pdev->dev.of_node, + "mboxes", "#mbox-cells", 0, &args); + if (ret) + goto fail_free_mchans; + mbox_pdev = of_find_device_by_node(args.np); + of_node_put(args.np); + if (!mbox_pdev) { + ret = -ENODEV; + goto fail_free_mchans; + } + sba->mbox_dev = &mbox_pdev->dev; + + /* All mailbox channels should be of same ring manager device */ + for (i = 1; i < mchans_count; i++) { + ret = of_parse_phandle_with_args(pdev->dev.of_node, + "mboxes", "#mbox-cells", i, &args); + if (ret) + goto fail_free_mchans; + mbox_pdev = of_find_device_by_node(args.np); + of_node_put(args.np); + if (sba->mbox_dev != &mbox_pdev->dev) { + ret = -EINVAL; + goto fail_free_mchans; + } + } + + /* Register DMA device with linux async framework */ + ret = sba_async_register(sba); + if (ret) + goto fail_free_mchans; + + /* Prealloc channel resource */ + ret = sba_prealloc_channel_resources(sba); + if (ret) + goto fail_async_dev_unreg; + + /* Print device info */ + dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels", + dma_chan_name(&sba->dma_chan), sba->ver+1, + sba->mchans_count); + + 
return 0; + +fail_async_dev_unreg: + dma_async_device_unregister(&sba->dma_dev); +fail_free_mchans: + for (i = 0; i < sba->mchans_count; i++) + mbox_free_channel(sba->mchans[i]); + return ret; +} + +static int sba_remove(struct platform_device *pdev) +{ + int i; + struct sba_device *sba = platform_get_drvdata(pdev); + + sba_freeup_channel_resources(sba); + + dma_async_device_unregister(&sba->dma_dev); + + for (i = 0; i < sba->mchans_count; i++) + mbox_free_channel(sba->mchans[i]); + + return 0; +} + +static const struct of_device_id sba_of_match[] = { + { .compatible = "brcm,iproc-sba", }, + { .compatible = "brcm,iproc-sba-v2", }, + {}, +}; +MODULE_DEVICE_TABLE(of, sba_of_match); + +static struct platform_driver sba_driver = { + .probe = sba_probe, + .remove = sba_remove, + .driver = { + .name = "bcm-sba-raid", + .of_match_table = sba_of_match, + }, +}; +module_platform_driver(sba_driver); + +MODULE_DESCRIPTION("Broadcom SBA RAID driver"); +MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index c639c60b825a..bc31fe802061 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -306,8 +306,12 @@ static int dw_resume_early(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dw_dma_chip *chip = platform_get_drvdata(pdev); + int ret; + + ret = clk_prepare_enable(chip->clk); + if (ret) + return ret; - clk_prepare_enable(chip->clk); return dw_dma_enable(chip); } diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index 90d29f90acfb..493dc6c59d1d 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -877,7 +877,7 @@ static int fsl_re_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id fsl_re_ids[] = { +static const struct of_device_id fsl_re_ids[] = { { .compatible = "fsl,raideng-v1.0", }, {} }; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 51c75bf2b9b6..3b8b752ede2d 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -269,6 +269,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) case 2: case 4: case 8: + mode &= ~FSL_DMA_MR_SAHTS_MASK; mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14); break; } @@ -301,6 +302,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) case 2: case 4: case 8: + mode &= ~FSL_DMA_MR_DAHTS_MASK; mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16); break; } @@ -327,7 +329,8 @@ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) BUG_ON(size > 1024); mode = get_mr(chan); - mode |= (__ilog2(size) << 24) & 0x0f000000; + mode &= ~FSL_DMA_MR_BWC_MASK; + mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK; set_mr(chan, mode); } diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 31bffccdcc75..4787d485dd76 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -36,6 +36,10 @@ #define FSL_DMA_MR_DAHE 0x00002000 #define FSL_DMA_MR_SAHE 0x00001000 +#define FSL_DMA_MR_SAHTS_MASK 0x0000C000 +#define FSL_DMA_MR_DAHTS_MASK 0x00030000 +#define FSL_DMA_MR_BWC_MASK 0x0f000000 + /* * Bandwidth/pause control determines how many bytes a given * channel is allowed to transfer before the DMA engine pauses diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index ab0fb804fb1e..f681df8f0ed3 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -888,7 +888,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( sg_init_table(imxdmac->sg_list, periods); 
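In the imx-dma hunk shown here, the open-coded page_link arithmetic that follows is replaced by the sg_assign_page() and sg_chain() helpers; chaining the extra tail entry back to the head of the list is what makes the descriptor ring cyclic. A self-contained sketch of the resulting pattern (illustrative only; assumes sgl was allocated with periods + 1 entries):

#include <linux/scatterlist.h>

/*
 * Illustrative only: build an N-period ring for cyclic DMA. The last
 * entry carries no data; sg_chain() turns it into a link back to the
 * head so the engine loops over the same periods forever.
 */
static void build_cyclic_sgl(struct scatterlist *sgl, unsigned int periods,
			     dma_addr_t buf, size_t period_len)
{
	unsigned int i;

	sg_init_table(sgl, periods);
	for (i = 0; i < periods; i++) {
		sg_assign_page(&sgl[i], NULL);
		sgl[i].offset = 0;
		sgl[i].dma_address = buf + i * period_len;
		sg_dma_len(&sgl[i]) = period_len;
	}
	sg_chain(sgl, periods + 1, sgl);	/* close the loop */
}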
for (i = 0; i < periods; i++) { - imxdmac->sg_list[i].page_link = 0; + sg_assign_page(&imxdmac->sg_list[i], NULL); imxdmac->sg_list[i].offset = 0; imxdmac->sg_list[i].dma_address = dma_addr; sg_dma_len(&imxdmac->sg_list[i]) = period_len; @@ -896,10 +896,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( } /* close the loop */ - imxdmac->sg_list[periods].offset = 0; - sg_dma_len(&imxdmac->sg_list[periods]) = 0; - imxdmac->sg_list[periods].page_link = - ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; + sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list); desc->type = IMXDMA_DESC_CYCLIC; desc->sg = imxdmac->sg_list; diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 085993cb2ccc..a67ec1bdc4e0 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1323,7 +1323,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( } if (period_len > 0xffff) { - dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n", + dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n", channel, period_len, 0xffff); goto err_out; } @@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( if (i + 1 == num_periods) param |= BD_WRAP; - dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n", i, period_len, (u64)dma_addr, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); @@ -1755,19 +1755,26 @@ static int sdma_probe(struct platform_device *pdev) if (IS_ERR(sdma->clk_ahb)) return PTR_ERR(sdma->clk_ahb); - clk_prepare(sdma->clk_ipg); - clk_prepare(sdma->clk_ahb); + ret = clk_prepare(sdma->clk_ipg); + if (ret) + return ret; + + ret = clk_prepare(sdma->clk_ahb); + if (ret) + goto err_clk; ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", sdma); if (ret) - return ret; + goto err_irq; sdma->irq = irq; sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); - if (!sdma->script_addrs) - return -ENOMEM; + if (!sdma->script_addrs) { + ret = -ENOMEM; + goto err_irq; + } /* initially no scripts available */ saddr_arr = (s32 *)sdma->script_addrs; @@ -1882,6 +1889,10 @@ err_register: dma_async_device_unregister(&sdma->dma_device); err_init: kfree(sdma->script_addrs); +err_irq: + clk_unprepare(sdma->clk_ahb); +err_clk: + clk_unprepare(sdma->clk_ipg); return ret; } @@ -1893,6 +1904,8 @@ static int sdma_remove(struct platform_device *pdev) devm_free_irq(&pdev->dev, sdma->irq, sdma); dma_async_device_unregister(&sdma->dma_device); kfree(sdma->script_addrs); + clk_unprepare(sdma->clk_ahb); + clk_unprepare(sdma->clk_ipg); /* Kill the tasklet */ for (i = 0; i < MAX_DMA_CHANNELS; i++) { struct sdma_channel *sdmac = &sdma->channel[i]; diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index 0b9b6b07db9e..eab2fdda29ec 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -336,10 +336,10 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) } if (dca3_tag_map_invalid(ioatdca->tag_map)) { - WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND, - "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n", - dev_driver_string(&pdev->dev), - dev_name(&pdev->dev)); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); + pr_warn_once("%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n", + dev_driver_string(&pdev->dev), + dev_name(&pdev->dev)); free_dca_provider(dca); return NULL; } diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c 
index f3e211f8f6c5..f652a0e0f5a2 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -42,6 +42,7 @@ #define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018 #define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF #define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0 +#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18) #define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */ #define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C @@ -55,6 +56,9 @@ #define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800 #define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804 #define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808 +#define MV_XOR_V2_DMA_IMSG_TMOT 0x810 +#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF +#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0 /* XOR Global registers */ #define MV_XOR_V2_GLOB_BW_CTRL 0x4 @@ -90,6 +94,13 @@ */ #define MV_XOR_V2_DESC_NUM 1024 +/* + * Threshold values for descriptors and timeout, determined by + * experimentation as giving a good level of performance. + */ +#define MV_XOR_V2_DONE_IMSG_THRD 0x14 +#define MV_XOR_V2_TIMER_THRD 0xB0 + /** * struct mv_xor_v2_descriptor - DMA HW descriptor * @desc_id: used by S/W and is not affected by H/W. @@ -246,6 +257,29 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev) return MV_XOR_V2_EXT_DESC_SIZE; } +/* + * Set the IMSG threshold + */ +static inline +void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev) +{ + u32 reg; + + /* Configure threshold of number of descriptors, and enable timer */ + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); + reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); + reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); + reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN; + writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); + + /* Configure Timer Threshold */ + reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT); + reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK << + MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT); + reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT); + writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT); +} + static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) { struct mv_xor_v2_device *xor_dev = data; @@ -501,9 +535,6 @@ static void mv_xor_v2_issue_pending(struct dma_chan *chan) mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings); xor_dev->npendings = 0; - /* Activate the channel */ - writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); - spin_unlock_bh(&xor_dev->lock); } @@ -665,6 +696,27 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) return 0; } +static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state) +{ + struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev); + + /* Set this bit to disable to stop the XOR unit. 
*/ + writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); + + return 0; +} + +static int mv_xor_v2_resume(struct platform_device *dev) +{ + struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev); + + mv_xor_v2_set_desc_size(xor_dev); + mv_xor_v2_enable_imsg_thrd(xor_dev); + mv_xor_v2_descq_init(xor_dev); + + return 0; +} + static int mv_xor_v2_probe(struct platform_device *pdev) { struct mv_xor_v2_device *xor_dev; @@ -795,6 +847,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev) list_add_tail(&xor_dev->dmachan.device_node, &dma_dev->channels); + mv_xor_v2_enable_imsg_thrd(xor_dev); + mv_xor_v2_descq_init(xor_dev); ret = dma_async_device_register(dma_dev); @@ -844,6 +898,8 @@ MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids); static struct platform_driver mv_xor_v2_driver = { .probe = mv_xor_v2_probe, + .suspend = mv_xor_v2_suspend, + .resume = mv_xor_v2_resume, .remove = mv_xor_v2_remove, .driver = { .name = "mv_xor_v2", diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index e217268c7098..41d167921fab 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -617,7 +617,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( if (period_len > MAX_XFER_BYTES) { dev_err(mxs_dma->dma_device.dev, - "maximum period size exceeded: %d > %d\n", + "maximum period size exceeded: %zu > %d\n", period_len, MAX_XFER_BYTES); goto err_out; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index e90a7a0d760a..b19ee04567b5 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -443,7 +443,10 @@ struct dma_pl330_chan { /* For D-to-M and M-to-D channels */ int burst_sz; /* the peripheral fifo width */ int burst_len; /* the number of burst */ - dma_addr_t fifo_addr; + phys_addr_t fifo_addr; + /* DMA-mapped view of the FIFO; may differ if an IOMMU is present */ + dma_addr_t fifo_dma; + enum dma_data_direction dir; /* for cyclic capability */ bool cyclic; @@ -538,11 +541,6 @@ struct _xfer_spec { struct dma_pl330_desc *desc; }; -static inline bool _queue_empty(struct pl330_thread *thrd) -{ - return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL; -} - static inline bool _queue_full(struct pl330_thread *thrd) { return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; @@ -564,23 +562,6 @@ static inline u32 get_revision(u32 periph_id) return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; } -static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], - enum pl330_dst da, u16 val) -{ - if (dry_run) - return SZ_DMAADDH; - - buf[0] = CMD_DMAADDH; - buf[0] |= (da << 1); - buf[1] = val; - buf[2] = val >> 8; - - PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", - da == 1 ? 
"DA" : "SA", val); - - return SZ_DMAADDH; -} - static inline u32 _emit_END(unsigned dry_run, u8 buf[]) { if (dry_run) @@ -738,18 +719,6 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], return SZ_DMAMOV; } -static inline u32 _emit_NOP(unsigned dry_run, u8 buf[]) -{ - if (dry_run) - return SZ_DMANOP; - - buf[0] = CMD_DMANOP; - - PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n"); - - return SZ_DMANOP; -} - static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) { if (dry_run) @@ -817,39 +786,6 @@ static inline u32 _emit_STP(unsigned dry_run, u8 buf[], return SZ_DMASTP; } -static inline u32 _emit_STZ(unsigned dry_run, u8 buf[]) -{ - if (dry_run) - return SZ_DMASTZ; - - buf[0] = CMD_DMASTZ; - - PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n"); - - return SZ_DMASTZ; -} - -static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev, - unsigned invalidate) -{ - if (dry_run) - return SZ_DMAWFE; - - buf[0] = CMD_DMAWFE; - - ev &= 0x1f; - ev <<= 3; - buf[1] = ev; - - if (invalidate) - buf[1] |= (1 << 1); - - PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n", - ev >> 3, invalidate ? ", I" : ""); - - return SZ_DMAWFE; -} - static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], enum pl330_cond cond, u8 peri) { @@ -2120,11 +2056,60 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) return 1; } +/* + * We need the data direction between the DMAC (the dma-mapping "device") and + * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing! + */ +static enum dma_data_direction +pl330_dma_slave_map_dir(enum dma_transfer_direction dir) +{ + switch (dir) { + case DMA_MEM_TO_DEV: + return DMA_FROM_DEVICE; + case DMA_DEV_TO_MEM: + return DMA_TO_DEVICE; + case DMA_DEV_TO_DEV: + return DMA_BIDIRECTIONAL; + default: + return DMA_NONE; + } +} + +static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch) +{ + if (pch->dir != DMA_NONE) + dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma, + 1 << pch->burst_sz, pch->dir, 0); + pch->dir = DMA_NONE; +} + + +static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch, + enum dma_transfer_direction dir) +{ + struct device *dev = pch->chan.device->dev; + enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir); + + /* Already mapped for this config? 
*/ + if (pch->dir == dma_dir) + return true; + + pl330_unprep_slave_fifo(pch); + pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr, + 1 << pch->burst_sz, dma_dir, 0); + if (dma_mapping_error(dev, pch->fifo_dma)) + return false; + + pch->dir = dma_dir; + return true; +} + static int pl330_config(struct dma_chan *chan, struct dma_slave_config *slave_config) { struct dma_pl330_chan *pch = to_pchan(chan); + pl330_unprep_slave_fifo(pch); if (slave_config->direction == DMA_MEM_TO_DEV) { if (slave_config->dst_addr) pch->fifo_addr = slave_config->dst_addr; @@ -2235,6 +2220,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan) spin_unlock_irqrestore(&pl330->lock, flags); pm_runtime_mark_last_busy(pch->dmac->ddma.dev); pm_runtime_put_autosuspend(pch->dmac->ddma.dev); + pl330_unprep_slave_fifo(pch); } static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch, @@ -2564,6 +2550,9 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( return NULL; } + if (!pl330_prep_slave_fifo(pch, direction)) + return NULL; + for (i = 0; i < len / period_len; i++) { desc = pl330_get_desc(pch); if (!desc) { @@ -2593,12 +2582,12 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 0; src = dma_addr; - dst = pch->fifo_addr; + dst = pch->fifo_dma; break; case DMA_DEV_TO_MEM: desc->rqcfg.src_inc = 0; desc->rqcfg.dst_inc = 1; - src = pch->fifo_addr; + src = pch->fifo_dma; dst = dma_addr; break; default: @@ -2711,12 +2700,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct dma_pl330_chan *pch = to_pchan(chan); struct scatterlist *sg; int i; - dma_addr_t addr; if (unlikely(!pch || !sgl || !sg_len)) return NULL; - addr = pch->fifo_addr; + if (!pl330_prep_slave_fifo(pch, direction)) + return NULL; first = NULL; @@ -2742,13 +2731,13 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (direction == DMA_MEM_TO_DEV) { desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 0; - fill_px(&desc->px, - addr, sg_dma_address(sg), sg_dma_len(sg)); + fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg), + sg_dma_len(sg)); } else { desc->rqcfg.src_inc = 0; desc->rqcfg.dst_inc = 1; - fill_px(&desc->px, - sg_dma_address(sg), addr, sg_dma_len(sg)); + fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma, + sg_dma_len(sg)); } desc->rqcfg.brst_size = pch->burst_sz; @@ -2906,6 +2895,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pch->thread = NULL; pch->chan.device = pd; pch->dmac = pl330; + pch->dir = DMA_NONE; /* Add the channel to the DMAC list */ list_add_tail(&pch->chan.device_node, &pd->channels); diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 5072a7d306d4..34fb6afd229b 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -1,7 +1,7 @@ /* * Qualcomm Technologies HIDMA DMA engine interface * - * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -210,6 +210,7 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig) INIT_LIST_HEAD(&mchan->prepared); INIT_LIST_HEAD(&mchan->active); INIT_LIST_HEAD(&mchan->completed); + INIT_LIST_HEAD(&mchan->queued); spin_lock_init(&mchan->lock); list_add_tail(&mchan->chan.device_node, &ddev->channels); @@ -230,9 +231,15 @@ static void hidma_issue_pending(struct dma_chan *dmach) struct hidma_chan *mchan = to_hidma_chan(dmach); struct hidma_dev *dmadev = mchan->dmadev; unsigned long flags; + struct hidma_desc *qdesc, *next; int status; spin_lock_irqsave(&mchan->lock, flags); + list_for_each_entry_safe(qdesc, next, &mchan->queued, node) { + hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch); + list_move_tail(&qdesc->node, &mchan->active); + } + if (!mchan->running) { struct hidma_desc *desc = list_first_entry(&mchan->active, struct hidma_desc, @@ -315,17 +322,18 @@ static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd) pm_runtime_put_autosuspend(dmadev->ddev.dev); return -ENODEV; } + pm_runtime_mark_last_busy(dmadev->ddev.dev); + pm_runtime_put_autosuspend(dmadev->ddev.dev); mdesc = container_of(txd, struct hidma_desc, desc); spin_lock_irqsave(&mchan->lock, irqflags); - /* Move descriptor to active */ - list_move_tail(&mdesc->node, &mchan->active); + /* Move descriptor to queued */ + list_move_tail(&mdesc->node, &mchan->queued); /* Update cookie */ cookie = dma_cookie_assign(txd); - hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch); spin_unlock_irqrestore(&mchan->lock, irqflags); return cookie; @@ -431,6 +439,7 @@ static int hidma_terminate_channel(struct dma_chan *chan) list_splice_init(&mchan->active, &list); list_splice_init(&mchan->prepared, &list); list_splice_init(&mchan->completed, &list); + list_splice_init(&mchan->queued, &list); spin_unlock_irqrestore(&mchan->lock, irqflags); /* this suspends the existing transfer */ @@ -795,8 +804,11 @@ static int hidma_probe(struct platform_device *pdev) device_property_read_u32(&pdev->dev, "desc-count", &dmadev->nr_descriptors); - if (!dmadev->nr_descriptors && nr_desc_prm) + if (nr_desc_prm) { + dev_info(&pdev->dev, "overriding number of descriptors as %d\n", + nr_desc_prm); dmadev->nr_descriptors = nr_desc_prm; + } if (!dmadev->nr_descriptors) dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index c7d014235c32..41e0aa283828 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h @@ -104,6 +104,7 @@ struct hidma_chan { struct dma_chan chan; struct list_head free; struct list_head prepared; + struct list_head queued; struct list_head active; struct list_head completed; diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index f847d32cc4b5..5a0991bc4787 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -1,7 +1,7 @@ /* * Qualcomm Technologies HIDMA DMA engine Management interface * - * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -49,6 +49,26 @@ #define HIDMA_AUTOSUSPEND_TIMEOUT 2000 #define HIDMA_MAX_CHANNEL_WEIGHT 15 +static unsigned int max_write_request; +module_param(max_write_request, uint, 0644); +MODULE_PARM_DESC(max_write_request, + "maximum write burst (default: ACPI/DT value)"); + +static unsigned int max_read_request; +module_param(max_read_request, uint, 0644); +MODULE_PARM_DESC(max_read_request, + "maximum read burst (default: ACPI/DT value)"); + +static unsigned int max_wr_xactions; +module_param(max_wr_xactions, uint, 0644); +MODULE_PARM_DESC(max_wr_xactions, + "maximum number of write transactions (default: ACPI/DT value)"); + +static unsigned int max_rd_xactions; +module_param(max_rd_xactions, uint, 0644); +MODULE_PARM_DESC(max_rd_xactions, + "maximum number of read transactions (default: ACPI/DT value)"); + int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev) { unsigned int i; @@ -207,12 +227,25 @@ static int hidma_mgmt_probe(struct platform_device *pdev) goto out; } + if (max_write_request) { + dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n", + max_write_request); + mgmtdev->max_write_request = max_write_request; + } else + max_write_request = mgmtdev->max_write_request; + rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes", &mgmtdev->max_read_request); if (rc) { dev_err(&pdev->dev, "max-read-burst-bytes missing\n"); goto out; } + if (max_read_request) { + dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n", + max_read_request); + mgmtdev->max_read_request = max_read_request; + } else + max_read_request = mgmtdev->max_read_request; rc = device_property_read_u32(&pdev->dev, "max-write-transactions", &mgmtdev->max_wr_xactions); @@ -220,6 +253,12 @@ static int hidma_mgmt_probe(struct platform_device *pdev) dev_err(&pdev->dev, "max-write-transactions missing\n"); goto out; } + if (max_wr_xactions) { + dev_info(&pdev->dev, "overriding max-write-transactions: %d\n", + max_wr_xactions); + mgmtdev->max_wr_xactions = max_wr_xactions; + } else + max_wr_xactions = mgmtdev->max_wr_xactions; rc = device_property_read_u32(&pdev->dev, "max-read-transactions", &mgmtdev->max_rd_xactions); @@ -227,6 +266,12 @@ static int hidma_mgmt_probe(struct platform_device *pdev) dev_err(&pdev->dev, "max-read-transactions missing\n"); goto out; } + if (max_rd_xactions) { + dev_info(&pdev->dev, "overriding max-read-transactions: %d\n", + max_rd_xactions); + mgmtdev->max_rd_xactions = max_rd_xactions; + } else + max_rd_xactions = mgmtdev->max_rd_xactions; mgmtdev->priority = devm_kcalloc(&pdev->dev, mgmtdev->dma_channels, diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index bd261c9e9664..ffcadca53243 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -144,6 +144,7 @@ struct rcar_dmac_chan_map { * @chan: base DMA channel object * @iomem: channel I/O memory base * @index: index of this channel in the controller + * @irq: channel IRQ * @src: slave memory address and size on the source side * @dst: slave memory address and size on the destination side * @mid_rid: hardware MID/RID for the DMA client using this channel @@ -161,6 +162,7 @@ struct rcar_dmac_chan { struct dma_chan chan; void __iomem *iomem; unsigned int index; + int irq; struct rcar_dmac_chan_slave src; struct rcar_dmac_chan_slave dst; @@ -1008,7 +1010,11 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) 
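The rcar-dmac hunk below, together with the new rcar_dmac_device_synchronize() further down, closes a race where a channel IRQ handler may still be running after the channel has been halted. From a dmaengine client's point of view, this is what makes the standard two-step teardown safe; a sketch of that usage (illustrative only; client_teardown and buf are made-up names):

#include <linux/dmaengine.h>
#include <linux/slab.h>

/* Illustrative only: client teardown enabled by device_synchronize */
static void client_teardown(struct dma_chan *chan, void *buf)
{
	dmaengine_terminate_async(chan);
	/* Backed here by rcar_dmac_device_synchronize() -> synchronize_irq() */
	dmaengine_synchronize(chan);
	kfree(buf);	/* safe: no completion callback can still touch it */
}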
rcar_dmac_chan_halt(rchan); spin_unlock_irq(&rchan->lock); - /* Now no new interrupts will occur */ + /* + * Now no new interrupts will occur, but one might already be + * running. Wait for it to finish before freeing resources. + */ + synchronize_irq(rchan->irq); if (rchan->mid_rid >= 0) { /* The caller is holding dma_list_mutex */ @@ -1366,6 +1372,13 @@ done: spin_unlock_irqrestore(&rchan->lock, flags); } +static void rcar_dmac_device_synchronize(struct dma_chan *chan) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + + synchronize_irq(rchan->irq); +} + /* ----------------------------------------------------------------------------- * IRQ handling */ @@ -1650,7 +1663,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, struct dma_chan *chan = &rchan->chan; char pdev_irqname[5]; char *irqname; - int irq; int ret; rchan->index = index; @@ -1667,8 +1679,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, /* Request the channel interrupt. */ sprintf(pdev_irqname, "ch%u", index); - irq = platform_get_irq_byname(pdev, pdev_irqname); - if (irq < 0) { + rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); + if (rchan->irq < 0) { dev_err(dmac->dev, "no IRQ specified for channel %u\n", index); return -ENODEV; } @@ -1678,11 +1690,13 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, if (!irqname) return -ENOMEM; - ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel, + ret = devm_request_threaded_irq(dmac->dev, rchan->irq, + rcar_dmac_isr_channel, rcar_dmac_isr_channel_thread, 0, irqname, rchan); if (ret) { - dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret); + dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", + rchan->irq, ret); return ret; } @@ -1846,6 +1860,7 @@ static int rcar_dmac_probe(struct platform_device *pdev) engine->device_terminate_all = rcar_dmac_chan_terminate_all; engine->device_tx_status = rcar_dmac_tx_status; engine->device_issue_pending = rcar_dmac_issue_pending; + engine->device_synchronize = rcar_dmac_device_synchronize; ret = dma_async_device_register(engine); if (ret < 0) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index a6620b671d1d..c3052fbfd092 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2528,10 +2528,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, dma_addr += period_len; } - sg[periods].offset = 0; - sg_dma_len(&sg[periods]) = 0; - sg[periods].page_link = - ((unsigned long)sg | 0x01) & ~0x02; + sg_chain(sg, periods + 1, sg); txd = d40_prep_sg(chan, sg, sg, periods, direction, DMA_PREP_INTERRUPT); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 3722b9d8d9fe..b9d75a54c896 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1494,35 +1494,7 @@ static int tegra_dma_remove(struct platform_device *pdev) static int tegra_dma_runtime_suspend(struct device *dev) { struct tegra_dma *tdma = dev_get_drvdata(dev); - - clk_disable_unprepare(tdma->dma_clk); - return 0; -} - -static int tegra_dma_runtime_resume(struct device *dev) -{ - struct tegra_dma *tdma = dev_get_drvdata(dev); - int ret; - - ret = clk_prepare_enable(tdma->dma_clk); - if (ret < 0) { - dev_err(dev, "clk_enable failed: %d\n", ret); - return ret; - } - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int tegra_dma_pm_suspend(struct device *dev) -{ - struct tegra_dma *tdma = dev_get_drvdata(dev); int i; - int ret; - - /* Enable clock before accessing register */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) - 
return ret; tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL); for (i = 0; i < tdma->chip_data->nr_channels; i++) { @@ -1543,21 +1515,21 @@ static int tegra_dma_pm_suspend(struct device *dev) TEGRA_APBDMA_CHAN_WCOUNT); } - /* Disable clock */ - pm_runtime_put(dev); + clk_disable_unprepare(tdma->dma_clk); + return 0; } -static int tegra_dma_pm_resume(struct device *dev) +static int tegra_dma_runtime_resume(struct device *dev) { struct tegra_dma *tdma = dev_get_drvdata(dev); - int i; - int ret; + int i, ret; - /* Enable clock before accessing register */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) { + dev_err(dev, "clk_enable failed: %d\n", ret); return ret; + } tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen); tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); @@ -1582,16 +1554,14 @@ static int tegra_dma_pm_resume(struct device *dev) (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB)); } - /* Disable clock */ - pm_runtime_put(dev); return 0; } -#endif static const struct dev_pm_ops tegra_dma_dev_pm_ops = { SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static const struct of_device_id tegra_dma_of_match[] = { diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 6d221e5c72ee..47f64192d2fd 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -794,9 +794,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( chan = to_chan(dchan); - if (len > ZYNQMP_DMA_MAX_TRANS_LEN) - return NULL; - desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); spin_lock_bh(&chan->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 43ca16b6eee2..bbac5d5d1fcf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1152,16 +1152,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero return; if (state == VGA_SWITCHEROO_ON) { - unsigned d3_delay = dev->pdev->d3_delay; - pr_info("amdgpu: switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; amdgpu_device_resume(dev, true, true); - dev->pdev->d3_delay = d3_delay; - dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0a6444d72000..997131d58c7f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -113,7 +113,6 @@ static inline bool radeon_is_atpx_hybrid(void) { return false; } #endif #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) -#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) struct radeon_px_quirk { u32 chip_vendor; @@ -140,8 +139,6 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugs.freedesktop.org/show_bug.cgi?id=101491 */ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, - /* macbook pro 8.2 */ - { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { 0, 0, 0, 0, 0 }, }; @@ -1245,25 +1242,17 @@ static void radeon_check_arguments(struct radeon_device *rdev) static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); - 
struct radeon_device *rdev = dev->dev_private; if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { - unsigned d3_delay = dev->pdev->d3_delay; - pr_info("radeon: switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP)) - dev->pdev->d3_delay = 20; - radeon_resume_kms(dev, true, true); - dev->pdev->d3_delay = d3_delay; - dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { diff --git a/drivers/input/input.c b/drivers/input/input.c index 067d648028a2..7e6842bd525c 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -481,7 +481,7 @@ EXPORT_SYMBOL(input_inject_event); void input_alloc_absinfo(struct input_dev *dev) { if (!dev->absinfo) - dev->absinfo = kcalloc(ABS_CNT, sizeof(struct input_absinfo), + dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL); WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__); @@ -1126,7 +1126,7 @@ static void input_seq_print_bitmap(struct seq_file *seq, const char *name, * If no output was produced print a single 0. */ if (skip_empty) - seq_puts(seq, "0"); + seq_putc(seq, '0'); seq_putc(seq, '\n'); } @@ -1144,7 +1144,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : ""); seq_printf(seq, "S: Sysfs=%s\n", path ? path : ""); seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : ""); - seq_printf(seq, "H: Handlers="); + seq_puts(seq, "H: Handlers="); list_for_each_entry(handle, &dev->h_list, d_node) seq_printf(seq, "%s ", handle->name); @@ -1783,7 +1783,7 @@ struct input_dev *input_allocate_device(void) static atomic_t input_no = ATOMIC_INIT(-1); struct input_dev *dev; - dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev) { dev->dev.type = &input_dev_type; dev->dev.class = &input_class; @@ -1849,7 +1849,7 @@ struct input_dev *devm_input_allocate_device(struct device *dev) struct input_devres *devres; devres = devres_alloc(devm_input_device_release, - sizeof(struct input_devres), GFP_KERNEL); + sizeof(*devres), GFP_KERNEL); if (!devres) return NULL; @@ -2099,7 +2099,7 @@ int input_register_device(struct input_dev *dev) if (dev->devres_managed) { devres = devres_alloc(devm_input_device_unregister, - sizeof(struct input_devres), GFP_KERNEL); + sizeof(*devres), GFP_KERNEL); if (!devres) return -ENOMEM; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index def96cd2479b..298a6ba51411 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -126,13 +126,18 @@ static const struct xpad_device { u8 mapping; u8 xtype; } xpad_device[] = { + { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, + { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. 
Controller", 0, XTYPE_XBOX }, + { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX }, { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX }, { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX }, { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX }, + { 0x045e, 0x0288, "Microsoft Xbox Controller S v2", 0, XTYPE_XBOX }, { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX }, { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, + { 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 }, { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE }, @@ -145,28 +150,52 @@ static const struct xpad_device { { 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 }, { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX }, { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX }, + { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX }, + { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 }, { 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 }, { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX }, { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX }, + { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX }, + { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX }, + { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX }, + { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX }, + { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, + { 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX }, + { 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX }, + { 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX }, { 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX }, { 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX }, + { 0x0738, 0x4530, "Mad Catz Universal MC2 Racing Wheel and Pedals", 0, XTYPE_XBOX }, { 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX }, { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, + { 0x0738, 0x4586, "Mad Catz MicroCon Wireless Controller", 0, XTYPE_XBOX }, + { 0x0738, 0x4588, "Mad Catz Blaster", 0, XTYPE_XBOX }, + { 0x0738, 0x45ff, "Mad Catz Beat Pad (w/ Handle)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x0738, 0x4718, "Mad Catz Street Fighter IV FightStick SE", 0, XTYPE_XBOX360 }, { 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0738, 0x4736, "Mad Catz MicroCon Gamepad", 0, XTYPE_XBOX360 }, { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4743, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x0738, 0x4758, "Mad Catz Arcade Game Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", 
MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 }, { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, + { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 }, { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 }, + { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 }, + { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX }, + { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX }, { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX }, { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX }, { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX }, @@ -174,37 +203,63 @@ static const struct xpad_device { { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX }, { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX }, + { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX }, { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX }, + { 0x0e4c, 0x3510, "Radica Gamester", 0, XTYPE_XBOX }, { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX }, + { 0x0e6f, 0x0008, "After Glow Pro Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x011f, "Rock Candy Gamepad Wired Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0131, "PDP EA Sports Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0133, "Xbox 360 Wired Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x013a, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0164, "PDP Battlefield One", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0165, "PDP Titanfall 2", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0413, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0501, 
"PDP Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0xf900, "PDP Afterglow AX.1", 0, XTYPE_XBOX360 }, { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX }, { 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX }, { 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 }, + { 0x0f0d, 0x000c, "Hori PadEX Turbo", 0, XTYPE_XBOX360 }, { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0f0d, 0x001b, "Hori Real Arcade Pro VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE }, + { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, + { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, + { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 }, { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, + { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, @@ -215,22 +270,44 @@ static const struct xpad_device { { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf018, "Mad Catz Street Fighter IV SE Fighting Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf019, "Mad Catz Brawlstick for Xbox 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf021, "Mad Cats Ghost Recon FS GamePad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf025, "Mad Catz Call Of Duty", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf027, "Mad Catz FPS Pro", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf02e, "Mad Catz Fightpad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf030, "Mad Catz Xbox 360 MC2 MicroCon Racing Wheel", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf036, "Mad Catz MicroCon GamePad Pro", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf039, "Mad Catz MvC2 TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 
0xf03a, "Mad Catz SFxT Fightstick Pro", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf03d, "Street Fighter IV Arcade Stick TE - Chun Li", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf03e, "Mad Catz MLG FightStick TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf03f, "Mad Catz FightStick SoulCaliber", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf042, "Mad Catz FightStick TES+", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf080, "Mad Catz FightStick TE2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf501, "HoriPad EX2 Turbo", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf502, "Hori Real Arcade Pro.VX SA", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf503, "Hori Fighting Stick VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf504, "Hori Real Arcade Pro. EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf505, "Hori Fighting Stick EX2B", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf506, "Hori Real Arcade Pro.EX Premium VLX", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf904, "PDP Versus Fighting Pad", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf906, "MortalKombat FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x530a, "Xbox 360 Pro EX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x531a, "PowerA Pro Ex", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5397, "FUS1ON Tournament Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE }, @@ -238,12 +315,19 @@ static const struct xpad_device { { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, { 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5b02, "Thrustmaster, Inc. 
GPX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, + { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN } }; @@ -331,13 +415,16 @@ static struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */ + XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ + XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 97acd6524ad7..4c4ab1ced235 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -178,6 +178,17 @@ config KEYBOARD_CLPS711X To compile this driver as a module, choose M here: the module will be called clps711x-keypad. +config KEYBOARD_DLINK_DIR685 + tristate "D-Link DIR-685 touchkeys support" + depends on I2C + default ARCH_GEMINI + help + If you say yes here you get support for the D-Link DIR-685 + touchkeys. + + To compile this driver as a module, choose M here: the + module will be called dlink-dir685-touchkeys. + config KEYBOARD_LKKBD tristate "DECstation/VAXstation LK201/LK401 keyboard" select SERIO diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 7d9acff819a7..d2338bacdad1 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o +obj-$(CONFIG_KEYBOARD_DLINK_DIR685) += dlink-dir685-touchkeys.o obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c new file mode 100644 index 000000000000..88e321b76397 --- /dev/null +++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c @@ -0,0 +1,155 @@ +/* + * D-Link DIR-685 router I2C-based Touchkeys input driver + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * + * This is a one-off touchkey controller based on the Cypress Semiconductor + * CY8C214 MCU with some firmware in its internal 8KB flash. 
The circuit + * board inside the router is named E119921 + */ + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/input.h> +#include <linux/slab.h> +#include <linux/bitops.h> + +struct dir685_touchkeys { + struct device *dev; + struct i2c_client *client; + struct input_dev *input; + unsigned long cur_key; + u16 codes[7]; +}; + +static irqreturn_t dir685_tk_irq_thread(int irq, void *data) +{ + struct dir685_touchkeys *tk = data; + const int num_bits = min_t(int, ARRAY_SIZE(tk->codes), 16); + unsigned long changed; + u8 buf[6]; + unsigned long key; + int i; + int err; + + memset(buf, 0, sizeof(buf)); + err = i2c_master_recv(tk->client, buf, sizeof(buf)); + if (err != sizeof(buf)) { + dev_err(tk->dev, "short read %d\n", err); + return IRQ_HANDLED; + } + + dev_dbg(tk->dev, "IN: %*ph\n", (int)sizeof(buf), buf); + key = be16_to_cpup((__be16 *) &buf[4]); + + /* Figure out if any bits went high or low since last message */ + changed = tk->cur_key ^ key; + for_each_set_bit(i, &changed, num_bits) { + dev_dbg(tk->dev, "key %d is %s\n", i, + test_bit(i, &key) ? "down" : "up"); + input_report_key(tk->input, tk->codes[i], test_bit(i, &key)); + } + + /* Store currently down keys */ + tk->cur_key = key; + input_sync(tk->input); + + return IRQ_HANDLED; +} + +static int dir685_tk_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct dir685_touchkeys *tk; + struct device *dev = &client->dev; + u8 bl_data[] = { 0xa7, 0x40 }; + int err; + int i; + + tk = devm_kzalloc(&client->dev, sizeof(*tk), GFP_KERNEL); + if (!tk) + return -ENOMEM; + + tk->input = devm_input_allocate_device(dev); + if (!tk->input) + return -ENOMEM; + + tk->client = client; + tk->dev = dev; + + tk->input->keycodesize = sizeof(u16); + tk->input->keycodemax = ARRAY_SIZE(tk->codes); + tk->input->keycode = tk->codes; + tk->codes[0] = KEY_UP; + tk->codes[1] = KEY_DOWN; + tk->codes[2] = KEY_LEFT; + tk->codes[3] = KEY_RIGHT; + tk->codes[4] = KEY_ENTER; + tk->codes[5] = KEY_WPS_BUTTON; + /* + * This key appears in the vendor driver, but I have + * not been able to activate it. 
+ */ + tk->codes[6] = KEY_RESERVED; + + __set_bit(EV_KEY, tk->input->evbit); + for (i = 0; i < ARRAY_SIZE(tk->codes); i++) + __set_bit(tk->codes[i], tk->input->keybit); + __clear_bit(KEY_RESERVED, tk->input->keybit); + + tk->input->name = "D-Link DIR-685 touchkeys"; + tk->input->id.bustype = BUS_I2C; + + err = input_register_device(tk->input); + if (err) + return err; + + /* Set the brightness to max level */ + err = i2c_master_send(client, bl_data, sizeof(bl_data)); + if (err != sizeof(bl_data)) + dev_warn(tk->dev, "error setting brightness level\n"); + + if (!client->irq) { + dev_err(dev, "no IRQ on the I2C device\n"); + return -ENODEV; + } + err = devm_request_threaded_irq(dev, client->irq, + NULL, dir685_tk_irq_thread, + IRQF_ONESHOT, + "dir685-tk", tk); + if (err) { + dev_err(dev, "can't request IRQ\n"); + return err; + } + + return 0; +} + +static const struct i2c_device_id dir685_tk_id[] = { + { "dir685tk", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, dir685_tk_id); + +#ifdef CONFIG_OF +static const struct of_device_id dir685_tk_of_match[] = { + { .compatible = "dlink,dir685-touchkeys" }, + {}, +}; +MODULE_DEVICE_TABLE(of, dir685_tk_of_match); +#endif + +static struct i2c_driver dir685_tk_i2c_driver = { + .driver = { + .name = "dlink-dir685-touchkeys", + .of_match_table = of_match_ptr(dir685_tk_of_match), + }, + .probe = dir685_tk_probe, + .id_table = dir685_tk_id, +}; +module_i2c_driver(dir685_tk_i2c_driver); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("D-Link DIR-685 touchkeys driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index 21bea52d4365..04a5d7e134d7 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c @@ -30,8 +30,8 @@ #include <linux/delay.h> #include <linux/input.h> #include <linux/leds.h> +#include <linux/platform_data/lm8323.h> #include <linux/pm.h> -#include <linux/i2c/lm8323.h> #include <linux/slab.h> /* Commands to send to the chip. */ diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c index 31090d71a685..be56d4f262a7 100644 --- a/drivers/input/keyboard/mcs_touchkey.c +++ b/drivers/input/keyboard/mcs_touchkey.c @@ -13,11 +13,11 @@ #include <linux/module.h> #include <linux/i2c.h> -#include <linux/i2c/mcs.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/irq.h> #include <linux/slab.h> +#include <linux/platform_data/mcs.h> #include <linux/pm.h> /* MCS5000 Touchkey */ diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index 400869e61a06..38c79ebff033 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -253,6 +253,9 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, return error; } + if (axp20x_pek->axp20x->variant == AXP288_ID) + enable_irq_wake(axp20x_pek->irq_dbr); + return 0; } @@ -331,10 +334,35 @@ static int axp20x_pek_probe(struct platform_device *pdev) return 0; } +static int __maybe_unused axp20x_pek_resume_noirq(struct device *dev) +{ + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); + + if (axp20x_pek->axp20x->variant != AXP288_ID) + return 0; + + /* + * Clear interrupts from button presses during suspend, to avoid + * a wakeup power-button press getting reported to userspace. 
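+ *
+ * [Editor's note, not in the original patch: the write below derives the
+ * status register and bit from the IRQ index. Each IRQn_STATE register
+ * holds eight status bits, so the register is AXP20X_IRQ1_STATE plus
+ * AXP288_IRQ_POKN / 8 and the bit within it is BIT(AXP288_IRQ_POKN % 8);
+ * these status bits are write-one-to-clear, which is what acks the
+ * latched power-button interrupt.]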
+ */ + regmap_write(axp20x_pek->axp20x->regmap, + AXP20X_IRQ1_STATE + AXP288_IRQ_POKN / 8, + BIT(AXP288_IRQ_POKN % 8)); + + return 0; +} + +static const struct dev_pm_ops axp20x_pek_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .resume_noirq = axp20x_pek_resume_noirq, +#endif +}; + static struct platform_driver axp20x_pek_driver = { .probe = axp20x_pek_probe, .driver = { .name = "axp20x-pek", + .pm = &axp20x_pek_pm_ops, }, }; module_platform_driver(axp20x_pek_driver); diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 690148f9940e..eb770613a9bd 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -135,14 +135,17 @@ static int xenkbd_probe(struct xenbus_device *dev, goto error_nomem; /* Set input abs params to match backend screen res */ - abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0); - ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend, "width", + abs = xenbus_read_unsigned(dev->otherend, + XENKBD_FIELD_FEAT_ABS_POINTER, 0); + ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend, + XENKBD_FIELD_WIDTH, ptr_size[KPARAM_X]); - ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend, "height", + ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend, + XENKBD_FIELD_HEIGHT, ptr_size[KPARAM_Y]); if (abs) { ret = xenbus_write(XBT_NIL, dev->nodename, - "request-abs-pointer", "1"); + XENKBD_FIELD_REQ_ABS_POINTER, "1"); if (ret) { pr_warn("xenkbd: can't request abs-pointer\n"); abs = 0; @@ -271,14 +274,15 @@ static int xenkbd_connect_backend(struct xenbus_device *dev, xenbus_dev_fatal(dev, ret, "starting transaction"); goto error_irqh; } - ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", + ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_RING_REF, "%lu", virt_to_gfn(info->page)); if (ret) goto error_xenbus; - ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref); + ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_RING_GREF, + "%u", info->gref); if (ret) goto error_xenbus; - ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", + ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_EVT_CHANNEL, "%u", evtchn); if (ret) goto error_xenbus; @@ -353,7 +357,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, } static const struct xenbus_device_id xenkbd_ids[] = { - { "vkbd" }, + { XENKBD_DRIVER_NAME }, { "" } }; @@ -390,4 +394,4 @@ module_exit(xenkbd_cleanup); MODULE_DESCRIPTION("Xen virtual keyboard/pointer device frontend"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("xen:vkbd"); +MODULE_ALIAS("xen:" XENKBD_DRIVER_NAME); diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index c0ec26118732..61c202436250 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -58,7 +58,7 @@ struct elan_transport_ops { int (*get_version)(struct i2c_client *client, bool iap, u8 *version); int (*get_sm_version)(struct i2c_client *client, - u8* ic_type, u8 *version); + u16 *ic_type, u8 *version); int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); int (*get_product_id)(struct i2c_client *client, u16 *id); @@ -82,6 +82,7 @@ struct elan_transport_ops { int (*get_report)(struct i2c_client *client, u8 *report); int (*get_pressure_adjustment)(struct i2c_client *client, int *adjustment); + int (*get_pattern)(struct i2c_client *client, u8 *pattern); }; extern const struct elan_transport_ops elan_smbus_ops, elan_i2c_ops; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 
d5ab9ddef3e3..3b616cb7c67f 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -5,7 +5,7 @@ * * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw> * Author: KT Liao <kt.liao@emc.com.tw> - * Version: 1.6.2 + * Version: 1.6.3 * * Based on cyapa driver: * copyright (c) 2011-2012 Cypress Semiconductor, Inc. @@ -41,7 +41,7 @@ #include "elan_i2c.h" #define DRIVER_NAME "elan_i2c" -#define ELAN_DRIVER_VERSION "1.6.2" +#define ELAN_DRIVER_VERSION "1.6.3" #define ELAN_VENDOR_ID 0x04f3 #define ETP_MAX_PRESSURE 255 #define ETP_FWIDTH_REDUCE 90 @@ -78,6 +78,7 @@ struct elan_tp_data { unsigned int x_res; unsigned int y_res; + u8 pattern; u16 product_id; u8 fw_version; u8 sm_version; @@ -85,7 +86,7 @@ u16 fw_checksum; int pressure_adjustment; u8 mode; - u8 ic_type; + u16 ic_type; u16 fw_validpage_count; u16 fw_signature_address; @@ -96,10 +97,10 @@ bool baseline_ready; }; -static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, +static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count, u16 *signature_address) { - switch (iap_version) { + switch (ic_type) { case 0x00: case 0x06: case 0x08: @@ -119,6 +120,9 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, case 0x0E: *validpage_count = 640; break; + case 0x10: + *validpage_count = 1024; + break; default: /* unknown ic type clear value */ *validpage_count = 0; @@ -305,6 +309,7 @@ static int elan_initialize(struct elan_tp_data *data) static int elan_query_device_info(struct elan_tp_data *data) { int error; + u16 ic_type; error = data->ops->get_version(data->client, false, &data->fw_version); if (error) @@ -324,7 +329,16 @@ static int elan_query_device_info(struct elan_tp_data *data) if (error) return error; - error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, + error = data->ops->get_pattern(data->client, &data->pattern); + if (error) + return error; + + if (data->pattern == 0x01) + ic_type = data->ic_type; + else + ic_type = data->iap_version; + + error = elan_get_fwinfo(ic_type, &data->fw_validpage_count, &data->fw_signature_address); if (error) dev_warn(&data->client->dev, @@ -1077,6 +1091,13 @@ static int elan_probe(struct i2c_client *client, return error; } + /* Make sure there is something at this address */ + error = i2c_smbus_read_byte(client); + if (error < 0) { + dev_dbg(&client->dev, "nothing at this address: %d\n", error); + return -ENXIO; + } + /* Initialize the touchpad. */ error = elan_initialize(data); if (error) @@ -1101,10 +1122,13 @@ static int elan_probe(struct i2c_client *client, "Elan Touchpad Extra Information:\n" " Max ABS X,Y: %d,%d\n" " Width X,Y: %d,%d\n" - " Resolution X,Y: %d,%d (dots/mm)\n", + " Resolution X,Y: %d,%d (dots/mm)\n" + " ic type: 0x%x\n" + " info pattern: 0x%x\n", data->max_x, data->max_y, data->width_x, data->width_y, - data->x_res, data->y_res); + data->x_res, data->y_res, + data->ic_type, data->pattern); /* Set up input device properties based on queried parameters. 
*/ error = elan_setup_input_device(data); diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index f431da07f861..80172f25974d 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -34,9 +34,12 @@ #define ETP_I2C_DESC_CMD 0x0001 #define ETP_I2C_REPORT_DESC_CMD 0x0002 #define ETP_I2C_STAND_CMD 0x0005 +#define ETP_I2C_PATTERN_CMD 0x0100 #define ETP_I2C_UNIQUEID_CMD 0x0101 #define ETP_I2C_FW_VERSION_CMD 0x0102 -#define ETP_I2C_SM_VERSION_CMD 0x0103 +#define ETP_I2C_IC_TYPE_CMD 0x0103 +#define ETP_I2C_OSM_VERSION_CMD 0x0103 +#define ETP_I2C_NSM_VERSION_CMD 0x0104 #define ETP_I2C_XY_TRACENUM_CMD 0x0105 #define ETP_I2C_MAX_X_AXIS_CMD 0x0106 #define ETP_I2C_MAX_Y_AXIS_CMD 0x0107 @@ -239,12 +242,34 @@ static int elan_i2c_get_baseline_data(struct i2c_client *client, return 0; } +static int elan_i2c_get_pattern(struct i2c_client *client, u8 *pattern) +{ + int error; + u8 val[3]; + + error = elan_i2c_read_cmd(client, ETP_I2C_PATTERN_CMD, val); + if (error) { + dev_err(&client->dev, "failed to get pattern: %d\n", error); + return error; + } + *pattern = val[1]; + + return 0; +} + static int elan_i2c_get_version(struct i2c_client *client, bool iap, u8 *version) { int error; + u8 pattern_ver; u8 val[3]; + error = elan_i2c_get_pattern(client, &pattern_ver); + if (error) { + dev_err(&client->dev, "failed to get pattern version\n"); + return error; + } + error = elan_i2c_read_cmd(client, iap ? ETP_I2C_IAP_VERSION_CMD : ETP_I2C_FW_VERSION_CMD, @@ -255,24 +280,54 @@ static int elan_i2c_get_version(struct i2c_client *client, return error; } - *version = val[0]; + if (pattern_ver == 0x01) + *version = iap ? val[1] : val[0]; + else + *version = val[0]; return 0; } static int elan_i2c_get_sm_version(struct i2c_client *client, - u8 *ic_type, u8 *version) + u16 *ic_type, u8 *version) { int error; + u8 pattern_ver; u8 val[3]; - error = elan_i2c_read_cmd(client, ETP_I2C_SM_VERSION_CMD, val); + error = elan_i2c_get_pattern(client, &pattern_ver); if (error) { - dev_err(&client->dev, "failed to get SM version: %d\n", error); + dev_err(&client->dev, "failed to get pattern version\n"); return error; } - *version = val[0]; - *ic_type = val[1]; + if (pattern_ver == 0x01) { + error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_CMD, val); + if (error) { + dev_err(&client->dev, "failed to get ic type: %d\n", + error); + return error; + } + *ic_type = be16_to_cpup((__be16 *)val); + + error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD, + val); + if (error) { + dev_err(&client->dev, "failed to get SM version: %d\n", + error); + return error; + } + *version = val[1]; + } else { + error = elan_i2c_read_cmd(client, ETP_I2C_OSM_VERSION_CMD, val); + if (error) { + dev_err(&client->dev, "failed to get SM version: %d\n", + error); + return error; + } + *version = val[0]; + *ic_type = val[1]; + } + return 0; } @@ -641,5 +696,7 @@ const struct elan_transport_ops elan_i2c_ops = { .write_fw_block = elan_i2c_write_fw_block, .finish_fw_update = elan_i2c_finish_fw_update, + .get_pattern = elan_i2c_get_pattern, + .get_report = elan_i2c_get_report, }; diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index e23b2495d52e..df7a57ca7331 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -166,7 +166,7 @@ static int elan_smbus_get_version(struct i2c_client *client, } static int elan_smbus_get_sm_version(struct i2c_client *client, - u8 *ic_type, u8 *version) + u16 *ic_type, u8 *version) { int 
error; u8 val[3]; @@ -495,6 +495,12 @@ static int elan_smbus_finish_fw_update(struct i2c_client *client, return 0; } +static int elan_smbus_get_pattern(struct i2c_client *client, u8 *pattern) +{ + *pattern = 0; + return 0; +} + const struct elan_transport_ops elan_smbus_ops = { .initialize = elan_smbus_initialize, .sleep_control = elan_smbus_sleep_control, @@ -524,4 +530,5 @@ const struct elan_transport_ops elan_smbus_ops = { .finish_fw_update = elan_smbus_finish_fw_update, .get_report = elan_smbus_get_report, + .get_pattern = elan_smbus_get_pattern, }; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index f1fa1f172107..791993215ea3 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1711,6 +1711,17 @@ int elantech_init(struct psmouse *psmouse) etd->samples[0], etd->samples[1], etd->samples[2]); } + if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) { + /* + * This module has a bug which makes absolute mode + * unusable, so let's abort so we'll be using standard + * PS/2 protocol. + */ + psmouse_info(psmouse, + "absolute mode broken, forcing standard PS/2 protocol\n"); + goto init_fail; + } + if (elantech_set_absolute_mode(psmouse)) { psmouse_err(psmouse, "failed to put touchpad into absolute mode.\n"); diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c index 10c0d11b72c9..3991d2943660 100644 --- a/drivers/input/rmi4/rmi_f34v7.c +++ b/drivers/input/rmi4/rmi_f34v7.c @@ -9,13 +9,14 @@ * the Free Software Foundation. */ +#include <linux/bitops.h> #include <linux/kernel.h> #include <linux/rmi.h> #include <linux/firmware.h> -#include <asm/unaligned.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/jiffies.h> +#include <asm/unaligned.h> #include "rmi_driver.h" #include "rmi_f34.h" @@ -464,7 +465,7 @@ static int rmi_f34v7_read_queries_bl_version(struct f34_data *f34) static int rmi_f34v7_read_queries(struct f34_data *f34) { int ret; - int i, j; + int i; u8 base; int offset; u8 *ptable; @@ -518,10 +519,7 @@ static int rmi_f34v7_read_queries(struct f34_data *f34) query_1_7.partition_support[1] & HAS_GUEST_CODE; if (query_0 & HAS_CONFIG_ID) { - char f34_ctrl[CONFIG_ID_SIZE]; - int i = 0; - u8 *p = f34->configuration_id; - *p = '\0'; + u8 f34_ctrl[CONFIG_ID_SIZE]; ret = rmi_read_block(f34->fn->rmi_dev, f34->fn->fd.control_base_addr, @@ -531,13 +529,11 @@ static int rmi_f34v7_read_queries(struct f34_data *f34) return ret; /* Eat leading zeros */ - while (i < sizeof(f34_ctrl) && !f34_ctrl[i]) - i++; + for (i = 0; i < sizeof(f34_ctrl) - 1 && !f34_ctrl[i]; i++) + /* Empty */; - for (; i < sizeof(f34_ctrl); i++) - p += snprintf(p, f34->configuration_id - + sizeof(f34->configuration_id) - p, - "%02X", f34_ctrl[i]); + snprintf(f34->configuration_id, sizeof(f34->configuration_id), + "%*phN", (int)sizeof(f34_ctrl) - i, f34_ctrl + i); rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Configuration ID: %s\n", f34->configuration_id); @@ -545,9 +541,7 @@ static int rmi_f34v7_read_queries(struct f34_data *f34) f34->v7.partitions = 0; for (i = 0; i < sizeof(query_1_7.partition_support); i++) - for (j = 0; j < 8; j++) - if (query_1_7.partition_support[i] & (1 << j)) - f34->v7.partitions++; + f34->v7.partitions += hweight8(query_1_7.partition_support[i]); rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: Supported partitions: %*ph\n", __func__, sizeof(query_1_7.partition_support), diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c index 12a3ad83296d..bb0349fa64bc 100644 --- 
a/drivers/input/sparse-keymap.c +++ b/drivers/input/sparse-keymap.c @@ -224,20 +224,6 @@ int sparse_keymap_setup(struct input_dev *dev, EXPORT_SYMBOL(sparse_keymap_setup); /** - * sparse_keymap_free - free memory allocated for sparse keymap - * @dev: Input device using sparse keymap - * - * This function used to free memory allocated by sparse keymap - * in an input device that was set up by sparse_keymap_setup(). - * Since sparse_keymap_setup() now uses a managed allocation for the - * keymap copy, use of this function is deprecated. - */ -void sparse_keymap_free(struct input_dev *dev) -{ -} -EXPORT_SYMBOL(sparse_keymap_free); - -/** * sparse_keymap_report_entry - report event corresponding to given key entry * @dev: Input device for which event should be reported * @ke: key entry describing event diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index cf26ca49ae6d..64b30fe273fd 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -1114,6 +1114,17 @@ config TOUCHSCREEN_ST1232 To compile this driver as a module, choose M here: the module will be called st1232_ts. +config TOUCHSCREEN_STMFTS + tristate "STMicroelectronics STMFTS touchscreen" + depends on I2C + depends on LEDS_CLASS + help + Say Y here if you want support for STMicroelectronics + STMFTS touchscreen. + + To compile this driver as a module, choose M here: the + module will be called stmfts. + config TOUCHSCREEN_STMPE tristate "STMicroelectronics STMPE touchscreens" depends on MFD_STMPE diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 18e476948e44..6badce87037b 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o obj-$(CONFIG_TOUCHSCREEN_SILEAD) += silead.o obj-$(CONFIG_TOUCHSCREEN_SIS_I2C) += sis_i2c.o obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o +obj-$(CONFIG_TOUCHSCREEN_STMFTS) += stmfts.o obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o obj-$(CONFIG_TOUCHSCREEN_SUN4I) += sun4i-ts.o obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c index 90fc07dc98a6..8868573133ab 100644 --- a/drivers/input/touchscreen/mcs5000_ts.c +++ b/drivers/input/touchscreen/mcs5000_ts.c @@ -15,10 +15,10 @@ #include <linux/module.h> #include <linux/i2c.h> -#include <linux/i2c/mcs.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/irq.h> +#include <linux/platform_data/mcs.h> #include <linux/slab.h> /* Registers */ diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 1fafc9f57af6..e5eeb6311f7d 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -11,9 +11,9 @@ #include <linux/delay.h> #include <linux/of.h> #include <linux/i2c.h> -#include <linux/i2c/mms114.h> #include <linux/input/mt.h> #include <linux/interrupt.h> +#include <linux/platform_data/mms114.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c index 41d58e88cc8a..3b3db8c868e0 100644 --- a/drivers/input/touchscreen/s3c2410_ts.c +++ b/drivers/input/touchscreen/s3c2410_ts.c @@ -264,7 +264,11 @@ static int s3c2410ts_probe(struct platform_device *pdev) return -ENOENT; } - clk_prepare_enable(ts.clock); + ret = clk_prepare_enable(ts.clock); + if (ret) { + dev_err(dev, "failed 
to enable clocks\n"); + goto err_clk_get; + } dev_dbg(dev, "got and enabled clocks\n"); ts.irq_tc = ret = platform_get_irq(pdev, 0); @@ -353,7 +357,9 @@ static int s3c2410ts_probe(struct platform_device *pdev) err_iomap: iounmap(ts.io); err_clk: + clk_disable_unprepare(ts.clock); del_timer_sync(&touch_timer); + err_clk_get: clk_put(ts.clock); return ret; } diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c new file mode 100644 index 000000000000..157fdb4bb2e8 --- /dev/null +++ b/drivers/input/touchscreen/stmfts.c @@ -0,0 +1,822 @@ +/* + * Copyright (c) 2017 Samsung Electronics Co., Ltd. + * Author: Andi Shyti <andi.shyti@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * STMicroelectronics FTS Touchscreen device driver + */ + +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/input/mt.h> +#include <linux/input/touchscreen.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> + +/* I2C commands */ +#define STMFTS_READ_INFO 0x80 +#define STMFTS_READ_STATUS 0x84 +#define STMFTS_READ_ONE_EVENT 0x85 +#define STMFTS_READ_ALL_EVENT 0x86 +#define STMFTS_LATEST_EVENT 0x87 +#define STMFTS_SLEEP_IN 0x90 +#define STMFTS_SLEEP_OUT 0x91 +#define STMFTS_MS_MT_SENSE_OFF 0x92 +#define STMFTS_MS_MT_SENSE_ON 0x93 +#define STMFTS_SS_HOVER_SENSE_OFF 0x94 +#define STMFTS_SS_HOVER_SENSE_ON 0x95 +#define STMFTS_MS_KEY_SENSE_OFF 0x9a +#define STMFTS_MS_KEY_SENSE_ON 0x9b +#define STMFTS_SYSTEM_RESET 0xa0 +#define STMFTS_CLEAR_EVENT_STACK 0xa1 +#define STMFTS_FULL_FORCE_CALIBRATION 0xa2 +#define STMFTS_MS_CX_TUNING 0xa3 +#define STMFTS_SS_CX_TUNING 0xa4 + +/* events */ +#define STMFTS_EV_NO_EVENT 0x00 +#define STMFTS_EV_MULTI_TOUCH_DETECTED 0x02 +#define STMFTS_EV_MULTI_TOUCH_ENTER 0x03 +#define STMFTS_EV_MULTI_TOUCH_LEAVE 0x04 +#define STMFTS_EV_MULTI_TOUCH_MOTION 0x05 +#define STMFTS_EV_HOVER_ENTER 0x07 +#define STMFTS_EV_HOVER_LEAVE 0x08 +#define STMFTS_EV_HOVER_MOTION 0x09 +#define STMFTS_EV_KEY_STATUS 0x0e +#define STMFTS_EV_ERROR 0x0f +#define STMFTS_EV_CONTROLLER_READY 0x10 +#define STMFTS_EV_SLEEP_OUT_CONTROLLER_READY 0x11 +#define STMFTS_EV_STATUS 0x16 +#define STMFTS_EV_DEBUG 0xdb + +/* multi touch related event masks */ +#define STMFTS_MASK_EVENT_ID 0x0f +#define STMFTS_MASK_TOUCH_ID 0xf0 +#define STMFTS_MASK_LEFT_EVENT 0x0f +#define STMFTS_MASK_X_MSB 0x0f +#define STMFTS_MASK_Y_LSB 0xf0 + +/* key related event masks */ +#define STMFTS_MASK_KEY_NO_TOUCH 0x00 +#define STMFTS_MASK_KEY_MENU 0x01 +#define STMFTS_MASK_KEY_BACK 0x02 + +#define STMFTS_EVENT_SIZE 8 +#define STMFTS_STACK_DEPTH 32 +#define STMFTS_DATA_MAX_SIZE (STMFTS_EVENT_SIZE * STMFTS_STACK_DEPTH) +#define STMFTS_MAX_FINGERS 10 +#define STMFTS_DEV_NAME "stmfts" + +enum stmfts_regulators { + STMFTS_REGULATOR_VDD, + STMFTS_REGULATOR_AVDD, +}; + +struct stmfts_data { + struct i2c_client *client; + struct input_dev *input; + struct led_classdev led_cdev; + struct mutex mutex; + + struct touchscreen_properties prop; + + struct regulator_bulk_data regulators[2]; + + /* + * Presence of ledvdd will also be used to check + * whether the LED is supported. 
+ */ + struct regulator *ledvdd; + + u16 chip_id; + u8 chip_ver; + u16 fw_ver; + u8 config_id; + u8 config_ver; + + u8 data[STMFTS_DATA_MAX_SIZE]; + + struct completion cmd_done; + + bool use_key; + bool led_status; + bool hover_enabled; + bool running; +}; + +static void stmfts_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct stmfts_data *sdata = container_of(led_cdev, + struct stmfts_data, led_cdev); + int err; + + if (value == sdata->led_status || !sdata->ledvdd) + return; + + if (!value) { + regulator_disable(sdata->ledvdd); + } else { + err = regulator_enable(sdata->ledvdd); + if (err) + dev_warn(&sdata->client->dev, + "failed to enable ledvdd regulator: %d\n", + err); + } + + sdata->led_status = value; +} + +static enum led_brightness stmfts_brightness_get(struct led_classdev *led_cdev) +{ + struct stmfts_data *sdata = container_of(led_cdev, + struct stmfts_data, led_cdev); + + return !!regulator_is_enabled(sdata->ledvdd); +} + +/* + * We can't simply use i2c_smbus_read_i2c_block_data because we + * need to read more than 255 bytes (STMFTS_DATA_MAX_SIZE is 256) + */ +static int stmfts_read_events(struct stmfts_data *sdata) +{ + u8 cmd = STMFTS_READ_ALL_EVENT; + struct i2c_msg msgs[2] = { + { + .addr = sdata->client->addr, + .len = 1, + .buf = &cmd, + }, + { + .addr = sdata->client->addr, + .flags = I2C_M_RD, + .len = STMFTS_DATA_MAX_SIZE, + .buf = sdata->data, + }, + }; + int ret; + + ret = i2c_transfer(sdata->client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + + return ret == ARRAY_SIZE(msgs) ? 0 : -EIO; +} + +static void stmfts_report_contact_event(struct stmfts_data *sdata, + const u8 event[]) +{ + u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4; + u16 x = event[1] | ((event[2] & STMFTS_MASK_X_MSB) << 8); + u16 y = (event[2] >> 4) | (event[3] << 4); + u8 maj = event[4]; + u8 min = event[5]; + u8 orientation = event[6]; + u8 area = event[7]; + + input_mt_slot(sdata->input, slot_id); + + input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, true); + input_report_abs(sdata->input, ABS_MT_POSITION_X, x); + input_report_abs(sdata->input, ABS_MT_POSITION_Y, y); + input_report_abs(sdata->input, ABS_MT_TOUCH_MAJOR, maj); + input_report_abs(sdata->input, ABS_MT_TOUCH_MINOR, min); + input_report_abs(sdata->input, ABS_MT_PRESSURE, area); + input_report_abs(sdata->input, ABS_MT_ORIENTATION, orientation); + + input_sync(sdata->input); +} + +static void stmfts_report_contact_release(struct stmfts_data *sdata, + const u8 event[]) +{ + u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4; + + input_mt_slot(sdata->input, slot_id); + input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, false); + + input_sync(sdata->input); +} + +static void stmfts_report_hover_event(struct stmfts_data *sdata, + const u8 event[]) +{ + u16 x = (event[2] << 4) | (event[4] >> 4); + u16 y = (event[3] << 4) | (event[4] & STMFTS_MASK_Y_LSB); + u8 z = event[5]; + + input_report_abs(sdata->input, ABS_X, x); + input_report_abs(sdata->input, ABS_Y, y); + input_report_abs(sdata->input, ABS_DISTANCE, z); + + input_sync(sdata->input); +} + +static void stmfts_report_key_event(struct stmfts_data *sdata, const u8 event[]) +{ + switch (event[2]) { + case 0: + input_report_key(sdata->input, KEY_BACK, 0); + input_report_key(sdata->input, KEY_MENU, 0); + break; + + case STMFTS_MASK_KEY_BACK: + input_report_key(sdata->input, KEY_BACK, 1); + break; + + case STMFTS_MASK_KEY_MENU: + input_report_key(sdata->input, KEY_MENU, 1); + break; + + default: + dev_warn(&sdata->client->dev, + "unknown key 
event: %#02x\n", event[2]); + break; + } + + input_sync(sdata->input); +} + +static void stmfts_parse_events(struct stmfts_data *sdata) +{ + int i; + + for (i = 0; i < STMFTS_STACK_DEPTH; i++) { + u8 *event = &sdata->data[i * STMFTS_EVENT_SIZE]; + + switch (event[0]) { + + case STMFTS_EV_CONTROLLER_READY: + case STMFTS_EV_SLEEP_OUT_CONTROLLER_READY: + case STMFTS_EV_STATUS: + complete(&sdata->cmd_done); + /* fall through */ + + case STMFTS_EV_NO_EVENT: + case STMFTS_EV_DEBUG: + return; + } + + switch (event[0] & STMFTS_MASK_EVENT_ID) { + + case STMFTS_EV_MULTI_TOUCH_ENTER: + case STMFTS_EV_MULTI_TOUCH_MOTION: + stmfts_report_contact_event(sdata, event); + break; + + case STMFTS_EV_MULTI_TOUCH_LEAVE: + stmfts_report_contact_release(sdata, event); + break; + + case STMFTS_EV_HOVER_ENTER: + case STMFTS_EV_HOVER_LEAVE: + case STMFTS_EV_HOVER_MOTION: + stmfts_report_hover_event(sdata, event); + break; + + case STMFTS_EV_KEY_STATUS: + stmfts_report_key_event(sdata, event); + break; + + case STMFTS_EV_ERROR: + dev_warn(&sdata->client->dev, + "error code: 0x%x%x%x%x%x%x", + event[6], event[5], event[4], + event[3], event[2], event[1]); + break; + + default: + dev_err(&sdata->client->dev, + "unknown event %#02x\n", event[0]); + } + } +} + +static irqreturn_t stmfts_irq_handler(int irq, void *dev) +{ + struct stmfts_data *sdata = dev; + int err; + + mutex_lock(&sdata->mutex); + + err = stmfts_read_events(sdata); + if (unlikely(err)) + dev_err(&sdata->client->dev, + "failed to read events: %d\n", err); + else + stmfts_parse_events(sdata); + + mutex_unlock(&sdata->mutex); + return IRQ_HANDLED; +} + +static int stmfts_command(struct stmfts_data *sdata, const u8 cmd) +{ + int err; + + reinit_completion(&sdata->cmd_done); + + err = i2c_smbus_write_byte(sdata->client, cmd); + if (err) + return err; + + if (!wait_for_completion_timeout(&sdata->cmd_done, + msecs_to_jiffies(1000))) + return -ETIMEDOUT; + + return 0; +} + +static int stmfts_input_open(struct input_dev *dev) +{ + struct stmfts_data *sdata = input_get_drvdata(dev); + int err; + + err = pm_runtime_get_sync(&sdata->client->dev); + if (err < 0) + return err; + + err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON); + if (err) + return err; + + mutex_lock(&sdata->mutex); + sdata->running = true; + + if (sdata->hover_enabled) { + err = i2c_smbus_write_byte(sdata->client, + STMFTS_SS_HOVER_SENSE_ON); + if (err) + dev_warn(&sdata->client->dev, + "failed to enable hover\n"); + } + mutex_unlock(&sdata->mutex); + + if (sdata->use_key) { + err = i2c_smbus_write_byte(sdata->client, + STMFTS_MS_KEY_SENSE_ON); + if (err) + /* I can still use only the touch screen */ + dev_warn(&sdata->client->dev, + "failed to enable touchkey\n"); + } + + return 0; +} + +static void stmfts_input_close(struct input_dev *dev) +{ + struct stmfts_data *sdata = input_get_drvdata(dev); + int err; + + err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_OFF); + if (err) + dev_warn(&sdata->client->dev, + "failed to disable touchscreen: %d\n", err); + + mutex_lock(&sdata->mutex); + + sdata->running = false; + + if (sdata->hover_enabled) { + err = i2c_smbus_write_byte(sdata->client, + STMFTS_SS_HOVER_SENSE_OFF); + if (err) + dev_warn(&sdata->client->dev, + "failed to disable hover: %d\n", err); + } + mutex_unlock(&sdata->mutex); + + if (sdata->use_key) { + err = i2c_smbus_write_byte(sdata->client, + STMFTS_MS_KEY_SENSE_OFF); + if (err) + dev_warn(&sdata->client->dev, + "failed to disable touchkey: %d\n", err); + } + + pm_runtime_put_sync(&sdata->client->dev); +} + 
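+
+/*
+ * [Editor's note, not part of the kernel patch] The open/close pair above is
+ * what ties runtime PM to actual users of the device: the input core calls
+ * open() when the first listener opens the event node and close() when the
+ * last one goes away, so pm_runtime_get_sync()/pm_runtime_put_sync() bracket
+ * exactly the period the hardware must be powered. A minimal sketch of the
+ * usual wiring in probe (the probe function itself is further down in the
+ * driver; names here match this file):
+ *
+ *	sdata->input->open = stmfts_input_open;
+ *	sdata->input->close = stmfts_input_close;
+ *	input_set_drvdata(sdata->input, sdata);
+ *	err = input_register_device(sdata->input);
+ */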
+static ssize_t stmfts_sysfs_chip_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + + return sprintf(buf, "%#x\n", sdata->chip_id); +} + +static ssize_t stmfts_sysfs_chip_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", sdata->chip_ver); +} + +static ssize_t stmfts_sysfs_fw_ver(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", sdata->fw_ver); +} + +static ssize_t stmfts_sysfs_config_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + + return sprintf(buf, "%#x\n", sdata->config_id); +} + +static ssize_t stmfts_sysfs_config_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", sdata->config_ver); +} + +static ssize_t stmfts_sysfs_read_status(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + u8 status[4]; + int err; + + err = i2c_smbus_read_i2c_block_data(sdata->client, STMFTS_READ_STATUS, + sizeof(status), status); + if (err) + return err; + + return sprintf(buf, "%#02x\n", status[0]); +} + +static ssize_t stmfts_sysfs_hover_enable_read(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", sdata->hover_enabled); +} + +static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + unsigned long value; + int err = 0; + + if (kstrtoul(buf, 0, &value)) + return -EINVAL; + + mutex_lock(&sdata->mutex); + + if (value & sdata->hover_enabled) + goto out; + + if (sdata->running) + err = i2c_smbus_write_byte(sdata->client, + value ? 
STMFTS_SS_HOVER_SENSE_ON : + STMFTS_SS_HOVER_SENSE_OFF); + + if (!err) + sdata->hover_enabled = !!value; + +out: + mutex_unlock(&sdata->mutex); + + return len; +} + +static DEVICE_ATTR(chip_id, 0444, stmfts_sysfs_chip_id, NULL); +static DEVICE_ATTR(chip_version, 0444, stmfts_sysfs_chip_version, NULL); +static DEVICE_ATTR(fw_ver, 0444, stmfts_sysfs_fw_ver, NULL); +static DEVICE_ATTR(config_id, 0444, stmfts_sysfs_config_id, NULL); +static DEVICE_ATTR(config_version, 0444, stmfts_sysfs_config_version, NULL); +static DEVICE_ATTR(status, 0444, stmfts_sysfs_read_status, NULL); +static DEVICE_ATTR(hover_enable, 0644, stmfts_sysfs_hover_enable_read, + stmfts_sysfs_hover_enable_write); + +static struct attribute *stmfts_sysfs_attrs[] = { + &dev_attr_chip_id.attr, + &dev_attr_chip_version.attr, + &dev_attr_fw_ver.attr, + &dev_attr_config_id.attr, + &dev_attr_config_version.attr, + &dev_attr_status.attr, + &dev_attr_hover_enable.attr, + NULL +}; + +static struct attribute_group stmfts_attribute_group = { + .attrs = stmfts_sysfs_attrs +}; + +static int stmfts_power_on(struct stmfts_data *sdata) +{ + int err; + u8 reg[8]; + + err = regulator_bulk_enable(ARRAY_SIZE(sdata->regulators), + sdata->regulators); + if (err) + return err; + + /* + * The datasheet does not specify the power on time, but considering + * that the reset time is < 10ms, I sleep 20ms to be sure + */ + msleep(20); + + err = i2c_smbus_read_i2c_block_data(sdata->client, STMFTS_READ_INFO, + sizeof(reg), reg); + if (err < 0) + return err; + if (err != sizeof(reg)) + return -EIO; + + sdata->chip_id = be16_to_cpup((__be16 *)®[6]); + sdata->chip_ver = reg[0]; + sdata->fw_ver = be16_to_cpup((__be16 *)®[2]); + sdata->config_id = reg[4]; + sdata->config_ver = reg[5]; + + enable_irq(sdata->client->irq); + + msleep(50); + + err = stmfts_command(sdata, STMFTS_SYSTEM_RESET); + if (err) + return err; + + err = stmfts_command(sdata, STMFTS_SLEEP_OUT); + if (err) + return err; + + /* optional tuning */ + err = stmfts_command(sdata, STMFTS_MS_CX_TUNING); + if (err) + dev_warn(&sdata->client->dev, + "failed to perform mutual auto tune: %d\n", err); + + /* optional tuning */ + err = stmfts_command(sdata, STMFTS_SS_CX_TUNING); + if (err) + dev_warn(&sdata->client->dev, + "failed to perform self auto tune: %d\n", err); + + err = stmfts_command(sdata, STMFTS_FULL_FORCE_CALIBRATION); + if (err) + return err; + + /* + * At this point no one is using the touchscreen + * and I don't really care about the return value + */ + (void) i2c_smbus_write_byte(sdata->client, STMFTS_SLEEP_IN); + + return 0; +} + +static void stmfts_power_off(void *data) +{ + struct stmfts_data *sdata = data; + + disable_irq(sdata->client->irq); + regulator_bulk_disable(ARRAY_SIZE(sdata->regulators), + sdata->regulators); +} + +/* This function is void because I don't want to prevent using the touch key + * only because the LEDs don't get registered + */ +static int stmfts_enable_led(struct stmfts_data *sdata) +{ + int err; + + /* get the regulator for powering the leds on */ + sdata->ledvdd = devm_regulator_get(&sdata->client->dev, "ledvdd"); + if (IS_ERR(sdata->ledvdd)) + return PTR_ERR(sdata->ledvdd); + + sdata->led_cdev.name = STMFTS_DEV_NAME; + sdata->led_cdev.max_brightness = LED_ON; + sdata->led_cdev.brightness = LED_OFF; + sdata->led_cdev.brightness_set = stmfts_brightness_set; + sdata->led_cdev.brightness_get = stmfts_brightness_get; + + err = devm_led_classdev_register(&sdata->client->dev, &sdata->led_cdev); + if (err) { + devm_regulator_put(sdata->ledvdd); + return err; + } + + 
return 0; +} + +static int stmfts_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int err; + struct stmfts_data *sdata; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK)) + return -ENODEV; + + sdata = devm_kzalloc(&client->dev, sizeof(*sdata), GFP_KERNEL); + if (!sdata) + return -ENOMEM; + + i2c_set_clientdata(client, sdata); + + sdata->client = client; + mutex_init(&sdata->mutex); + init_completion(&sdata->cmd_done); + + sdata->regulators[STMFTS_REGULATOR_VDD].supply = "vdd"; + sdata->regulators[STMFTS_REGULATOR_AVDD].supply = "avdd"; + err = devm_regulator_bulk_get(&client->dev, + ARRAY_SIZE(sdata->regulators), + sdata->regulators); + if (err) + return err; + + sdata->input = devm_input_allocate_device(&client->dev); + if (!sdata->input) + return -ENOMEM; + + sdata->input->name = STMFTS_DEV_NAME; + sdata->input->id.bustype = BUS_I2C; + sdata->input->open = stmfts_input_open; + sdata->input->close = stmfts_input_close; + + touchscreen_parse_properties(sdata->input, true, &sdata->prop); + + input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0, + sdata->prop.max_x, 0, 0); + input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0, + sdata->prop.max_y, 0, 0); + input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); + input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); + input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); + input_set_abs_params(sdata->input, ABS_MT_PRESSURE, 0, 255, 0, 0); + input_set_abs_params(sdata->input, ABS_DISTANCE, 0, 255, 0, 0); + + sdata->use_key = device_property_read_bool(&client->dev, + "touch-key-connected"); + if (sdata->use_key) { + input_set_capability(sdata->input, EV_KEY, KEY_MENU); + input_set_capability(sdata->input, EV_KEY, KEY_BACK); + } + + err = input_mt_init_slots(sdata->input, + STMFTS_MAX_FINGERS, INPUT_MT_DIRECT); + if (err) + return err; + + input_set_drvdata(sdata->input, sdata); + + err = devm_request_threaded_irq(&client->dev, client->irq, + NULL, stmfts_irq_handler, + IRQF_ONESHOT, + "stmfts_irq", sdata); + if (err) + return err; + + /* stmfts_power_on expects interrupt to be disabled */ + disable_irq(client->irq); + + dev_dbg(&client->dev, "initializing ST-Microelectronics FTS...\n"); + + err = stmfts_power_on(sdata); + if (err) + return err; + + err = devm_add_action_or_reset(&client->dev, stmfts_power_off, sdata); + if (err) + return err; + + err = input_register_device(sdata->input); + if (err) + return err; + + if (sdata->use_key) { + err = stmfts_enable_led(sdata); + if (err) { + /* + * Even if the LEDs have failed to be initialized and + * used in the driver, I can still use the device even + * without LEDs. The ledvdd regulator pointer will be + * used as a flag. 
+ */ + dev_warn(&client->dev, "unable to use touchkey leds\n"); + sdata->ledvdd = NULL; + } + } + + err = sysfs_create_group(&sdata->client->dev.kobj, + &stmfts_attribute_group); + if (err) + return err; + + pm_runtime_enable(&client->dev); + + return 0; +} + +static int stmfts_remove(struct i2c_client *client) +{ + pm_runtime_disable(&client->dev); + sysfs_remove_group(&client->dev.kobj, &stmfts_attribute_group); + + return 0; +} + +static int __maybe_unused stmfts_runtime_suspend(struct device *dev) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + int ret; + + ret = i2c_smbus_write_byte(sdata->client, STMFTS_SLEEP_IN); + if (ret) + dev_warn(dev, "failed to suspend device: %d\n", ret); + + return ret; +} + +static int __maybe_unused stmfts_runtime_resume(struct device *dev) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + int ret; + + ret = i2c_smbus_write_byte(sdata->client, STMFTS_SLEEP_OUT); + if (ret) + dev_err(dev, "failed to resume device: %d\n", ret); + + return ret; +} + +static int __maybe_unused stmfts_suspend(struct device *dev) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + + stmfts_power_off(sdata); + + return 0; +} + +static int __maybe_unused stmfts_resume(struct device *dev) +{ + struct stmfts_data *sdata = dev_get_drvdata(dev); + + return stmfts_power_on(sdata); +} + +static const struct dev_pm_ops stmfts_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stmfts_suspend, stmfts_resume) + SET_RUNTIME_PM_OPS(stmfts_runtime_suspend, stmfts_runtime_resume, NULL) +}; + +#ifdef CONFIG_OF +static const struct of_device_id stmfts_of_match[] = { + { .compatible = "st,stmfts", }, + { }, +}; +MODULE_DEVICE_TABLE(of, stmfts_of_match); +#endif + +static const struct i2c_device_id stmfts_id[] = { + { "stmfts", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, stmfts_id); + +static struct i2c_driver stmfts_driver = { + .driver = { + .name = STMFTS_DEV_NAME, + .of_match_table = of_match_ptr(stmfts_of_match), + .pm = &stmfts_pm_ops, + }, + .probe = stmfts_probe, + .remove = stmfts_remove, + .id_table = stmfts_id, +}; + +module_i2c_driver(stmfts_driver); + +MODULE_AUTHOR("Andi Shyti <andi.shyti@samsung.com>"); +MODULE_DESCRIPTION("STMicroelectronics FTS Touch Screen"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/touchscreen/tsc2007_core.c b/drivers/input/touchscreen/tsc2007_core.c index fc7384936011..8342e0c48a53 100644 --- a/drivers/input/touchscreen/tsc2007_core.c +++ b/drivers/input/touchscreen/tsc2007_core.c @@ -25,9 +25,9 @@ #include <linux/input.h> #include <linux/interrupt.h> #include <linux/i2c.h> -#include <linux/i2c/tsc2007.h> #include <linux/of_device.h> #include <linux/of_gpio.h> +#include <linux/platform_data/tsc2007.h> #include "tsc2007.h" int tsc2007_xfer(struct tsc2007 *tsc, u8 cmd) diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index b0536cfd8e17..06a64d5d8c6c 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode) conf->nfaults = n+1; } -static void faulty_make_request(struct mddev *mddev, struct bio *bio) +static bool faulty_make_request(struct mddev *mddev, struct bio *bio) { struct faulty_conf *conf = mddev->private; int failit = 0; @@ -182,7 +182,7 @@ static void faulty_make_request(struct mddev *mddev, struct bio *bio) * just fail immediately */ bio_io_error(bio); - return; + return true; } if (check_sector(conf, bio->bi_iter.bi_sector, @@ -224,6 +224,7 @@ static void faulty_make_request(struct mddev *mddev, struct bio *bio) bio->bi_bdev = 
conf->rdev->bdev; generic_make_request(bio); + return true; } static void faulty_status(struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/linear.c b/drivers/md/linear.c index df6f2c98eca7..5f1eb9189542 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -245,7 +245,7 @@ static void linear_free(struct mddev *mddev, void *priv) kfree(conf); } -static void linear_make_request(struct mddev *mddev, struct bio *bio) +static bool linear_make_request(struct mddev *mddev, struct bio *bio) { char b[BDEVNAME_SIZE]; struct dev_info *tmp_dev; @@ -254,7 +254,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } tmp_dev = which_dev(mddev, bio_sector); @@ -292,7 +292,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) mddev_check_write_zeroes(mddev, bio); generic_make_request(bio); } - return; + return true; out_of_bounds: pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n", @@ -302,6 +302,7 @@ out_of_bounds: (unsigned long long)tmp_dev->rdev->sectors, (unsigned long long)start_sector); bio_io_error(bio); + return true; } static void linear_status (struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/md.c b/drivers/md/md.c index 31bcbfb09fef..8cdca0296749 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -203,6 +203,14 @@ struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, } EXPORT_SYMBOL_GPL(bio_alloc_mddev); +static struct bio *md_bio_alloc_sync(struct mddev *mddev) +{ + if (!mddev || !mddev->sync_set) + return bio_alloc(GFP_NOIO, 1); + + return bio_alloc_bioset(GFP_NOIO, 1, mddev->sync_set); +} + /* * We have a system wide 'event count' that is incremented * on any 'interesting' event, and readers of /proc/mdstat @@ -277,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) bio_endio(bio); return BLK_QC_T_NONE; } - smp_rmb(); /* Ensure implications of 'active' are visible */ +check_suspended: rcu_read_lock(); if (mddev->suspended) { DEFINE_WAIT(__wait); @@ -302,7 +310,11 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) sectors = bio_sectors(bio); /* bio could be mergeable after passing to underlayer */ bio->bi_opf &= ~REQ_NOMERGE; - mddev->pers->make_request(mddev, bio); + if (!mddev->pers->make_request(mddev, bio)) { + atomic_dec(&mddev->active_io); + wake_up(&mddev->sb_wait); + goto check_suspended; + } cpu = part_stat_lock(); part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); @@ -327,6 +339,7 @@ void mddev_suspend(struct mddev *mddev) if (mddev->suspended++) return; synchronize_rcu(); + wake_up(&mddev->sb_wait); wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); mddev->pers->quiesce(mddev, 1); @@ -462,7 +475,7 @@ static void mddev_delayed_delete(struct work_struct *ws); static void mddev_put(struct mddev *mddev) { - struct bio_set *bs = NULL; + struct bio_set *bs = NULL, *sync_bs = NULL; if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) return; @@ -472,7 +485,9 @@ static void mddev_put(struct mddev *mddev) * so destroy it */ list_del_init(&mddev->all_mddevs); bs = mddev->bio_set; + sync_bs = mddev->sync_set; mddev->bio_set = NULL; + mddev->sync_set = NULL; if (mddev->gendisk) { /* We did a probe so need to clean up. 
Call * queue_work inside the spinlock so that @@ -487,6 +502,8 @@ static void mddev_put(struct mddev *mddev) spin_unlock(&all_mddevs_lock); if (bs) bioset_free(bs); + if (sync_bs) + bioset_free(sync_bs); } static void md_safemode_timeout(unsigned long data); @@ -751,7 +768,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, if (test_bit(Faulty, &rdev->flags)) return; - bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); + bio = md_bio_alloc_sync(mddev); atomic_inc(&rdev->nr_pending); @@ -783,7 +800,7 @@ int md_super_wait(struct mddev *mddev) int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct page *page, int op, int op_flags, bool metadata_op) { - struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); + struct bio *bio = md_bio_alloc_sync(rdev->mddev); int ret; bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? @@ -1852,7 +1869,7 @@ retry: max_dev = le32_to_cpu(sb->max_dev); for (i=0; i<max_dev;i++) - sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); + sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); @@ -5432,6 +5449,11 @@ int md_run(struct mddev *mddev) if (!mddev->bio_set) return -ENOMEM; } + if (mddev->sync_set == NULL) { + mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); + if (!mddev->sync_set) + return -ENOMEM; + } spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); @@ -7950,12 +7972,14 @@ EXPORT_SYMBOL(md_done_sync); * If we need to update some array metadata (e.g. 'active' flag * in superblock) before writing, schedule a superblock update * and wait for it to complete. + * A return value of 'false' means that the write wasn't recorded + * and cannot proceed because the array is being suspended. */ -void md_write_start(struct mddev *mddev, struct bio *bi) +bool md_write_start(struct mddev *mddev, struct bio *bi) { int did_change = 0; if (bio_data_dir(bi) != WRITE) - return; + return true; BUG_ON(mddev->ro == 1); if (mddev->ro == 2) { @@ -7987,7 +8011,12 @@ void md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended); + if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { + percpu_ref_put(&mddev->writes_pending); + return false; + } + return true; } EXPORT_SYMBOL(md_write_start); diff --git a/drivers/md/md.h b/drivers/md/md.h index 0fa1de42c42b..991f0fe2dcc6 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -444,6 +444,9 @@ struct mddev { struct attribute_group *to_remove; struct bio_set *bio_set; + struct bio_set *sync_set; /* for sync operations like + * metadata and bitmap writes + */ /* Generic flush handling. 
* The last to finish preflush schedules a worker to submit @@ -510,7 +513,7 @@ struct md_personality int level; struct list_head list; struct module *owner; - void (*make_request)(struct mddev *mddev, struct bio *bio); + bool (*make_request)(struct mddev *mddev, struct bio *bio); int (*run)(struct mddev *mddev); void (*free)(struct mddev *mddev, void *priv); void (*status)(struct seq_file *seq, struct mddev *mddev); @@ -649,7 +652,7 @@ extern void md_wakeup_thread(struct md_thread *thread); extern void md_check_recovery(struct mddev *mddev); extern void md_reap_sync_thread(struct mddev *mddev); extern int mddev_init_writes_pending(struct mddev *mddev); -extern void md_write_start(struct mddev *mddev, struct bio *bi); +extern bool md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); extern void md_write_end(struct mddev *mddev); extern void md_done_sync(struct mddev *mddev, int blocks, int ok); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 68d036e64041..23a162ba6c56 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio) rdev_dec_pending(rdev, conf->mddev); } -static void multipath_make_request(struct mddev *mddev, struct bio * bio) +static bool multipath_make_request(struct mddev *mddev, struct bio * bio) { struct mpconf *conf = mddev->private; struct multipath_bh * mp_bh; @@ -114,7 +114,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } mp_bh = mempool_alloc(conf->pool, GFP_NOIO); @@ -126,7 +126,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) if (mp_bh->path < 0) { bio_io_error(bio); mempool_free(mp_bh, conf->pool); - return; + return true; } multipath = conf->multipaths + mp_bh->path; @@ -141,7 +141,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) mddev_check_writesame(mddev, &mp_bh->bio); mddev_check_write_zeroes(mddev, &mp_bh->bio); generic_make_request(&mp_bh->bio); - return; + return true; } static void multipath_status(struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index d6c0bc76e837..94d9ae9b0fd0 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -548,7 +548,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) bio_endio(bio); } -static void raid0_make_request(struct mddev *mddev, struct bio *bio) +static bool raid0_make_request(struct mddev *mddev, struct bio *bio) { struct strip_zone *zone; struct md_rdev *tmp_dev; @@ -559,12 +559,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { raid0_handle_discard(mddev, bio); - return; + return true; } bio_sector = bio->bi_iter.bi_sector; @@ -599,6 +599,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) mddev_check_writesame(mddev, bio); mddev_check_write_zeroes(mddev, bio); generic_make_request(bio); + return true; } static void raid0_status(struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 98ca2c1d3226..3febfc8391fb 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1321,7 +1321,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, * 
Continue immediately if no resync is active currently. */ - md_write_start(mddev, bio); /* wait on superblock update early */ if ((bio_end_sector(bio) > mddev->suspend_lo && bio->bi_iter.bi_sector < mddev->suspend_hi) || @@ -1335,7 +1334,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, */ DEFINE_WAIT(w); for (;;) { - flush_signals(current); + sigset_t full, old; prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); if (bio_end_sector(bio) <= mddev->suspend_lo || @@ -1345,7 +1344,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, bio->bi_iter.bi_sector, bio_end_sector(bio)))) break; + sigfillset(&full); + sigprocmask(SIG_BLOCK, &full, &old); schedule(); + sigprocmask(SIG_SETMASK, &old, NULL); } finish_wait(&conf->wait_barrier, &w); } @@ -1550,13 +1552,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, wake_up(&conf->wait_barrier); } -static void raid1_make_request(struct mddev *mddev, struct bio *bio) +static bool raid1_make_request(struct mddev *mddev, struct bio *bio) { sector_t sectors; if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } /* @@ -1571,8 +1573,12 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio) if (bio_data_dir(bio) == READ) raid1_read_request(mddev, bio, sectors, NULL); - else + else { + if (!md_write_start(mddev,bio)) + return false; raid1_write_request(mddev, bio, sectors); + } + return true; } static void raid1_status(struct seq_file *seq, struct mddev *mddev) @@ -2165,9 +2171,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) struct r1conf *conf = mddev->private; int i; int disks = conf->raid_disks * 2; - struct bio *bio, *wbio; - - bio = r1_bio->bios[r1_bio->read_disk]; + struct bio *wbio; if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) /* ouch - failed to read all of that. */ diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 57a250fdbbcc..5026e7ad51d3 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1303,8 +1303,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, sector_t sectors; int max_sectors; - md_write_start(mddev, bio); - /* * Register the new request and wait if the reconstruction * thread has put up a bar for new requests. @@ -1525,7 +1523,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) raid10_write_request(mddev, bio, r10_bio); } -static void raid10_make_request(struct mddev *mddev, struct bio *bio) +static bool raid10_make_request(struct mddev *mddev, struct bio *bio) { struct r10conf *conf = mddev->private; sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); @@ -1534,9 +1532,12 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } + if (!md_write_start(mddev, bio)) + return false; + /* * If this request crosses a chunk boundary, we need to split * it. 
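The raid1 hunk above, and the raid5 hunk further down, replace flush_signals() with blocking every signal across schedule(). Flushing discards a pending signal outright, so a SIGKILL aimed at a writer parked on a suspended range could simply vanish; blocking leaves it pending until the wait loop exits. The pattern in isolation, with demo_region_is_safe() as a made-up stand-in for the suspend_lo/suspend_hi test:

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/wait.h>

/* Placeholder for the "bio no longer overlaps a suspended range" check. */
static bool demo_region_is_safe(void)
{
	return true;
}

static void demo_wait_for_safe_region(wait_queue_head_t *wq)
{
	DEFINE_WAIT(w);

	for (;;) {
		sigset_t full, old;

		prepare_to_wait(wq, &w, TASK_INTERRUPTIBLE);
		if (demo_region_is_safe())
			break;
		/* Keep signals pending across the sleep instead of flushing them. */
		sigfillset(&full);
		sigprocmask(SIG_BLOCK, &full, &old);
		schedule();
		sigprocmask(SIG_SETMASK, &old, NULL);
	}
	finish_wait(wq, &w);
}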
@@ -1553,6 +1554,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) /* In case raid10d snuck in to freeze_array */ wake_up(&conf->wait_barrier); + return true; } static void raid10_status(struct seq_file *seq, struct mddev *mddev) @@ -3293,7 +3295,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, biolist = bio; bio->bi_end_io = end_sync_read; bio_set_op_attrs(bio, REQ_OP_READ, 0); - if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) + if (test_bit(FailFast, &rdev->flags)) bio->bi_opf |= MD_FAILFAST; bio->bi_iter.bi_sector = sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; @@ -3305,7 +3307,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, continue; } atomic_inc(&rdev->nr_pending); - rcu_read_unlock(); /* Need to set up for writing to the replacement */ bio = r10_bio->devs[i].repl_bio; @@ -3316,11 +3317,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, biolist = bio; bio->bi_end_io = end_sync_write; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) + if (test_bit(FailFast, &rdev->flags)) bio->bi_opf |= MD_FAILFAST; bio->bi_iter.bi_sector = sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; count++; + rcu_read_unlock(); } if (count < 2) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 62c965be97e1..2ceb338b094b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5479,7 +5479,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); bi->bi_next = NULL; - md_write_start(mddev, bi); stripe_sectors = conf->chunk_sectors * (conf->raid_disks - conf->max_degraded); @@ -5549,11 +5548,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) release_stripe_plug(mddev, sh); } - md_write_end(mddev); bio_endio(bi); } -static void raid5_make_request(struct mddev *mddev, struct bio * bi) +static bool raid5_make_request(struct mddev *mddev, struct bio * bi) { struct r5conf *conf = mddev->private; int dd_idx; @@ -5569,10 +5567,10 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) int ret = r5l_handle_flush_request(conf->log, bi); if (ret == 0) - return; + return true; if (ret == -ENODEV) { md_flush_request(mddev, bi); - return; + return true; } /* ret == -EAGAIN, fallback */ /* @@ -5582,6 +5580,8 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) do_flush = bi->bi_opf & REQ_PREFLUSH; } + if (!md_write_start(mddev, bi)) + return false; /* * If array is degraded, better not do chunk aligned read because * later we might have to read it again in order to reconstruct @@ -5591,18 +5591,18 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) mddev->reshape_position == MaxSector) { bi = chunk_aligned_read(mddev, bi); if (!bi) - return; + return true; } if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { make_discard_request(mddev, bi); - return; + md_write_end(mddev); + return true; } logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); last_sector = bio_end_sector(bi); bi->bi_next = NULL; - md_write_start(mddev, bi); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { @@ -5693,12 +5693,15 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) * userspace, we want an interruptible * wait. 
*/ - flush_signals(current); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_INTERRUPTIBLE); if (logical_sector >= mddev->suspend_lo && logical_sector < mddev->suspend_hi) { + sigset_t full, old; + sigfillset(&full); + sigprocmask(SIG_BLOCK, &full, &old); schedule(); + sigprocmask(SIG_SETMASK, &old, NULL); do_prepare = true; } goto retry; @@ -5740,6 +5743,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) if (rw == WRITE) md_write_end(mddev); bio_endio(bi); + return true; } static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c index c9339f85359b..cd4a6d7d6750 100644 --- a/drivers/mfd/timberdale.c +++ b/drivers/mfd/timberdale.c @@ -32,13 +32,13 @@ #include <linux/i2c.h> #include <linux/i2c-ocores.h> #include <linux/i2c-xiic.h> -#include <linux/i2c/tsc2007.h> #include <linux/spi/spi.h> #include <linux/spi/xilinx_spi.h> #include <linux/spi/max7301.h> #include <linux/spi/mc33880.h> +#include <linux/platform_data/tsc2007.h> #include <linux/platform_data/media/timb_radio.h> #include <linux/platform_data/media/timb_video.h> diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 2d956cb59d06..01cab9548785 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -225,8 +225,10 @@ static int com20020pci_probe(struct pci_dev *pdev, card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev), GFP_KERNEL); - if (!card) - return -ENOMEM; + if (!card) { + ret = -ENOMEM; + goto out_port; + } card->index = i; card->pci_priv = priv; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2865f31c6076..14ff622190a5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1820,7 +1820,7 @@ err_undo_flags: */ static int __bond_release_one(struct net_device *bond_dev, struct net_device *slave_dev, - bool all) + bool all, bool unregister) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *oldcurrent; @@ -1965,7 +1965,10 @@ static int __bond_release_one(struct net_device *bond_dev, dev_set_mac_address(slave_dev, (struct sockaddr *)&ss); } - dev_set_mtu(slave_dev, slave->original_mtu); + if (unregister) + __dev_set_mtu(slave_dev, slave->original_mtu); + else + dev_set_mtu(slave_dev, slave->original_mtu); slave_dev->priv_flags &= ~IFF_BONDING; @@ -1977,7 +1980,7 @@ static int __bond_release_one(struct net_device *bond_dev, /* A wrapper used because of ndo_del_link */ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) { - return __bond_release_one(bond_dev, slave_dev, false); + return __bond_release_one(bond_dev, slave_dev, false, false); } /* First release a slave and then destroy the bond if no more slaves are left. 
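The __bond_release_one() change above threads an unregister flag down to the MTU restore: when the slave is about to be unregistered, __dev_set_mtu() writes the field directly instead of going through dev_set_mtu(), which would fire the NETDEV_PRECHANGEMTU/NETDEV_CHANGEMTU notifier chain at a device that is on its way out. The shape of that choice, reduced to a hypothetical helper (demo_restore_mtu() is illustrative, not bonding's own):

#include <linux/netdevice.h>

static void demo_restore_mtu(struct net_device *slave_dev, int original_mtu,
			     bool unregister)
{
	if (unregister)
		/* device is being torn down: skip the MTU notifier chain */
		__dev_set_mtu(slave_dev, original_mtu);
	else
		dev_set_mtu(slave_dev, original_mtu);
}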
@@ -1989,7 +1992,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, struct bonding *bond = netdev_priv(bond_dev); int ret; - ret = bond_release(bond_dev, slave_dev); + ret = __bond_release_one(bond_dev, slave_dev, false, true); if (ret == 0 && !bond_has_slaves(bond)) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond %s\n", @@ -3060,7 +3063,7 @@ static int bond_slave_netdev_event(unsigned long event, if (bond_dev->type != ARPHRD_ETHER) bond_release_and_destroy(bond_dev, slave_dev); else - bond_release(bond_dev, slave_dev); + __bond_release_one(bond_dev, slave_dev, false, true); break; case NETDEV_UP: case NETDEV_CHANGE: @@ -4252,7 +4255,7 @@ static void bond_uninit(struct net_device *bond_dev) /* Release the bonded slaves */ bond_for_each_slave(bond, slave, iter) - __bond_release_one(bond_dev, slave->dev, true); + __bond_release_one(bond_dev, slave->dev, true, true); netdev_info(bond_dev, "Released all slaves\n"); arr = rtnl_dereference(bond->slave_arr); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index e69ebdd65658..26d25749c3e4 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3314,10 +3314,11 @@ static const struct macb_config sama5d2_config = { static const struct macb_config sama5d3_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE - | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, + | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .jumbo_max_len = 10240, }; static const struct macb_config sama5d4_config = { diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 6081c3132135..4b0ca9fb2cb4 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -221,7 +221,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct) /* Wait for 100ms as Octeon resets. */ mdelay(100); - if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) { + if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) { dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n", oct->octeon_id); return 1; diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c index b28253c96d97..2df7440f58df 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -44,7 +44,7 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct) /* Wait for 100ms as Octeon resets. 
*/ mdelay(100); - if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) { + if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) { dev_err(&oct->pci_dev->dev, "Soft reset failed\n"); return 1; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index c6700b91a2df..fe166e0f6781 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -300,9 +300,9 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv, mtu); } -int hns_nic_net_xmit_hw(struct net_device *ndev, - struct sk_buff *skb, - struct hns_nic_ring_data *ring_data) +netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, + struct sk_buff *skb, + struct hns_nic_ring_data *ring_data) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_ring *ring = ring_data->ring; @@ -361,6 +361,10 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping); netdev_tx_sent_queue(dev_queue, skb->len); + netif_trans_update(ndev); + ndev->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; + wmb(); /* commit all data before submit */ assert(skb->queue_mapping < priv->ae_handle->q_num); hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); @@ -1469,17 +1473,11 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); - int ret; assert(skb->queue_mapping < ndev->ae_handle->q_num); - ret = hns_nic_net_xmit_hw(ndev, skb, - &tx_ring_data(priv, skb->queue_mapping)); - if (ret == NETDEV_TX_OK) { - netif_trans_update(ndev); - ndev->stats.tx_bytes += skb->len; - ndev->stats.tx_packets++; - } - return (netdev_tx_t)ret; + + return hns_nic_net_xmit_hw(ndev, skb, + &tx_ring_data(priv, skb->queue_mapping)); } static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 1b83232082b2..9cb4c7884201 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -92,8 +92,8 @@ void hns_ethtool_set_ops(struct net_device *ndev); void hns_nic_net_reset(struct net_device *ndev); void hns_nic_net_reinit(struct net_device *netdev); int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h); -int hns_nic_net_xmit_hw(struct net_device *ndev, - struct sk_buff *skb, - struct hns_nic_ring_data *ring_data); +netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, + struct sk_buff *skb, + struct hns_nic_ring_data *ring_data); #endif /**__HNS_ENET_H */ diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index e5221d95afe1..017e08452d8c 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -261,7 +261,7 @@ static int hns_mdio_write(struct mii_bus *bus, /* config the data needed writing */ cmd_reg_cfg = devad; - op = MDIO_C45_WRITE_ADDR; + op = MDIO_C45_WRITE_DATA; } MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 3e26d27ad213..63784576ae8b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2348,30 +2348,19 @@ static void fm10k_io_resume(struct pci_dev *pdev) netif_device_attach(netdev); } -/** - * fm10k_io_reset_notify - called when PCI function is 
reset - * @pdev: Pointer to PCI device - * - * This callback is called when the PCI function is reset such as from - * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it - * means we should prepare for a function reset. If prepare is false, it means - * the function reset just occurred. - */ -static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare) +static void fm10k_io_reset_prepare(struct pci_dev *pdev) { - struct fm10k_intfc *interface = pci_get_drvdata(pdev); - int err = 0; - - if (prepare) { - /* warn in case we have any active VF devices */ - if (pci_num_vf(pdev)) - dev_warn(&pdev->dev, - "PCIe FLR may cause issues for any active VF devices\n"); + /* warn in case we have any active VF devices */ + if (pci_num_vf(pdev)) + dev_warn(&pdev->dev, + "PCIe FLR may cause issues for any active VF devices\n"); + fm10k_prepare_suspend(pci_get_drvdata(pdev)); +} - fm10k_prepare_suspend(interface); - } else { - err = fm10k_handle_resume(interface); - } +static void fm10k_io_reset_done(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + int err = fm10k_handle_resume(interface); if (err) { dev_warn(&pdev->dev, @@ -2384,7 +2373,8 @@ static const struct pci_error_handlers fm10k_err_handler = { .error_detected = fm10k_io_error_detected, .slot_reset = fm10k_io_slot_reset, .resume = fm10k_io_resume, - .reset_notify = fm10k_io_reset_notify, + .reset_prepare = fm10k_io_reset_prepare, + .reset_done = fm10k_io_reset_done, }; static struct pci_driver fm10k_driver = { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 41a5c5d2ac89..b3d0c2e6347a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2401,15 +2401,10 @@ static int mtk_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; - const struct of_device_id *match; - struct mtk_soc_data *soc; struct mtk_eth *eth; int err; int i; - match = of_match_device(of_mtk_match, &pdev->dev); - soc = (struct mtk_soc_data *)match->data; - eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); if (!eth) return -ENOMEM; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index fc10f27e0a0c..6a65c8b33807 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -356,6 +356,7 @@ err_free_app_priv: static void nfp_flower_clean(struct nfp_app *app) { + nfp_flower_metadata_cleanup(app); vfree(app->priv); app->priv = NULL; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2e69bcdc5b07..99a26a9efec1 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2229,6 +2229,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) kfree(names); kfree(callbacks); kfree(vqs); + kfree(ctx); return 0; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index f4d0054981c6..8a1eaf3c302a 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -922,15 +922,10 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) static void vrf_dev_uninit(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct net_device *port_dev; - struct list_head *iter; vrf_rtable_release(dev, vrf); vrf_rt6_release(dev, vrf); - netdev_for_each_lower_dev(dev, port_dev, iter) - vrf_del_slave(dev, port_dev); - free_percpu(dev->dstats); dev->dstats = NULL; }
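The fm10k conversion above, like the mwifiex and nvme conversions below, replaces the single reset_notify(pdev, prepare) callback with the reset_prepare()/reset_done() pair in struct pci_error_handlers. Every conversion in this series follows the same skeleton; demo_quiesce() and demo_restart() here are hypothetical stand-ins for the driver-specific halves:

#include <linux/pci.h>

static void demo_quiesce(void *priv)
{
	/* stop DMA and interrupts, save software state */
}

static void demo_restart(void *priv)
{
	/* re-initialize software state, re-download firmware if needed */
}

/* Called by the PCI core just before the function-level reset is issued. */
static void demo_reset_prepare(struct pci_dev *pdev)
{
	demo_quiesce(pci_get_drvdata(pdev));
}

/* Called after the reset, once config space has been restored. */
static void demo_reset_done(struct pci_dev *pdev)
{
	demo_restart(pci_get_drvdata(pdev));
}

static const struct pci_error_handlers demo_err_handler = {
	.reset_prepare	= demo_reset_prepare,
	.reset_done	= demo_reset_done,
};

Note that the mwifiex hunk below also changes err_handler from a one-element array to a single structure; pci_driver::err_handler is a pointer to one struct pci_error_handlers, so the array form, while functional through pointer decay, was misleading.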
@@ -1386,6 +1381,12 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], static void vrf_dellink(struct net_device *dev, struct list_head *head) { + struct net_device *port_dev; + struct list_head *iter; + + netdev_for_each_lower_dev(dev, port_dev, iter) + vrf_del_slave(dev, port_dev); + unregister_netdevice_queue(dev, head); } diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index b53ecf1eddda..21f2201405d1 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -346,11 +346,13 @@ static const struct pci_device_id mwifiex_ids[] = { MODULE_DEVICE_TABLE(pci, mwifiex_ids); -static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) +/* + * Cleanup all software without cleaning anything related to PCIe and HW. + */ +static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev) { struct pcie_service_card *card = pci_get_drvdata(pdev); struct mwifiex_adapter *adapter = card->adapter; - int ret; if (!adapter) { dev_err(&pdev->dev, "%s: adapter structure is not valid\n", @@ -359,35 +361,46 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare) } mwifiex_dbg(adapter, INFO, - "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n", - __func__, pdev->vendor, pdev->device, - pdev->revision, - prepare ? "Pre-FLR" : "Post-FLR"); - - if (prepare) { - /* Kernel would be performing FLR after this notification. - * Cleanup all software without cleaning anything related to - * PCIe and HW. - */ - mwifiex_shutdown_sw(adapter); - clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); - clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); - } else { - /* Kernel stores and restores PCIe function context before and - * after performing FLR respectively. Reconfigure the software - * and firmware including firmware redownload - */ - ret = mwifiex_reinit_sw(adapter); - if (ret) { - dev_err(&pdev->dev, "reinit failed: %d\n", ret); - return; - } - } + "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Pre-FLR\n", + __func__, pdev->vendor, pdev->device, pdev->revision); + + mwifiex_shutdown_sw(adapter); + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); } -static const struct pci_error_handlers mwifiex_pcie_err_handler[] = { - { .reset_notify = mwifiex_pcie_reset_notify, }, +/* + * Kernel stores and restores PCIe function context before and after performing + * FLR respectively. Reconfigure the software and firmware including firmware + * redownload. 
+ */ +static void mwifiex_pcie_reset_done(struct pci_dev *pdev) +{ + struct pcie_service_card *card = pci_get_drvdata(pdev); + struct mwifiex_adapter *adapter = card->adapter; + int ret; + + if (!adapter) { + dev_err(&pdev->dev, "%s: adapter structure is not valid\n", + __func__); + return; + } + + mwifiex_dbg(adapter, INFO, + "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Post-FLR\n", + __func__, pdev->vendor, pdev->device, pdev->revision); + + ret = mwifiex_reinit_sw(adapter); + if (ret) + dev_err(&pdev->dev, "reinit failed: %d\n", ret); + else + mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); +} + +static const struct pci_error_handlers mwifiex_pcie_err_handler = { + .reset_prepare = mwifiex_pcie_reset_prepare, + .reset_done = mwifiex_pcie_reset_done, }; #ifdef CONFIG_PM_SLEEP @@ -408,7 +421,7 @@ static struct pci_driver __refdata mwifiex_pcie = { }, #endif .shutdown = mwifiex_pcie_shutdown, - .err_handler = mwifiex_pcie_err_handler, + .err_handler = &mwifiex_pcie_err_handler, }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5b1ac79fb607..b7a84c523475 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2303,14 +2303,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) return result; } -static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) +static void nvme_reset_prepare(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); + nvme_dev_disable(dev, false); +} - if (prepare) - nvme_dev_disable(dev, false); - else - nvme_reset_ctrl(&dev->ctrl); +static void nvme_reset_done(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + nvme_reset_ctrl(&dev->ctrl); } static void nvme_shutdown(struct pci_dev *pdev) @@ -2434,7 +2436,8 @@ static const struct pci_error_handlers nvme_err_handler = { .error_detected = nvme_error_detected, .slot_reset = nvme_slot_reset, .resume = nvme_error_resume, - .reset_notify = nvme_reset_notify, + .reset_prepare = nvme_reset_prepare, + .reset_done = nvme_reset_done, }; static const struct pci_device_id nvme_id_table[] = { diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c index c175d9cd0bb5..3a05568f65df 100644 --- a/drivers/of/of_pci_irq.c +++ b/drivers/of/of_pci_irq.c @@ -113,7 +113,8 @@ EXPORT_SYMBOL_GPL(of_irq_parse_pci); * @pin: PCI irq pin number; passed when used as map_irq callback. Unused * * @slot and @pin are unused, but included in the function so that this - * function can be used directly as the map_irq callback to pci_fixup_irqs(). 
+ * function can be used directly as the map_irq callback to + * pci_assign_irq() and struct pci_host_bridge.map_irq pointer */ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) { diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 462c1f5f5546..66a21acad952 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -4,7 +4,8 @@ obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ - irq.o vpd.o setup-bus.o vc.o mmap.o + irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o + obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSFS) += slot.o @@ -29,20 +30,6 @@ obj-$(CONFIG_PCI_ATS) += ats.o obj-$(CONFIG_PCI_IOV) += iov.o # -# Some architectures use the generic PCI setup functions -# -obj-$(CONFIG_ALPHA) += setup-irq.o -obj-$(CONFIG_ARC) += setup-irq.o -obj-$(CONFIG_ARM) += setup-irq.o -obj-$(CONFIG_ARM64) += setup-irq.o -obj-$(CONFIG_UNICORE32) += setup-irq.o -obj-$(CONFIG_SUPERH) += setup-irq.o -obj-$(CONFIG_MIPS) += setup-irq.o -obj-$(CONFIG_TILE) += setup-irq.o -obj-$(CONFIG_SPARC_LEON) += setup-irq.o -obj-$(CONFIG_M68K) += setup-irq.o - -# # ACPI Related PCI FW Functions # ACPI _DSM provided firmware instance and string name # diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index eeb9fb2b47aa..ad8ddbbbf245 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -153,23 +153,27 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs) u32 max_requests; int pos; + if (WARN_ON(pdev->pri_enabled)) + return -EBUSY; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (!pos) return -EINVAL; - pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); - if ((control & PCI_PRI_CTRL_ENABLE) || - !(status & PCI_PRI_STATUS_STOPPED)) + if (!(status & PCI_PRI_STATUS_STOPPED)) return -EBUSY; pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests); reqs = min(max_requests, reqs); + pdev->pri_reqs_alloc = reqs; pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); - control |= PCI_PRI_CTRL_ENABLE; + control = PCI_PRI_CTRL_ENABLE; pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); + pdev->pri_enabled = 1; + return 0; } EXPORT_SYMBOL_GPL(pci_enable_pri); @@ -185,6 +189,9 @@ void pci_disable_pri(struct pci_dev *pdev) u16 control; int pos; + if (WARN_ON(!pdev->pri_enabled)) + return; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (!pos) return; @@ -192,10 +199,34 @@ void pci_disable_pri(struct pci_dev *pdev) pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); control &= ~PCI_PRI_CTRL_ENABLE; pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); + + pdev->pri_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_pri); /** + * pci_restore_pri_state - Restore PRI + * @pdev: PCI device structure + */ +void pci_restore_pri_state(struct pci_dev *pdev) +{ + u16 control = PCI_PRI_CTRL_ENABLE; + u32 reqs = pdev->pri_reqs_alloc; + int pos; + + if (!pdev->pri_enabled) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); + if (!pos) + return; + + pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); + pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); +} +EXPORT_SYMBOL_GPL(pci_restore_pri_state); + +/** * pci_reset_pri - Resets device's PRI state * @pdev: PCI device structure * @@ -207,16 +238,14 @@ int pci_reset_pri(struct pci_dev *pdev) u16 control; int pos; + if (WARN_ON(pdev->pri_enabled)) + return -EBUSY; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (!pos) return 
-EINVAL; - pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); - if (control & PCI_PRI_CTRL_ENABLE) - return -EBUSY; - - control |= PCI_PRI_CTRL_RESET; - + control = PCI_PRI_CTRL_RESET; pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); return 0; @@ -239,16 +268,14 @@ int pci_enable_pasid(struct pci_dev *pdev, int features) u16 control, supported; int pos; + if (WARN_ON(pdev->pasid_enabled)) + return -EBUSY; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); if (!pos) return -EINVAL; - pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control); pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported); - - if (control & PCI_PASID_CTRL_ENABLE) - return -EINVAL; - supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; /* User wants to enable anything unsupported? */ @@ -256,9 +283,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features) return -EINVAL; control = PCI_PASID_CTRL_ENABLE | features; + pdev->pasid_features = features; pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); + pdev->pasid_enabled = 1; + return 0; } EXPORT_SYMBOL_GPL(pci_enable_pasid); @@ -266,22 +296,47 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid); /** * pci_disable_pasid - Disable the PASID capability * @pdev: PCI device structure - * */ void pci_disable_pasid(struct pci_dev *pdev) { u16 control = 0; int pos; + if (WARN_ON(!pdev->pasid_enabled)) + return; + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); if (!pos) return; pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); + + pdev->pasid_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_pasid); /** + * pci_restore_pasid_state - Restore PASID capabilities + * @pdev: PCI device structure + */ +void pci_restore_pasid_state(struct pci_dev *pdev) +{ + u16 control; + int pos; + + if (!pdev->pasid_enabled) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); + if (!pos) + return; + + control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features; + pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); +} +EXPORT_SYMBOL_GPL(pci_restore_pasid_state); + +/** * pci_pasid_features - Check which PASID features are supported * @pdev: PCI device structure * diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig index b7e15526d676..d275aadc47ee 100644 --- a/drivers/pci/dwc/Kconfig +++ b/drivers/pci/dwc/Kconfig @@ -16,6 +16,7 @@ config PCIE_DW_EP config PCI_DRA7XX bool "TI DRA7xx PCIe controller" + depends on SOC_DRA7XX || COMPILE_TEST depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT depends on OF && HAS_IOMEM && TI_PIPE3 help @@ -158,4 +159,14 @@ config PCIE_ARTPEC6 Say Y here to enable PCIe controller support on Axis ARTPEC-6 SoCs. This PCIe controller uses the DesignWare core. +config PCIE_KIRIN + depends on OF && ARM64 + bool "HiSilicon Kirin series SoCs PCIe controllers" + depends on PCI + select PCIEPORTBUS + select PCIE_DW_HOST + help + Say Y here if you want PCIe controller support + on HiSilicon Kirin series SoCs. + endmenu diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile index f31a8596442a..c61be9738cce 100644 --- a/drivers/pci/dwc/Makefile +++ b/drivers/pci/dwc/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o +obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o # The following drivers are for devices that use the generic ACPI # pci_root.c driver but don't support standard ECAM config access. 
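The ats.c rework above has pci_enable_pri() and pci_enable_pasid() cache what they negotiated (pri_reqs_alloc, pasid_features) in struct pci_dev, and adds pci_restore_pri_state()/pci_restore_pasid_state() to replay that configuration after a reset has wiped the capability registers. A sketch of a restore path using the new helpers; demo_resume_after_flr() is illustrative rather than the core's actual call site, and it assumes the declarations live alongside the other PRI/PASID helpers in linux/pci-ats.h:

#include <linux/pci.h>
#include <linux/pci-ats.h>

static void demo_resume_after_flr(struct pci_dev *pdev)
{
	/* Both helpers are no-ops unless the feature was enabled beforehand. */
	pci_restore_pasid_state(pdev);	/* rewrites PASID_CTRL from pasid_features */
	pci_restore_pri_state(pdev);	/* rewrites PRI_ALLOC_REQ and PRI_CTRL */
}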
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c index 8decf46cf525..f2fc5f47064e 100644 --- a/drivers/pci/dwc/pci-dra7xx.c +++ b/drivers/pci/dwc/pci-dra7xx.c @@ -174,7 +174,7 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci) static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, - ~LEG_EP_INTERRUPTS & ~MSI); + LEG_EP_INTERRUPTS | MSI); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, @@ -184,7 +184,7 @@ static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) { dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, - ~INTERRUPTS); + INTERRUPTS); dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS); } @@ -208,7 +208,7 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp) dra7xx_pcie_enable_interrupts(dra7xx); } -static struct dw_pcie_host_ops dra7xx_pcie_host_ops = { +static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { .host_init = dra7xx_pcie_host_init, }; diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 546082ad5a3f..c78c06552590 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c @@ -590,7 +590,7 @@ static void exynos_pcie_host_init(struct pcie_port *pp) exynos_pcie_enable_interrupts(ep); } -static struct dw_pcie_host_ops exynos_pcie_host_ops = { +static const struct dw_pcie_host_ops exynos_pcie_host_ops = { .rd_own_conf = exynos_pcie_rd_own_conf, .wr_own_conf = exynos_pcie_wr_own_conf, .host_init = exynos_pcie_host_init, diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index 19a289b8cc94..bf5c3616e344 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c @@ -24,6 +24,7 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <linux/resource.h> #include <linux/signal.h> #include <linux/types.h> @@ -59,6 +60,7 @@ struct imx6_pcie { u32 tx_swing_full; u32 tx_swing_low; int link_gen; + struct regulator *vpcie; }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ @@ -284,6 +286,8 @@ static int imx6q_pcie_abort_handler(unsigned long addr, static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) { + struct device *dev = imx6_pcie->pci->dev; + switch (imx6_pcie->variant) { case IMX7D: reset_control_assert(imx6_pcie->pciephy_reset); @@ -310,6 +314,14 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); break; } + + if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { + int ret = regulator_disable(imx6_pcie->vpcie); + + if (ret) + dev_err(dev, "failed to disable vpcie regulator: %d\n", + ret); + } } static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) @@ -376,10 +388,19 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) struct device *dev = pci->dev; int ret; + if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) { + ret = regulator_enable(imx6_pcie->vpcie); + if (ret) { + dev_err(dev, "failed to enable vpcie regulator: %d\n", + ret); + return; + } + } + ret = clk_prepare_enable(imx6_pcie->pcie_phy); if (ret) { dev_err(dev, "unable to enable pcie_phy clock\n"); - return; + goto err_pcie_phy; } ret = clk_prepare_enable(imx6_pcie->pcie_bus); @@ -439,6 +460,13 @@ err_pcie: 
clk_disable_unprepare(imx6_pcie->pcie_bus); err_pcie_bus: clk_disable_unprepare(imx6_pcie->pcie_phy); +err_pcie_phy: + if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { + ret = regulator_disable(imx6_pcie->vpcie); + if (ret) + dev_err(dev, "failed to disable vpcie regulator: %d\n", + ret); + } } static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) @@ -629,7 +657,7 @@ static int imx6_pcie_link_up(struct dw_pcie *pci) PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; } -static struct dw_pcie_host_ops imx6_pcie_host_ops = { +static const struct dw_pcie_host_ops imx6_pcie_host_ops = { .host_init = imx6_pcie_host_init, }; @@ -802,6 +830,13 @@ static int imx6_pcie_probe(struct platform_device *pdev) if (ret) imx6_pcie->link_gen = 1; + imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); + if (IS_ERR(imx6_pcie->vpcie)) { + if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) + return -EPROBE_DEFER; + imx6_pcie->vpcie = NULL; + } + platform_set_drvdata(pdev, imx6_pcie); ret = imx6_add_pcie_port(imx6_pcie, pdev); diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index fcc9723bad6e..4783cec1f78d 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c @@ -291,7 +291,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp) "Asynchronous external abort"); } -static struct dw_pcie_host_ops keystone_pcie_host_ops = { +static const struct dw_pcie_host_ops keystone_pcie_host_ops = { .rd_other_conf = ks_dw_pcie_rd_other_conf, .wr_other_conf = ks_dw_pcie_wr_other_conf, .host_init = ks_pcie_host_init, diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c index 27d638c4e134..fd861289ad8b 100644 --- a/drivers/pci/dwc/pci-layerscape.c +++ b/drivers/pci/dwc/pci-layerscape.c @@ -39,7 +39,7 @@ struct ls_pcie_drvdata { u32 lut_offset; u32 ltssm_shift; u32 lut_dbg; - struct dw_pcie_host_ops *ops; + const struct dw_pcie_host_ops *ops; const struct dw_pcie_ops *dw_pcie_ops; }; @@ -185,12 +185,12 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp, return 0; } -static struct dw_pcie_host_ops ls1021_pcie_host_ops = { +static const struct dw_pcie_host_ops ls1021_pcie_host_ops = { .host_init = ls1021_pcie_host_init, .msi_host_init = ls_pcie_msi_host_init, }; -static struct dw_pcie_host_ops ls_pcie_host_ops = { +static const struct dw_pcie_host_ops ls_pcie_host_ops = { .host_init = ls_pcie_host_init, .msi_host_init = ls_pcie_msi_host_init, }; diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c index 495b023042b3..ea8f34af6a85 100644 --- a/drivers/pci/dwc/pcie-armada8k.c +++ b/drivers/pci/dwc/pcie-armada8k.c @@ -160,7 +160,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static struct dw_pcie_host_ops armada8k_pcie_host_ops = { +static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { .host_init = armada8k_pcie_host_init, }; diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index 82a04acc42fd..01c6f7823672 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c @@ -184,7 +184,7 @@ static void artpec6_pcie_host_init(struct pcie_port *pp) artpec6_pcie_enable_interrupts(artpec6_pcie); } -static struct dw_pcie_host_ops artpec6_pcie_host_ops = { +static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { .host_init = artpec6_pcie_host_init, }; diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 28ed32ba4f1b..d29c020da082 100644 --- 
a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c @@ -280,9 +280,9 @@ int dw_pcie_host_init(struct pcie_port *pp) struct device_node *np = dev->of_node; struct platform_device *pdev = to_platform_device(dev); struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; struct resource *cfg_res; int i, ret; - LIST_HEAD(res); struct resource_entry *win, *tmp; cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); @@ -295,16 +295,21 @@ int dw_pcie_host_init(struct pcie_port *pp) dev_err(dev, "missing *config* reg space\n"); } - ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base); + bridge = pci_alloc_host_bridge(0); + if (!bridge) + return -ENOMEM; + + ret = of_pci_get_host_bridge_resources(np, 0, 0xff, + &bridge->windows, &pp->io_base); if (ret) return ret; - ret = devm_request_pci_bus_resources(dev, &res); + ret = devm_request_pci_bus_resources(dev, &bridge->windows); if (ret) goto error; /* Get the I/O and memory ranges from DT */ - resource_list_for_each_entry_safe(win, tmp, &res) { + resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { switch (resource_type(win->res)) { case IORESOURCE_IO: ret = pci_remap_iospace(win->res, pp->io_base); @@ -400,27 +405,27 @@ int dw_pcie_host_init(struct pcie_port *pp) pp->ops->host_init(pp); pp->root_bus_nr = pp->busn->start; + + bridge->dev.parent = dev; + bridge->sysdata = pp; + bridge->busnr = pp->root_bus_nr; + bridge->ops = &dw_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; if (IS_ENABLED(CONFIG_PCI_MSI)) { - bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr, - &dw_pcie_ops, pp, &res, - &dw_pcie_msi_chip); + bridge->msi = &dw_pcie_msi_chip; dw_pcie_msi_chip.dev = dev; - } else - bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops, - pp, &res); - if (!bus) { - ret = -ENOMEM; - goto error; } + ret = pci_scan_root_bus_bridge(bridge); + if (ret) + goto error; + + bus = bridge->bus; + if (pp->ops->scan_bus) pp->ops->scan_bus(pp); -#ifdef CONFIG_ARM - /* support old dtbs that incorrectly describe IRQs */ - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); -#endif - pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); @@ -431,7 +436,7 @@ int dw_pcie_host_init(struct pcie_port *pp) return 0; error: - pci_free_resource_list(&res); + pci_free_host_bridge(bridge); return ret; } diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index 32091b32f6e1..091b4e7ad059 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c @@ -46,7 +46,7 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp) dw_pcie_msi_init(pp); } -static struct dw_pcie_host_ops dw_plat_pcie_host_ops = { +static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { .host_init = dw_plat_pcie_host_init, }; @@ -67,7 +67,8 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp, ret = devm_request_irq(dev, pp->msi_irq, dw_plat_pcie_msi_irq_handler, - IRQF_SHARED, "dw-plat-pcie-msi", pp); + IRQF_SHARED | IRQF_NO_THREAD, + "dw-plat-pcie-msi", pp); if (ret) { dev_err(dev, "failed to request MSI IRQ\n"); return ret; diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h index c6a840575796..b4d2a89f8e58 100644 --- a/drivers/pci/dwc/pcie-designware.h +++ b/drivers/pci/dwc/pcie-designware.h @@ -162,7 +162,7 @@ struct pcie_port { struct resource *mem; struct resource *busn; int irq; - struct dw_pcie_host_ops *ops; + const struct 
dw_pcie_host_ops *ops; int msi_irq; struct irq_domain *irq_domain; unsigned long msi_data; diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c new file mode 100644 index 000000000000..33fddb9f6739 --- /dev/null +++ b/drivers/pci/dwc/pcie-kirin.c @@ -0,0 +1,517 @@ +/* + * PCIe host controller driver for Kirin Phone SoCs + * + * Copyright (C) 2017 Hisilicon Electronics Co., Ltd. + * http://www.huawei.com + * + * Author: Xiaowei Song <songxiaowei@huawei.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <asm/compiler.h> +#include <linux/compiler.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/mfd/syscon.h> +#include <linux/of_address.h> +#include <linux/of_gpio.h> +#include <linux/of_pci.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/resource.h> +#include <linux/types.h> +#include "pcie-designware.h" + +#define to_kirin_pcie(x) dev_get_drvdata((x)->dev) + +#define REF_CLK_FREQ 100000000 + +/* PCIe ELBI registers */ +#define SOC_PCIECTRL_CTRL0_ADDR 0x000 +#define SOC_PCIECTRL_CTRL1_ADDR 0x004 +#define SOC_PCIEPHY_CTRL2_ADDR 0x008 +#define SOC_PCIEPHY_CTRL3_ADDR 0x00c +#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) + +/* info located in APB */ +#define PCIE_APP_LTSSM_ENABLE 0x01c +#define PCIE_APB_PHY_CTRL0 0x0 +#define PCIE_APB_PHY_CTRL1 0x4 +#define PCIE_APB_PHY_STATUS0 0x400 +#define PCIE_LINKUP_ENABLE (0x8020) +#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) +#define PIPE_CLK_STABLE (0x1 << 19) +#define PHY_REF_PAD_BIT (0x1 << 8) +#define PHY_PWR_DOWN_BIT (0x1 << 22) +#define PHY_RST_ACK_BIT (0x1 << 16) + +/* info located in sysctrl */ +#define SCTRL_PCIE_CMOS_OFFSET 0x60 +#define SCTRL_PCIE_CMOS_BIT 0x10 +#define SCTRL_PCIE_ISO_OFFSET 0x44 +#define SCTRL_PCIE_ISO_BIT 0x30 +#define SCTRL_PCIE_HPCLK_OFFSET 0x190 +#define SCTRL_PCIE_HPCLK_BIT 0x184000 +#define SCTRL_PCIE_OE_OFFSET 0x14a +#define PCIE_DEBOUNCE_PARAM 0xF0F400 +#define PCIE_OE_BYPASS (0x3 << 28) + +/* peri_crg ctrl */ +#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 +#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 + +/* Time for delay */ +#define REF_2_PERST_MIN 20000 +#define REF_2_PERST_MAX 25000 +#define PERST_2_ACCESS_MIN 10000 +#define PERST_2_ACCESS_MAX 12000 +#define LINK_WAIT_MIN 900 +#define LINK_WAIT_MAX 1000 +#define PIPE_CLK_WAIT_MIN 550 +#define PIPE_CLK_WAIT_MAX 600 +#define TIME_CMOS_MIN 100 +#define TIME_CMOS_MAX 105 +#define TIME_PHY_PD_MIN 10 +#define TIME_PHY_PD_MAX 11 + +struct kirin_pcie { + struct dw_pcie *pci; + void __iomem *apb_base; + void __iomem *phy_base; + struct regmap *crgctrl; + struct regmap *sysctrl; + struct clk *apb_sys_clk; + struct clk *apb_phy_clk; + struct clk *phy_ref_clk; + struct clk *pcie_aclk; + struct clk *pcie_aux_clk; + int gpio_id_reset; +}; + +/* Registers in PCIeCTRL */ +static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie, + u32 val, u32 reg) +{ + writel(val, kirin_pcie->apb_base + reg); +} + +static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg) +{ + return readl(kirin_pcie->apb_base + reg); +} + +/* Registers in PCIePHY */ +static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie, + u32 val, u32 reg) +{ + writel(val, kirin_pcie->phy_base + reg); +} + +static inline u32
kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg) +{ + return readl(kirin_pcie->phy_base + reg); +} + +static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); + if (IS_ERR(kirin_pcie->phy_ref_clk)) + return PTR_ERR(kirin_pcie->phy_ref_clk); + + kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux"); + if (IS_ERR(kirin_pcie->pcie_aux_clk)) + return PTR_ERR(kirin_pcie->pcie_aux_clk); + + kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); + if (IS_ERR(kirin_pcie->apb_phy_clk)) + return PTR_ERR(kirin_pcie->apb_phy_clk); + + kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); + if (IS_ERR(kirin_pcie->apb_sys_clk)) + return PTR_ERR(kirin_pcie->apb_sys_clk); + + kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk"); + if (IS_ERR(kirin_pcie->pcie_aclk)) + return PTR_ERR(kirin_pcie->pcie_aclk); + + return 0; +} + +static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *apb; + struct resource *phy; + struct resource *dbi; + + apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); + kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); + if (IS_ERR(kirin_pcie->apb_base)) + return PTR_ERR(kirin_pcie->apb_base); + + phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); + if (IS_ERR(kirin_pcie->phy_base)) + return PTR_ERR(kirin_pcie->phy_base); + + dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); + if (IS_ERR(kirin_pcie->pci->dbi_base)) + return PTR_ERR(kirin_pcie->pci->dbi_base); + + kirin_pcie->crgctrl = + syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl"); + if (IS_ERR(kirin_pcie->crgctrl)) + return PTR_ERR(kirin_pcie->crgctrl); + + kirin_pcie->sysctrl = + syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl"); + if (IS_ERR(kirin_pcie->sysctrl)) + return PTR_ERR(kirin_pcie->sysctrl); + + return 0; +} + +static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie) +{ + struct device *dev = kirin_pcie->pci->dev; + u32 reg_val; + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); + reg_val &= ~PHY_REF_PAD_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0); + reg_val &= ~PHY_PWR_DOWN_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0); + usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX); + + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); + reg_val &= ~PHY_RST_ACK_BIT; + kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); + + usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX); + reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); + if (reg_val & PIPE_CLK_STABLE) { + dev_err(dev, "PIPE clk is not stable\n"); + return -EINVAL; + } + + return 0; +} + +static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie) +{ + u32 val; + + regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val); + val |= PCIE_DEBOUNCE_PARAM; + val &= ~PCIE_OE_BYPASS; + regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val); +} + +static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable) +{ + int ret = 0; + + if (!enable) + goto close_clk; + + ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ); + if (ret) 
+ return ret; + + ret = clk_prepare_enable(kirin_pcie->phy_ref_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(kirin_pcie->apb_sys_clk); + if (ret) + goto apb_sys_fail; + + ret = clk_prepare_enable(kirin_pcie->apb_phy_clk); + if (ret) + goto apb_phy_fail; + + ret = clk_prepare_enable(kirin_pcie->pcie_aclk); + if (ret) + goto aclk_fail; + + ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk); + if (ret) + goto aux_clk_fail; + + return 0; + +close_clk: + clk_disable_unprepare(kirin_pcie->pcie_aux_clk); +aux_clk_fail: + clk_disable_unprepare(kirin_pcie->pcie_aclk); +aclk_fail: + clk_disable_unprepare(kirin_pcie->apb_phy_clk); +apb_phy_fail: + clk_disable_unprepare(kirin_pcie->apb_sys_clk); +apb_sys_fail: + clk_disable_unprepare(kirin_pcie->phy_ref_clk); + + return ret; +} + +static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie) +{ + int ret; + + /* Power supply for Host */ + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); + usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); + kirin_pcie_oe_enable(kirin_pcie); + + ret = kirin_pcie_clk_ctrl(kirin_pcie, true); + if (ret) + return ret; + + /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); + regmap_write(kirin_pcie->crgctrl, + CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); + regmap_write(kirin_pcie->sysctrl, + SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); + + ret = kirin_pcie_phy_init(kirin_pcie); + if (ret) + goto close_clk; + + /* perst assert Endpoint */ + if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) { + usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); + ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1); + if (ret) + goto close_clk; + usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); + + return 0; + } + +close_clk: + kirin_pcie_clk_ctrl(kirin_pcie, false); + return ret; +} + +static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, + bool on) +{ + u32 val; + + val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR); + if (on) + val = val | PCIE_ELBI_SLV_DBI_ENABLE; + else + val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; + + kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR); +} + +static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, + bool on) +{ + u32 val; + + val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR); + if (on) + val = val | PCIE_ELBI_SLV_DBI_ENABLE; + else + val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; + + kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR); +} + +static int kirin_pcie_rd_own_conf(struct pcie_port *pp, + int where, int size, u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + int ret; + + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); + ret = dw_pcie_read(pci->dbi_base + where, size, val); + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); + + return ret; +} + +static int kirin_pcie_wr_own_conf(struct pcie_port *pp, + int where, int size, u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + int ret; + + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); + ret = dw_pcie_write(pci->dbi_base + where, size, val); + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); + + return ret; +} + +static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + u32 ret; + + 
kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); + dw_pcie_read(base + reg, size, &ret); + kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); + + return ret; +} + +static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, + u32 reg, size_t size, u32 val) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); + dw_pcie_write(base + reg, size, val); + kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); +} + +static int kirin_pcie_link_up(struct dw_pcie *pci) +{ + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); + + if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) + return 1; + + return 0; +} + +static int kirin_pcie_establish_link(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); + struct device *dev = kirin_pcie->pci->dev; + int count = 0; + + if (kirin_pcie_link_up(pci)) + return 0; + + dw_pcie_setup_rc(pp); + + /* assert LTSSM enable */ + kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT, + PCIE_APP_LTSSM_ENABLE); + + /* check if the link is up or not */ + while (!kirin_pcie_link_up(pci)) { + usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); + count++; + if (count == 1000) { + dev_err(dev, "Link Fail\n"); + return -EINVAL; + } + } + + return 0; +} + +static void kirin_pcie_host_init(struct pcie_port *pp) +{ + kirin_pcie_establish_link(pp); +} + +static struct dw_pcie_ops kirin_dw_pcie_ops = { + .read_dbi = kirin_pcie_read_dbi, + .write_dbi = kirin_pcie_write_dbi, + .link_up = kirin_pcie_link_up, +}; + +static struct dw_pcie_host_ops kirin_pcie_host_ops = { + .rd_own_conf = kirin_pcie_rd_own_conf, + .wr_own_conf = kirin_pcie_wr_own_conf, + .host_init = kirin_pcie_host_init, +}; + +static int __init kirin_add_pcie_port(struct dw_pcie *pci, + struct platform_device *pdev) +{ + pci->pp.ops = &kirin_pcie_host_ops; + + return dw_pcie_host_init(&pci->pp); +} + +static int kirin_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct kirin_pcie *kirin_pcie; + struct dw_pcie *pci; + int ret; + + if (!dev->of_node) { + dev_err(dev, "NULL node\n"); + return -EINVAL; + } + + kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); + if (!kirin_pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pci->ops = &kirin_dw_pcie_ops; + kirin_pcie->pci = pci; + + ret = kirin_pcie_get_clk(kirin_pcie, pdev); + if (ret) + return ret; + + ret = kirin_pcie_get_resource(kirin_pcie, pdev); + if (ret) + return ret; + + kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, + "reset-gpio", 0); + if (kirin_pcie->gpio_id_reset < 0) + return -ENODEV; + + ret = kirin_pcie_power_on(kirin_pcie); + if (ret) + return ret; + + platform_set_drvdata(pdev, kirin_pcie); + + return kirin_add_pcie_port(pci, pdev); +} + +static const struct of_device_id kirin_pcie_match[] = { + { .compatible = "hisilicon,kirin960-pcie" }, + {}, +}; + +struct platform_driver kirin_pcie_driver = { + .probe = kirin_pcie_probe, + .driver = { + .name = "kirin-pcie", + .of_match_table = kirin_pcie_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(kirin_pcie_driver); diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index 5bf23d432fdb..68c5f2ab5bc8 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c @@ -51,6 +51,12 @@ #define 
PCIE20_ELBI_SYS_CTRL 0x04 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 +#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 +#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c +#define CFG_BRIDGE_SB_INIT BIT(0) + #define PCIE20_CAP 0x70 #define PERST_DELAY_US 1000 @@ -86,10 +92,29 @@ struct qcom_pcie_resources_v2 { struct clk *pipe_clk; }; +struct qcom_pcie_resources_v3 { + struct clk *aux_clk; + struct clk *master_clk; + struct clk *slave_clk; + struct reset_control *axi_m_reset; + struct reset_control *axi_s_reset; + struct reset_control *pipe_reset; + struct reset_control *axi_m_vmid_reset; + struct reset_control *axi_s_xpu_reset; + struct reset_control *parf_reset; + struct reset_control *phy_reset; + struct reset_control *axi_m_sticky_reset; + struct reset_control *pipe_sticky_reset; + struct reset_control *pwr_reset; + struct reset_control *ahb_reset; + struct reset_control *phy_ahb_reset; +}; + union qcom_pcie_resources { struct qcom_pcie_resources_v0 v0; struct qcom_pcie_resources_v1 v1; struct qcom_pcie_resources_v2 v2; + struct qcom_pcie_resources_v3 v3; }; struct qcom_pcie; @@ -133,26 +158,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg) return dw_handle_msi_irq(pp); } -static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) -{ - u32 val; - - /* enable link training */ - val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); - val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; - writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); -} - -static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) -{ - u32 val; - - /* enable link training */ - val = readl(pcie->parf + PCIE20_PARF_LTSSM); - val |= BIT(8); - writel(val, pcie->parf + PCIE20_PARF_LTSSM); -} - static int qcom_pcie_establish_link(struct qcom_pcie *pcie) { struct dw_pcie *pci = pcie->pci; @@ -167,6 +172,16 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie) return dw_pcie_wait_for_link(pci); } +static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); + val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; + writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); +} + static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v0 *res = &pcie->res.v0; @@ -217,36 +232,6 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) return PTR_ERR_OR_ZERO(res->phy_reset); } -static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_v1 *res = &pcie->res.v1; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - - res->vdda = devm_regulator_get(dev, "vdda"); - if (IS_ERR(res->vdda)) - return PTR_ERR(res->vdda); - - res->iface = devm_clk_get(dev, "iface"); - if (IS_ERR(res->iface)) - return PTR_ERR(res->iface); - - res->aux = devm_clk_get(dev, "aux"); - if (IS_ERR(res->aux)) - return PTR_ERR(res->aux); - - res->master_bus = devm_clk_get(dev, "master_bus"); - if (IS_ERR(res->master_bus)) - return PTR_ERR(res->master_bus); - - res->slave_bus = devm_clk_get(dev, "slave_bus"); - if (IS_ERR(res->slave_bus)) - return PTR_ERR(res->slave_bus); - - res->core = devm_reset_control_get(dev, "core"); - return PTR_ERR_OR_ZERO(res->core); -} - static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v0 *res = &pcie->res.v0; @@ -357,6 +342,13 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie) /* wait for clock acquisition */ usleep_range(1000, 1500); 
+ + /* Set the Max TLP size to 2K, instead of using default of 4K */ + writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, + pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); + writel(CFG_BRIDGE_SB_INIT, + pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); + return 0; err_deassert_ahb: @@ -375,6 +367,36 @@ err_refclk: return ret; } +static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v1 *res = &pcie->res.v1; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + + res->vdda = devm_regulator_get(dev, "vdda"); + if (IS_ERR(res->vdda)) + return PTR_ERR(res->vdda); + + res->iface = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface)) + return PTR_ERR(res->iface); + + res->aux = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux)) + return PTR_ERR(res->aux); + + res->master_bus = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_bus)) + return PTR_ERR(res->master_bus); + + res->slave_bus = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_bus)) + return PTR_ERR(res->slave_bus); + + res->core = devm_reset_control_get(dev, "core"); + return PTR_ERR_OR_ZERO(res->core); +} + static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v1 *res = &pcie->res.v1; @@ -455,6 +477,16 @@ err_res: return ret; } +static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) +{ + u32 val; + + /* enable link training */ + val = readl(pcie->parf + PCIE20_PARF_LTSSM); + val |= BIT(8); + writel(val, pcie->parf + PCIE20_PARF_LTSSM); +} + static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v2 *res = &pcie->res.v2; @@ -481,6 +513,17 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) return PTR_ERR_OR_ZERO(res->pipe_clk); } +static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + + clk_disable_unprepare(res->pipe_clk); + clk_disable_unprepare(res->slave_clk); + clk_disable_unprepare(res->master_clk); + clk_disable_unprepare(res->cfg_clk); + clk_disable_unprepare(res->aux_clk); +} + static int qcom_pcie_init_v2(struct qcom_pcie *pcie) { struct qcom_pcie_resources_v2 *res = &pcie->res.v2; @@ -562,22 +605,290 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie) return 0; } -static int qcom_pcie_link_up(struct dw_pcie *pci) +static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie) { - u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; - return !!(val & PCI_EXP_LNKSTA_DLLLA); + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->master_clk = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_clk)) + return PTR_ERR(res->master_clk); + + res->slave_clk = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_clk)) + return PTR_ERR(res->slave_clk); + + res->axi_m_reset = devm_reset_control_get(dev, "axi_m"); + if (IS_ERR(res->axi_m_reset)) + return PTR_ERR(res->axi_m_reset); + + res->axi_s_reset = devm_reset_control_get(dev, "axi_s"); + if (IS_ERR(res->axi_s_reset)) + return PTR_ERR(res->axi_s_reset); + + res->pipe_reset = devm_reset_control_get(dev, "pipe"); + if (IS_ERR(res->pipe_reset)) + return PTR_ERR(res->pipe_reset); + + res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid"); + if (IS_ERR(res->axi_m_vmid_reset)) + return PTR_ERR(res->axi_m_vmid_reset); + + res->axi_s_xpu_reset = devm_reset_control_get(dev, 
"axi_s_xpu"); + if (IS_ERR(res->axi_s_xpu_reset)) + return PTR_ERR(res->axi_s_xpu_reset); + + res->parf_reset = devm_reset_control_get(dev, "parf"); + if (IS_ERR(res->parf_reset)) + return PTR_ERR(res->parf_reset); + + res->phy_reset = devm_reset_control_get(dev, "phy"); + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + + res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky"); + if (IS_ERR(res->axi_m_sticky_reset)) + return PTR_ERR(res->axi_m_sticky_reset); + + res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky"); + if (IS_ERR(res->pipe_sticky_reset)) + return PTR_ERR(res->pipe_sticky_reset); + + res->pwr_reset = devm_reset_control_get(dev, "pwr"); + if (IS_ERR(res->pwr_reset)) + return PTR_ERR(res->pwr_reset); + + res->ahb_reset = devm_reset_control_get(dev, "ahb"); + if (IS_ERR(res->ahb_reset)) + return PTR_ERR(res->ahb_reset); + + res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb"); + if (IS_ERR(res->phy_ahb_reset)) + return PTR_ERR(res->phy_ahb_reset); + + return 0; } -static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) +static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_v2 *res = &pcie->res.v2; - - clk_disable_unprepare(res->pipe_clk); + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + + reset_control_assert(res->axi_m_reset); + reset_control_assert(res->axi_s_reset); + reset_control_assert(res->pipe_reset); + reset_control_assert(res->pipe_sticky_reset); + reset_control_assert(res->phy_reset); + reset_control_assert(res->phy_ahb_reset); + reset_control_assert(res->axi_m_sticky_reset); + reset_control_assert(res->pwr_reset); + reset_control_assert(res->ahb_reset); + clk_disable_unprepare(res->aux_clk); + clk_disable_unprepare(res->master_clk); clk_disable_unprepare(res->slave_clk); +} + +static int qcom_pcie_init_v3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + u32 val; + int ret; + + ret = reset_control_assert(res->axi_m_reset); + if (ret) { + dev_err(dev, "cannot assert axi master reset\n"); + return ret; + } + + ret = reset_control_assert(res->axi_s_reset); + if (ret) { + dev_err(dev, "cannot assert axi slave reset\n"); + return ret; + } + + usleep_range(10000, 12000); + + ret = reset_control_assert(res->pipe_reset); + if (ret) { + dev_err(dev, "cannot assert pipe reset\n"); + return ret; + } + + ret = reset_control_assert(res->pipe_sticky_reset); + if (ret) { + dev_err(dev, "cannot assert pipe sticky reset\n"); + return ret; + } + + ret = reset_control_assert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot assert phy reset\n"); + return ret; + } + + ret = reset_control_assert(res->phy_ahb_reset); + if (ret) { + dev_err(dev, "cannot assert phy ahb reset\n"); + return ret; + } + + usleep_range(10000, 12000); + + ret = reset_control_assert(res->axi_m_sticky_reset); + if (ret) { + dev_err(dev, "cannot assert axi master sticky reset\n"); + return ret; + } + + ret = reset_control_assert(res->pwr_reset); + if (ret) { + dev_err(dev, "cannot assert power reset\n"); + return ret; + } + + ret = reset_control_assert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot assert ahb reset\n"); + return ret; + } + + usleep_range(10000, 12000); + + ret = reset_control_deassert(res->phy_ahb_reset); + if (ret) { + dev_err(dev, "cannot deassert phy ahb reset\n"); + return ret; + } + + ret = reset_control_deassert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot deassert phy reset\n"); + goto 
err_rst_phy; + } + + ret = reset_control_deassert(res->pipe_reset); + if (ret) { + dev_err(dev, "cannot deassert pipe reset\n"); + goto err_rst_pipe; + } + + ret = reset_control_deassert(res->pipe_sticky_reset); + if (ret) { + dev_err(dev, "cannot deassert pipe sticky reset\n"); + goto err_rst_pipe_sticky; + } + + usleep_range(10000, 12000); + + ret = reset_control_deassert(res->axi_m_reset); + if (ret) { + dev_err(dev, "cannot deassert axi master reset\n"); + goto err_rst_axi_m; + } + + ret = reset_control_deassert(res->axi_m_sticky_reset); + if (ret) { + dev_err(dev, "cannot deassert axi master sticky reset\n"); + goto err_rst_axi_m_sticky; + } + + ret = reset_control_deassert(res->axi_s_reset); + if (ret) { + dev_err(dev, "cannot deassert axi slave reset\n"); + goto err_rst_axi_s; + } + + ret = reset_control_deassert(res->pwr_reset); + if (ret) { + dev_err(dev, "cannot deassert power reset\n"); + goto err_rst_pwr; + } + + ret = reset_control_deassert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot deassert ahb reset\n"); + goto err_rst_ahb; + } + + usleep_range(10000, 12000); + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_clk_aux; + } + + ret = clk_prepare_enable(res->master_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable axi master clock\n"); + goto err_clk_axi_m; + } + + ret = clk_prepare_enable(res->slave_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable axi slave clock\n"); + goto err_clk_axi_s; + } + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + /* MAC PHY_POWERDOWN MUX DISABLE */ + val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); + val &= ~BIT(29); + writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + val |= BIT(4); + writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + + val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); + + return 0; + +err_clk_axi_s: clk_disable_unprepare(res->master_clk); - clk_disable_unprepare(res->cfg_clk); +err_clk_axi_m: clk_disable_unprepare(res->aux_clk); +err_clk_aux: + reset_control_assert(res->ahb_reset); +err_rst_ahb: + reset_control_assert(res->pwr_reset); +err_rst_pwr: + reset_control_assert(res->axi_s_reset); +err_rst_axi_s: + reset_control_assert(res->axi_m_sticky_reset); +err_rst_axi_m_sticky: + reset_control_assert(res->axi_m_reset); +err_rst_axi_m: + reset_control_assert(res->pipe_sticky_reset); +err_rst_pipe_sticky: + reset_control_assert(res->pipe_reset); +err_rst_pipe: + reset_control_assert(res->phy_reset); +err_rst_phy: + reset_control_assert(res->phy_ahb_reset); + return ret; +} + +static int qcom_pcie_link_up(struct dw_pcie *pci) +{ + u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); + + return !!(val & PCI_EXP_LNKSTA_DLLLA); } static void qcom_pcie_host_init(struct pcie_port *pp) @@ -634,7 +945,7 @@ static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, return dw_pcie_read(pci->dbi_base + where, size, val); } -static struct dw_pcie_host_ops qcom_pcie_dw_ops = { +static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { .host_init = qcom_pcie_host_init, .rd_own_conf = qcom_pcie_rd_own_conf, }; @@ -665,6 +976,13 @@ static const struct dw_pcie_ops dw_pcie_ops =
{ .link_up = qcom_pcie_link_up, }; +static const struct qcom_pcie_ops ops_v3 = { + .get_resources = qcom_pcie_get_resources_v3, + .init = qcom_pcie_init_v3, + .deinit = qcom_pcie_deinit_v3, + .ltssm_enable = qcom_pcie_v2_ltssm_enable, +}; + static int qcom_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -727,7 +1045,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) ret = devm_request_irq(dev, pp->msi_irq, qcom_pcie_msi_irq_handler, - IRQF_SHARED, "qcom-pcie-msi", pp); + IRQF_SHARED | IRQF_NO_THREAD, + "qcom-pcie-msi", pp); if (ret) { dev_err(dev, "cannot request msi irq\n"); return ret; @@ -754,6 +1073,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, + { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 }, { } }; diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index 8ff36b3dbbdf..80897291e0fb 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c @@ -186,7 +186,7 @@ static void spear13xx_pcie_host_init(struct pcie_port *pp) spear13xx_pcie_enable_interrupts(spear13xx_pcie); } -static struct dw_pcie_host_ops spear13xx_pcie_host_ops = { +static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { .host_init = spear13xx_pcie_host_init, }; diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 7f47cd5e10a5..89d61c2cbfaa 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -180,6 +180,31 @@ config PCIE_ROCKCHIP There is 1 internal PCIe port available to support GEN2 with 4 slots. +config PCIE_MEDIATEK + bool "MediaTek PCIe controller" + depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST) + depends on OF + depends on PCI + select PCIEPORTBUS + help + Say Y here if you want to enable PCIe controller support on + MT7623 series SoCs. There is one single root complex with 3 root + ports available. Each port supports Gen2 lane x1. + +config PCIE_TANGO_SMP8759 + bool "Tango SMP8759 PCIe controller (DANGEROUS)" + depends on ARCH_TANGO && PCI_MSI && OF + depends on BROKEN + select PCI_HOST_COMMON + help + Say Y here to enable PCIe controller support for Sigma Designs + Tango SMP8759-based systems. + + Note: The SMP8759 controller multiplexes PCI config and MMIO + accesses, and Linux doesn't provide a way to serialize them. + This can lead to data corruption if drivers perform concurrent + config and MMIO accesses. 
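The config/MMIO hazard called out in the PCIE_TANGO_SMP8759 help text can be made concrete with a sketch. If one hardware window is multiplexed between config and MMIO cycles, every access would have to flip the mux under a shared lock, along these lines; all identifiers below (tango_mux_lock, tango_cfg_readl, the mux and win pointers) are hypothetical illustrations, not code from the driver:

#include <linux/io.h>
#include <linux/spinlock.h>

/* Illustrative sketch only -- every name here is hypothetical. */
static DEFINE_SPINLOCK(tango_mux_lock);

static u32 tango_cfg_readl(void __iomem *mux, void __iomem *win, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tango_mux_lock, flags);
	writel(1, mux);		/* route the shared window to config space */
	val = readl(win + off);	/* the config read itself */
	writel(0, mux);		/* route the window back to MMIO */
	spin_unlock_irqrestore(&tango_mux_lock, flags);

	return val;
}

The catch, and the reason the option depends on BROKEN, is that ordinary device drivers issue readl()/writel() on their BARs directly and never take such a lock, so nothing stops an MMIO access from racing a mux flip.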
+ config VMD depends on PCI_MSI && X86_64 && SRCU tristate "Intel Volume Management Device Driver" diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index cab879578003..12382785e02a 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -18,6 +18,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o +obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o +obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o obj-$(CONFIG_VMD) += vmd.o # The following drivers are for devices that use the generic ACPI diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 37d0bcd31f8a..5fb9b620ac78 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -886,12 +886,14 @@ static int advk_pcie_probe(struct platform_device *pdev) struct advk_pcie *pcie; struct resource *res; struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; int ret, irq; - pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL); - if (!pcie) + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); + if (!bridge) return -ENOMEM; + pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -929,14 +931,21 @@ static int advk_pcie_probe(struct platform_device *pdev) return ret; } - bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops, - pcie, &pcie->resources); - if (!bus) { + list_splice_init(&pcie->resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = 0; + bridge->ops = &advk_pcie_ops; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) { advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie); - return -ENOMEM; + return ret; } + bus = bridge->bus; + pci_bus_assign_resources(bus); list_for_each_entry(child, &bus->children, node) diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c index d26501c4145a..5162dffc102b 100644 --- a/drivers/pci/host/pci-ftpci100.c +++ b/drivers/pci/host/pci-ftpci100.c @@ -25,6 +25,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/bitops.h> #include <linux/irq.h> +#include <linux/clk.h> /* * Special configuration registers directly in the first few words @@ -37,6 +38,7 @@ #define PCI_CONFIG 0x28 /* PCI configuration command register */ #define PCI_DATA 0x2C +#define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */ #define FARADAY_PCI_PMC 0x40 /* Power management control */ #define FARADAY_PCI_PMCSR 0x44 /* Power management status */ #define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ @@ -45,6 +47,8 @@ #define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ #define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ +#define PCI_STATUS_66MHZ_CAPABLE BIT(21) + /* Bits 31..28 gives INTD..INTA status */ #define PCI_CTRL2_INTSTS_SHIFT 28 #define PCI_CTRL2_INTMASK_CMDERR BIT(27) @@ -117,6 +121,7 @@ struct faraday_pci { void __iomem *base; struct irq_domain *irqdomain; struct pci_bus *bus; + struct clk *bus_clk; }; static int faraday_res_to_memcfg(resource_size_t mem_base, @@ -178,12 +183,11 @@ static int faraday_res_to_memcfg(resource_size_t mem_base, return 0; } -static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, - int config, int size, u32 *value) +static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number, + unsigned int fn, int config, int 
size, + u32 *value) { - struct faraday_pci *p = bus->sysdata; - - writel(PCI_CONF_BUS(bus->number) | + writel(PCI_CONF_BUS(bus_number) | PCI_CONF_DEVICE(PCI_SLOT(fn)) | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | PCI_CONF_WHERE(config) | @@ -197,24 +201,28 @@ static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, else if (size == 2) *value = (*value >> (8 * (config & 3))) & 0xFFFF; + return PCIBIOS_SUCCESSFUL; +} + +static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, + int config, int size, u32 *value) +{ + struct faraday_pci *p = bus->sysdata; + dev_dbg(&bus->dev, "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); - return PCIBIOS_SUCCESSFUL; + return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value); } -static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, - int config, int size, u32 value) +static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number, + unsigned int fn, int config, int size, + u32 value) { - struct faraday_pci *p = bus->sysdata; int ret = PCIBIOS_SUCCESSFUL; - dev_dbg(&bus->dev, - "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", - PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); - - writel(PCI_CONF_BUS(bus->number) | + writel(PCI_CONF_BUS(bus_number) | PCI_CONF_DEVICE(PCI_SLOT(fn)) | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | PCI_CONF_WHERE(config) | @@ -238,6 +246,19 @@ static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, return ret; } +static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, + int config, int size, u32 value) +{ + struct faraday_pci *p = bus->sysdata; + + dev_dbg(&bus->dev, + "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", + PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); + + return faraday_raw_pci_write_config(p, bus->number, fn, config, size, + value); +} + static struct pci_ops faraday_pci_ops = { .read = faraday_pci_read_config, .write = faraday_pci_write_config, @@ -248,10 +269,10 @@ static void faraday_pci_ack_irq(struct irq_data *d) struct faraday_pci *p = irq_data_get_irq_chip_data(d); unsigned int reg; - faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg); + faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg); reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); - faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); + faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); } static void faraday_pci_mask_irq(struct irq_data *d) @@ -259,10 +280,10 @@ static void faraday_pci_mask_irq(struct irq_data *d) struct faraday_pci *p = irq_data_get_irq_chip_data(d); unsigned int reg; - faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg); + faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg); reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); - faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); + faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); } static void faraday_pci_unmask_irq(struct irq_data *d) @@ -270,10 +291,10 @@ static void faraday_pci_unmask_irq(struct irq_data *d) struct faraday_pci *p = irq_data_get_irq_chip_data(d); unsigned int reg; - faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg); + faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg); reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); reg |= BIT(irqd_to_hwirq(d) +
PCI_CTRL2_INTMASK_SHIFT); - faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); + faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); } static void faraday_pci_irq_handler(struct irq_desc *desc) @@ -282,7 +303,7 @@ static void faraday_pci_irq_handler(struct irq_desc *desc) struct irq_chip *irqchip = irq_desc_get_chip(desc); unsigned int irq_stat, reg, i; - faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg); + faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg); irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; chained_irq_enter(irqchip, desc); @@ -403,8 +424,8 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p, dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", i + 1, range.pci_addr, end, val); if (i <= 2) { - faraday_pci_write_config(p->bus, 0, confreg[i], - 4, val); + faraday_raw_pci_write_config(p, 0, 0, confreg[i], + 4, val); } else { dev_err(dev, "ignore extraneous dma-range %d\n", i); break; } @@ -428,11 +449,14 @@ static int faraday_pci_probe(struct platform_device *pdev) struct resource *mem; struct resource *io; struct pci_host_bridge *host; + struct clk *clk; + unsigned char max_bus_speed = PCI_SPEED_33MHz; + unsigned char cur_bus_speed = PCI_SPEED_33MHz; int ret; u32 val; LIST_HEAD(res); - host = pci_alloc_host_bridge(sizeof(*p)); + host = devm_pci_alloc_host_bridge(dev, sizeof(*p)); if (!host) return -ENOMEM; @@ -440,10 +464,30 @@ static int faraday_pci_probe(struct platform_device *pdev) host->ops = &faraday_pci_ops; host->busnr = 0; host->msi = NULL; + host->map_irq = of_irq_parse_and_map_pci; + host->swizzle_irq = pci_common_swizzle; p = pci_host_bridge_priv(host); host->sysdata = p; p->dev = dev; + /* Retrieve and enable optional clocks */ + clk = devm_clk_get(dev, "PCLK"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "could not prepare PCLK\n"); + return ret; + } + p->bus_clk = devm_clk_get(dev, "PCICLK"); + if (IS_ERR(p->bus_clk)) + return PTR_ERR(p->bus_clk); + ret = clk_prepare_enable(p->bus_clk); + if (ret) { + dev_err(dev, "could not prepare PCICLK\n"); + return ret; + } + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); p->base = devm_ioremap_resource(dev, regs); if (IS_ERR(p->base)) @@ -496,17 +540,8 @@ static int faraday_pci_probe(struct platform_device *pdev) val |= PCI_COMMAND_MEMORY; val |= PCI_COMMAND_MASTER; writel(val, p->base + PCI_CTRL); - - list_splice_init(&res, &host->windows); - ret = pci_register_host_bridge(host); - if (ret) { - dev_err(dev, "failed to register host: %d\n", ret); - return ret; - } - p->bus = host->bus; - /* Mask and clear all interrupts */ - faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); + faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); if (variant->cascaded_irq) { ret = faraday_pci_setup_cascaded_irq(p); if (ret) { @@ -515,12 +550,48 @@ static int faraday_pci_probe(struct platform_device *pdev) } } + /* Check bus clock if we can gear up to 66 MHz */ + if (!IS_ERR(p->bus_clk)) { + unsigned long rate; + u32 val; + + faraday_raw_pci_read_config(p, 0, 0, + FARADAY_PCI_STATUS_CMD, 4, &val); + rate = clk_get_rate(p->bus_clk); + + if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) { + dev_info(dev, "33MHz bus is 66MHz capable\n"); + max_bus_speed = PCI_SPEED_66MHz; + ret = clk_set_rate(p->bus_clk, 66000000); + if (ret) + dev_err(dev, "failed to set bus clock\n"); + } else { + dev_info(dev, "33MHz only bus\n"); + max_bus_speed = PCI_SPEED_33MHz; + } + + /* 
Bumping the clock may fail so read back the rate */ + rate = clk_get_rate(p->bus_clk); + if (rate == 33000000) + cur_bus_speed = PCI_SPEED_33MHz; + if (rate == 66000000) + cur_bus_speed = PCI_SPEED_66MHz; + } + ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); if (ret) return ret; - pci_scan_child_bus(p->bus); - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + list_splice_init(&res, &host->windows); + ret = pci_scan_root_bus_bridge(host); + if (ret) { + dev_err(dev, "failed to scan host: %d\n", ret); + return ret; + } + p->bus = host->bus; + p->bus->max_bus_speed = max_bus_speed; + p->bus->cur_bus_speed = cur_bus_speed; + pci_bus_assign_resources(p->bus); pci_bus_add_devices(p->bus); pci_free_resource_list(&res); diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c index e9a53bae1c25..44a47d4f0b8f 100644 --- a/drivers/pci/host/pci-host-common.c +++ b/drivers/pci/host/pci-host-common.c @@ -117,8 +117,14 @@ int pci_host_common_probe(struct platform_device *pdev, struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; struct pci_config_window *cfg; struct list_head resources; + int ret; + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) + return -ENOMEM; type = of_get_property(np, "device_type", NULL); if (!type || strcmp(type, "pci")) { @@ -138,16 +144,21 @@ int pci_host_common_probe(struct platform_device *pdev, if (!pci_has_flag(PCI_PROBE_ONLY)) pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); - bus = pci_scan_root_bus(dev, cfg->busr.start, &ops->pci_ops, cfg, - &resources); - if (!bus) { - dev_err(dev, "Scanning rootbus failed"); - return -ENODEV; + list_splice_init(&resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = cfg; + bridge->busnr = cfg->busr.start; + bridge->ops = &ops->pci_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) { + dev_err(dev, "Scanning root bridge failed"); + return ret; } -#ifdef CONFIG_ARM - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); -#endif + bus = bridge->bus; /* * We insert PCI resources into the iomem_resource and diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 84936383e269..415dcc69a502 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -64,22 +64,39 @@ * major version. */ -#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (major))) +#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor))) #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16) #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff) -enum { - PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), - PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 +enum pci_protocol_version_t { + PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */ + PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */ }; #define CPU_AFFINITY_ALL -1ULL + +/* + * Supported protocol versions in the order of probing - highest go + * first. + */ +static enum pci_protocol_version_t pci_protocol_versions[] = { + PCI_PROTOCOL_VERSION_1_2, + PCI_PROTOCOL_VERSION_1_1, +}; + +/* + * Protocol version negotiated by hv_pci_protocol_negotiation(). 
+ */ +static enum pci_protocol_version_t pci_protocol_version; + #define PCI_CONFIG_MMIO_LENGTH 0x2000 #define CFG_PAGE_OFFSET 0x1000 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) #define MAX_SUPPORTED_MSI_MESSAGES 0x400 +#define STATUS_REVISION_MISMATCH 0xC0000059 + /* * Message Types */ @@ -109,6 +126,9 @@ enum pci_message_type { PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, + PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, + PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, + PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ PCI_MESSAGE_MAXIMUM }; @@ -179,6 +199,30 @@ struct hv_msi_desc { } __packed; /** + * struct hv_msi_desc2 - 1.2 version of hv_msi_desc + * @vector: IDT entry + * @delivery_mode: As defined in Intel's Programmer's + * Reference Manual, Volume 3, Chapter 8. + * @vector_count: Number of contiguous entries in the + * Interrupt Descriptor Table that are + * occupied by this Message-Signaled + * Interrupt. For "MSI", as first defined + * in PCI 2.2, this can be between 1 and + * 32. For "MSI-X," as first defined in PCI + * 3.0, this must be 1, as each MSI-X table + * entry would have its own descriptor. + * @processor_count: number of bits enabled in array. + * @processor_array: All the target virtual processors. + */ +struct hv_msi_desc2 { + u8 vector; + u8 delivery_mode; + u16 vector_count; + u16 processor_count; + u16 processor_array[32]; +} __packed; + +/** * struct tran_int_desc * @reserved: unused, padding * @vector_count: same as in hv_msi_desc @@ -245,7 +289,7 @@ struct pci_packet { struct pci_version_request { struct pci_message message_type; - enum pci_message_type protocol_version; + u32 protocol_version; } __packed; /* @@ -294,6 +338,14 @@ struct pci_resources_assigned { u32 reserved[4]; } __packed; +struct pci_resources_assigned2 { + struct pci_message message_type; + union win_slot_encoding wslot; + u8 memory_range[0x14][6]; /* not used here */ + u32 msi_descriptor_count; + u8 reserved[70]; +} __packed; + struct pci_create_interrupt { struct pci_message message_type; union win_slot_encoding wslot; @@ -306,6 +358,12 @@ struct pci_create_int_response { struct tran_int_desc int_desc; } __packed; +struct pci_create_interrupt2 { + struct pci_message message_type; + union win_slot_encoding wslot; + struct hv_msi_desc2 int_desc; +} __packed; + struct pci_delete_interrupt { struct pci_message message_type; union win_slot_encoding wslot; @@ -331,17 +389,42 @@ static int pci_ring_size = (4 * PAGE_SIZE); #define HV_PARTITION_ID_SELF ((u64)-1) #define HVCALL_RETARGET_INTERRUPT 0x7e -struct retarget_msi_interrupt { - u64 partition_id; /* use "self" */ - u64 device_id; +struct hv_interrupt_entry { u32 source; /* 1 for MSI(-X) */ u32 reserved1; u32 address; u32 data; - u64 reserved2; +}; + +#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */ + +struct hv_vp_set { + u64 format; /* 0 (HvGenericSetSparse4k) */ + u64 valid_banks; + u64 masks[HV_VP_SET_BANK_COUNT_MAX]; +}; + +/* + * flags for hv_device_interrupt_target.flags + */ +#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 +#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 + +struct hv_device_interrupt_target { u32 vector; u32 flags; - u64 vp_mask; + union { + u64 vp_mask; + struct hv_vp_set vp_set; + }; +}; + +struct retarget_msi_interrupt { + u64 partition_id; /* use "self" */ + u64 device_id; + struct hv_interrupt_entry int_entry; + u64 
reserved2; + struct hv_device_interrupt_target int_target; } __packed; /* @@ -382,7 +465,10 @@ struct hv_pcibus_device { struct msi_domain_info msi_info; struct msi_controller msi_chip; struct irq_domain *irq_domain; + + /* hypercall arg, must not cross page boundary */ struct retarget_msi_interrupt retarget_msi_interrupt_params; + spinlock_t retarget_msi_interrupt_lock; }; @@ -476,6 +562,52 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev, static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); + +/* + * Temporary CPU to vCPU mapping to address transitioning + * vmbus_cpu_number_to_vp_number() being migrated to + * hv_cpu_number_to_vp_number() in a separate patch. Once that patch + * has been picked up in the main line, remove this code here and use + * the official code. + */ +static struct hv_tmpcpumap +{ + bool initialized; + u32 vp_index[NR_CPUS]; +} hv_tmpcpumap; + +static void hv_tmpcpumap_init_cpu(void *_unused) +{ + int cpu = smp_processor_id(); + u64 vp_index; + + hv_get_vp_index(vp_index); + + hv_tmpcpumap.vp_index[cpu] = vp_index; +} + +static void hv_tmpcpumap_init(void) +{ + if (hv_tmpcpumap.initialized) + return; + + memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index)); + on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true); + hv_tmpcpumap.initialized = true; +} + +/** + * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr + * + * Remove once vmbus_cpu_number_to_vp_number() has been converted to + * hv_cpu_number_to_vp_number() and replace callers appropriately. + */ +static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu) +{ + return hv_tmpcpumap.vp_index[cpu]; +} + + /** * devfn_to_wslot() - Convert from Linux PCI slot to Windows * @devfn: The Linux representation of PCI slot @@ -786,8 +918,11 @@ static void hv_irq_unmask(struct irq_data *data) struct cpumask *dest; struct pci_bus *pbus; struct pci_dev *pdev; - int cpu; unsigned long flags; + u32 var_size = 0; + int cpu_vmbus; + int cpu; + u64 res; dest = irq_data_get_affinity_mask(data); pdev = msi_desc_to_pci_dev(msi_desc); @@ -799,23 +934,74 @@ static void hv_irq_unmask(struct irq_data *data) params = &hbus->retarget_msi_interrupt_params; memset(params, 0, sizeof(*params)); params->partition_id = HV_PARTITION_ID_SELF; - params->source = 1; /* MSI(-X) */ - params->address = msi_desc->msg.address_lo; - params->data = msi_desc->msg.data; + params->int_entry.source = 1; /* MSI(-X) */ + params->int_entry.address = msi_desc->msg.address_lo; + params->int_entry.data = msi_desc->msg.data; params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | (hbus->hdev->dev_instance.b[4] << 16) | (hbus->hdev->dev_instance.b[7] << 8) | (hbus->hdev->dev_instance.b[6] & 0xf8) | PCI_FUNC(pdev->devfn); - params->vector = cfg->vector; + params->int_target.vector = cfg->vector; + + /* + * Honoring apic->irq_delivery_mode set to dest_Fixed by + * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a + * spurious interrupt storm. Not doing so does not seem to have a + * negative effect (yet?). + */ + + if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) { + /* + * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the + * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides + * with >64 VP support. + * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED + * is not sufficient for this hypercall. 
+ */ + params->int_target.flags |= + HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; + params->int_target.vp_set.valid_banks = + (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1; + + /* + * var-sized hypercall, var-size starts after vp_mask (thus + * vp_set.format does not count, but vp_set.valid_banks does). + */ + var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; - for_each_cpu_and(cpu, dest, cpu_online_mask) - params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu)); + for_each_cpu_and(cpu, dest, cpu_online_mask) { + cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu); - hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL); + if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { + dev_err(&hbus->hdev->device, + "too high CPU %d", cpu_vmbus); + res = 1; + goto exit_unlock; + } + params->int_target.vp_set.masks[cpu_vmbus / 64] |= + (1ULL << (cpu_vmbus & 63)); + } + } else { + for_each_cpu_and(cpu, dest, cpu_online_mask) { + params->int_target.vp_mask |= + (1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu)); + } + } + + res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), + params, NULL); + +exit_unlock: spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); + if (res) { + dev_err(&hbus->hdev->device, + "%s() failed: %#llx", __func__, res); + return; + } + pci_msi_unmask_irq(data); } @@ -836,6 +1022,53 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp, complete(&comp_pkt->comp_pkt.host_event); } +static u32 hv_compose_msi_req_v1( + struct pci_create_interrupt *int_pkt, struct cpumask *affinity, + u32 slot, u8 vector) +{ + int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; + int_pkt->wslot.slot = slot; + int_pkt->int_desc.vector = vector; + int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.delivery_mode = + (apic->irq_delivery_mode == dest_LowestPrio) ? + dest_LowestPrio : dest_Fixed; + + /* + * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in + * hv_irq_unmask(). + */ + int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; + + return sizeof(*int_pkt); +} + +static u32 hv_compose_msi_req_v2( + struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, + u32 slot, u8 vector) +{ + int cpu; + + int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; + int_pkt->wslot.slot = slot; + int_pkt->int_desc.vector = vector; + int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.delivery_mode = + (apic->irq_delivery_mode == dest_LowestPrio) ? + dest_LowestPrio : dest_Fixed; + + /* + * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten + * by subsequent retarget in hv_irq_unmask(). 
+ */ + cpu = cpumask_first_and(affinity, cpu_online_mask); + int_pkt->int_desc.processor_array[0] = + hv_tmp_cpu_nr_to_vp_nr(cpu); + int_pkt->int_desc.processor_count = 1; + + return sizeof(*int_pkt); +} + /** * hv_compose_msi_msg() - Supplies a valid MSI address/data * @data: Everything about this MSI @@ -854,15 +1087,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; - struct pci_create_interrupt *int_pkt; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; - struct cpumask *affinity; struct { - struct pci_packet pkt; - u8 buffer[sizeof(struct pci_create_interrupt)]; - } ctxt; - int cpu; + struct pci_packet pci_pkt; + union { + struct pci_create_interrupt v1; + struct pci_create_interrupt2 v2; + } int_pkts; + } __packed ctxt; + + u32 size; int ret; pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); @@ -885,36 +1120,44 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) memset(&ctxt, 0, sizeof(ctxt)); init_completion(&comp.comp_pkt.host_event); - ctxt.pkt.completion_func = hv_pci_compose_compl; - ctxt.pkt.compl_ctxt = &comp; - int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message; - int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; - int_pkt->wslot.slot = hpdev->desc.win_slot.slot; - int_pkt->int_desc.vector = cfg->vector; - int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = - (apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0; + ctxt.pci_pkt.completion_func = hv_pci_compose_compl; + ctxt.pci_pkt.compl_ctxt = &comp; + + switch (pci_protocol_version) { + case PCI_PROTOCOL_VERSION_1_1: + size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, + irq_data_get_affinity_mask(data), + hpdev->desc.win_slot.slot, + cfg->vector); + break; - /* - * This bit doesn't have to work on machines with more than 64 - * processors because Hyper-V only supports 64 in a guest. - */ - affinity = irq_data_get_affinity_mask(data); - if (cpumask_weight(affinity) >= 32) { - int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; - } else { - for_each_cpu_and(cpu, affinity, cpu_online_mask) { - int_pkt->int_desc.cpu_mask |= - (1ULL << vmbus_cpu_number_to_vp_number(cpu)); - } + case PCI_PROTOCOL_VERSION_1_2: + size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, + irq_data_get_affinity_mask(data), + hpdev->desc.win_slot.slot, + cfg->vector); + break; + + default: + /* As we only negotiate protocol versions known to this driver, + * this path should never be hit. However, it is not a hot + * path, so we print a message to aid future updates. + */ + dev_err(&hbus->hdev->device, + "Unexpected vPCI protocol, update driver."); + goto free_int_desc; } - ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, - sizeof(*int_pkt), (unsigned long)&ctxt.pkt, + ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts, + size, (unsigned long)&ctxt.pci_pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) + if (ret) { + dev_err(&hbus->hdev->device, + "Sending request for interrupt failed: 0x%x", + ret); goto free_int_desc; + } wait_for_completion(&comp.comp_pkt.host_event); @@ -1513,12 +1756,12 @@ static void pci_devices_present_work(struct work_struct *work) put_pcichild(hpdev, hv_pcidev_ref_initial); } - switch(hbus->state) { + switch (hbus->state) { case hv_pcibus_installed: /* - * Tell the core to rescan bus - * because there may have been changes.
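(Restating the composition pattern just shown with toy types, not the driver's real messages: a union reserves buffer space for the largest protocol variant, and each compose helper returns the exact byte count that goes on the wire.)

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct req_v1 { uint32_t type; uint32_t slot; };
struct req_v2 { uint32_t type; uint32_t slot; uint32_t vcpu; };

union req { struct req_v1 v1; struct req_v2 v2; };

static size_t compose_req(union req *r, int version, uint32_t slot)
{
	memset(r, 0, sizeof(*r));	/* buffer sized for the largest variant */
	if (version >= 2) {
		r->v2.type = 2;
		r->v2.slot = slot;
		return sizeof(r->v2);	/* send only the v2 payload */
	}
	r->v1.type = 1;
	r->v1.slot = slot;
	return sizeof(r->v1);		/* older hosts get the shorter message */
}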
- */ + * Tell the core to rescan bus + * because there may have been changes. + */ pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); pci_unlock_rescan_remove(); @@ -1800,6 +2043,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) struct hv_pci_compl comp_pkt; struct pci_packet *pkt; int ret; + int i; /* * Initiate the handshake with the host and negotiate @@ -1816,26 +2060,44 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) pkt->compl_ctxt = &comp_pkt; version_req = (struct pci_version_request *)&pkt->message; version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; - version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT; - ret = vmbus_sendpacket(hdev->channel, version_req, - sizeof(struct pci_version_request), - (unsigned long)pkt, VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); - if (ret) - goto exit; + for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) { + version_req->protocol_version = pci_protocol_versions[i]; + ret = vmbus_sendpacket(hdev->channel, version_req, + sizeof(struct pci_version_request), + (unsigned long)pkt, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (ret) { + dev_err(&hdev->device, + "PCI Pass-through VSP failed sending version request: %#x", + ret); + goto exit; + } - wait_for_completion(&comp_pkt.host_event); + wait_for_completion(&comp_pkt.host_event); - if (comp_pkt.completion_status < 0) { - dev_err(&hdev->device, - "PCI Pass-through VSP failed version request %x\n", - comp_pkt.completion_status); - ret = -EPROTO; - goto exit; + if (comp_pkt.completion_status >= 0) { + pci_protocol_version = pci_protocol_versions[i]; + dev_info(&hdev->device, + "PCI VMBus probing: Using version %#x\n", + pci_protocol_version); + goto exit; + } + + if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { + dev_err(&hdev->device, + "PCI Pass-through VSP failed version request: %#x", + comp_pkt.completion_status); + ret = -EPROTO; + goto exit; + } + + reinit_completion(&comp_pkt.host_event); } - ret = 0; + dev_err(&hdev->device, + "PCI Pass-through VSP failed to find a supported version"); + ret = -EPROTO; exit: kfree(pkt); @@ -2094,13 +2356,18 @@ static int hv_send_resources_allocated(struct hv_device *hdev) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); struct pci_resources_assigned *res_assigned; + struct pci_resources_assigned2 *res_assigned2; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev; struct pci_packet *pkt; + size_t size_res; u32 wslot; int ret; - pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL); + size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) + ?
sizeof(*res_assigned) : sizeof(*res_assigned2); + + pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL); if (!pkt) return -ENOMEM; @@ -2111,22 +2378,30 @@ static int hv_send_resources_allocated(struct hv_device *hdev) if (!hpdev) continue; - memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned)); + memset(pkt, 0, sizeof(*pkt) + size_res); init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; - res_assigned = (struct pci_resources_assigned *)&pkt->message; - res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; - res_assigned->wslot.slot = hpdev->desc.win_slot.slot; + if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) { + res_assigned = + (struct pci_resources_assigned *)&pkt->message; + res_assigned->message_type.type = + PCI_RESOURCES_ASSIGNED; + res_assigned->wslot.slot = hpdev->desc.win_slot.slot; + } else { + res_assigned2 = + (struct pci_resources_assigned2 *)&pkt->message; + res_assigned2->message_type.type = + PCI_RESOURCES_ASSIGNED2; + res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; + } put_pcichild(hpdev, hv_pcidev_ref_by_slot); - ret = vmbus_sendpacket( - hdev->channel, &pkt->message, - sizeof(*res_assigned), - (unsigned long)pkt, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket(hdev->channel, &pkt->message, + size_res, (unsigned long)pkt, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) break; @@ -2204,11 +2479,19 @@ static int hv_pci_probe(struct hv_device *hdev, struct hv_pcibus_device *hbus; int ret; - hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); + /* + * hv_pcibus_device contains the hypercall arguments for retargeting in + * hv_irq_unmask(). Those must not cross a page boundary. + */ + BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE); + + hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL); if (!hbus) return -ENOMEM; hbus->state = hv_pcibus_init; + hv_tmpcpumap_init(); + /* * The PCI bus "domain" is what is called "segment" in ACPI and * other specs. 
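(The negotiation loop in hv_pci_protocol_negotiation() above boils down to a reusable pattern: walk a newest-first version table, keep the first version the host accepts, retry only on a revision mismatch, and treat any other failure as fatal. A sketch with invented names and status codes:)

enum { STATUS_OK = 0, STATUS_MISMATCH = -2 };

static const unsigned int versions[] = { 0x10002, 0x10001 }; /* newest first */

static int negotiate(int (*try_version)(unsigned int), unsigned int *agreed)
{
	unsigned int i;

	for (i = 0; i < sizeof(versions) / sizeof(versions[0]); i++) {
		int status = try_version(versions[i]);

		if (status == STATUS_OK) {
			*agreed = versions[i];	/* first acceptable version wins */
			return 0;
		}
		if (status != STATUS_MISMATCH)
			return status;		/* hard failure: stop probing */
		/* mismatch: host is older, fall through to the next version */
	}
	return -1;				/* no version in common */
}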
Pull it from the instance ID, to get something @@ -2308,7 +2591,7 @@ free_config: close: vmbus_close(hdev->channel); free_bus: - kfree(hbus); + free_page((unsigned long)hbus); return ret; } @@ -2386,7 +2669,7 @@ static int hv_pci_remove(struct hv_device *hdev) irq_domain_free_fwnode(hbus->sysdata.fwnode); put_hvpcibus(hbus); wait_for_completion(&hbus->remove_event); - kfree(hbus); + free_page((unsigned long)hbus); return 0; } diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 85348590848b..6f879685fedd 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -429,7 +429,7 @@ static int rcar_pci_probe(struct platform_device *pdev) return 0; } -static struct of_device_id rcar_pci_of_match[] = { +static const struct of_device_id rcar_pci_of_match[] = { { .compatible = "renesas,pci-r8a7790", }, { .compatible = "renesas,pci-r8a7791", }, { .compatible = "renesas,pci-r8a7794", }, diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 2618f875a600..b3722b7709df 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -233,8 +233,8 @@ struct tegra_msi { struct msi_controller chip; DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; - unsigned long pages; struct mutex lock; + u64 phys; int irq; }; @@ -1448,9 +1448,8 @@ static int tegra_msi_setup_irq(struct msi_controller *chip, irq_set_msi_desc(irq, desc); - msg.address_lo = virt_to_phys((void *)msi->pages); - /* 32 bit address only */ - msg.address_hi = 0; + msg.address_lo = lower_32_bits(msi->phys); + msg.address_hi = upper_32_bits(msi->phys); msg.data = hwirq; pci_write_msi_msg(irq, &msg); @@ -1499,7 +1498,6 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) const struct tegra_pcie_soc *soc = pcie->soc; struct tegra_msi *msi = &pcie->msi; struct device *dev = pcie->dev; - unsigned long base; int err; u32 reg; @@ -1531,12 +1529,25 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) goto err; } - /* setup AFI/FPCI range */ - msi->pages = __get_free_pages(GFP_KERNEL, 0); - base = virt_to_phys((void *)msi->pages); + /* + * The PCI host bridge on Tegra contains some logic that intercepts + * MSI writes, which means that the MSI target address doesn't have + * to point to actual physical memory. Rather than allocating one 4 + * KiB page of system memory that's never used, we can simply pick + * an arbitrary address within an area reserved for system memory + * in the FPCI address map. + * + * However, in order to avoid confusion, we pick an address that + * doesn't map to physical memory. The FPCI address map reserves a + * 1012 GiB region for system memory and memory-mapped I/O. Since + * none of the Tegra SoCs that contain this PCI host bridge can + * address more than 16 GiB of system memory, the last 4 KiB of + * these 1012 GiB is a good candidate. 
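(The arithmetic behind the constant picked below can be checked at compile time; the check assumes the 1012 GiB region is based at FPCI address 0, which is an assumption made here, not a statement from the patch:)

/* 1012 GiB = 1012 << 30 = 0xfd_0000_0000 bytes, so the last 4 KiB page
 * of such a region starts at 0xfd_0000_0000 - 0x1000 = 0xfc_ffff_f000. */
_Static_assert(1012ULL << 30 == 0xfd00000000ULL, "1012 GiB in bytes");
_Static_assert((1012ULL << 30) - 0x1000 == 0xfcfffff000ULL,
	       "last 4 KiB page of the 1012 GiB region");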
+ */ + msi->phys = 0xfcfffff000; - afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); - afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST); + afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); + afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); /* this register is in 4K increments */ afi_writel(pcie, 1, AFI_MSI_BAR_SZ); @@ -1585,8 +1596,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) afi_writel(pcie, 0, AFI_MSI_EN_VEC6); afi_writel(pcie, 0, AFI_MSI_EN_VEC7); - free_pages(msi->pages, 0); - if (msi->irq > 0) free_irq(msi->irq, pcie); @@ -2238,7 +2247,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) struct pci_bus *child; int err; - host = pci_alloc_host_bridge(sizeof(*pcie)); + host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!host) return -ENOMEM; @@ -2284,16 +2293,15 @@ static int tegra_pcie_probe(struct platform_device *pdev) host->busnr = pcie->busn.start; host->dev.parent = &pdev->dev; host->ops = &tegra_pcie_ops; + host->map_irq = tegra_pcie_map_irq; + host->swizzle_irq = pci_common_swizzle; - err = pci_register_host_bridge(host); + err = pci_scan_root_bus_bridge(host); if (err < 0) { dev_err(dev, "failed to register host: %d\n", err); goto disable_msi; } - pci_scan_child_bus(host->bus); - - pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq); pci_bus_size_bridges(host->bus); pci_bus_assign_resources(host->bus); diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index 9281eee2d000..d417acab0ecf 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c @@ -120,30 +120,35 @@ out_release_res: static int versatile_pci_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct resource *res; int ret, i, myslot = -1; u32 val; void __iomem *local_pci_cfg_base; struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; LIST_HEAD(pci_res); + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) + return -ENOMEM; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - versatile_pci_base = devm_ioremap_resource(&pdev->dev, res); + versatile_pci_base = devm_ioremap_resource(dev, res); if (IS_ERR(versatile_pci_base)) return PTR_ERR(versatile_pci_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res); + versatile_cfg_base[0] = devm_ioremap_resource(dev, res); if (IS_ERR(versatile_cfg_base[0])) return PTR_ERR(versatile_cfg_base[0]); res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev, - res); + versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(versatile_cfg_base[1])) return PTR_ERR(versatile_cfg_base[1]); - ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res); + ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res); if (ret) return ret; @@ -159,7 +164,7 @@ static int versatile_pci_probe(struct platform_device *pdev) } } if (myslot == -1) { - dev_err(&pdev->dev, "Cannot find PCI core!\n"); + dev_err(dev, "Cannot find PCI core!\n"); return -EIO; } /* @@ -167,7 +172,7 @@ static int versatile_pci_probe(struct platform_device *pdev) */ pci_slot_ignore |= (1 << myslot); - dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot); + dev_info(dev, "PCI core found (slot %d)\n", myslot); writel(myslot, PCI_SELFID); local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); @@ -199,11 +204,20 @@ static int versatile_pci_probe(struct platform_device *pdev) 
pci_add_flags(PCI_ENABLE_PROC_DOMAINS); pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC); - bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, NULL, &pci_res); - if (!bus) - return -ENOMEM; + list_splice_init(&pci_res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = NULL; + bridge->busnr = 0; + bridge->ops = &pci_versatile_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) + return ret; + + bus = bridge->bus; - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); pci_assign_unassigned_bus_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 8cae013e7188..bd897479a215 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -636,13 +636,16 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) struct xgene_pcie_port *port; resource_size_t iobase = 0; struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; int ret; LIST_HEAD(res); - port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); - if (!port) + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); + if (!bridge) return -ENOMEM; + port = pci_host_bridge_priv(bridge); + port->node = of_node_get(dn); port->dev = dev; @@ -670,11 +673,19 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (ret) goto error; - bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res); - if (!bus) { - ret = -ENOMEM; + list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = port; + bridge->busnr = 0; + bridge->ops = &xgene_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) goto error; - } + + bus = bridge->bus; pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index 75ec5cea26f6..4ea4f8f5dc77 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -579,12 +579,14 @@ static int altera_pcie_probe(struct platform_device *pdev) struct altera_pcie *pcie; struct pci_bus *bus; struct pci_bus *child; + struct pci_host_bridge *bridge; int ret; - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) return -ENOMEM; + pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; ret = altera_pcie_parse_dt(pcie); @@ -613,12 +615,20 @@ static int altera_pcie_probe(struct platform_device *pdev) cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); altera_pcie_host_init(pcie); - bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops, - pcie, &pcie->resources); - if (!bus) - return -ENOMEM; + list_splice_init(&pcie->resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = pcie->root_bus_nr; + bridge->ops = &altera_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) + return ret; + + bus = bridge->bus; - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); pci_assign_unassigned_bus_resources(bus); /* Configure PCI Express setting. 
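(This is now the third appearance of the same conversion, versatile, xgene and altera above, and it recurs below for iproc, rcar, rockchip, nwl and xilinx: allocate the host bridge with the driver's private data in tow, describe bus number, ops and interrupt mapping on the bridge, then let the core scan. A condensed, hypothetical skeleton of the pattern; "foo" is a placeholder driver and error handling is trimmed:)

static struct pci_ops foo_pcie_ops;	/* .map_bus/.read/.write elsewhere */

struct foo_pcie { int dummy; };		/* stand-in for driver private state */

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct foo_pcie *pcie;
	LIST_HEAD(res);
	int ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;
	pcie = pci_host_bridge_priv(bridge);	/* private data rides along */

	/* ... hardware init fills "res" with the bridge windows ... */

	list_splice_init(&res, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = 0;
	bridge->ops = &foo_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci; /* replaces pci_fixup_irqs() */
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret < 0)
		return ret;

	pci_assign_unassigned_bus_resources(bridge->bus);
	pci_bus_add_devices(bridge->bus);
	return 0;
}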
*/ diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c index 384c27e664fe..f03d5e3612e9 100644 --- a/drivers/pci/host/pcie-iproc-bcma.c +++ b/drivers/pci/host/pcie-iproc-bcma.c @@ -45,12 +45,15 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev) struct device *dev = &bdev->dev; struct iproc_pcie *pcie; LIST_HEAD(resources); + struct pci_host_bridge *bridge; int ret; - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) return -ENOMEM; + pcie = pci_host_bridge_priv(bridge); + pcie->dev = dev; pcie->type = IPROC_PCIE_PAXB_BCMA; diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index 90d2bdd94e41..22531190bc40 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -52,12 +52,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) struct resource reg; resource_size_t iobase = 0; LIST_HEAD(resources); + struct pci_host_bridge *bridge; int ret; - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) return -ENOMEM; + pcie = pci_host_bridge_priv(bridge); + pcie->dev = dev; pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index 0f39bd2a04cb..c57486348856 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -452,14 +452,13 @@ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, * Note access to the configuration registers are protected at the higher layer * by 'pci_lock' in drivers/pci/access.c */ -static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, +static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, + int busno, unsigned int devfn, int where) { - struct iproc_pcie *pcie = iproc_data(bus); unsigned slot = PCI_SLOT(devfn); unsigned fn = PCI_FUNC(devfn); - unsigned busno = bus->number; u32 val; u16 offset; @@ -499,6 +498,58 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, return (pcie->base + offset); } +static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus, + unsigned int devfn, + int where) +{ + return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn, + where); +} + +static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, + unsigned int devfn, int where, + int size, u32 *val) +{ + void __iomem *addr; + + addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + *val = readl(addr); + + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} + +static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, + unsigned int devfn, int where, + int size, u32 val) +{ + void __iomem *addr; + u32 mask, tmp; + + addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); + if (!addr) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (size == 4) { + writel(val, addr); + return PCIBIOS_SUCCESSFUL; + } + + mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); + tmp = readl(addr) & mask; + tmp |= val << ((where & 0x3) * 8); + writel(tmp, addr); + + return PCIBIOS_SUCCESSFUL; +} + static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { @@ -524,7 +575,7 @@ static int iproc_pcie_config_write32(struct pci_bus 
*bus, unsigned int devfn, } static struct pci_ops iproc_pcie_ops = { - .map_bus = iproc_pcie_map_cfg_bus, + .map_bus = iproc_pcie_bus_map_cfg_bus, .read = iproc_pcie_config_read32, .write = iproc_pcie_config_write32, }; @@ -556,12 +607,11 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie) msleep(100); } -static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) +static int iproc_pcie_check_link(struct iproc_pcie *pcie) { struct device *dev = pcie->dev; - u8 hdr_type; - u32 link_ctrl, class, val; - u16 pos = PCI_EXP_CAP, link_status; + u32 hdr_type, link_ctrl, link_status, class, val; + u16 pos = PCI_EXP_CAP; bool link_is_active = false; /* @@ -578,7 +628,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) } /* make sure we are not in EP mode */ - pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type); + iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); return -EFAULT; @@ -588,13 +638,16 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c #define PCI_CLASS_BRIDGE_MASK 0xffff00 #define PCI_CLASS_BRIDGE_SHIFT 8 - pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class); + iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, + 4, &class); class &= ~PCI_CLASS_BRIDGE_MASK; class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); - pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class); + iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, + 4, class); /* check link status to see if link is active */ - pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status); + iproc_pci_raw_config_read32(pcie, 0, pos + PCI_EXP_LNKSTA, + 2, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW) link_is_active = true; @@ -603,20 +656,21 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) #define PCI_TARGET_LINK_SPEED_MASK 0xf #define PCI_TARGET_LINK_SPEED_GEN2 0x2 #define PCI_TARGET_LINK_SPEED_GEN1 0x1 - pci_bus_read_config_dword(bus, 0, - pos + PCI_EXP_LNKCTL2, + iproc_pci_raw_config_read32(pcie, 0, + pos + PCI_EXP_LNKCTL2, 4, &link_ctrl); if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == PCI_TARGET_LINK_SPEED_GEN2) { link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; - pci_bus_write_config_dword(bus, 0, - pos + PCI_EXP_LNKCTL2, - link_ctrl); + iproc_pci_raw_config_write32(pcie, 0, + pos + PCI_EXP_LNKCTL2, + 4, link_ctrl); msleep(100); - pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, - &link_status); + iproc_pci_raw_config_read32(pcie, 0, + pos + PCI_EXP_LNKSTA, + 2, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW) link_is_active = true; } @@ -1205,7 +1259,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) struct device *dev; int ret; void *sysdata; - struct pci_bus *bus, *child; + struct pci_bus *child; + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); dev = pcie->dev; @@ -1252,18 +1307,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) sysdata = pcie; #endif - bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res); - if (!bus) { - dev_err(dev, "unable to create PCI root bus\n"); - ret = -ENOMEM; - goto err_power_off_phy; - } - pcie->root_bus = bus; - - ret = iproc_pcie_check_link(pcie, bus); + ret = iproc_pcie_check_link(pcie); if (ret) { dev_err(dev, 
"no PCIe EP device detected\n"); - goto err_rm_root_bus; + goto err_power_off_phy; } iproc_pcie_enable(pcie); @@ -1272,23 +1319,31 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) if (iproc_pcie_msi_enable(pcie)) dev_info(dev, "not using iProc MSI\n"); - pci_scan_child_bus(bus); - pci_assign_unassigned_bus_resources(bus); + list_splice_init(res, &host->windows); + host->busnr = 0; + host->dev.parent = dev; + host->ops = &iproc_pcie_ops; + host->sysdata = sysdata; + host->map_irq = pcie->map_irq; + host->swizzle_irq = pci_common_swizzle; - if (pcie->map_irq) - pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); + ret = pci_scan_root_bus_bridge(host); + if (ret < 0) { + dev_err(dev, "failed to scan host: %d\n", ret); + goto err_power_off_phy; + } - list_for_each_entry(child, &bus->children, node) + pci_assign_unassigned_bus_resources(host->bus); + + pcie->root_bus = host->bus; + + list_for_each_entry(child, &host->bus->children, node) pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); + pci_bus_add_devices(host->bus); return 0; -err_rm_root_bus: - pci_stop_root_bus(bus); - pci_remove_root_bus(bus); - err_power_off_phy: phy_power_off(pcie->phy); err_exit_phy: diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c new file mode 100644 index 000000000000..5a9d8589ea0b --- /dev/null +++ b/drivers/pci/host/pcie-mediatek.c @@ -0,0 +1,554 @@ +/* + * MediaTek PCIe host controller driver. + * + * Copyright (c) 2017 MediaTek Inc. + * Author: Ryder Lee <ryder.lee@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +/* PCIe shared registers */ +#define PCIE_SYS_CFG 0x00 +#define PCIE_INT_ENABLE 0x0c +#define PCIE_CFG_ADDR 0x20 +#define PCIE_CFG_DATA 0x24 + +/* PCIe per port registers */ +#define PCIE_BAR0_SETUP 0x10 +#define PCIE_CLASS 0x34 +#define PCIE_LINK_STATUS 0x50 + +#define PCIE_PORT_INT_EN(x) BIT(20 + (x)) +#define PCIE_PORT_PERST(x) BIT(1 + (x)) +#define PCIE_PORT_LINKUP BIT(0) +#define PCIE_BAR_MAP_MAX GENMASK(31, 16) + +#define PCIE_BAR_ENABLE BIT(0) +#define PCIE_REVISION_ID BIT(0) +#define PCIE_CLASS_CODE (0x60400 << 8) +#define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \ + ((((regn) >> 8) & GENMASK(3, 0)) << 24)) +#define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8)) +#define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11)) +#define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16)) +#define PCIE_CONF_ADDR(regn, fun, dev, bus) \ + (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \ + PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus)) + +/* MediaTek specific configuration registers */ +#define PCIE_FTS_NUM 0x70c +#define PCIE_FTS_NUM_MASK GENMASK(15, 8) +#define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8) + +#define PCIE_FC_CREDIT 0x73c +#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) +#define PCIE_FC_CREDIT_VAL(x) ((x) << 16) + +/** + * struct mtk_pcie_port - PCIe port information + * @base: IO mapped register base + * @list: port list + * @pcie: pointer to PCIe host info + * @reset: pointer to port reset control + * @sys_ck: pointer to bus clock + * @phy: pointer to phy control block + * @lane: lane count + * @index: port index + */ +struct mtk_pcie_port { + void __iomem *base; + struct list_head list; + struct mtk_pcie *pcie; + struct reset_control *reset; + struct clk *sys_ck; + struct phy *phy; + u32 lane; + u32 index; +}; + +/** + * struct mtk_pcie - PCIe host information + * @dev: pointer to PCIe device + * @base: IO mapped register base + * @free_ck: free-run reference clock + * @io: IO resource + * @pio: PIO resource + * @mem: non-prefetchable memory resource + * @busn: bus range + * @offset: IO / Memory offset + * @ports: pointer to PCIe port information + */ +struct mtk_pcie { + struct device *dev; + void __iomem *base; + struct clk *free_ck; + + struct resource io; + struct resource pio; + struct resource mem; + struct resource busn; + struct { + resource_size_t mem; + resource_size_t io; + } offset; + struct list_head ports; +}; + +static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port) +{ + return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP); +} + +static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + + clk_disable_unprepare(pcie->free_ck); + + if (dev->pm_domain) { + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + } +} + +static void mtk_pcie_port_free(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct device *dev = pcie->dev; + + devm_iounmap(dev, port->base); + list_del(&port->list); + devm_kfree(dev, port); +} + +static void mtk_pcie_put_resources(struct mtk_pcie *pcie) +{ + struct mtk_pcie_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) { + phy_power_off(port->phy); + clk_disable_unprepare(port->sys_ck); + 
mtk_pcie_port_free(port); + } + + mtk_pcie_subsys_powerdown(pcie); +} + +static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_host_bridge *host = pci_find_host_bridge(bus); + struct mtk_pcie *pcie = pci_host_bridge_priv(host); + + writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn), + bus->number), pcie->base + PCIE_CFG_ADDR); + + return pcie->base + PCIE_CFG_DATA + (where & 3); +} + +static struct pci_ops mtk_pcie_ops = { + .map_bus = mtk_pcie_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static void mtk_pcie_configure_rc(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + u32 func = PCI_FUNC(port->index << 3); + u32 slot = PCI_SLOT(port->index << 3); + u32 val; + + /* enable interrupt */ + val = readl(pcie->base + PCIE_INT_ENABLE); + val |= PCIE_PORT_INT_EN(port->index); + writel(val, pcie->base + PCIE_INT_ENABLE); + + /* map to the whole DDR region. We need to set it before cfg operation. */ + writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, + port->base + PCIE_BAR0_SETUP); + + /* configure class code and revision ID */ + writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); + + /* configure FC credit */ + writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + val = readl(pcie->base + PCIE_CFG_DATA); + val &= ~PCIE_FC_CREDIT_MASK; + val |= PCIE_FC_CREDIT_VAL(0x806c); + writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + writel(val, pcie->base + PCIE_CFG_DATA); + + /* configure RC FTS number to 250 when it leaves L0s */ + writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + val = readl(pcie->base + PCIE_CFG_DATA); + val &= ~PCIE_FTS_NUM_MASK; + val |= PCIE_FTS_NUM_L0(0x50); + writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), + pcie->base + PCIE_CFG_ADDR); + writel(val, pcie->base + PCIE_CFG_DATA); +} + +static void mtk_pcie_assert_ports(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + u32 val; + + /* assert port PERST_N */ + val = readl(pcie->base + PCIE_SYS_CFG); + val |= PCIE_PORT_PERST(port->index); + writel(val, pcie->base + PCIE_SYS_CFG); + + /* de-assert port PERST_N */ + val = readl(pcie->base + PCIE_SYS_CFG); + val &= ~PCIE_PORT_PERST(port->index); + writel(val, pcie->base + PCIE_SYS_CFG); + + /* PCIe v2.0 needs at least a 100ms delay to train from Gen1 to Gen2 */ + msleep(100); +} + +static void mtk_pcie_enable_ports(struct mtk_pcie_port *port) +{ + struct device *dev = port->pcie->dev; + int err; + + err = clk_prepare_enable(port->sys_ck); + if (err) { + dev_err(dev, "failed to enable port%d clock\n", port->index); + goto err_sys_clk; + } + + reset_control_assert(port->reset); + reset_control_deassert(port->reset); + + err = phy_power_on(port->phy); + if (err) { + dev_err(dev, "failed to power on port%d phy\n", port->index); + goto err_phy_on; + } + + mtk_pcie_assert_ports(port); + + /* if the link is up, set up the root port configuration space */ + if (mtk_pcie_link_up(port)) { + mtk_pcie_configure_rc(port); + return; + } + + dev_info(dev, "Port%d link down\n", port->index); + + phy_power_off(port->phy); +err_phy_on: + clk_disable_unprepare(port->sys_ck); +err_sys_clk: + mtk_pcie_port_free(port); +} + +static int mtk_pcie_parse_ports(struct mtk_pcie *pcie, + struct device_node *node, + int index) +{ + struct mtk_pcie_port *port; + struct resource *regs; + struct device *dev = pcie->dev; + struct platform_device *pdev =
to_platform_device(dev); + char name[10]; + int err; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + err = of_property_read_u32(node, "num-lanes", &port->lane); + if (err) { + dev_err(dev, "missing num-lanes property\n"); + return err; + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1); + port->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(port->base)) { + dev_err(dev, "failed to map port%d base\n", index); + return PTR_ERR(port->base); + } + + snprintf(name, sizeof(name), "sys_ck%d", index); + port->sys_ck = devm_clk_get(dev, name); + if (IS_ERR(port->sys_ck)) { + dev_err(dev, "failed to get port%d clock\n", index); + return PTR_ERR(port->sys_ck); + } + + snprintf(name, sizeof(name), "pcie-rst%d", index); + port->reset = devm_reset_control_get_optional(dev, name); + if (PTR_ERR(port->reset) == -EPROBE_DEFER) + return PTR_ERR(port->reset); + + /* some platforms may use default PHY setting */ + snprintf(name, sizeof(name), "pcie-phy%d", index); + port->phy = devm_phy_optional_get(dev, name); + if (IS_ERR(port->phy)) + return PTR_ERR(port->phy); + + port->index = index; + port->pcie = pcie; + + INIT_LIST_HEAD(&port->list); + list_add_tail(&port->list, &pcie->ports); + + return 0; +} + +static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *regs; + int err; + + /* get shared registers */ + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pcie->base = devm_ioremap_resource(dev, regs); + if (IS_ERR(pcie->base)) { + dev_err(dev, "failed to map shared register\n"); + return PTR_ERR(pcie->base); + } + + pcie->free_ck = devm_clk_get(dev, "free_ck"); + if (IS_ERR(pcie->free_ck)) { + if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + pcie->free_ck = NULL; + } + + if (dev->pm_domain) { + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + } + + /* enable top level clock */ + err = clk_prepare_enable(pcie->free_ck); + if (err) { + dev_err(dev, "failed to enable free_ck\n"); + goto err_free_ck; + } + + return 0; + +err_free_ck: + if (dev->pm_domain) { + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + } + + return err; +} + +static int mtk_pcie_setup(struct mtk_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node, *child; + struct of_pci_range_parser parser; + struct of_pci_range range; + struct resource res; + struct mtk_pcie_port *port, *tmp; + int err; + + if (of_pci_range_parser_init(&parser, node)) { + dev_err(dev, "missing \"ranges\" property\n"); + return -EINVAL; + } + + for_each_of_pci_range(&parser, &range) { + err = of_pci_range_to_resource(&range, node, &res); + if (err < 0) + return err; + + switch (res.flags & IORESOURCE_TYPE_BITS) { + case IORESOURCE_IO: + pcie->offset.io = res.start - range.pci_addr; + + memcpy(&pcie->pio, &res, sizeof(res)); + pcie->pio.name = node->full_name; + + pcie->io.start = range.cpu_addr; + pcie->io.end = range.cpu_addr + range.size - 1; + pcie->io.flags = IORESOURCE_MEM; + pcie->io.name = "I/O"; + + memcpy(&res, &pcie->io, sizeof(res)); + break; + + case IORESOURCE_MEM: + pcie->offset.mem = res.start - range.pci_addr; + + memcpy(&pcie->mem, &res, sizeof(res)); + pcie->mem.name = "non-prefetchable"; + break; + } + } + + err = of_pci_parse_bus_range(node, &pcie->busn); + if (err < 0) { + dev_err(dev, "failed to parse bus ranges property: %d\n", err); + pcie->busn.name = node->name; + pcie->busn.start = 
0; + pcie->busn.end = 0xff; + pcie->busn.flags = IORESOURCE_BUS; + } + + for_each_available_child_of_node(node, child) { + int index; + + err = of_pci_get_devfn(child); + if (err < 0) { + dev_err(dev, "failed to parse devfn: %d\n", err); + return err; + } + + index = PCI_SLOT(err); + + err = mtk_pcie_parse_ports(pcie, child, index); + if (err) + return err; + } + + err = mtk_pcie_subsys_powerup(pcie); + if (err) + return err; + + /* enable each port, and then check link status */ + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + mtk_pcie_enable_ports(port); + + /* power down PCIe subsys if slots are all empty (link down) */ + if (list_empty(&pcie->ports)) + mtk_pcie_subsys_powerdown(pcie); + + return 0; +} + +static int mtk_pcie_request_resources(struct mtk_pcie *pcie) +{ + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + struct list_head *windows = &host->windows; + struct device *dev = pcie->dev; + int err; + + pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); + pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); + pci_add_resource(windows, &pcie->busn); + + err = devm_request_pci_bus_resources(dev, windows); + if (err < 0) + return err; + + pci_remap_iospace(&pcie->pio, pcie->io.start); + + return 0; +} + +static int mtk_pcie_register_host(struct pci_host_bridge *host) +{ + struct mtk_pcie *pcie = pci_host_bridge_priv(host); + struct pci_bus *child; + int err; + + host->busnr = pcie->busn.start; + host->dev.parent = pcie->dev; + host->ops = &mtk_pcie_ops; + host->map_irq = of_irq_parse_and_map_pci; + host->swizzle_irq = pci_common_swizzle; + + err = pci_scan_root_bus_bridge(host); + if (err < 0) + return err; + + pci_bus_size_bridges(host->bus); + pci_bus_assign_resources(host->bus); + + list_for_each_entry(child, &host->bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(host->bus); + + return 0; +} + +static int mtk_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_pcie *pcie; + struct pci_host_bridge *host; + int err; + + host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!host) + return -ENOMEM; + + pcie = pci_host_bridge_priv(host); + + pcie->dev = dev; + platform_set_drvdata(pdev, pcie); + INIT_LIST_HEAD(&pcie->ports); + + err = mtk_pcie_setup(pcie); + if (err) + return err; + + err = mtk_pcie_request_resources(pcie); + if (err) + goto put_resources; + + err = mtk_pcie_register_host(host); + if (err) + goto put_resources; + + return 0; + +put_resources: + if (!list_empty(&pcie->ports)) + mtk_pcie_put_resources(pcie); + + return err; +} + +static const struct of_device_id mtk_pcie_ids[] = { + { .compatible = "mediatek,mt7623-pcie"}, + { .compatible = "mediatek,mt2701-pcie"}, + {}, +}; + +static struct platform_driver mtk_pcie_driver = { + .probe = mtk_pcie_probe, + .driver = { + .name = "mtk-pcie", + .of_match_table = mtk_pcie_ids, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(mtk_pcie_driver); diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index cb07c45c1858..246d485b24c6 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -450,29 +450,33 @@ done: static int rcar_pcie_enable(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); struct pci_bus *bus, *child; - LIST_HEAD(res); + int ret; /* Try setting 5 GT/s link speed */ rcar_pcie_force_speedup(pcie); - rcar_pcie_setup(&res, pcie); + 
rcar_pcie_setup(&bridge->windows, pcie); pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = pcie->root_bus_nr; + bridge->ops = &rcar_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; if (IS_ENABLED(CONFIG_PCI_MSI)) - bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr, - &rcar_pcie_ops, pcie, &res, &pcie->msi.chip); - else - bus = pci_scan_root_bus(dev, pcie->root_bus_nr, - &rcar_pcie_ops, pcie, &res); + bridge->msi = &pcie->msi.chip; - if (!bus) { - dev_err(dev, "Scanning rootbus failed"); - return -ENODEV; + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) { + kfree(bridge); + return ret; } - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + bus = bridge->bus; pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); @@ -1127,11 +1131,14 @@ static int rcar_pcie_probe(struct platform_device *pdev) unsigned int data; int err; int (*hw_init_fn)(struct rcar_pcie *); + struct pci_host_bridge *bridge; - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) + bridge = pci_alloc_host_bridge(sizeof(*pcie)); + if (!bridge) return -ENOMEM; + pcie = pci_host_bridge_priv(bridge); + pcie->dev = dev; INIT_LIST_HEAD(&pcie->resources); @@ -1141,12 +1148,12 @@ static int rcar_pcie_probe(struct platform_device *pdev) err = rcar_pcie_get_resources(pcie); if (err < 0) { dev_err(dev, "failed to request resources: %d\n", err); - return err; + goto err_free_bridge; } err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); if (err) - return err; + goto err_free_bridge; pm_runtime_enable(dev); err = pm_runtime_get_sync(dev); @@ -1183,6 +1190,9 @@ static int rcar_pcie_probe(struct platform_device *pdev) return 0; +err_free_bridge: + pci_free_host_bridge(bridge); + err_pm_put: pm_runtime_put(dev); diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index 0e020b6e0943..5acf8694fb23 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ -139,6 +139,7 @@ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ PCIE_CORE_INT_MMVC) +#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 #define PCIE_RC_CONFIG_BASE 0xa00000 #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) #define PCIE_RC_CONFIG_SCC_SHIFT 16 @@ -146,6 +147,9 @@ #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 #define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff #define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 +#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) +#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) +#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) #define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) #define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) #define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) @@ -175,6 +179,8 @@ #define IB_ROOT_PORT_REG_SIZE_SHIFT 3 #define AXI_WRAPPER_IO_WRITE 0x6 #define AXI_WRAPPER_MEM_WRITE 0x2 +#define AXI_WRAPPER_TYPE0_CFG 0xa +#define AXI_WRAPPER_TYPE1_CFG 0xb #define AXI_WRAPPER_NOR_MSG 0xc #define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 @@ -198,6 +204,7 @@ #define RC_REGION_0_ADDR_TRANS_H 0x00000000 #define RC_REGION_0_ADDR_TRANS_L 0x00000000 #define RC_REGION_0_PASS_BITS (25 - 1) +#define RC_REGION_0_TYPE_MASK GENMASK(3, 0) #define MAX_AXI_WRAPPER_REGION_NUM 33 struct rockchip_pcie { @@ -295,7 +302,9 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 *val) { - void __iomem *addr = rockchip->apb_base + 
PCIE_RC_CONFIG_BASE + where; + void __iomem *addr; + + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; @@ -319,11 +328,13 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 val) { u32 mask, tmp, offset; + void __iomem *addr; offset = where & ~0x3; + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; if (size == 4) { - writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); + writel(val, addr); return PCIBIOS_SUCCESSFUL; } @@ -334,13 +345,33 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, * corrupt RW1C bits in adjacent registers. But the hardware * doesn't support smaller writes. */ - tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask; + tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); - writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); + writel(tmp, addr); return PCIBIOS_SUCCESSFUL; } +static void rockchip_pcie_cfg_configuration_accesses( + struct rockchip_pcie *rockchip, u32 type) +{ + u32 ob_desc_0; + + /* Configuration Accesses for region 0 */ + rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); + + rockchip_pcie_write(rockchip, + (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), + PCIE_CORE_OB_REGION_ADDR0); + rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, + PCIE_CORE_OB_REGION_ADDR1); + ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); + ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); + ob_desc_0 |= (type | (0x1 << 23)); + rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); + rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); +} + static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) @@ -355,6 +386,13 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, return PCIBIOS_BAD_REGISTER_NUMBER; } + if (bus->parent->number == rockchip->root_bus_nr) + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + else + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE1_CFG); + if (size == 4) { *val = readl(rockchip->reg_base + busdev); } else if (size == 2) { @@ -379,6 +417,13 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, if (!IS_ALIGNED(busdev, size)) return PCIBIOS_BAD_REGISTER_NUMBER; + if (bus->parent->number == rockchip->root_bus_nr) + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + else + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE1_CFG); + if (size == 4) writel(val, rockchip->reg_base + busdev); else if (size == 2) @@ -664,15 +709,10 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); } - rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); - - rockchip_pcie_write(rockchip, - (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), - PCIE_CORE_OB_REGION_ADDR0); - rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, - PCIE_CORE_OB_REGION_ADDR1); - rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0); - rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); + status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; + status |= PCIE_RC_CONFIG_DCSR_MPS_256; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); return 0; } @@ -1156,13 +1196,16 @@ static int 
rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, return 0; } -static int rockchip_cfg_atu(struct rockchip_pcie *rockchip) +static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int offset; int err; int reg_no; + rockchip_pcie_cfg_configuration_accesses(rockchip, + AXI_WRAPPER_TYPE0_CFG); + for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, AXI_WRAPPER_MEM_WRITE, @@ -1251,6 +1294,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) clk_disable_unprepare(rockchip->aclk_perf_pcie); clk_disable_unprepare(rockchip->aclk_pcie); + if (!IS_ERR(rockchip->vpcie0v9)) + regulator_disable(rockchip->vpcie0v9); + return ret; } @@ -1259,24 +1305,54 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) struct rockchip_pcie *rockchip = dev_get_drvdata(dev); int err; - clk_prepare_enable(rockchip->clk_pcie_pm); - clk_prepare_enable(rockchip->hclk_pcie); - clk_prepare_enable(rockchip->aclk_perf_pcie); - clk_prepare_enable(rockchip->aclk_pcie); + if (!IS_ERR(rockchip->vpcie0v9)) { + err = regulator_enable(rockchip->vpcie0v9); + if (err) { + dev_err(dev, "fail to enable vpcie0v9 regulator\n"); + return err; + } + } + + err = clk_prepare_enable(rockchip->clk_pcie_pm); + if (err) + goto err_pcie_pm; + + err = clk_prepare_enable(rockchip->hclk_pcie); + if (err) + goto err_hclk_pcie; + + err = clk_prepare_enable(rockchip->aclk_perf_pcie); + if (err) + goto err_aclk_perf_pcie; + + err = clk_prepare_enable(rockchip->aclk_pcie); + if (err) + goto err_aclk_pcie; err = rockchip_pcie_init_port(rockchip); if (err) - return err; + goto err_pcie_resume; - err = rockchip_cfg_atu(rockchip); + err = rockchip_pcie_cfg_atu(rockchip); if (err) - return err; + goto err_pcie_resume; /* Need this to enter L1 again */ rockchip_pcie_update_txcredit_mui(rockchip); rockchip_pcie_enable_interrupts(rockchip); return 0; + +err_pcie_resume: + clk_disable_unprepare(rockchip->aclk_pcie); +err_aclk_pcie: + clk_disable_unprepare(rockchip->aclk_perf_pcie); +err_aclk_perf_pcie: + clk_disable_unprepare(rockchip->hclk_pcie); +err_hclk_pcie: + clk_disable_unprepare(rockchip->clk_pcie_pm); +err_pcie_pm: + return err; } static int rockchip_pcie_probe(struct platform_device *pdev) @@ -1284,6 +1360,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) struct rockchip_pcie *rockchip; struct device *dev = &pdev->dev; struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; struct resource_entry *win; resource_size_t io_base; struct resource *mem; @@ -1295,10 +1372,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev) if (!dev->of_node) return -ENODEV; - rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL); - if (!rockchip) + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip)); + if (!bridge) return -ENOMEM; + rockchip = pci_host_bridge_priv(bridge); + platform_set_drvdata(pdev, rockchip); rockchip->dev = dev; @@ -1385,22 +1464,30 @@ static int rockchip_pcie_probe(struct platform_device *pdev) } } - err = rockchip_cfg_atu(rockchip); + err = rockchip_pcie_cfg_atu(rockchip); if (err) goto err_free_res; - rockchip->msg_region = devm_ioremap(rockchip->dev, - rockchip->msg_bus_addr, SZ_1M); + rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); if (!rockchip->msg_region) { err = -ENOMEM; goto err_free_res; } - bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res); - if (!bus) { - err = -ENOMEM; + 
list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = rockchip; + bridge->busnr = 0; + bridge->ops = &rockchip_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + err = pci_scan_root_bus_bridge(bridge); + if (err < 0) goto err_free_res; - } + + bus = bridge->bus; + rockchip->root_bus = bus; pci_bus_size_bridges(bus); diff --git a/drivers/pci/host/pcie-tango.c b/drivers/pci/host/pcie-tango.c new file mode 100644 index 000000000000..6bbb81f06a53 --- /dev/null +++ b/drivers/pci/host/pcie-tango.c @@ -0,0 +1,141 @@ +#include <linux/pci-ecam.h> +#include <linux/delay.h> +#include <linux/of.h> + +#define SMP8759_MUX 0x48 +#define SMP8759_TEST_OUT 0x74 + +struct tango_pcie { + void __iomem *base; +}; + +static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_config_window *cfg = bus->sysdata; + struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); + int ret; + + /* Reads in configuration space outside devfn 0 return garbage */ + if (devfn != 0) + return PCIBIOS_FUNC_NOT_SUPPORTED; + + /* + * PCI config and MMIO accesses are muxed. Linux doesn't have a + * mutual exclusion mechanism for config vs. MMIO accesses, so + * concurrent accesses may cause corruption. + */ + writel_relaxed(1, pcie->base + SMP8759_MUX); + ret = pci_generic_config_read(bus, devfn, where, size, val); + writel_relaxed(0, pcie->base + SMP8759_MUX); + + return ret; +} + +static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_config_window *cfg = bus->sysdata; + struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); + int ret; + + writel_relaxed(1, pcie->base + SMP8759_MUX); + ret = pci_generic_config_write(bus, devfn, where, size, val); + writel_relaxed(0, pcie->base + SMP8759_MUX); + + return ret; +} + +static struct pci_ecam_ops smp8759_ecam_ops = { + .bus_shift = 20, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = smp8759_config_read, + .write = smp8759_config_write, + } +}; + +static int tango_pcie_link_up(struct tango_pcie *pcie) +{ + void __iomem *test_out = pcie->base + SMP8759_TEST_OUT; + int i; + + writel_relaxed(16, test_out); + for (i = 0; i < 10; ++i) { + u32 ltssm_state = readl_relaxed(test_out) >> 8; + if ((ltssm_state & 0x1f) == 0xf) /* L0 */ + return 1; + usleep_range(3000, 4000); + } + + return 0; +} + +static int tango_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tango_pcie *pcie; + struct resource *res; + int ret; + + dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n"); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + pcie->base = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); + + platform_set_drvdata(pdev, pcie); + + if (!tango_pcie_link_up(pcie)) + return -ENODEV; + + return pci_host_common_probe(pdev, &smp8759_ecam_ops); +} + +static const struct of_device_id tango_pcie_ids[] = { + { .compatible = "sigma,smp8759-pcie" }, + { }, +}; + +static struct platform_driver tango_pcie_driver = { + .probe = tango_pcie_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = tango_pcie_ids, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(tango_pcie_driver); + +/* + * The root complex advertises the wrong device
class. + * Header Type 1 is for PCI-to-PCI bridges. + */ +static void tango_fixup_class(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_PCI << 8; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class); + +/* + * The root complex exposes a "fake" BAR, which is used to filter + * bus-to-system accesses. Only accesses within the range defined by this + * BAR are forwarded to the host, others are ignored. + * + * By default, the DMA framework expects an identity mapping, and DRAM0 is + * mapped at 0x80000000. + */ +static void tango_fixup_bar(struct pci_dev *dev) +{ + dev->non_compliant_bars = true; + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar); diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 4b16b26ae909..eec641a34fc5 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c @@ -172,6 +172,7 @@ struct nwl_pcie { u8 root_busno; struct nwl_msi msi; struct irq_domain *legacy_irq_domain; + raw_spinlock_t leg_mask_lock; }; static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) @@ -383,11 +384,52 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc) chained_irq_exit(chip, desc); } +static void nwl_mask_leg_irq(struct irq_data *data) +{ + struct irq_desc *desc = irq_to_desc(data->irq); + struct nwl_pcie *pcie; + unsigned long flags; + u32 mask; + u32 val; + + pcie = irq_desc_get_chip_data(desc); + mask = 1 << (data->hwirq - 1); + raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); + val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); + nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); + raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); +} + +static void nwl_unmask_leg_irq(struct irq_data *data) +{ + struct irq_desc *desc = irq_to_desc(data->irq); + struct nwl_pcie *pcie; + unsigned long flags; + u32 mask; + u32 val; + + pcie = irq_desc_get_chip_data(desc); + mask = 1 << (data->hwirq - 1); + raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); + val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); + nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); + raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); +} + +static struct irq_chip nwl_leg_irq_chip = { + .name = "nwl_pcie:legacy", + .irq_enable = nwl_unmask_leg_irq, + .irq_disable = nwl_mask_leg_irq, + .irq_mask = nwl_mask_leg_irq, + .irq_unmask = nwl_unmask_leg_irq, +}; + static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq); irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); return 0; } @@ -526,11 +568,12 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) return -ENOMEM; } + raw_spin_lock_init(&pcie->leg_mask_lock); nwl_pcie_init_msi_irq_domain(pcie); return 0; } -static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus) +static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); @@ -791,13 +834,16 @@ static int nwl_pcie_probe(struct platform_device *pdev) struct nwl_pcie *pcie; struct pci_bus *bus; struct pci_bus *child; + struct pci_host_bridge *bridge; int err; 
resource_size_t iobase = 0; LIST_HEAD(res); - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) + return -ENODEV; + + pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; @@ -830,21 +876,28 @@ static int nwl_pcie_probe(struct platform_device *pdev) goto error; } - bus = pci_create_root_bus(dev, pcie->root_busno, - &nwl_pcie_ops, pcie, &res); - if (!bus) { - err = -ENOMEM; - goto error; - } + list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = pcie->root_busno; + bridge->ops = &nwl_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; if (IS_ENABLED(CONFIG_PCI_MSI)) { - err = nwl_pcie_enable_msi(pcie, bus); + err = nwl_pcie_enable_msi(pcie); if (err < 0) { dev_err(dev, "failed to enable MSI support: %d\n", err); goto error; } } - pci_scan_child_bus(bus); + + err = pci_scan_root_bus_bridge(bridge); + if (err) + goto error; + + bus = bridge->bus; + pci_assign_unassigned_bus_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index 2fe2df51f9f8..f63fa5e0278c 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ -633,6 +633,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct xilinx_pcie_port *port; struct pci_bus *bus, *child; + struct pci_host_bridge *bridge; int err; resource_size_t iobase = 0; LIST_HEAD(res); @@ -640,9 +641,11 @@ static int xilinx_pcie_probe(struct platform_device *pdev) if (!dev->of_node) return -ENODEV; - port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); - if (!port) - return -ENOMEM; + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); + if (!bridge) + return -ENODEV; + + port = pci_host_bridge_priv(bridge); port->dev = dev; @@ -671,21 +674,26 @@ static int xilinx_pcie_probe(struct platform_device *pdev) if (err) goto error; - bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res); - if (!bus) { - err = -ENOMEM; - goto error; - } + + list_splice_init(&res, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = port; + bridge->busnr = 0; + bridge->ops = &xilinx_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; #ifdef CONFIG_PCI_MSI xilinx_pcie_msi_chip.dev = dev; - bus->msi = &xilinx_pcie_msi_chip; + bridge->msi = &xilinx_pcie_msi_chip; #endif - pci_scan_child_bus(bus); + err = pci_scan_root_bus_bridge(bridge); + if (err < 0) + goto error; + + bus = bridge->bus; + pci_assign_unassigned_bus_resources(bus); -#ifndef CONFIG_MICROBLAZE - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); -#endif list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); pci_bus_add_devices(bus); @@ -696,7 +704,7 @@ error: return err; } -static struct of_device_id xilinx_pcie_of_match[] = { +static const struct of_device_id xilinx_pcie_of_match[] = { { .compatible = "xlnx,axi-pcie-host-1.00.a", }, {} }; diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index 31203d69616d..6088c3083194 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -539,7 +539,10 @@ static void vmd_detach_resources(struct vmd_dev *vmd) } /* - * VMD domains start at 0x1000 to not clash with ACPI _SEG domains. 
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. + * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower + * 16 bits are the PCI Segment Group (domain) number. Other bits are + * currently reserved. */ static int vmd_find_free_domain(void) { @@ -710,7 +713,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) INIT_LIST_HEAD(&vmd->irqs[i].irq_list); err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), - vmd_irq, 0, "vmd", &vmd->irqs[i]); + vmd_irq, IRQF_NO_THREAD, + "vmd", &vmd->irqs[i]); if (err) return err; } @@ -739,10 +743,10 @@ static void vmd_remove(struct pci_dev *dev) struct vmd_dev *vmd = pci_get_drvdata(dev); vmd_detach_resources(vmd); - vmd_cleanup_srcu(vmd); sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); pci_remove_root_bus(vmd->bus); + vmd_cleanup_srcu(vmd); vmd_teardown_dma_ops(vmd); irq_domain_remove(vmd->irq_domain); } diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index d9dc7363ac77..120485d6f352 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -461,8 +461,6 @@ found: else iov->dev = dev; - mutex_init(&iov->lock); - dev->sriov = iov; dev->is_physfn = 1; rc = compute_max_vf_buses(dev); @@ -491,8 +489,6 @@ static void sriov_release(struct pci_dev *dev) if (dev != dev->sriov->dev) pci_dev_put(dev->sriov->dev); - mutex_destroy(&dev->sriov->lock); - kfree(dev->sriov); dev->sriov = NULL; } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index fbad5dca3219..253d92409bb3 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1058,7 +1058,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, for (;;) { if (affd) { - nvec = irq_calc_affinity_vectors(nvec, affd); + nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; } @@ -1097,7 +1097,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, for (;;) { if (affd) { - nvec = irq_calc_affinity_vectors(nvec, affd); + nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; } @@ -1165,16 +1165,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, if (flags & PCI_IRQ_AFFINITY) { if (!affd) affd = &msi_default_affd; - - if (affd->pre_vectors + affd->post_vectors > min_vecs) - return -EINVAL; - - /* - * If there aren't any vectors left after applying the pre/post - * vectors don't bother with assigning affinity. 
- */ - if (affd->pre_vectors + affd->post_vectors == min_vecs) - affd = NULL; } else { if (WARN_ON(affd)) affd = NULL; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index df4aead394f2..607f677f48d2 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -415,6 +415,8 @@ static int pci_device_probe(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = to_pci_driver(dev->driver); + pci_assign_irq(pci_dev); + error = pcibios_alloc_irq(pci_dev); if (error < 0) return error; @@ -967,6 +969,7 @@ static int pci_pm_thaw_noirq(struct device *dev) return pci_legacy_resume_early(dev); pci_update_current_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); if (drv && drv->pm && drv->pm->thaw_noirq) error = drv->pm->thaw_noirq(dev); diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index 2d8db3ead6e8..a7a41d9c29df 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -43,9 +43,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, { const struct dmi_device *dmi; struct dmi_dev_onboard *donboard; + int domain_nr; int bus; int devfn; + domain_nr = pci_domain_nr(pdev->bus); bus = pdev->bus->number; devfn = pdev->devfn; @@ -53,8 +55,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, NULL, dmi)) != NULL) { donboard = dmi->device_data; - if (donboard && donboard->bus == bus && - donboard->devfn == devfn) { + if (donboard && donboard->segment == domain_nr && + donboard->bus == bus && + donboard->devfn == devfn) { if (buf) { if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) return scnprintf(buf, PAGE_SIZE, diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 31e99613a12e..2f3780b50723 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -154,6 +154,129 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(resource); +static ssize_t max_link_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u32 linkcap; + int err; + const char *speed; + + err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap); + if (err) + return -EINVAL; + + switch (linkcap & PCI_EXP_LNKCAP_SLS) { + case PCI_EXP_LNKCAP_SLS_8_0GB: + speed = "8 GT/s"; + break; + case PCI_EXP_LNKCAP_SLS_5_0GB: + speed = "5 GT/s"; + break; + case PCI_EXP_LNKCAP_SLS_2_5GB: + speed = "2.5 GT/s"; + break; + default: + speed = "Unknown speed"; + } + + return sprintf(buf, "%s\n", speed); +} +static DEVICE_ATTR_RO(max_link_speed); + +static ssize_t max_link_width_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u32 linkcap; + int err; + + err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap); + if (err) + return -EINVAL; + + return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4); +} +static DEVICE_ATTR_RO(max_link_width); + +static ssize_t current_link_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u16 linkstat; + int err; + const char *speed; + + err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + if (err) + return -EINVAL; + + switch (linkstat & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_8_0GB: + speed = "8 GT/s"; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + speed = "5 GT/s"; + break; + case 
PCI_EXP_LNKSTA_CLS_2_5GB: + speed = "2.5 GT/s"; + break; + default: + speed = "Unknown speed"; + } + + return sprintf(buf, "%s\n", speed); +} +static DEVICE_ATTR_RO(current_link_speed); + +static ssize_t current_link_width_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u16 linkstat; + int err; + + err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); + if (err) + return -EINVAL; + + return sprintf(buf, "%u\n", + (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT); +} +static DEVICE_ATTR_RO(current_link_width); + +static ssize_t secondary_bus_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u8 sec_bus; + int err; + + err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus); + if (err) + return -EINVAL; + + return sprintf(buf, "%u\n", sec_bus); +} +static DEVICE_ATTR_RO(secondary_bus_number); + +static ssize_t subordinate_bus_number_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + u8 sub_bus; + int err; + + err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus); + if (err) + return -EINVAL; + + return sprintf(buf, "%u\n", sub_bus); +} +static DEVICE_ATTR_RO(subordinate_bus_number); + static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -472,7 +595,6 @@ static ssize_t sriov_numvfs_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - struct pci_sriov *iov = pdev->sriov; int ret; u16 num_vfs; @@ -483,7 +605,7 @@ static ssize_t sriov_numvfs_store(struct device *dev, if (num_vfs > pci_sriov_get_totalvfs(pdev)) return -ERANGE; - mutex_lock(&iov->dev->sriov->lock); + device_lock(&pdev->dev); if (num_vfs == pdev->sriov->num_VFs) goto exit; @@ -518,7 +640,7 @@ static ssize_t sriov_numvfs_store(struct device *dev, num_vfs, ret); exit: - mutex_unlock(&iov->dev->sriov->lock); + device_unlock(&pdev->dev); if (ret < 0) return ret; @@ -629,12 +751,17 @@ static struct attribute *pci_dev_attrs[] = { NULL, }; -static const struct attribute_group pci_dev_group = { - .attrs = pci_dev_attrs, +static struct attribute *pci_bridge_attrs[] = { + &dev_attr_subordinate_bus_number.attr, + &dev_attr_secondary_bus_number.attr, + NULL, }; -const struct attribute_group *pci_dev_groups[] = { - &pci_dev_group, +static struct attribute *pcie_dev_attrs[] = { + &dev_attr_current_link_speed.attr, + &dev_attr_current_link_width.attr, + &dev_attr_max_link_width.attr, + &dev_attr_max_link_speed.attr, NULL, }; @@ -1557,6 +1684,57 @@ static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj, return a->mode; } +static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); + + if (pci_is_bridge(pdev)) + return a->mode; + + return 0; +} + +static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); + + if (pci_is_pcie(pdev)) + return a->mode; + + return 0; +} + +static const struct attribute_group pci_dev_group = { + .attrs = pci_dev_attrs, +}; + +const struct attribute_group *pci_dev_groups[] = { + &pci_dev_group, + NULL, +}; + +static const struct attribute_group pci_bridge_group = { + .attrs = pci_bridge_attrs, +}; + +const struct 
attribute_group *pci_bridge_groups[] = { + &pci_bridge_group, + NULL, +}; + +static const struct attribute_group pcie_dev_group = { + .attrs = pcie_dev_attrs, +}; + +const struct attribute_group *pcie_dev_groups[] = { + &pcie_dev_group, + NULL, +}; + static struct attribute_group pci_dev_hp_attr_group = { .attrs = pci_dev_hp_attrs, .is_visible = pci_dev_hp_attrs_are_visible, @@ -1592,12 +1770,24 @@ static struct attribute_group pci_dev_attr_group = { .is_visible = pci_dev_attrs_are_visible, }; +static struct attribute_group pci_bridge_attr_group = { + .attrs = pci_bridge_attrs, + .is_visible = pci_bridge_attrs_are_visible, +}; + +static struct attribute_group pcie_dev_attr_group = { + .attrs = pcie_dev_attrs, + .is_visible = pcie_dev_attrs_are_visible, +}; + static const struct attribute_group *pci_dev_attr_groups[] = { &pci_dev_attr_group, &pci_dev_hp_attr_group, #ifdef CONFIG_PCI_IOV &sriov_dev_attr_group, #endif + &pci_bridge_attr_group, + &pcie_dev_attr_group, NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 0b5302a9fdae..d88edf5c563b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -28,6 +28,7 @@ #include <linux/pm_runtime.h> #include <linux/pci_hotplug.h> #include <linux/vmalloc.h> +#include <linux/pci-ats.h> #include <asm/setup.h> #include <asm/dma.h> #include <linux/aer.h> @@ -455,7 +456,7 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev, pci_bus_for_each_resource(bus, r, i) { if (!r) continue; - if (res->start && resource_contains(r, res)) { + if (resource_contains(r, res)) { /* * If the window is prefetchable but the BAR is @@ -1166,6 +1167,8 @@ void pci_restore_state(struct pci_dev *dev) /* PCI Express register must be restored first */ pci_restore_pcie_state(dev); + pci_restore_pasid_state(dev); + pci_restore_pri_state(dev); pci_restore_ats_state(dev); pci_restore_vc_state(dev); @@ -1966,12 +1969,13 @@ EXPORT_SYMBOL(pci_wake_from_d3); /** * pci_target_state - find an appropriate low power state for a given PCI dev * @dev: PCI device + * @wakeup: Whether or not wakeup functionality will be enabled for the device. * * Use underlying platform code to find a supported low power state for @dev. * If the platform can't manage @dev, return the deepest state from which it * can generate wake events, based on any available PME info.
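 *
 * Editor's illustration (not part of the patch): with the new @wakeup
 * argument the wakeup policy is decided by the caller, e.g.
 *
 *	pci_target_state(dev, device_may_wakeup(&dev->dev))
 *
 * on the system sleep path and
 *
 *	pci_target_state(dev, device_can_wakeup(&dev->dev))
 *
 * on the runtime suspend path, matching the pci_prepare_to_sleep() and
 * pci_finish_runtime_suspend() hunks below.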
*/ -static pci_power_t pci_target_state(struct pci_dev *dev) +static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) { pci_power_t target_state = PCI_D3hot; @@ -2008,7 +2012,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev) if (dev->current_state == PCI_D3cold) target_state = PCI_D3cold; - if (device_may_wakeup(&dev->dev)) { + if (wakeup) { /* * Find the deepest state from which the device can generate * wake-up events, make it the target state and enable device @@ -2034,13 +2038,14 @@ static pci_power_t pci_target_state(struct pci_dev *dev) */ int pci_prepare_to_sleep(struct pci_dev *dev) { - pci_power_t target_state = pci_target_state(dev); + bool wakeup = device_may_wakeup(&dev->dev); + pci_power_t target_state = pci_target_state(dev, wakeup); int error; if (target_state == PCI_POWER_ERROR) return -EIO; - pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); + pci_enable_wake(dev, target_state, wakeup); error = pci_set_power_state(dev, target_state); @@ -2073,9 +2078,10 @@ EXPORT_SYMBOL(pci_back_from_sleep); */ int pci_finish_runtime_suspend(struct pci_dev *dev) { - pci_power_t target_state = pci_target_state(dev); + pci_power_t target_state; int error; + target_state = pci_target_state(dev, device_can_wakeup(&dev->dev)); if (target_state == PCI_POWER_ERROR) return -EIO; @@ -2111,8 +2117,8 @@ bool pci_dev_run_wake(struct pci_dev *dev) if (!dev->pme_support) return false; - /* PME-capable in principle, but not from the intended sleep state */ - if (!pci_pme_capable(dev, pci_target_state(dev))) + /* PME-capable in principle, but not from the target power state */ + if (!pci_pme_capable(dev, pci_target_state(dev, false))) return false; while (bus->parent) { @@ -2147,9 +2153,10 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake); bool pci_dev_keep_suspended(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; + bool wakeup = device_may_wakeup(dev); if (!pm_runtime_suspended(dev) - || pci_target_state(pci_dev) != pci_dev->current_state + || pci_target_state(pci_dev, wakeup) != pci_dev->current_state || platform_pci_need_resume(pci_dev) || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) return false; @@ -2167,7 +2174,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev) spin_lock_irq(&dev->power.lock); if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold && - !device_may_wakeup(dev)) + !wakeup) __pci_pme_active(pci_dev, false); spin_unlock_irq(&dev->power.lock); @@ -3715,46 +3722,6 @@ void pci_intx(struct pci_dev *pdev, int enable) } EXPORT_SYMBOL_GPL(pci_intx); -/** - * pci_intx_mask_supported - probe for INTx masking support - * @dev: the PCI device to operate on - * - * Check if the device dev support INTx masking via the config space - * command word. - */ -bool pci_intx_mask_supported(struct pci_dev *dev) -{ - bool mask_supported = false; - u16 orig, new; - - if (dev->broken_intx_masking) - return false; - - pci_cfg_access_lock(dev); - - pci_read_config_word(dev, PCI_COMMAND, &orig); - pci_write_config_word(dev, PCI_COMMAND, - orig ^ PCI_COMMAND_INTX_DISABLE); - pci_read_config_word(dev, PCI_COMMAND, &new); - - /* - * There's no way to protect against hardware bugs or detect them - * reliably, but as long as we know what the value should be, let's - * go ahead and check it. 
- */ - if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) { - dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n", - orig, new); - } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) { - mask_supported = true; - pci_write_config_word(dev, PCI_COMMAND, orig); - } - - pci_cfg_access_unlock(dev); - return mask_supported; -} -EXPORT_SYMBOL_GPL(pci_intx_mask_supported); - static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) { struct pci_bus *bus = dev->bus; @@ -3805,7 +3772,7 @@ done: * @dev: the PCI device to operate on * * Check if the device dev has its INTx line asserted, mask it and - * return true in that case. False is returned if not interrupt was + * return true in that case. False is returned if no interrupt was * pending. */ bool pci_check_and_mask_intx(struct pci_dev *dev) @@ -4075,40 +4042,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) return pci_reset_hotplug_slot(dev->slot->hotplug, probe); } -static int __pci_dev_reset(struct pci_dev *dev, int probe) -{ - int rc; - - might_sleep(); - - rc = pci_dev_specific_reset(dev, probe); - if (rc != -ENOTTY) - goto done; - - if (pcie_has_flr(dev)) { - if (!probe) - pcie_flr(dev); - rc = 0; - goto done; - } - - rc = pci_af_flr(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_pm_reset(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_dev_reset_slot_function(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_parent_bus_reset(dev, probe); -done: - return rc; -} - static void pci_dev_lock(struct pci_dev *dev) { pci_cfg_access_lock(dev); @@ -4134,26 +4067,18 @@ static void pci_dev_unlock(struct pci_dev *dev) pci_cfg_access_unlock(dev); } -/** - * pci_reset_notify - notify device driver of reset - * @dev: device to be notified of reset - * @prepare: 'true' if device is about to be reset; 'false' if reset attempt - * completed - * - * Must be called prior to device access being disabled and after device - * access is restored. - */ -static void pci_reset_notify(struct pci_dev *dev, bool prepare) +static void pci_dev_save_and_disable(struct pci_dev *dev) { const struct pci_error_handlers *err_handler = dev->driver ? dev->driver->err_handler : NULL; - if (err_handler && err_handler->reset_notify) - err_handler->reset_notify(dev, prepare); -} -static void pci_dev_save_and_disable(struct pci_dev *dev) -{ - pci_reset_notify(dev, true); + /* + * dev->driver->err_handler->reset_prepare() is protected against + * races with ->remove() by the device lock, which must be held by + * the caller. + */ + if (err_handler && err_handler->reset_prepare) + err_handler->reset_prepare(dev); /* * Wake-up device prior to save. PM registers default to D0 after @@ -4175,23 +4100,18 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) static void pci_dev_restore(struct pci_dev *dev) { - pci_restore_state(dev); - pci_reset_notify(dev, false); -} - -static int pci_dev_reset(struct pci_dev *dev, int probe) -{ - int rc; - - if (!probe) - pci_dev_lock(dev); - - rc = __pci_dev_reset(dev, probe); + const struct pci_error_handlers *err_handler = + dev->driver ? dev->driver->err_handler : NULL; - if (!probe) - pci_dev_unlock(dev); + pci_restore_state(dev); - return rc; + /* + * dev->driver->err_handler->reset_done() is protected against + * races with ->remove() by the device lock, which must be held by + * the caller. 
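+ *
+ * Editor's illustration (not part of the patch): a driver opts in to
+ * the split notification by implementing both halves, e.g.
+ *
+ *	static const struct pci_error_handlers foo_err_handler = {
+ *		.reset_prepare	= foo_reset_prepare,
+ *		.reset_done	= foo_reset_done,
+ *	};
+ *
+ * where the foo_* names are hypothetical; this pair replaces the
+ * single reset_notify(dev, prepare) callback removed above.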
+ */ + if (err_handler && err_handler->reset_done) + err_handler->reset_done(dev); } /** @@ -4213,7 +4133,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) */ int __pci_reset_function(struct pci_dev *dev) { - return pci_dev_reset(dev, 0); + int ret; + + pci_dev_lock(dev); + ret = __pci_reset_function_locked(dev); + pci_dev_unlock(dev); + + return ret; } EXPORT_SYMBOL_GPL(__pci_reset_function); @@ -4238,7 +4164,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function); */ int __pci_reset_function_locked(struct pci_dev *dev) { - return __pci_dev_reset(dev, 0); + int rc; + + might_sleep(); + + rc = pci_dev_specific_reset(dev, 0); + if (rc != -ENOTTY) + return rc; + if (pcie_has_flr(dev)) { + pcie_flr(dev); + return 0; + } + rc = pci_af_flr(dev, 0); + if (rc != -ENOTTY) + return rc; + rc = pci_pm_reset(dev, 0); + if (rc != -ENOTTY) + return rc; + rc = pci_dev_reset_slot_function(dev, 0); + if (rc != -ENOTTY) + return rc; + return pci_parent_bus_reset(dev, 0); } EXPORT_SYMBOL_GPL(__pci_reset_function_locked); @@ -4255,7 +4201,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked); */ int pci_probe_reset_function(struct pci_dev *dev) { - return pci_dev_reset(dev, 1); + int rc; + + might_sleep(); + + rc = pci_dev_specific_reset(dev, 1); + if (rc != -ENOTTY) + return rc; + if (pcie_has_flr(dev)) + return 0; + rc = pci_af_flr(dev, 1); + if (rc != -ENOTTY) + return rc; + rc = pci_pm_reset(dev, 1); + if (rc != -ENOTTY) + return rc; + rc = pci_dev_reset_slot_function(dev, 1); + if (rc != -ENOTTY) + return rc; + + return pci_parent_bus_reset(dev, 1); } /** @@ -4278,15 +4243,17 @@ int pci_reset_function(struct pci_dev *dev) { int rc; - rc = pci_dev_reset(dev, 1); + rc = pci_probe_reset_function(dev); if (rc) return rc; + pci_dev_lock(dev); pci_dev_save_and_disable(dev); - rc = pci_dev_reset(dev, 0); + rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); + pci_dev_unlock(dev); return rc; } @@ -4302,20 +4269,18 @@ int pci_try_reset_function(struct pci_dev *dev) { int rc; - rc = pci_dev_reset(dev, 1); + rc = pci_probe_reset_function(dev); if (rc) return rc; - pci_dev_save_and_disable(dev); + if (!pci_dev_trylock(dev)) + return -EAGAIN; - if (pci_dev_trylock(dev)) { - rc = __pci_dev_reset(dev, 0); - pci_dev_unlock(dev); - } else - rc = -EAGAIN; + pci_dev_save_and_disable(dev); + rc = __pci_reset_function_locked(dev); + pci_dev_unlock(dev); pci_dev_restore(dev); - return rc; } EXPORT_SYMBOL_GPL(pci_try_reset_function); @@ -4465,7 +4430,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); pci_dev_save_and_disable(dev); + pci_dev_unlock(dev); if (dev->subordinate) pci_bus_save_and_disable(dev->subordinate); } @@ -4480,7 +4447,9 @@ static void pci_bus_restore(struct pci_bus *bus) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); pci_dev_restore(dev); + pci_dev_unlock(dev); if (dev->subordinate) pci_bus_restore(dev->subordinate); } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 240b2c0fed4b..03e3d0285aea 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -267,7 +267,6 @@ struct pci_sriov { u16 driver_max_VFs; /* max num VFs driver supports */ struct pci_dev *dev; /* lowest numbered PF */ struct pci_dev *self; /* this PF */ - struct mutex lock; /* lock for setting sriov_numvfs in sysfs */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ bool drivers_autoprobe; /* auto probing of VFs by driver */ }; diff --git 
a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c index 77d2ca99d2ec..c39f32e42b4d 100644 --- a/drivers/pci/pcie/pcie-dpc.c +++ b/drivers/pci/pcie/pcie-dpc.c @@ -92,7 +92,7 @@ static irqreturn_t dpc_irq(int irq, void *context) pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID, &source); - if (!status) + if (!status || status == (u16)(~0)) return IRQ_NONE; dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n", @@ -144,7 +144,7 @@ static int dpc_probe(struct pcie_device *dev) dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT); - ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; + ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 587aef36030d..4334fd5d7de9 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -13,10 +13,11 @@ #define PCIE_PORT_DEVICE_MAXSERVICES 5 /* - * According to the PCI Express Base Specification 2.0, the indices of - * the MSI-X table entries used by port services must not exceed 31 + * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must + * be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI + * supports a maximum of 32 vectors per function. */ -#define PCIE_PORT_MAX_MSIX_ENTRIES 32 +#define PCIE_PORT_MAX_MSI_ENTRIES 32 #define get_descriptor_id(type, service) (((type - 4) << 8) | service) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index cea504f6f478..313a21df1692 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -44,14 +44,15 @@ static void release_pcie_device(struct device *dev) } /** - * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port + * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode + * for given port * @dev: PCI Express port to handle * @irqs: Array of interrupt vectors to populate * @mask: Bitmask of port capabilities returned by get_port_device_capability() * * Return value: 0 on success, error code on failure */ -static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) +static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) { int nr_entries, entry, nvec = 0; @@ -61,8 +62,8 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) * equal to the number of entries this port actually uses, we'll happily * go through without any tricks. 
*/ - nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES, - PCI_IRQ_MSIX); + nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES, + PCI_IRQ_MSIX | PCI_IRQ_MSI); if (nr_entries < 0) return nr_entries; @@ -70,14 +71,19 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) u16 reg16; /* - * The code below follows the PCI Express Base Specification 2.0 - * stating in Section 6.1.6 that "PME and Hot-Plug Event - * interrupts (when both are implemented) always share the same - * MSI or MSI-X vector, as indicated by the Interrupt Message - * Number field in the PCI Express Capabilities register", where - * according to Section 7.8.2 of the specification "For MSI-X, - * the value in this field indicates which MSI-X Table entry is - * used to generate the interrupt message." + * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event + * interrupts (when both are implemented) always share the + * same MSI or MSI-X vector, as indicated by the Interrupt + * Message Number field in the PCI Express Capabilities + * register". + * + * Per sec 7.8.2, "For MSI, the [Interrupt Message Number] + * indicates the offset between the base Message Data and + * the interrupt message that is generated." + * + * "For MSI-X, the [Interrupt Message Number] indicates + * which MSI-X Table entry is used to generate the + * interrupt message." */ pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16); entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; @@ -94,13 +100,17 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) u32 reg32, pos; /* - * The code below follows Section 7.10.10 of the PCI Express - * Base Specification 2.0 stating that bits 31-27 of the Root - * Error Status Register contain a value indicating which of the - * MSI/MSI-X vectors assigned to the port is going to be used - * for AER, where "For MSI-X, the value in this register - * indicates which MSI-X Table entry is used to generate the - * interrupt message." + * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt + * Message Number in the Root Error Status register + * indicates which MSI/MSI-X vector is used for AER. + * + * "For MSI, the [Advanced Error Interrupt Message Number] + * indicates the offset between the base Message Data and + * the interrupt message that is generated." + * + * "For MSI-X, the [Advanced Error Interrupt Message + * Number] indicates which MSI-X Table entry is used to + * generate the interrupt message." */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); @@ -113,6 +123,33 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) nvec = max(nvec, entry + 1); } + if (mask & PCIE_PORT_SERVICE_DPC) { + u16 reg16, pos; + + /* + * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt + * Message Number in the DPC Capability register indicates + * which MSI/MSI-X vector is used for DPC. + * + * "For MSI, the [DPC Interrupt Message Number] indicates + * the offset between the base Message Data and the + * interrupt message that is generated." + * + * "For MSI-X, the [DPC Interrupt Message Number] indicates + * which MSI-X Table entry is used to generate the + * interrupt message." 
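+ *
+ * Editor's note: the DPC Interrupt Message Number occupies the low
+ * five bits of the DPC Capability register, hence the 0x1f mask
+ * applied below.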
+ */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC); + pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, ®16); + entry = reg16 & 0x1f; + if (entry >= nr_entries) + goto out_free_irqs; + + irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry); + + nvec = max(nvec, entry + 1); + } + /* * If nvec is equal to the allocated number of entries, we can just use * what we have. Otherwise, the port has some extra entries not for the @@ -124,7 +161,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) /* Now allocate the MSI-X vectors for real */ nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec, - PCI_IRQ_MSIX); + PCI_IRQ_MSIX | PCI_IRQ_MSI); if (nr_entries < 0) return nr_entries; } @@ -146,26 +183,29 @@ out_free_irqs: */ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask) { - unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI; int ret, i; for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) irqs[i] = -1; /* - * If MSI cannot be used for PCIe PME or hotplug, we have to use - * INTx or other interrupts, e.g. system shared interrupt. + * If we support PME or hotplug, but we can't use MSI/MSI-X for + * them, we have to fall back to INTx or other interrupts, e.g., a + * system shared interrupt. */ - if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) || - ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) { - flags &= ~PCI_IRQ_MSI; - } else { - /* Try to use MSI-X if supported */ - if (!pcie_port_enable_msix(dev, irqs, mask)) - return 0; - } + if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) + goto legacy_irq; + + if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi()) + goto legacy_irq; + + /* Try to use MSI-X or MSI if supported */ + if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0) + return 0; - ret = pci_alloc_irq_vectors(dev, 1, 1, flags); +legacy_irq: + /* fall back to legacy IRQ */ + ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY); if (ret < 0) return -ENODEV; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 19c8950c6c38..c31310db0404 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -510,16 +510,18 @@ static struct pci_bus *pci_alloc_bus(struct pci_bus *parent) return b; } -static void pci_release_host_bridge_dev(struct device *dev) +static void devm_pci_release_host_bridge_dev(struct device *dev) { struct pci_host_bridge *bridge = to_pci_host_bridge(dev); if (bridge->release_fn) bridge->release_fn(bridge); +} - pci_free_resource_list(&bridge->windows); - - kfree(bridge); +static void pci_release_host_bridge_dev(struct device *dev) +{ + devm_pci_release_host_bridge_dev(dev); + pci_free_host_bridge(to_pci_host_bridge(dev)); } struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) @@ -531,11 +533,36 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) return NULL; INIT_LIST_HEAD(&bridge->windows); + bridge->dev.release = pci_release_host_bridge_dev; return bridge; } EXPORT_SYMBOL(pci_alloc_host_bridge); +struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, + size_t priv) +{ + struct pci_host_bridge *bridge; + + bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL); + if (!bridge) + return NULL; + + INIT_LIST_HEAD(&bridge->windows); + bridge->dev.release = devm_pci_release_host_bridge_dev; + + return bridge; +} +EXPORT_SYMBOL(devm_pci_alloc_host_bridge); + +void pci_free_host_bridge(struct pci_host_bridge *bridge) +{ + pci_free_resource_list(&bridge->windows); + + kfree(bridge); +} +EXPORT_SYMBOL(pci_free_host_bridge); + static const unsigned char 
pcix_bus_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCI_SPEED_66MHz_PCIX, /* 1 */ @@ -719,7 +746,7 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus) dev_set_msi_domain(&bus->dev, d); } -int pci_register_host_bridge(struct pci_host_bridge *bridge) +static int pci_register_host_bridge(struct pci_host_bridge *bridge) { struct device *parent = bridge->dev.parent; struct resource_entry *window, *n; @@ -834,7 +861,6 @@ free: kfree(bus); return err; } -EXPORT_SYMBOL(pci_register_host_bridge); static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr) @@ -1330,6 +1356,34 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev) } /** + * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability + * @dev: PCI device + * + * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this + * at enumeration-time to avoid modifying PCI_COMMAND at run-time. + */ +static int pci_intx_mask_broken(struct pci_dev *dev) +{ + u16 orig, toggle, new; + + pci_read_config_word(dev, PCI_COMMAND, &orig); + toggle = orig ^ PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(dev, PCI_COMMAND, toggle); + pci_read_config_word(dev, PCI_COMMAND, &new); + + pci_write_config_word(dev, PCI_COMMAND, orig); + + /* + * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI + * r2.3, so strictly speaking, a device is not *broken* if it's not + * writable. But we'll live with the misnomer for now. + */ + if (new != toggle) + return 1; + return 0; +} + +/** * pci_setup_device - fill in class and map information of a device * @dev: the device structure to fill * @@ -1399,6 +1453,8 @@ int pci_setup_device(struct pci_dev *dev) } } + dev->broken_intx_masking = pci_intx_mask_broken(dev); + switch (dev->hdr_type) { /* header type */ case PCI_HEADER_TYPE_NORMAL: /* standard header */ if (class == PCI_CLASS_BRIDGE_PCI) @@ -1674,6 +1730,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) /* Initialize Advanced Error Capabilities and Control Register */ pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; + /* Don't enable ECRC generation or checking if unsupported */ + if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) + reg32 &= ~PCI_ERR_CAP_ECRC_GENE; + if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) + reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); /* @@ -2298,9 +2359,8 @@ void __weak pcibios_remove_bus(struct pci_bus *bus) { } -static struct pci_bus *pci_create_root_bus_msi(struct device *parent, - int bus, struct pci_ops *ops, void *sysdata, - struct list_head *resources, struct msi_controller *msi) +struct pci_bus *pci_create_root_bus(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata, struct list_head *resources) { int error; struct pci_host_bridge *bridge; @@ -2310,13 +2370,11 @@ static struct pci_bus *pci_create_root_bus_msi(struct device *parent, return NULL; bridge->dev.parent = parent; - bridge->dev.release = pci_release_host_bridge_dev; list_splice_init(resources, &bridge->windows); bridge->sysdata = sysdata; bridge->busnr = bus; bridge->ops = ops; - bridge->msi = msi; error = pci_register_host_bridge(bridge); if (error < 0) @@ -2328,13 +2386,6 @@ err_out: kfree(bridge); return NULL; } - -struct pci_bus *pci_create_root_bus(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata, struct list_head *resources) -{ - return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, - NULL); -} 
EXPORT_SYMBOL_GPL(pci_create_root_bus); int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) @@ -2400,24 +2451,28 @@ void pci_bus_release_busn_res(struct pci_bus *b) res, ret ? "can not be" : "is"); } -struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata, - struct list_head *resources, struct msi_controller *msi) +int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge) { struct resource_entry *window; bool found = false; struct pci_bus *b; - int max; + int max, bus, ret; - resource_list_for_each_entry(window, resources) + if (!bridge) + return -EINVAL; + + resource_list_for_each_entry(window, &bridge->windows) if (window->res->flags & IORESOURCE_BUS) { found = true; break; } - b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi); - if (!b) - return NULL; + ret = pci_register_host_bridge(bridge); + if (ret < 0) + return ret; + + b = bridge->bus; + bus = bridge->busnr; if (!found) { dev_info(&b->dev, @@ -2431,14 +2486,41 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus, if (!found) pci_bus_update_busn_res_end(b, max); - return b; + return 0; } +EXPORT_SYMBOL(pci_scan_root_bus_bridge); struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, struct list_head *resources) { - return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources, - NULL); + struct resource_entry *window; + bool found = false; + struct pci_bus *b; + int max; + + resource_list_for_each_entry(window, resources) + if (window->res->flags & IORESOURCE_BUS) { + found = true; + break; + } + + b = pci_create_root_bus(parent, bus, ops, sysdata, resources); + if (!b) + return NULL; + + if (!found) { + dev_info(&b->dev, + "No busn resource found for root bus, will use [bus %02x-ff]\n", + bus); + pci_bus_insert_busn_res(b, bus, 255); + } + + max = pci_scan_child_bus(b); + + if (!found) + pci_bus_update_busn_res_end(b, max); + + return b; } EXPORT_SYMBOL(pci_scan_root_bus); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 085fb787aa9e..6967c6b4cf6b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -304,7 +304,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) { int i; - for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { struct resource *r = &dev->resource[i]; if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { @@ -1684,6 +1684,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); +static void quirk_radeon_pm(struct pci_dev *dev) +{ + if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE && + dev->subsystem_device == 0x00e2) { + if (dev->d3_delay < 20) { + dev->d3_delay = 20; + dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n", + dev->d3_delay); + } + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm); + #ifdef CONFIG_X86_IO_APIC static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) { @@ -3236,6 +3249,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, + quirk_broken_intx_masking); 
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c index 95c225be49d1..81eda3d93a5d 100644 --- a/drivers/pci/setup-irq.c +++ b/drivers/pci/setup-irq.c @@ -15,6 +15,7 @@ #include <linux/errno.h> #include <linux/ioport.h> #include <linux/cache.h> +#include "pci.h" void __weak pcibios_update_irq(struct pci_dev *dev, int irq) { @@ -22,12 +23,17 @@ void __weak pcibios_update_irq(struct pci_dev *dev, int irq) pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } -static void pdev_fixup_irq(struct pci_dev *dev, - u8 (*swizzle)(struct pci_dev *, u8 *), - int (*map_irq)(const struct pci_dev *, u8, u8)) +void pci_assign_irq(struct pci_dev *dev) { - u8 pin, slot; + u8 pin; + u8 slot = -1; int irq = 0; + struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus); + + if (!(hbrg->map_irq)) { + dev_dbg(&dev->dev, "runtime IRQ mapping not provided by arch\n"); + return; + } /* If this device is not on the primary bus, we need to figure out which interrupt pin it will come in on. We know which slot it @@ -40,17 +46,22 @@ static void pdev_fixup_irq(struct pci_dev *dev, if (pin > 4) pin = 1; - if (pin != 0) { + if (pin) { /* Follow the chain of bridges, swizzling as we go. */ - slot = (*swizzle)(dev, &pin); + if (hbrg->swizzle_irq) + slot = (*(hbrg->swizzle_irq))(dev, &pin); - irq = (*map_irq)(dev, slot, pin); + /* + * If a swizzling function is not used, map_irq must + * ignore slot + */ + irq = (*(hbrg->map_irq))(dev, slot, pin); if (irq == -1) irq = 0; } dev->irq = irq; - dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq); + dev_dbg(&dev->dev, "assign IRQ: got %d\n", dev->irq); /* Always tell the device, so the driver knows what is the real IRQ to use; the device does not use it. */ @@ -60,9 +71,23 @@ static void pdev_fixup_irq(struct pci_dev *dev, void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), int (*map_irq)(const struct pci_dev *, u8, u8)) { + /* + * Implement pci_fixup_irqs() through pci_assign_irq(). + * This code should be removed eventually; it is a wrapper + * around the pci_assign_irq() interface to keep the current + * pci_fixup_irqs() behaviour unchanged for architecture + * code still relying on its interface.
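+ *
+ * Editor's note: new host bridge drivers should not use this
+ * wrapper; they set bridge->map_irq and bridge->swizzle_irq before
+ * registering the bridge (see the rockchip, nwl and xilinx
+ * conversions above), and pci_device_probe() then calls
+ * pci_assign_irq() at probe time.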
+ */ struct pci_dev *dev = NULL; + struct pci_host_bridge *hbrg = NULL; - for_each_pci_dev(dev) - pdev_fixup_irq(dev, swizzle, map_irq); + for_each_pci_dev(dev) { + hbrg = pci_find_host_bridge(dev->bus); + hbrg->swizzle_irq = swizzle; + hbrg->map_irq = map_irq; + pci_assign_irq(dev); + hbrg->swizzle_irq = NULL; + hbrg->map_irq = NULL; + } } EXPORT_SYMBOL_GPL(pci_fixup_irqs); diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index f6a63406c76e..af81b2dec42e 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -120,6 +120,13 @@ struct sw_event_regs { u32 reserved16[4]; } __packed; +enum { + SWITCHTEC_CFG0_RUNNING = 0x04, + SWITCHTEC_CFG1_RUNNING = 0x05, + SWITCHTEC_IMG0_RUNNING = 0x03, + SWITCHTEC_IMG1_RUNNING = 0x07, +}; + struct sys_info_regs { u32 device_id; u32 device_version; @@ -129,7 +136,9 @@ struct sys_info_regs { u32 table_format_version; u32 partition_id; u32 cfg_file_fmt_version; - u32 reserved2[58]; + u16 cfg_running; + u16 img_running; + u32 reserved2[57]; char vendor_id[8]; char product_id[16]; char product_revision[4]; @@ -807,6 +816,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, { struct switchtec_ioctl_flash_part_info info = {0}; struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; + struct sys_info_regs __iomem *si = stdev->mmio_sys_info; u32 active_addr = -1; if (copy_from_user(&info, uinfo, sizeof(info))) @@ -816,18 +826,26 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, case SWITCHTEC_IOCTL_PART_CFG0: active_addr = ioread32(&fi->active_cfg); set_fw_info_part(&info, &fi->cfg0); + if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING) + info.active |= SWITCHTEC_IOCTL_PART_RUNNING; break; case SWITCHTEC_IOCTL_PART_CFG1: active_addr = ioread32(&fi->active_cfg); set_fw_info_part(&info, &fi->cfg1); + if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING) + info.active |= SWITCHTEC_IOCTL_PART_RUNNING; break; case SWITCHTEC_IOCTL_PART_IMG0: active_addr = ioread32(&fi->active_img); set_fw_info_part(&info, &fi->img0); + if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING) + info.active |= SWITCHTEC_IOCTL_PART_RUNNING; break; case SWITCHTEC_IOCTL_PART_IMG1: active_addr = ioread32(&fi->active_img); set_fw_info_part(&info, &fi->img1); + if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING) + info.active |= SWITCHTEC_IOCTL_PART_RUNNING; break; case SWITCHTEC_IOCTL_PART_NVLOG: set_fw_info_part(&info, &fi->nvlog); @@ -861,7 +879,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, } if (info.address == active_addr) - info.active = 1; + info.active |= SWITCHTEC_IOCTL_PART_ACTIVE; if (copy_to_user(uinfo, &info, sizeof(info))) return -EFAULT; @@ -1540,6 +1558,24 @@ static const struct pci_device_id switchtec_pci_tbl[] = { SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3 SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3 + SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3 + SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3 + SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3 + SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3 + SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3 + SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3 + SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3 + SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3 + SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3 + SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3 + SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3 + SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3 + SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3 + SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3 + 
SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3 + SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3 + SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3 + SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3 {0} }; MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl); diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c index 00145a3f1e70..faf6f7a83713 100644 --- a/drivers/ptp/ptp_dte.c +++ b/drivers/ptp/ptp_dte.c @@ -40,7 +40,7 @@ #define DTE_WRAP_AROUND_NSEC_SHIFT 44 /* 44 bits NCO */ -#define DTE_NCO_MAX_NS 0xFFFFFFFFFFF +#define DTE_NCO_MAX_NS 0xFFFFFFFFFFFLL /* 125MHz with 3.29 reg cfg */ #define DTE_PPB_ADJ(ppb) (u32)(div64_u64((((u64)abs(ppb) * BIT(28)) +\ diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index d4d2d8d9f3e7..cda10719d1d1 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -253,10 +253,6 @@ static int spidev_message(struct spidev_data *spidev, goto done; } k_tmp->rx_buf = rx_buf; - if (!access_ok(VERIFY_WRITE, (u8 __user *) - (uintptr_t) u_tmp->rx_buf, - u_tmp->len)) - goto done; rx_buf += k_tmp->len; } if (u_tmp->tx_buf) { @@ -304,7 +300,7 @@ static int spidev_message(struct spidev_data *spidev, rx_buf = spidev->rx_buffer; for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { if (u_tmp->rx_buf) { - if (__copy_to_user((u8 __user *) + if (copy_to_user((u8 __user *) (uintptr_t) u_tmp->rx_buf, rx_buf, u_tmp->len)) { status = -EFAULT; @@ -346,7 +342,6 @@ spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc, static long spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - int err = 0; int retval = 0; struct spidev_data *spidev; struct spi_device *spi; @@ -358,19 +353,6 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC) return -ENOTTY; - /* Check access direction once here; don't repeat below. - * IOC_DIR is from the user perspective, while access_ok is - * from the kernel perspective; so they look reversed. - */ - if (_IOC_DIR(cmd) & _IOC_READ) - err = !access_ok(VERIFY_WRITE, - (void __user *)arg, _IOC_SIZE(cmd)); - if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE) - err = !access_ok(VERIFY_READ, - (void __user *)arg, _IOC_SIZE(cmd)); - if (err) - return -EFAULT; - /* guard against device removal before, or while, * we issue this ioctl. */ @@ -393,31 +375,31 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { /* read requests */ case SPI_IOC_RD_MODE: - retval = __put_user(spi->mode & SPI_MODE_MASK, + retval = put_user(spi->mode & SPI_MODE_MASK, (__u8 __user *)arg); break; case SPI_IOC_RD_MODE32: - retval = __put_user(spi->mode & SPI_MODE_MASK, + retval = put_user(spi->mode & SPI_MODE_MASK, (__u32 __user *)arg); break; case SPI_IOC_RD_LSB_FIRST: - retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0, + retval = put_user((spi->mode & SPI_LSB_FIRST) ? 
1 : 0, (__u8 __user *)arg); break; case SPI_IOC_RD_BITS_PER_WORD: - retval = __put_user(spi->bits_per_word, (__u8 __user *)arg); + retval = put_user(spi->bits_per_word, (__u8 __user *)arg); break; case SPI_IOC_RD_MAX_SPEED_HZ: - retval = __put_user(spidev->speed_hz, (__u32 __user *)arg); + retval = put_user(spidev->speed_hz, (__u32 __user *)arg); break; /* write requests */ case SPI_IOC_WR_MODE: case SPI_IOC_WR_MODE32: if (cmd == SPI_IOC_WR_MODE) - retval = __get_user(tmp, (u8 __user *)arg); + retval = get_user(tmp, (u8 __user *)arg); else - retval = __get_user(tmp, (u32 __user *)arg); + retval = get_user(tmp, (u32 __user *)arg); if (retval == 0) { u32 save = spi->mode; @@ -436,7 +418,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; case SPI_IOC_WR_LSB_FIRST: - retval = __get_user(tmp, (__u8 __user *)arg); + retval = get_user(tmp, (__u8 __user *)arg); if (retval == 0) { u32 save = spi->mode; @@ -453,7 +435,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; case SPI_IOC_WR_BITS_PER_WORD: - retval = __get_user(tmp, (__u8 __user *)arg); + retval = get_user(tmp, (__u8 __user *)arg); if (retval == 0) { u8 save = spi->bits_per_word; @@ -466,7 +448,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; case SPI_IOC_WR_MAX_SPEED_HZ: - retval = __get_user(tmp, (__u32 __user *)arg); + retval = get_user(tmp, (__u32 __user *)arg); if (retval == 0) { u32 save = spi->max_speed_hz; @@ -516,8 +498,6 @@ spidev_compat_ioc_message(struct file *filp, unsigned int cmd, struct spi_ioc_transfer *ioc; u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg); - if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd))) - return -EFAULT; /* guard against device removal before, or while, * we issue this ioctl. 
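Editor's note (illustrative sketch, not part of the series): the spidev and compat_ioctl hunks above share one pattern: explicit access_ok()/__get_user()/__put_user()/__copy_*_user() combinations are replaced by plain get_user()/put_user()/copy_{to,from}_user(), which validate the user pointer internally and return nonzero on failure. A minimal sketch of the resulting ioctl helpers, using hypothetical example_* names:

#include <linux/types.h>
#include <linux/uaccess.h>

struct example_dev {
	u32 speed_hz;
};

/* Read request: put_user() checks the user pointer itself, so no
 * separate access_ok() probe is needed; it returns 0 or -EFAULT. */
static long example_rd_speed(struct example_dev *edev, u32 __user *argp)
{
	return put_user(edev->speed_hz, argp);
}

/* Write request: get_user() likewise fails cleanly on a bad pointer. */
static long example_wr_speed(struct example_dev *edev, const u32 __user *argp)
{
	u32 tmp;

	if (get_user(tmp, argp))
		return -EFAULT;

	edev->speed_hz = tmp;
	return 0;
}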
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index b827a8113e26..ff01bed7112f 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -408,7 +408,7 @@ static void efifb_fixup_resources(struct pci_dev *dev) if (!base) return; - for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { struct resource *res = &dev->resource[i]; if (!(res->flags & IORESOURCE_MEM)) diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 112b3e1e20e3..2dd4a7af7dd7 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -739,23 +739,22 @@ static int do_i2c_smbus_ioctl(struct file *file, unsigned int cmd, struct i2c_smbus_ioctl_data32 __user *udata) { struct i2c_smbus_ioctl_data __user *tdata; - compat_caddr_t datap; + union { + /* beginnings of those have identical layouts */ + struct i2c_smbus_ioctl_data32 data32; + struct i2c_smbus_ioctl_data data; + } v; tdata = compat_alloc_user_space(sizeof(*tdata)); if (tdata == NULL) return -ENOMEM; - if (!access_ok(VERIFY_WRITE, tdata, sizeof(*tdata))) - return -EFAULT; - if (!access_ok(VERIFY_READ, udata, sizeof(*udata))) + memset(&v, 0, sizeof(v)); + if (copy_from_user(&v.data32, udata, sizeof(v.data32))) return -EFAULT; + v.data.data = compat_ptr(v.data32.data); - if (__copy_in_user(&tdata->read_write, &udata->read_write, 2 * sizeof(u8))) - return -EFAULT; - if (__copy_in_user(&tdata->size, &udata->size, 2 * sizeof(u32))) - return -EFAULT; - if (__get_user(datap, &udata->data) || - __put_user(compat_ptr(datap), &tdata->data)) + if (copy_to_user(tdata, &v.data, sizeof(v.data))) return -EFAULT; return do_ioctl(file, cmd, (unsigned long)tdata); diff --git a/fs/dcache.c b/fs/dcache.c index a140fe1dbb1a..7ece68d0d4db 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -277,6 +277,33 @@ static inline int dname_external(const struct dentry *dentry) return dentry->d_name.name != dentry->d_iname; } +void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + if (unlikely(dname_external(dentry))) { + struct external_name *p = external_name(dentry); + atomic_inc(&p->u.count); + spin_unlock(&dentry->d_lock); + name->name = p->name; + } else { + memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN); + spin_unlock(&dentry->d_lock); + name->name = name->inline_name; + } +} +EXPORT_SYMBOL(take_dentry_name_snapshot); + +void release_dentry_name_snapshot(struct name_snapshot *name) +{ + if (unlikely(name->name != name->inline_name)) { + struct external_name *p; + p = container_of(name->name, struct external_name, name[0]); + if (unlikely(atomic_dec_and_test(&p->u.count))) + kfree_rcu(p, u.head); + } +} +EXPORT_SYMBOL(release_dentry_name_snapshot); + static inline void __d_set_inode_and_type(struct dentry *dentry, struct inode *inode, unsigned type_flags) @@ -3598,6 +3625,11 @@ EXPORT_SYMBOL(d_genocide); void __init vfs_caches_init_early(void) { + int i; + + for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++) + INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]); + dcache_init_early(); inode_init_early(); } diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 77440e4aa9d4..a0e4e2f7e0be 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -766,7 +766,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, { int error; struct dentry *dentry = NULL, *trap; - const char *old_name; + struct name_snapshot old_name; trap = lock_rename(new_dir, old_dir); /* Source or destination directories don't exist? 
*/ @@ -781,19 +781,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry)) goto exit; - old_name = fsnotify_oldname_init(old_dentry->d_name.name); + take_dentry_name_snapshot(&old_name, old_dentry); error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir), dentry, 0); if (error) { - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); goto exit; } d_move(old_dentry, dentry); - fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name, + fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name, d_is_dir(old_dentry), NULL, old_dentry); - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); unlock_rename(new_dir, old_dir); dput(dentry); return old_dentry; diff --git a/fs/filesystems.c b/fs/filesystems.c index cac75547d35c..8b99955e3504 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -275,8 +275,10 @@ struct file_system_type *get_fs_type(const char *name) int len = dot ? dot - name : strlen(name); fs = __get_fs_type(name, len); - if (!fs && (request_module("fs-%.*s", len, name) == 0)) + if (!fs && (request_module("fs-%.*s", len, name) == 0)) { fs = __get_fs_type(name, len); + WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name); + } if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { put_filesystem(fs); diff --git a/fs/inode.c b/fs/inode.c index 5cbc8e6e9390..50370599e371 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -2014,7 +2014,7 @@ bool inode_owner_or_capable(const struct inode *inode) return true; ns = current_user_ns(); - if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid)) + if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER)) return true; return false; } diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c index 4c57c9af6946..2d1ca08870f7 100644 --- a/fs/minix/itree_common.c +++ b/fs/minix/itree_common.c @@ -142,7 +142,7 @@ changed: return -EAGAIN; } -static inline int get_block(struct inode * inode, sector_t block, +static int get_block(struct inode * inode, sector_t block, struct buffer_head *bh, int create) { int err = -EIO; diff --git a/fs/namei.c b/fs/namei.c index 8bacc390c51e..e0b46eb0e212 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1008,7 +1008,7 @@ static int may_linkat(struct path *link) /* Source inode owner (or CAP_FOWNER) can hardlink all they like, * otherwise, it must be a safe source. 
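 *
 * Editor's note: this reordering, like the inode_owner_or_capable()
 * change in fs/inode.c above, appears intended to run the cheap,
 * side-effect-free check first, so that a capability check (which can
 * set PF_SUPERPRIV and emit audit records) only happens when its
 * result can still change the outcome.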
*/ - if (inode_owner_or_capable(inode) || safe_hardlink_source(inode)) + if (safe_hardlink_source(inode) || inode_owner_or_capable(inode)) return 0; audit_log_link_denied("linkat", link); @@ -4363,11 +4363,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, { int error; bool is_dir = d_is_dir(old_dentry); - const unsigned char *old_name; struct inode *source = old_dentry->d_inode; struct inode *target = new_dentry->d_inode; bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; + struct name_snapshot old_name; if (source == target) return 0; @@ -4414,7 +4414,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (error) return error; - old_name = fsnotify_oldname_init(old_dentry->d_name.name); + take_dentry_name_snapshot(&old_name, old_dentry); dget(new_dentry); if (!is_dir || (flags & RENAME_EXCHANGE)) lock_two_nondirectories(source, target); @@ -4469,14 +4469,14 @@ out: inode_unlock(target); dput(new_dentry); if (!error) { - fsnotify_move(old_dir, new_dir, old_name, is_dir, + fsnotify_move(old_dir, new_dir, old_name.name, is_dir, !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry); if (flags & RENAME_EXCHANGE) { fsnotify_move(new_dir, old_dir, old_dentry->d_name.name, new_is_dir, NULL, new_dentry); } } - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); return error; } diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 01a9f0f007d4..0c4583b61717 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -161,16 +161,20 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask if (unlikely(!fsnotify_inode_watches_children(p_inode))) __fsnotify_update_child_dentry_flags(p_inode); else if (p_inode->i_fsnotify_mask & mask) { + struct name_snapshot name; + /* we are notifying a parent so come up with the new mask which * specifies these are events which came from a child. 
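 *
 * The name snapshot below is what makes this safe: the caller holds
 * no lock that pins d_name, so a concurrent rename could otherwise
 * free an external name while fsnotify() is still dereferencing it.
 * The usage pattern is always the same (sketch):
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	... pass snap.name around; it stays valid ...
 *	release_dentry_name_snapshot(&snap);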
*/ mask |= FS_EVENT_ON_CHILD; + take_dentry_name_snapshot(&name, dentry); if (path) ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH, - dentry->d_name.name, 0); + name.name, 0); else ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE, - dentry->d_name.name, 0); + name.name, 0); + release_dentry_name_snapshot(&name); } dput(parent); diff --git a/fs/read_write.c b/fs/read_write.c index a2cbc8303dae..0cc7033aa413 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -925,12 +925,10 @@ static ssize_t do_iter_write(struct file *file, struct iov_iter *iter, if (ret < 0) return ret; - file_start_write(file); if (file->f_op->write_iter) ret = do_iter_readv_writev(file, iter, pos, WRITE, flags); else ret = do_loop_readv_writev(file, iter, pos, WRITE, flags); - file_end_write(file); if (ret > 0) fsnotify_modify(file); return ret; @@ -973,7 +971,9 @@ ssize_t vfs_writev(struct file *file, const struct iovec __user *vec, ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); if (ret >= 0) { + file_start_write(file); ret = do_iter_write(file, &iter, pos, flags); + file_end_write(file); kfree(iov); } return ret; @@ -1241,7 +1241,9 @@ static size_t compat_writev(struct file *file, ret = compat_import_iovec(WRITE, vec, vlen, UIO_FASTIOV, &iov, &iter); if (ret >= 0) { + file_start_write(file); ret = do_iter_write(file, &iter, pos, flags); + file_end_write(file); kfree(iov); } if (ret > 0) diff --git a/fs/statfs.c b/fs/statfs.c index 41a6a82da5e2..fab9b6a3c116 100644 --- a/fs/statfs.c +++ b/fs/statfs.c @@ -38,6 +38,8 @@ static int flags_by_sb(int s_flags) flags |= ST_SYNCHRONOUS; if (s_flags & MS_MANDLOCK) flags |= ST_MANDLOCK; + if (s_flags & MS_RDONLY) + flags |= ST_RDONLY; return flags; } diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h index 580b5323a717..ab036b6b1804 100644 --- a/include/linux/amba/pl080.h +++ b/include/linux/amba/pl080.h @@ -44,7 +44,14 @@ #define PL080_SYNC (0x34) -/* Per channel configuration registers */ +/* The Faraday Technology FTDMAC020 variant registers */ +#define FTDMAC020_CH_BUSY (0x20) +/* Identical to PL080_CONFIG */ +#define FTDMAC020_CSR (0x24) +/* Identical to PL080_SYNC */ +#define FTDMAC020_SYNC (0x2C) +#define FTDMAC020_REVISION (0x30) +#define FTDMAC020_FEATURE (0x34) /* Per channel configuration registers */ #define PL080_Cx_BASE(x) ((0x100 + (x * 0x20))) @@ -55,13 +62,20 @@ #define PL080_CH_CONFIG (0x10) #define PL080S_CH_CONTROL2 (0x10) #define PL080S_CH_CONFIG (0x14) - -#define PL080_LLI_ADDR_MASK (0x3fffffff << 2) +/* The Faraday FTDMAC020 derivative shuffles the registers around */ +#define FTDMAC020_CH_CSR (0x00) +#define FTDMAC020_CH_CFG (0x04) +#define FTDMAC020_CH_SRC_ADDR (0x08) +#define FTDMAC020_CH_DST_ADDR (0x0C) +#define FTDMAC020_CH_LLP (0x10) +#define FTDMAC020_CH_SIZE (0x14) + +#define PL080_LLI_ADDR_MASK GENMASK(31, 2) #define PL080_LLI_ADDR_SHIFT (2) #define PL080_LLI_LM_AHB2 BIT(0) #define PL080_CONTROL_TC_IRQ_EN BIT(31) -#define PL080_CONTROL_PROT_MASK (0x7 << 28) +#define PL080_CONTROL_PROT_MASK GENMASK(30, 28) #define PL080_CONTROL_PROT_SHIFT (28) #define PL080_CONTROL_PROT_CACHE BIT(30) #define PL080_CONTROL_PROT_BUFF BIT(29) @@ -70,16 +84,16 @@ #define PL080_CONTROL_SRC_INCR BIT(26) #define PL080_CONTROL_DST_AHB2 BIT(25) #define PL080_CONTROL_SRC_AHB2 BIT(24) -#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21) +#define PL080_CONTROL_DWIDTH_MASK GENMASK(23, 21) #define PL080_CONTROL_DWIDTH_SHIFT (21) -#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18) +#define PL080_CONTROL_SWIDTH_MASK 
GENMASK(20, 18) #define PL080_CONTROL_SWIDTH_SHIFT (18) -#define PL080_CONTROL_DB_SIZE_MASK (0x7 << 15) +#define PL080_CONTROL_DB_SIZE_MASK GENMASK(17, 15) #define PL080_CONTROL_DB_SIZE_SHIFT (15) -#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12) +#define PL080_CONTROL_SB_SIZE_MASK GENMASK(14, 12) #define PL080_CONTROL_SB_SIZE_SHIFT (12) -#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0) -#define PL080S_CONTROL_TRANSFER_SIZE_MASK (0x1ffffff << 0) +#define PL080_CONTROL_TRANSFER_SIZE_MASK GENMASK(11, 0) +#define PL080S_CONTROL_TRANSFER_SIZE_MASK GENMASK(24, 0) #define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0) #define PL080_BSIZE_1 (0x0) @@ -102,11 +116,11 @@ #define PL080_CONFIG_LOCK BIT(16) #define PL080_CONFIG_TC_IRQ_MASK BIT(15) #define PL080_CONFIG_ERR_IRQ_MASK BIT(14) -#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11) +#define PL080_CONFIG_FLOW_CONTROL_MASK GENMASK(13, 11) #define PL080_CONFIG_FLOW_CONTROL_SHIFT (11) -#define PL080_CONFIG_DST_SEL_MASK (0xf << 6) +#define PL080_CONFIG_DST_SEL_MASK GENMASK(9, 6) #define PL080_CONFIG_DST_SEL_SHIFT (6) -#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1) +#define PL080_CONFIG_SRC_SEL_MASK GENMASK(4, 1) #define PL080_CONFIG_SRC_SEL_SHIFT (1) #define PL080_CONFIG_ENABLE BIT(0) @@ -119,6 +133,73 @@ #define PL080_FLOW_PER2MEM_PER (0x6) #define PL080_FLOW_SRC2DST_SRC (0x7) +#define FTDMAC020_CH_CSR_TC_MSK BIT(31) +/* Later versions have a threshold in bits 24..26, */ +#define FTDMAC020_CH_CSR_FIFOTH_MSK GENMASK(26, 24) +#define FTDMAC020_CH_CSR_FIFOTH_SHIFT (24) +#define FTDMAC020_CH_CSR_CHPR1_MSK GENMASK(23, 22) +#define FTDMAC020_CH_CSR_PROT3 BIT(21) +#define FTDMAC020_CH_CSR_PROT2 BIT(20) +#define FTDMAC020_CH_CSR_PROT1 BIT(19) +#define FTDMAC020_CH_CSR_SRC_SIZE_MSK GENMASK(18, 16) +#define FTDMAC020_CH_CSR_SRC_SIZE_SHIFT (16) +#define FTDMAC020_CH_CSR_ABT BIT(15) +#define FTDMAC020_CH_CSR_SRC_WIDTH_MSK GENMASK(13, 11) +#define FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT (11) +#define FTDMAC020_CH_CSR_DST_WIDTH_MSK GENMASK(10, 8) +#define FTDMAC020_CH_CSR_DST_WIDTH_SHIFT (8) +#define FTDMAC020_CH_CSR_MODE BIT(7) +/* 00 = increase, 01 = decrease, 10 = fix */ +#define FTDMAC020_CH_CSR_SRCAD_CTL_MSK GENMASK(6, 5) +#define FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT (5) +#define FTDMAC020_CH_CSR_DSTAD_CTL_MSK GENMASK(4, 3) +#define FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT (3) +#define FTDMAC020_CH_CSR_SRC_SEL BIT(2) +#define FTDMAC020_CH_CSR_DST_SEL BIT(1) +#define FTDMAC020_CH_CSR_EN BIT(0) + +/* FIFO threshold setting */ +#define FTDMAC020_CH_CSR_FIFOTH_1 (0x0) +#define FTDMAC020_CH_CSR_FIFOTH_2 (0x1) +#define FTDMAC020_CH_CSR_FIFOTH_4 (0x2) +#define FTDMAC020_CH_CSR_FIFOTH_8 (0x3) +#define FTDMAC020_CH_CSR_FIFOTH_16 (0x4) +/* The FTDMAC020 supports 64bit wide transfers */ +#define FTDMAC020_WIDTH_64BIT (0x3) +/* Address can be increased, decreased or fixed */ +#define FTDMAC020_CH_CSR_SRCAD_CTL_INC (0x0) +#define FTDMAC020_CH_CSR_SRCAD_CTL_DEC (0x1) +#define FTDMAC020_CH_CSR_SRCAD_CTL_FIXED (0x2) + +#define FTDMAC020_CH_CFG_LLP_CNT_MASK GENMASK(19, 16) +#define FTDMAC020_CH_CFG_LLP_CNT_SHIFT (16) +#define FTDMAC020_CH_CFG_BUSY BIT(8) +#define FTDMAC020_CH_CFG_INT_ABT_MASK BIT(2) +#define FTDMAC020_CH_CFG_INT_ERR_MASK BIT(1) +#define FTDMAC020_CH_CFG_INT_TC_MASK BIT(0) + +/* Inside the LLIs, the applicable CSR fields are mapped differently */ +#define FTDMAC020_LLI_TC_MSK BIT(28) +#define FTDMAC020_LLI_SRC_WIDTH_MSK GENMASK(27, 25) +#define FTDMAC020_LLI_SRC_WIDTH_SHIFT (25) +#define FTDMAC020_LLI_DST_WIDTH_MSK GENMASK(24, 22) +#define FTDMAC020_LLI_DST_WIDTH_SHIFT (22) +#define 
FTDMAC020_LLI_SRCAD_CTL_MSK GENMASK(21, 20) +#define FTDMAC020_LLI_SRCAD_CTL_SHIFT (20) +#define FTDMAC020_LLI_DSTAD_CTL_MSK GENMASK(19, 18) +#define FTDMAC020_LLI_DSTAD_CTL_SHIFT (18) +#define FTDMAC020_LLI_SRC_SEL BIT(17) +#define FTDMAC020_LLI_DST_SEL BIT(16) +#define FTDMAC020_LLI_TRANSFER_SIZE_MASK GENMASK(11, 0) +#define FTDMAC020_LLI_TRANSFER_SIZE_SHIFT (0) + +#define FTDMAC020_CFG_LLP_CNT_MASK GENMASK(19, 16) +#define FTDMAC020_CFG_LLP_CNT_SHIFT (16) +#define FTDMAC020_CFG_BUSY BIT(8) +#define FTDMAC020_CFG_INT_ABT_MSK BIT(2) +#define FTDMAC020_CFG_INT_ERR_MSK BIT(1) +#define FTDMAC020_CFG_INT_TC_MSK BIT(0) + /* DMA linked list chain structure */ struct pl080_lli { diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 5308eae9ce35..79d1bcee738d 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -47,8 +47,6 @@ enum { * devices with static assignments * @muxval: a number usually used to poke into some mux register to * mux in the signal to this channel - * @cctl_memcpy: options for the channel control register for memcpy - * *** not used for slave channels *** * @addr: source/target address in physical memory for this DMA channel, * can be the address of a FIFO register for burst requests for example. * This can be left undefined if the PrimeCell API is used for configuring @@ -63,12 +61,28 @@ struct pl08x_channel_data { int min_signal; int max_signal; u32 muxval; - u32 cctl_memcpy; dma_addr_t addr; bool single; u8 periph_buses; }; +enum pl08x_burst_size { + PL08X_BURST_SZ_1, + PL08X_BURST_SZ_4, + PL08X_BURST_SZ_8, + PL08X_BURST_SZ_16, + PL08X_BURST_SZ_32, + PL08X_BURST_SZ_64, + PL08X_BURST_SZ_128, + PL08X_BURST_SZ_256, +}; + +enum pl08x_bus_width { + PL08X_BUS_WIDTH_8_BITS, + PL08X_BUS_WIDTH_16_BITS, + PL08X_BUS_WIDTH_32_BITS, +}; + /** * struct pl08x_platform_data - the platform configuration for the PL08x * PrimeCells. @@ -76,6 +90,11 @@ struct pl08x_channel_data { * platform, all inclusive, including multiplexed channels. The available * physical channels will be multiplexed around these signals as they are * requested, just enumerate all possible channels. 
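 *	A board file would typically instantiate this roughly as follows
 *	(a sketch only - the foo_* names are made up):
 *
 *	static struct pl08x_platform_data foo_pl08x_pdata = {
 *		.slave_channels = foo_slave_channels,
 *		.num_slave_channels = ARRAY_SIZE(foo_slave_channels),
 *		.memcpy_burst_size = PL08X_BURST_SZ_256,
 *		.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
 *		.memcpy_prot_buff = true,
 *		.memcpy_prot_cache = true,
 *	};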
+ * @num_slave_channels: number of elements in the slave channel array + * @memcpy_burst_size: the appropriate burst size for memcpy operations + * @memcpy_bus_width: memory bus width + * @memcpy_prot_buff: whether memcpy DMA is bufferable + * @memcpy_prot_cache: whether memcpy DMA is cacheable * @get_xfer_signal: request a physical signal to be used for a DMA transfer * immediately: if there is some multiplexing or similar blocking the use * of the channel the transfer can be denied by returning less than zero, @@ -90,7 +109,10 @@ struct pl08x_channel_data { struct pl08x_platform_data { struct pl08x_channel_data *slave_channels; unsigned int num_slave_channels; - struct pl08x_channel_data memcpy_channel; + enum pl08x_burst_size memcpy_burst_size; + enum pl08x_bus_width memcpy_bus_width; + bool memcpy_prot_buff; + bool memcpy_prot_cache; int (*get_xfer_signal)(const struct pl08x_channel_data *); void (*put_xfer_signal)(const struct pl08x_channel_data *, int); u8 lli_buses; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d2e38dc6172c..025727bf6797 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -591,5 +591,11 @@ static inline struct inode *d_real_inode(const struct dentry *dentry) return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0)); } +struct name_snapshot { + const char *name; + char inline_name[DNAME_INLINE_LEN]; +}; +void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); +void release_dentry_name_snapshot(struct name_snapshot *); #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index b43d3f5bd9ea..b78aa7ac77ce 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -293,35 +293,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) } } -#if defined(CONFIG_FSNOTIFY) /* notify helpers */ - -/* - * fsnotify_oldname_init - save off the old filename before we change it - */ -static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) -{ - return kstrdup(name, GFP_KERNEL); -} - -/* - * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init - */ -static inline void fsnotify_oldname_free(const unsigned char *old_name) -{ - kfree(old_name); -} - -#else /* CONFIG_FSNOTIFY */ - -static inline const char *fsnotify_oldname_init(const unsigned char *name) -{ - return NULL; -} - -static inline void fsnotify_oldname_free(const unsigned char *old_name) -{ -} - -#endif /* CONFIG_FSNOTIFY */ - #endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h index 52db62064c6e..c7346e33d958 100644 --- a/include/linux/input/sparse-keymap.h +++ b/include/linux/input/sparse-keymap.h @@ -51,7 +51,6 @@ struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev, int sparse_keymap_setup(struct input_dev *dev, const struct key_entry *keymap, int (*setup)(struct input_dev *, struct key_entry *)); -void sparse_keymap_free(struct input_dev *dev); void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke, unsigned int value, bool autorelease); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 37f8e354f564..431e1d83e274 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -291,7 +291,7 @@ extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); -int 
irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd); +int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); #else /* CONFIG_SMP */ @@ -331,7 +331,7 @@ irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) } static inline int -irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) +irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) { return maxvec; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e48ee2eaaa3e..779b23595596 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3284,6 +3284,7 @@ void __dev_notify_flags(struct net_device *, unsigned int old_flags, int dev_change_name(struct net_device *, const char *); int dev_set_alias(struct net_device *, const char *, size_t); int dev_change_net_namespace(struct net_device *, struct net *, const char *); +int __dev_set_mtu(struct net_device *, int); int dev_set_mtu(struct net_device *, int); void dev_set_group(struct net_device *, int); int dev_set_mac_address(struct net_device *, struct sockaddr *); diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index 57e0b8250947..782fb8e0755f 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h @@ -7,6 +7,7 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs); void pci_disable_pri(struct pci_dev *pdev); +void pci_restore_pri_state(struct pci_dev *pdev); int pci_reset_pri(struct pci_dev *pdev); #else /* CONFIG_PCI_PRI */ @@ -20,6 +21,10 @@ static inline void pci_disable_pri(struct pci_dev *pdev) { } +static inline void pci_restore_pri_state(struct pci_dev *pdev) +{ +} + static inline int pci_reset_pri(struct pci_dev *pdev) { return -ENODEV; @@ -31,6 +36,7 @@ static inline int pci_reset_pri(struct pci_dev *pdev) int pci_enable_pasid(struct pci_dev *pdev, int features); void pci_disable_pasid(struct pci_dev *pdev); +void pci_restore_pasid_state(struct pci_dev *pdev); int pci_pasid_features(struct pci_dev *pdev); int pci_max_pasids(struct pci_dev *pdev); @@ -45,6 +51,10 @@ static inline void pci_disable_pasid(struct pci_dev *pdev) { } +static inline void pci_restore_pasid_state(struct pci_dev *pdev) +{ +} + static inline int pci_pasid_features(struct pci_dev *pdev) { return -EINVAL; diff --git a/include/linux/pci.h b/include/linux/pci.h index 1ef093866581..4869e66dd659 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -360,6 +360,8 @@ struct pci_dev { unsigned int msix_enabled:1; unsigned int ari_enabled:1; /* ARI forwarding */ unsigned int ats_enabled:1; /* Address Translation Service */ + unsigned int pasid_enabled:1; /* Process Address Space ID */ + unsigned int pri_enabled:1; /* Page Request Interface */ unsigned int is_managed:1; unsigned int needs_freset:1; /* Dev requires fundamental reset */ unsigned int state_saved:1; @@ -370,7 +372,7 @@ struct pci_dev { unsigned int is_thunderbolt:1; /* Thunderbolt controller */ unsigned int __aer_firmware_first_valid:1; unsigned int __aer_firmware_first:1; - unsigned int broken_intx_masking:1; + unsigned int broken_intx_masking:1; /* INTx masking can't be used */ unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ unsigned int irq_managed:1; unsigned int has_secondary_link:1; @@ -404,6 +406,12 @@ struct pci_dev { u8 ats_stu; /* ATS Smallest Translation Unit */ atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */ #endif +#ifdef CONFIG_PCI_PRI + u32 pri_reqs_alloc; /* Number of PRI requests allocated */ +#endif +#ifdef CONFIG_PCI_PASID + u16 
pasid_features; +#endif phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ size_t romlen; /* Length of ROM if it's not from the BAR */ char *driver_override; /* Driver name to force a match */ @@ -437,6 +445,8 @@ struct pci_host_bridge { void *sysdata; int busnr; struct list_head windows; /* resource_entry */ + u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */ + int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); void *release_data; struct msi_controller *msi; @@ -463,7 +473,9 @@ static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv) } struct pci_host_bridge *pci_alloc_host_bridge(size_t priv); -int pci_register_host_bridge(struct pci_host_bridge *bridge); +struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, + size_t priv); +void pci_free_host_bridge(struct pci_host_bridge *bridge); struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); void pci_set_host_bridge_release(struct pci_host_bridge *bridge, @@ -695,7 +707,8 @@ struct pci_error_handlers { pci_ers_result_t (*slot_reset)(struct pci_dev *dev); /* PCI function reset prepare or completed */ - void (*reset_notify)(struct pci_dev *dev, bool prepare); + void (*reset_prepare)(struct pci_dev *dev); + void (*reset_done)(struct pci_dev *dev); /* Device driver may resume normal operations */ void (*resume)(struct pci_dev *dev); @@ -852,13 +865,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); void pci_bus_release_busn_res(struct pci_bus *b); -struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata, - struct list_head *resources, - struct msi_controller *msi); struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, struct list_head *resources); +int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); @@ -1008,6 +1018,15 @@ int __must_check pci_reenable_device(struct pci_dev *); int __must_check pcim_enable_device(struct pci_dev *pdev); void pcim_pin_device(struct pci_dev *pdev); +static inline bool pci_intx_mask_supported(struct pci_dev *pdev) +{ + /* + * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is + * writable and no quirk has marked the feature broken. 
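+ *
+ * Drivers (uio_pci_generic and vfio, for instance) would typically
+ * check this once before relying on the masking helpers, e.g.:
+ *
+ *	if (!pci_intx_mask_supported(pdev))
+ *		return -ENODEV;
+ *	...
+ *	if (pci_check_and_mask_intx(pdev))
+ *		... the device raised the interrupt and INTx is masked ...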
+ */ + return !pdev->broken_intx_masking; +} + static inline int pci_is_enabled(struct pci_dev *pdev) { return (atomic_read(&pdev->enable_cnt) > 0); @@ -1031,7 +1050,6 @@ int __must_check pci_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); void pci_intx(struct pci_dev *dev, int enable); -bool pci_intx_mask_supported(struct pci_dev *dev); bool pci_check_and_mask_intx(struct pci_dev *dev); bool pci_check_and_unmask_intx(struct pci_dev *dev); int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); @@ -1144,6 +1162,7 @@ void pdev_enable_device(struct pci_dev *); int pci_enable_resources(struct pci_dev *, int mask); void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), int (*)(const struct pci_dev *, u8, u8)); +void pci_assign_irq(struct pci_dev *dev); struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res); #define HAVE_PCI_REQ_REGIONS 2 int __must_check pci_request_regions(struct pci_dev *, const char *); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 5f6b71d15393..c71e532da458 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1373,6 +1373,8 @@ #define PCI_DEVICE_ID_TTI_HPT374 0x0008 #define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */ +#define PCI_VENDOR_ID_SIGMA 0x1105 + #define PCI_VENDOR_ID_VIA 0x1106 #define PCI_DEVICE_ID_VIA_8763_0 0x0198 #define PCI_DEVICE_ID_VIA_8380_0 0x0204 diff --git a/include/linux/i2c/lm8323.h b/include/linux/platform_data/lm8323.h index 478d668bc590..478d668bc590 100644 --- a/include/linux/i2c/lm8323.h +++ b/include/linux/platform_data/lm8323.h diff --git a/include/linux/i2c/mcs.h b/include/linux/platform_data/mcs.h index 61bb18a4fd3c..61bb18a4fd3c 100644 --- a/include/linux/i2c/mcs.h +++ b/include/linux/platform_data/mcs.h diff --git a/include/linux/i2c/mms114.h b/include/linux/platform_data/mms114.h index 5722ebfb2738..5722ebfb2738 100644 --- a/include/linux/i2c/mms114.h +++ b/include/linux/platform_data/mms114.h diff --git a/include/linux/i2c/tsc2007.h b/include/linux/platform_data/tsc2007.h index 4f35b6ad3889..c2d3aa1dadd4 100644 --- a/include/linux/i2c/tsc2007.h +++ b/include/linux/platform_data/tsc2007.h @@ -1,7 +1,7 @@ #ifndef __LINUX_I2C_TSC2007_H #define __LINUX_I2C_TSC2007_H -/* linux/i2c/tsc2007.h */ +/* linux/platform_data/tsc2007.h */ struct tsc2007_platform_data { u16 model; /* 2007. 
*/ diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 4d57bbaaa1bf..30f945329818 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -142,6 +142,7 @@ int raid6_select_algo(void); extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256))); extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); +extern const u8 raid6_gflog[256] __attribute__((aligned(256))); extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index d7d3ea637dd0..250a27614328 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -113,6 +113,33 @@ static inline void check_object_size(const void *ptr, unsigned long n, { } #endif /* CONFIG_HARDENED_USERCOPY */ +extern void __compiletime_error("copy source size is too small") +__bad_copy_from(void); +extern void __compiletime_error("copy destination size is too small") +__bad_copy_to(void); + +static inline void copy_overflow(int size, unsigned long count) +{ + WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); +} + +static __always_inline bool +check_copy_size(const void *addr, size_t bytes, bool is_source) +{ + int sz = __compiletime_object_size(addr); + if (unlikely(sz >= 0 && sz < bytes)) { + if (!__builtin_constant_p(bytes)) + copy_overflow(sz, bytes); + else if (is_source) + __bad_copy_from(); + else + __bad_copy_to(); + return false; + } + check_object_size(addr, bytes, is_source); + return true; +} + #ifndef arch_setup_new_exec static inline void arch_setup_new_exec(void) { } #endif diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 201418d5e15c..acdd6f915a8d 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -109,8 +109,11 @@ static inline unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; - if (likely(access_ok(VERIFY_READ, from, n))) + might_fault(); + if (likely(access_ok(VERIFY_READ, from, n))) { + kasan_check_write(to, n); res = raw_copy_from_user(to, from, n); + } if (unlikely(res)) memset(to + (n - res), 0, res); return res; @@ -124,8 +127,11 @@ _copy_from_user(void *, const void __user *, unsigned long); static inline unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) { - if (access_ok(VERIFY_WRITE, to, n)) + might_fault(); + if (access_ok(VERIFY_WRITE, to, n)) { + kasan_check_read(from, n); n = raw_copy_to_user(to, from, n); + } return n; } #else @@ -133,59 +139,23 @@ extern unsigned long _copy_to_user(void __user *, const void *, unsigned long); #endif -extern void __compiletime_error("usercopy buffer size is too small") -__bad_copy_user(void); - -static inline void copy_user_overflow(int size, unsigned long count) -{ - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); -} - static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - int sz = __compiletime_object_size(to); - - might_fault(); - kasan_check_write(to, n); - - if (likely(sz < 0 || sz >= n)) { - check_object_size(to, n, false); + if (likely(check_copy_size(to, n, false))) n = _copy_from_user(to, from, n); - } else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); - return n; } static __always_inline unsigned long __must_check copy_to_user(void __user *to, const void 
*from, unsigned long n) { - int sz = __compiletime_object_size(from); - - kasan_check_read(from, n); - might_fault(); - - if (likely(sz < 0 || sz >= n)) { - check_object_size(from, n, true); + if (likely(check_copy_size(from, n, true))) n = _copy_to_user(to, from, n); - } else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); - return n; } #ifdef CONFIG_COMPAT static __always_inline unsigned long __must_check -__copy_in_user(void __user *to, const void *from, unsigned long n) -{ - might_fault(); - return raw_copy_in_user(to, from, n); -} -static __always_inline unsigned long __must_check copy_in_user(void __user *to, const void *from, unsigned long n) { might_fault(); diff --git a/include/linux/uio.h b/include/linux/uio.h index 55cd54a0e941..342d2dc225b9 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -10,6 +10,7 @@ #define __LINUX_UIO_H #include <linux/kernel.h> +#include <linux/thread_info.h> #include <uapi/linux/uio.h> struct page; @@ -91,10 +92,58 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); -size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); -size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); -bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); -size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); + +size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); +size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); +size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); +bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); + +static __always_inline __must_check +size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) +{ + if (unlikely(!check_copy_size(addr, bytes, true))) + return bytes; + else + return _copy_to_iter(addr, bytes, i); +} + +static __always_inline __must_check +size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) +{ + if (unlikely(!check_copy_size(addr, bytes, false))) + return bytes; + else + return _copy_from_iter(addr, bytes, i); +} + +static __always_inline __must_check +bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) +{ + if (unlikely(!check_copy_size(addr, bytes, false))) + return false; + else + return _copy_from_iter_full(addr, bytes, i); +} + +static __always_inline __must_check +size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) +{ + if (unlikely(!check_copy_size(addr, bytes, false))) + return bytes; + else + return _copy_from_iter_nocache(addr, bytes, i); +} + +static __always_inline __must_check +bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) +{ + if (unlikely(!check_copy_size(addr, bytes, false))) + return false; + else + return _copy_from_iter_full_nocache(addr, bytes, i); +} + #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE /* * Note, users like pmem that depend on the stricter semantics of @@ -102,15 +151,20 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the * destination is flushed from the cache on return. 
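 *
 * A pmem-style caller can thus simply do (sketch):
 *
 *	size_t left = copy_from_iter_flushcache(addr, bytes, i);
 *
 * but on kernels without CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE it must
 * still write back the cache for addr itself, since the wrapper then
 * falls back to the plain nocache copy (see the #define just below).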
*/ -size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i); +size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i); #else -static inline size_t copy_from_iter_flushcache(void *addr, size_t bytes, - struct iov_iter *i) +#define _copy_from_iter_flushcache _copy_from_iter_nocache +#endif + +static __always_inline __must_check +size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) { - return copy_from_iter_nocache(addr, bytes, i); + if (unlikely(!check_copy_size(addr, bytes, false))) + return bytes; + else + return _copy_from_iter_flushcache(addr, bytes, i); } -#endif -bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); + size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); unsigned long iov_iter_gap_alignment(const struct iov_iter *i); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 0fbf73dd531a..199056933dcb 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -22,6 +22,7 @@ struct route_info { #include <net/flow.h> #include <net/ip6_fib.h> #include <net/sock.h> +#include <net/lwtunnel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/route.h> @@ -232,4 +233,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, return daddr; } +static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b) +{ + return a->dst.dev == b->dst.dev && + a->rt6i_idev == b->rt6i_idev && + ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) && + !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate); +} #endif diff --git a/include/net/sock.h b/include/net/sock.h index 48e4d5c38f85..8c85791fc196 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1697,6 +1697,7 @@ static inline void sock_orphan(struct sock *sk) static inline void sock_graft(struct sock *sk, struct socket *parent) { + WARN_ON(parent->sk); write_lock_bh(&sk->sk_callback_lock); sk->sk_wq = parent->wq; parent->sk = sk; diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index f5a8d96e1e09..179891074b3c 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -600,6 +600,7 @@ #define KEY_APPSELECT 0x244 /* AL Select Task/Application */ #define KEY_SCREENSAVER 0x245 /* AL Screen Saver */ #define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */ +#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index d56bb0051009..c22d3ebaca20 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -517,6 +517,7 @@ #define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ #define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ #define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h index 3e824e1a6495..5e392968bad2 100644 --- a/include/uapi/linux/switchtec_ioctl.h +++ 
b/include/uapi/linux/switchtec_ioctl.h @@ -39,6 +39,9 @@ struct switchtec_ioctl_flash_info { __u32 padding; }; +#define SWITCHTEC_IOCTL_PART_ACTIVE 1 +#define SWITCHTEC_IOCTL_PART_RUNNING 2 + struct switchtec_ioctl_flash_part_info { __u32 flash_partition; __u32 address; diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index d2747f9c5707..d69bd77252a7 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -110,6 +110,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) struct cpumask *masks; cpumask_var_t nmsk, *node_to_present_cpumask; + /* + * If there aren't any vectors left after applying the pre/post + * vectors don't bother with assigning affinity. + */ + if (!affv) + return NULL; + if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) return NULL; @@ -192,15 +199,19 @@ out: /** * irq_calc_affinity_vectors - Calculate the optimal number of vectors + * @minvec: The minimum number of vectors available * @maxvec: The maximum number of vectors available * @affd: Description of the affinity requirements */ -int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) +int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) { int resv = affd->pre_vectors + affd->post_vectors; int vecs = maxvec - resv; int ret; + if (resv > minvec) + return 0; + get_online_cpus(); ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv; put_online_cpus(); diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c index cc3ed0ccdfa2..2655f26ec882 100644 --- a/kernel/locking/qrwlock.c +++ b/kernel/locking/qrwlock.c @@ -20,6 +20,7 @@ #include <linux/cpumask.h> #include <linux/percpu.h> #include <linux/hardirq.h> +#include <linux/spinlock.h> #include <asm/qrwlock.h> /* diff --git a/lib/iov_iter.c b/lib/iov_iter.c index c9a69064462f..52c8dd6d8e82 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -130,6 +130,24 @@ } \ } +static int copyout(void __user *to, const void *from, size_t n) +{ + if (access_ok(VERIFY_WRITE, to, n)) { + kasan_check_read(from, n); + n = raw_copy_to_user(to, from, n); + } + return n; +} + +static int copyin(void *to, const void __user *from, size_t n) +{ + if (access_ok(VERIFY_READ, from, n)) { + kasan_check_write(to, n); + n = raw_copy_from_user(to, from, n); + } + return n; +} + static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { @@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b if (unlikely(!bytes)) return 0; + might_fault(); wanted = bytes; iov = i->iov; skip = i->iov_offset; @@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b from = kaddr + offset; /* first chunk, usually the only one */ - left = __copy_to_user_inatomic(buf, from, copy); + left = copyout(buf, from, copy); copy -= left; skip += copy; from += copy; @@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b iov++; buf = iov->iov_base; copy = min(bytes, iov->iov_len); - left = __copy_to_user_inatomic(buf, from, copy); + left = copyout(buf, from, copy); copy -= left; skip = copy; from += copy; @@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b kaddr = kmap(page); from = kaddr + offset; - left = __copy_to_user(buf, from, copy); + left = copyout(buf, from, copy); copy -= left; skip += copy; from += copy; @@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t 
offset, size_t b iov++; buf = iov->iov_base; copy = min(bytes, iov->iov_len); - left = __copy_to_user(buf, from, copy); + left = copyout(buf, from, copy); copy -= left; skip = copy; from += copy; @@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t if (unlikely(!bytes)) return 0; + might_fault(); wanted = bytes; iov = i->iov; skip = i->iov_offset; @@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t to = kaddr + offset; /* first chunk, usually the only one */ - left = __copy_from_user_inatomic(to, buf, copy); + left = copyin(to, buf, copy); copy -= left; skip += copy; to += copy; @@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t iov++; buf = iov->iov_base; copy = min(bytes, iov->iov_len); - left = __copy_from_user_inatomic(to, buf, copy); + left = copyin(to, buf, copy); copy -= left; skip = copy; to += copy; @@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t kaddr = kmap(page); to = kaddr + offset; - left = __copy_from_user(to, buf, copy); + left = copyin(to, buf, copy); copy -= left; skip += copy; to += copy; @@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t iov++; buf = iov->iov_base; copy = min(bytes, iov->iov_len); - left = __copy_from_user(to, buf, copy); + left = copyin(to, buf, copy); copy -= left; skip = copy; to += copy; @@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes, return bytes; } -size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) +size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { const char *from = addr; if (unlikely(i->type & ITER_PIPE)) return copy_pipe_to_iter(addr, bytes, i); + if (iter_is_iovec(i)) + might_fault(); iterate_and_advance(i, bytes, v, - __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len, - v.iov_len), + copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len), memcpy_to_page(v.bv_page, v.bv_offset, (from += v.bv_len) - v.bv_len, v.bv_len), memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len) @@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) return bytes; } -EXPORT_SYMBOL(copy_to_iter); +EXPORT_SYMBOL(_copy_to_iter); -size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) +size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; if (unlikely(i->type & ITER_PIPE)) { WARN_ON(1); return 0; } + if (iter_is_iovec(i)) + might_fault(); iterate_and_advance(i, bytes, v, - __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base, - v.iov_len), + copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) @@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) return bytes; } -EXPORT_SYMBOL(copy_from_iter); +EXPORT_SYMBOL(_copy_from_iter); -bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) +bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; if (unlikely(i->type & ITER_PIPE)) { @@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) if (unlikely(i->count < bytes)) return false; + if (iter_is_iovec(i)) + might_fault(); iterate_all_kinds(i, bytes, v, ({ - if 
(__copy_from_user((to += v.iov_len) - v.iov_len, + if (copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)) return false; 0;}), @@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) iov_iter_advance(i, bytes); return true; } -EXPORT_SYMBOL(copy_from_iter_full); +EXPORT_SYMBOL(_copy_from_iter_full); -size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) +size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; if (unlikely(i->type & ITER_PIPE)) { @@ -613,10 +637,10 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) return bytes; } -EXPORT_SYMBOL(copy_from_iter_nocache); +EXPORT_SYMBOL(_copy_from_iter_nocache); #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE -size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) +size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; if (unlikely(i->type & ITER_PIPE)) { @@ -634,10 +658,10 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) return bytes; } -EXPORT_SYMBOL_GPL(copy_from_iter_flushcache); +EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); #endif -bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) +bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; if (unlikely(i->type & ITER_PIPE)) { @@ -659,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) iov_iter_advance(i, bytes); return true; } -EXPORT_SYMBOL(copy_from_iter_full_nocache); +EXPORT_SYMBOL(_copy_from_iter_full_nocache); + +static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) +{ + size_t v = n + offset; + if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page)))) + return true; + WARN_ON(1); + return false; +} size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { + if (unlikely(!page_copy_sane(page, offset, bytes))) + return 0; if (i->type & (ITER_BVEC|ITER_KVEC)) { void *kaddr = kmap_atomic(page); size_t wanted = copy_to_iter(kaddr + offset, bytes, i); @@ -679,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { + if (unlikely(!page_copy_sane(page, offset, bytes))) + return 0; if (unlikely(i->type & ITER_PIPE)) { WARN_ON(1); return 0; } if (i->type & (ITER_BVEC|ITER_KVEC)) { void *kaddr = kmap_atomic(page); - size_t wanted = copy_from_iter(kaddr + offset, bytes, i); + size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); kunmap_atomic(kaddr); return wanted; } else @@ -722,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i) if (unlikely(i->type & ITER_PIPE)) return pipe_zero(bytes, i); iterate_and_advance(i, bytes, v, - __clear_user(v.iov_base, v.iov_len), + clear_user(v.iov_base, v.iov_len), memzero_page(v.bv_page, v.bv_offset, v.bv_len), memset(v.iov_base, 0, v.iov_len) ) @@ -735,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes) { char *kaddr = kmap_atomic(page), *p = kaddr + offset; + if (unlikely(!page_copy_sane(page, offset, bytes))) { + kunmap_atomic(kaddr); + return 0; + } if (unlikely(i->type & ITER_PIPE)) { kunmap_atomic(kaddr); WARN_ON(1); return 0; } iterate_all_kinds(i, bytes, v, - __copy_from_user_inatomic((p += v.iov_len) - v.iov_len, - v.iov_base, v.iov_len), + copyin((p += v.iov_len) - 
v.iov_len, v.iov_base, v.iov_len), memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c index 39787db588b0..e824d088f72c 100644 --- a/lib/raid6/mktables.c +++ b/lib/raid6/mktables.c @@ -125,6 +125,26 @@ int main(int argc, char *argv[]) printf("EXPORT_SYMBOL(raid6_gfexp);\n"); printf("#endif\n"); + /* Compute log-of-2 table */ + printf("\nconst u8 __attribute__((aligned(256)))\n" + "raid6_gflog[256] =\n" "{\n"); + for (i = 0; i < 256; i += 8) { + printf("\t"); + for (j = 0; j < 8; j++) { + v = 255; + for (k = 0; k < 256; k++) + if (exptbl[k] == (i + j)) { + v = k; + break; + } + printf("0x%02x,%c", v, (j == 7) ? '\n' : ' '); + } + } + printf("};\n"); + printf("#ifdef __KERNEL__\n"); + printf("EXPORT_SYMBOL(raid6_gflog);\n"); + printf("#endif\n"); + /* Compute inverse table x^-1 == x^254 */ printf("\nconst u8 __attribute__((aligned(256)))\n" "raid6_gfinv[256] =\n" "{\n"); diff --git a/lib/usercopy.c b/lib/usercopy.c index 1b6010a3beb8..f5d9f08ee032 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -6,8 +6,11 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; - if (likely(access_ok(VERIFY_READ, from, n))) + might_fault(); + if (likely(access_ok(VERIFY_READ, from, n))) { + kasan_check_write(to, n); res = raw_copy_from_user(to, from, n); + } if (unlikely(res)) memset(to + (n - res), 0, res); return res; @@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user); #ifndef INLINE_COPY_TO_USER unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n) { - if (likely(access_ok(VERIFY_WRITE, to, n))) + might_fault(); + if (likely(access_ok(VERIFY_WRITE, to, n))) { + kasan_check_read(from, n); n = raw_copy_to_user(to, from, n); + } return n; } EXPORT_SYMBOL(_copy_to_user); diff --git a/mm/mmap.c b/mm/mmap.c index 5a0ba9788cdd..7f8cfe9d9b4d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3186,8 +3186,12 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; + if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) + return -EFAULT; + if (sm->mremap) return sm->mremap(sm, new_vma); + return 0; } diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c index c1dc48686200..da1c2fdc08c8 100644 --- a/net/bridge/netfilter/ebt_nflog.c +++ b/net/bridge/netfilter/ebt_nflog.c @@ -30,6 +30,7 @@ ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par) li.u.ulog.copy_len = info->len; li.u.ulog.group = info->group; li.u.ulog.qthreshold = info->threshold; + li.u.ulog.flags = 0; nf_log_packet(net, PF_BRIDGE, xt_hooknum(par), skb, xt_in(par), xt_out(par), &li, "%s", info->prefix); diff --git a/net/core/dev.c b/net/core/dev.c index 7098fba52be1..02440518dd69 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6765,7 +6765,7 @@ int dev_change_flags(struct net_device *dev, unsigned int flags) } EXPORT_SYMBOL(dev_change_flags); -static int __dev_set_mtu(struct net_device *dev, int new_mtu) +int __dev_set_mtu(struct net_device *dev, int new_mtu) { const struct net_device_ops *ops = dev->netdev_ops; @@ -6775,6 +6775,7 @@ static int __dev_set_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; return 0; } +EXPORT_SYMBOL(__dev_set_mtu); /** * dev_set_mtu - Change maximum transfer unit diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 6ec6900eb300..a20e7f03d5f7 100644 --- a/net/ipv4/tcp_ipv4.c +++ 
b/net/ipv4/tcp_ipv4.c @@ -943,9 +943,9 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, } EXPORT_SYMBOL(tcp_md5_do_lookup); -struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk, - const union tcp_md5_addr *addr, - int family, u8 prefixlen) +static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk, + const union tcp_md5_addr *addr, + int family, u8 prefixlen) { const struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 5477ba729c36..ebb299cf72b7 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -786,10 +786,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, goto next_iter; } - if (iter->dst.dev == rt->dst.dev && - iter->rt6i_idev == rt->rt6i_idev && - ipv6_addr_equal(&iter->rt6i_gateway, - &rt->rt6i_gateway)) { + if (rt6_duplicate_nexthop(iter, rt)) { if (rt->rt6i_nsiblings) rt->rt6i_nsiblings = 0; if (!(iter->rt6i_flags & RTF_EXPIRES)) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0488a24c2a44..4d30c96a819d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3036,17 +3036,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list, struct rt6_info *rt, struct fib6_config *r_cfg) { struct rt6_nh *nh; - struct rt6_info *rtnh; int err = -EEXIST; list_for_each_entry(nh, rt6_nh_list, next) { /* check if rt6_info already exists */ - rtnh = nh->rt6_info; - - if (rtnh->dst.dev == rt->dst.dev && - rtnh->rt6i_idev == rt->rt6i_idev && - ipv6_addr_equal(&rtnh->rt6i_gateway, - &rt->rt6i_gateway)) + if (rt6_duplicate_nexthop(nh->rt6_info, rt)) return err; } diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index bdcfb2d04cd2..ea4f481839dd 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -2076,6 +2076,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, { struct net *net = sock_net(in_skb->sk); u32 portid = NETLINK_CB(in_skb).portid; + u32 in_label = LABEL_NOT_SPECIFIED; struct nlattr *tb[RTA_MAX + 1]; u32 labels[MAX_NEW_LABELS]; struct mpls_shim_hdr *hdr; @@ -2086,9 +2087,8 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlmsghdr *nlh; struct sk_buff *skb; struct mpls_nh *nh; - int err = -EINVAL; - u32 in_label; u8 n_labels; + int err; err = nlmsg_parse(in_nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy, extack); @@ -2101,11 +2101,15 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, u8 label_count; if (nla_get_labels(tb[RTA_DST], 1, &label_count, - &in_label, extack)) + &in_label, extack)) { + err = -EINVAL; goto errout; + } - if (in_label < MPLS_LABEL_FIRST_UNRESERVED) + if (!mpls_label_ok(net, in_label, extack)) { + err = -EINVAL; goto errout; + } } rt = mpls_route_input_rcu(net, in_label); diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index b553fdd68816..4707d997558a 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -872,6 +872,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto) return dccp_kmemdup_sysctl_table(net, pn, dn); } +static struct nf_proto_net *dccp_get_net_proto(struct net *net) +{ + return &net->ct.nf_ct_proto.dccp.pn; +} + struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = { .l3proto = AF_INET, .l4proto = IPPROTO_DCCP, @@ -904,6 +909,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = { }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .init_net = 
dccp_init_net, + .get_net_proto = dccp_get_net_proto, }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4); @@ -939,5 +945,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = { }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .init_net = dccp_init_net, + .get_net_proto = dccp_get_net_proto, }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp6); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 31c6c8ee9d5d..6eef29d2eec4 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -783,6 +783,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto) return sctp_kmemdup_sysctl_table(pn, sn); } +static struct nf_proto_net *sctp_get_net_proto(struct net *net) +{ + return &net->ct.nf_ct_proto.sctp.pn; +} + struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { .l3proto = PF_INET, .l4proto = IPPROTO_SCTP, @@ -816,6 +821,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .init_net = sctp_init_net, + .get_net_proto = sctp_get_net_proto, }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4); @@ -852,5 +858,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #endif .init_net = sctp_init_net, + .get_net_proto = sctp_get_net_proto, }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp6); diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index c6dc8caaf5ca..c061d6eb465d 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -136,7 +136,7 @@ int rds_tcp_accept_one(struct socket *sock) if (!sock) /* module unload or netns delete in progress */ return -ENETUNREACH; - ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, + ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, sock->sk->sk_protocol, &new_sock); if (ret) diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f5b45b8b8b16..2a186b201ad2 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -252,6 +252,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->flowi6_proto = IPPROTO_SCTP; if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) fl6->flowi6_oif = daddr->v6.sin6_scope_id; + else if (asoc) + fl6->flowi6_oif = asoc->base.sk->sk_bound_dev_if; pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index a03130a47b85..60aff60e30ad 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -272,7 +272,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, goto out; } - if (len == sizeof(crypto_info)) { + if (len == sizeof(*crypto_info)) { if (copy_to_user(optval, crypto_info, sizeof(*crypto_info))) rc = -EFAULT; goto out; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 45ba3d0872cc..8ce85420ecb0 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -291,8 +291,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, - [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, - .len = WLAN_PMKID_LEN }, + [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN }, [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, @@ -348,6 +347,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { 
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, + [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 }, [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 }, [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, @@ -520,7 +520,7 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = { static const struct nla_policy nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = { [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 }, - [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY, + [NL80211_NAN_FUNC_SERVICE_ID] = { .len = NL80211_NAN_FUNC_SERVICE_ID_LEN }, [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG }, @@ -6469,6 +6469,10 @@ static int validate_scan_freqs(struct nlattr *freqs) struct nlattr *attr1, *attr2; int n_channels = 0, tmp1, tmp2; + nla_for_each_nested(attr1, freqs, tmp1) + if (nla_len(attr1) != sizeof(u32)) + return 0; + nla_for_each_nested(attr1, freqs, tmp1) { n_channels++; /* |