-rw-r--r--.mailmap1
-rw-r--r--Documentation/admin-guide/sysctl/net.rst8
-rw-r--r--Documentation/arm64/silicon-errata.rst3
-rw-r--r--Documentation/devicetree/bindings/net/adi,adin.yaml15
-rw-r--r--Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml3
-rw-r--r--Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml3
-rw-r--r--Documentation/devicetree/bindings/net/marvell,orion-mdio.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/mediatek,net.yaml141
-rw-r--r--Documentation/devicetree/bindings/net/renesas,etheravb.yaml82
-rw-r--r--Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml4
-rw-r--r--Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst4
-rw-r--r--Documentation/networking/device_drivers/ethernet/dec/de4x5.rst189
-rw-r--r--Documentation/networking/device_drivers/ethernet/index.rst1
-rw-r--r--Documentation/networking/index.rst4
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.rst5
-rw-r--r--Documentation/process/embargoed-hardware-issues.rst7
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts4
-rw-r--r--arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi10
-rw-r--r--arch/arm/boot/dts/aspeed-g6.dtsi10
-rw-r--r--arch/arm/boot/dts/imx6qdl-sr-som.dtsi10
-rw-r--r--arch/arm/include/asm/io.h3
-rw-r--r--arch/arm/mm/ioremap.c8
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts74
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a.dtsi39
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts70
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-mtp.dts12
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts40
-rw-r--r--arch/arm64/include/asm/io.h4
-rw-r--r--arch/arm64/kernel/Makefile4
-rw-r--r--arch/arm64/kernel/cpu_errata.c2
-rw-r--r--arch/arm64/kernel/cpufeature.c3
-rw-r--r--arch/arm64/kernel/vdso/Makefile3
-rw-r--r--arch/arm64/kernel/vdso32/Makefile3
-rw-r--r--arch/arm64/mm/ioremap.c8
-rw-r--r--arch/mips/configs/mtx1_defconfig1
-rw-r--r--arch/powerpc/configs/chrp32_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/kvm/book3s_32_sr.S26
-rw-r--r--arch/x86/mm/init_64.c5
-rw-r--r--block/mq-deadline.c1
-rw-r--r--drivers/base/firmware_loader/main.c17
-rw-r--r--drivers/bcma/driver_gpio.c7
-rw-r--r--drivers/dma-buf/dma-buf.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c8
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/tmp401.c11
-rw-r--r--drivers/interconnect/core.c8
-rw-r--r--drivers/net/can/Kconfig17
-rw-r--r--drivers/net/can/at91_can.c12
-rw-r--r--drivers/net/can/c_can/c_can_main.c19
-rw-r--r--drivers/net/can/ctucanfd/Kconfig6
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c10
-rw-r--r--drivers/net/can/dev/Makefile2
-rw-r--r--drivers/net/can/dev/dev.c5
-rw-r--r--drivers/net/can/dev/rx-offload.c5
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c7
-rw-r--r--drivers/net/can/grcan.c2
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c9
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/led.c140
-rw-r--r--drivers/net/can/m_can/m_can.c35
-rw-r--r--drivers/net/can/m_can/m_can.h4
-rw-r--r--drivers/net/can/m_can/m_can_pci.c48
-rw-r--r--drivers/net/can/mscan/mscan.c2
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/rcar/rcar_can.c12
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c11
-rw-r--r--drivers/net/can/sja1000/sja1000.c11
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/spi/hi311x.c8
-rw-r--r--drivers/net/can/spi/mcp251x.c10
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h2
-rw-r--r--drivers/net/can/sun4i_can.c7
-rw-r--r--drivers/net/can/ti_hecc.c8
-rw-r--r--drivers/net/can/usb/mcba_usb.c8
-rw-r--r--drivers/net/can/usb/usb_8dev.c11
-rw-r--r--drivers/net/can/xilinx_can.c12
-rw-r--r--drivers/net/dsa/lantiq_gswip.c9
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c252
-rw-r--r--drivers/net/dsa/microchip/ksz8795_spi.c35
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c10
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c200
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c30
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c30
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c485
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h79
-rw-r--r--drivers/net/dsa/ocelot/felix.c173
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c3
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c37
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h3
-rw-r--r--drivers/net/ethernet/broadcom/Makefile5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c84
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h415
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c80
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h12
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c3
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig15
-rw-r--r--drivers/net/ethernet/dec/tulip/Makefile1
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c5591
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.h1017
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c64
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c2
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h11
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c99
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c31
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c3
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c45
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c27
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c23
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c2
-rw-r--r--drivers/net/ethernet/mediatek/Makefile5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c937
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h345
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c176
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c192
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c28
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c162
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c16
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c243
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c51
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c1
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c3
-rw-r--r--drivers/net/ethernet/renesas/ravb.h6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c109
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c6
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c3
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/Kconfig1
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c8
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.h4
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.c9
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c3
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c6
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sunplus/Kconfig3
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_int.c4
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.c11
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c13
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h54
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c168
-rw-r--r--drivers/net/fddi/skfp/smt.c2
-rw-r--r--drivers/net/hyperv/rndis_filter.c2
-rw-r--r--drivers/net/ipa/gsi.c22
-rw-r--r--drivers/net/ipa/gsi.h1
-rw-r--r--drivers/net/ipa/gsi_reg.h2
-rw-r--r--drivers/net/ipa/gsi_trans.c38
-rw-r--r--drivers/net/ipa/gsi_trans.h24
-rw-r--r--drivers/net/ipa/ipa.h2
-rw-r--r--drivers/net/ipa/ipa_cmd.c78
-rw-r--r--drivers/net/ipa/ipa_cmd.h11
-rw-r--r--drivers/net/ipa/ipa_data-v3.1.c2
-rw-r--r--drivers/net/ipa/ipa_data-v3.5.1.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.11.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.2.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.5.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.9.c2
-rw-r--r--drivers/net/ipa/ipa_data.h70
-rw-r--r--drivers/net/ipa/ipa_endpoint.c228
-rw-r--r--drivers/net/ipa/ipa_endpoint.h85
-rw-r--r--drivers/net/ipa/ipa_interrupt.c6
-rw-r--r--drivers/net/ipa/ipa_modem.c13
-rw-r--r--drivers/net/ipa/ipa_qmi.c2
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/netdevsim/ipsec.c2
-rw-r--r--drivers/net/phy/adin.c40
-rw-r--r--drivers/net/phy/dp83822.c9
-rw-r--r--drivers/net/phy/marvell.c37
-rw-r--r--drivers/net/phy/micrel.c16
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/usb/r8152.c33
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c6
-rw-r--r--drivers/net/vxlan/vxlan_core.c13
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c178
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c172
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c24
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c62
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h46
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c34
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/carl9170/Makefile5
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c215
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c129
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c249
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c72
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c148
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c155
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c122
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c53
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c7
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig18
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile9
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h14
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c23
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c12
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c30
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h22
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c229
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c3714
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.c6
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.c5
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.c3
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.c2
-rw-r--r--drivers/net/wireless/silabs/wfx/key.c4
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c6
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.c3
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.c11
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c76
-rw-r--r--drivers/net/wireless/silabs/wfx/wfx.h7
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c22
-rw-r--r--drivers/net/wireless/ti/wl1251/io.c20
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c15
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.h5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c10
-rw-r--r--drivers/net/wwan/t7xx/t7xx_dpmaif.c6
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c17
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c6
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c3
-rw-r--r--drivers/nfc/pn533/pn533.c5
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c7
-rw-r--r--drivers/pci/controller/pci-aardvark.c48
-rw-r--r--drivers/pci/pci.c10
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c17
-rw-r--r--drivers/ptp/ptp_clockmatrix.c321
-rw-r--r--drivers/ptp/ptp_clockmatrix.h7
-rw-r--r--drivers/ptp/ptp_ocp.c62
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c3
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c3
-rw-r--r--drivers/slimbus/qcom-ctrl.c4
-rw-r--r--drivers/ssb/pci.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c48
-rw-r--r--drivers/tty/n_gsm.c20
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c29
-rw-r--r--drivers/tty/serial/digicolor-usart.c5
-rw-r--r--drivers/tty/serial/fsl_lpuart.c18
-rw-r--r--drivers/usb/class/cdc-wdm.c1
-rw-r--r--drivers/usb/gadget/function/f_uvc.c25
-rw-r--r--drivers/usb/gadget/function/uvc.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c3
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c2
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c90
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c26
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c61
-rw-r--r--drivers/vhost/net.c15
-rw-r--r--drivers/video/fbdev/core/fbmem.c5
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c4
-rw-r--r--drivers/video/fbdev/efifb.c9
-rw-r--r--drivers/video/fbdev/simplefb.c8
-rw-r--r--drivers/video/fbdev/vesafb.c8
-rw-r--r--fs/afs/misc.c5
-rw-r--r--fs/afs/rotate.c4
-rw-r--r--fs/afs/rxrpc.c8
-rw-r--r--fs/afs/write.c1
-rw-r--r--fs/ceph/addr.c11
-rw-r--r--fs/ceph/file.c16
-rw-r--r--fs/gfs2/bmap.c11
-rw-r--r--fs/gfs2/file.c139
-rw-r--r--fs/io_uring.c3
-rw-r--r--fs/nfs/fs_context.c2
-rw-r--r--fs/proc/fd.c23
-rw-r--r--fs/seq_file.c32
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/can/dev.h10
-rw-r--r--include/linux/can/led.h51
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/list.h10
-rw-r--r--include/linux/mfd/idt8a340_reg.h12
-rw-r--r--include/linux/mlx5/driver.h9
-rw-r--r--include/linux/mlx5/mlx5_ifc.h5
-rw-r--r--include/linux/netdevice.h26
-rw-r--r--include/linux/qed/qed_fcoe_if.h4
-rw-r--r--include/linux/qed/qed_iscsi_if.h4
-rw-r--r--include/linux/qed/qed_nvmetcp_if.h2
-rw-r--r--include/linux/seq_file.h4
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/net/cfg80211.h16
-rw-r--r--include/net/cfg802154.h2
-rw-r--r--include/net/inet6_hashtables.h28
-rw-r--r--include/net/inet_connection_sock.h3
-rw-r--r--include/net/inet_hashtables.h103
-rw-r--r--include/net/inet_sock.h5
-rw-r--r--include/net/inet_timewait_sock.h3
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ipv6.h44
-rw-r--r--include/net/mac80211.h36
-rw-r--r--include/net/netfilter/nf_conntrack.h17
-rw-r--r--include/net/netfilter/nf_conntrack_core.h2
-rw-r--r--include/net/netfilter/nf_conntrack_count.h1
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h53
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h31
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h10
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h8
-rw-r--r--include/net/netfilter/nf_reject.h21
-rw-r--r--include/net/netns/conntrack.h8
-rw-r--r--include/net/sock.h22
-rw-r--r--include/net/tls.h1
-rw-r--r--include/net/xfrm.h34
-rw-r--r--include/soc/mscc/ocelot.h11
-rw-r--r--include/soc/mscc/ocelot_vcap.h2
-rw-r--r--include/trace/events/io_uring.h2
-rw-r--r--include/trace/events/rxrpc.h263
-rw-r--r--include/trace/events/sched.h6
-rw-r--r--include/uapi/linux/can/isotp.h25
-rw-r--r--include/uapi/linux/if_link.h2
-rw-r--r--include/uapi/linux/nl80211.h2
-rw-r--r--include/uapi/linux/tls.h2
-rw-r--r--kernel/auditsc.c6
-rw-r--r--kernel/irq/irqdesc.c1
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/trace/fgraph.c4
-rw-r--r--kernel/trace/ftrace.c4
-rw-r--r--kernel/trace/trace_events.c8
-rw-r--r--kernel/trace/trace_osnoise.c4
-rw-r--r--kernel/trace/trace_sched_switch.c4
-rw-r--r--kernel/trace/trace_sched_wakeup.c4
-rw-r--r--lib/percpu-refcount.c1
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/kfence/core.c10
-rw-r--r--mm/memory-failure.c15
-rw-r--r--mm/mremap.c2
-rw-r--r--net/ax25/ax25_dev.c22
-rw-r--r--net/batman-adv/hard-interface.c2
-rw-r--r--net/bpf/test_run.c2
-rw-r--r--net/bridge/br_input.c7
-rw-r--r--net/can/isotp.c126
-rw-r--r--net/can/raw.c12
-rw-r--r--net/core/dev.c26
-rw-r--r--net/core/dev.h2
-rw-r--r--net/core/drop_monitor.c2
-rw-r--r--net/core/gro.c8
-rw-r--r--net/core/net-sysfs.c21
-rw-r--r--net/core/rtnetlink.c16
-rw-r--r--net/core/skbuff.c29
-rw-r--r--net/core/sock.c25
-rw-r--r--net/core/sysctl_net_core.c8
-rw-r--r--net/dccp/ipv4.c8
-rw-r--r--net/dccp/ipv6.c10
-rw-r--r--net/dccp/proto.c33
-rw-r--r--net/decnet/dn_route.c2
-rw-r--r--net/dsa/dsa2.c7
-rw-r--r--net/ipv4/esp4.c6
-rw-r--r--net/ipv4/inet_connection_sock.c245
-rw-r--r--net/ipv4/inet_hashtables.c210
-rw-r--r--net/ipv4/inet_timewait_sock.c58
-rw-r--r--net/ipv4/netfilter.c3
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c10
-rw-r--r--net/ipv4/route.c23
-rw-r--r--net/ipv4/tcp.c14
-rw-r--r--net/ipv4/tcp_bbr.c2
-rw-r--r--net/ipv4/tcp_cubic.c4
-rw-r--r--net/ipv4/tcp_input.c10
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv6/datagram.c6
-rw-r--r--net/ipv6/esp6.c6
-rw-r--r--net/ipv6/inet6_hashtables.c6
-rw-r--r--net/ipv6/ip6_offload.c56
-rw-r--r--net/ipv6/ip6_output.c22
-rw-r--r--net/ipv6/netfilter.c3
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c4
-rw-r--r--net/ipv6/tcp_ipv6.c7
-rw-r--r--net/ipv6/udp.c13
-rw-r--r--net/key/af_key.c12
-rw-r--r--net/l2tp/l2tp_ip.c4
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/mac80211/cfg.c60
-rw-r--r--net/mac80211/debugfs_netdev.c2
-rw-r--r--net/mac80211/ieee80211_i.h12
-rw-r--r--net/mac80211/main.c4
-rw-r--r--net/mac80211/mlme.c117
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c154
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h2
-rw-r--r--net/mac80211/scan.c20
-rw-r--r--net/mac80211/status.c91
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c40
-rw-r--r--net/mac80211/wpa.c103
-rw-r--r--net/mptcp/options.c37
-rw-r--r--net/mptcp/pm.c14
-rw-r--r--net/mptcp/protocol.c28
-rw-r--r--net/mptcp/protocol.h33
-rw-r--r--net/mptcp/sockopt.c15
-rw-r--r--net/mptcp/subflow.c50
-rw-r--r--net/netfilter/nf_conncount.c11
-rw-r--r--net/netfilter/nf_conntrack_core.c304
-rw-r--r--net/netfilter/nf_conntrack_ecache.c165
-rw-r--r--net/netfilter/nf_conntrack_extend.c32
-rw-r--r--net/netfilter/nf_conntrack_helper.c5
-rw-r--r--net/netfilter/nf_conntrack_netlink.c88
-rw-r--r--net/netfilter/nf_conntrack_proto.c10
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c52
-rw-r--r--net/netfilter/nf_conntrack_standalone.c2
-rw-r--r--net/netfilter/nf_conntrack_timeout.c7
-rw-r--r--net/netfilter/nf_flow_table_core.c60
-rw-r--r--net/netfilter/nf_flow_table_ip.c19
-rw-r--r--net/netfilter/nf_nat_masquerade.c5
-rw-r--r--net/netfilter/nf_tables_api.c11
-rw-r--r--net/netfilter/nfnetlink.c40
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c47
-rw-r--r--net/netfilter/nft_flow_offload.c36
-rw-r--r--net/nfc/nci/data.c2
-rw-r--r--net/nfc/nci/hci.c4
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-internal.h25
-rw-r--r--net/rxrpc/call_accept.c10
-rw-r--r--net/rxrpc/call_event.c4
-rw-r--r--net/rxrpc/call_object.c62
-rw-r--r--net/rxrpc/conn_client.c30
-rw-r--r--net/rxrpc/conn_object.c51
-rw-r--r--net/rxrpc/conn_service.c8
-rw-r--r--net/rxrpc/input.c31
-rw-r--r--net/rxrpc/local_object.c68
-rw-r--r--net/rxrpc/net_ns.c7
-rw-r--r--net/rxrpc/peer_object.c40
-rw-r--r--net/rxrpc/proc.c85
-rw-r--r--net/rxrpc/sendmsg.c6
-rw-r--r--net/rxrpc/skbuff.c1
-rw-r--r--net/sched/act_pedit.c4
-rw-r--r--net/sched/em_meta.c7
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/output.c3
-rw-r--r--net/sctp/stream_sched.c9
-rw-r--r--net/smc/af_smc.c50
-rw-r--r--net/smc/smc_ib.c1
-rw-r--r--net/smc/smc_tx.c17
-rw-r--r--net/smc/smc_wr.c5
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c3
-rw-r--r--net/sunrpc/clnt.c36
-rw-r--r--net/tls/tls_device.c53
-rw-r--r--net/tls/tls_main.c55
-rw-r--r--net/tls/tls_sw.c4
-rw-r--r--net/wireless/chan.c93
-rw-r--r--net/wireless/core.h14
-rw-r--r--net/wireless/ibss.c4
-rw-r--r--net/wireless/nl80211.c416
-rw-r--r--net/wireless/reg.c4
-rw-r--r--net/xfrm/xfrm_device.c15
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c4
-rw-r--r--net/xfrm/xfrm_user.c5
-rw-r--r--samples/trace_events/trace_custom_sched.h6
-rw-r--r--security/selinux/ss/hashtab.c3
-rw-r--r--sound/isa/wavefront/wavefront_synth.c3
-rw-r--r--sound/pci/hda/patch_realtek.c71
-rw-r--r--sound/usb/quirks-table.h9
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/include/uapi/linux/if_link.h2
-rw-r--r--tools/include/uapi/linux/kvm.h10
-rw-r--r--tools/perf/bench/numa.c2
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh202
-rw-r--r--tools/testing/selftests/net/.gitignore2
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/bind_bhash_test.c119
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh53
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile2
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh59
-rw-r--r--tools/testing/selftests/vm/Makefile10
661 files changed, 14015 insertions, 14967 deletions
diff --git a/.mailmap b/.mailmap
index ecd51ee5fa0c..0b04aa20c431 100644
--- a/.mailmap
+++ b/.mailmap
@@ -251,6 +251,7 @@ Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
Mathieu Othacehe <m.othacehe@gmail.com>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index f86b5e1623c6..fa4dcdb283cf 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -322,6 +322,14 @@ a leaked reference faster. A larger value may be useful to prevent false
warnings on slow/loaded systems.
Default value is 10, minimum 1, maximum 3600.
+skb_defer_max
+-------------
+
+Max size (in skbs) of the per-cpu list of skbs being freed
+by the cpu which allocated them. Used by TCP stack so far.
+
+Default: 64
+
optmem_max
----------
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 466cb9e89047..d27db84d585e 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -189,6 +189,9 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 |
+----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1286807 |
++----------------+-----------------+-----------------+-----------------------------+
+
+----------------+-----------------+-----------------+-----------------------------+
| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/devicetree/bindings/net/adi,adin.yaml b/Documentation/devicetree/bindings/net/adi,adin.yaml
index 1129f2b58e98..77750df0c2c4 100644
--- a/Documentation/devicetree/bindings/net/adi,adin.yaml
+++ b/Documentation/devicetree/bindings/net/adi,adin.yaml
@@ -36,6 +36,21 @@ properties:
enum: [ 4, 8, 12, 16, 20, 24 ]
default: 8
+ adi,phy-output-clock:
+ description: Select clock output on GP_CLK pin. Two clocks are available:
+ A 25MHz reference and a free-running 125MHz.
+ The phy can alternatively automatically switch between the reference and
+ the 125MHz clocks based on its internal state.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - 25mhz-reference
+ - 125mhz-free-running
+ - adaptive-free-running
+
+ adi,phy-output-reference-clock:
+ description: Enable 25MHz reference clock output on CLK25_REF pin.
+ type: boolean
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml b/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
index fb34d971dcb3..4635cb96fc64 100644
--- a/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
@@ -25,6 +25,9 @@ maintainers:
- Ondrej Ille <ondrej.ille@gmail.com>
- Martin Jerabek <martin.jerabek01@gmail.com>
+allOf:
+ - $ref: can-controller.yaml#
+
properties:
compatible:
oneOf:
diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
index 9fc137fafed9..6f71fc96bc4e 100644
--- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
@@ -88,6 +88,7 @@ required:
- compatible
- reg
- interrupts
+ - interrupt-names
- clocks
- clock-names
- power-domains
@@ -136,7 +137,6 @@ then:
- const: rstc_n
required:
- - interrupt-names
- reset-names
else:
properties:
@@ -167,6 +167,7 @@ examples:
reg = <0xe66c0000 0x8000>;
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch_int", "g_int";
clocks = <&cpg CPG_MOD 914>,
<&cpg CPG_CORE R8A7795_CLK_CANFD>,
<&can_clk>;
diff --git a/Documentation/devicetree/bindings/net/marvell,orion-mdio.yaml b/Documentation/devicetree/bindings/net/marvell,orion-mdio.yaml
index fe3a3412f093..d2906b4a0f59 100644
--- a/Documentation/devicetree/bindings/net/marvell,orion-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/marvell,orion-mdio.yaml
@@ -39,7 +39,7 @@ required:
- compatible
- reg
-unevaluatedProperties: true
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
index 43cc4024ef98..699164dd1295 100644
--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -21,6 +21,7 @@ properties:
- mediatek,mt7623-eth
- mediatek,mt7622-eth
- mediatek,mt7629-eth
+ - mediatek,mt7986-eth
- ralink,rt5350-eth
reg:
@@ -28,7 +29,7 @@ properties:
interrupts:
minItems: 3
- maxItems: 3
+ maxItems: 4
power-domains:
maxItems: 1
@@ -88,6 +89,9 @@ allOf:
- mediatek,mt7623-eth
then:
properties:
+ interrupts:
+ maxItems: 3
+
clocks:
minItems: 4
maxItems: 4
@@ -112,6 +116,9 @@ allOf:
const: mediatek,mt7622-eth
then:
properties:
+ interrupts:
+ maxItems: 3
+
clocks:
minItems: 11
maxItems: 11
@@ -155,6 +162,9 @@ allOf:
const: mediatek,mt7629-eth
then:
properties:
+ interrupts:
+ maxItems: 3
+
clocks:
minItems: 17
maxItems: 17
@@ -189,6 +199,42 @@ allOf:
minItems: 2
maxItems: 2
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: mediatek,mt7986-eth
+ then:
+ properties:
+ interrupts:
+ minItems: 4
+
+ clocks:
+ minItems: 15
+ maxItems: 15
+
+ clock-names:
+ items:
+ - const: fe
+ - const: gp2
+ - const: gp1
+ - const: wocpu1
+ - const: wocpu0
+ - const: sgmii_tx250m
+ - const: sgmii_rx250m
+ - const: sgmii_cdr_ref
+ - const: sgmii_cdr_fb
+ - const: sgmii2_tx250m
+ - const: sgmii2_rx250m
+ - const: sgmii2_cdr_ref
+ - const: sgmii2_cdr_fb
+ - const: netsys0
+ - const: netsys1
+
+ mediatek,sgmiisys:
+ minItems: 2
+ maxItems: 2
+
patternProperties:
"^mac@[0-1]$":
type: object
@@ -219,7 +265,6 @@ required:
- interrupts
- clocks
- clock-names
- - power-domains
- mediatek,ethsys
unevaluatedProperties: false
@@ -295,3 +340,95 @@ examples:
};
};
};
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/mt7622-clk.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ eth: ethernet@15100000 {
+ #define CLK_ETH_FE_EN 0
+ #define CLK_ETH_WOCPU1_EN 3
+ #define CLK_ETH_WOCPU0_EN 4
+ #define CLK_TOP_NETSYS_SEL 43
+ #define CLK_TOP_NETSYS_500M_SEL 44
+ #define CLK_TOP_NETSYS_2X_SEL 46
+ #define CLK_TOP_SGM_325M_SEL 47
+ #define CLK_APMIXED_NET2PLL 1
+ #define CLK_APMIXED_SGMPLL 3
+
+ compatible = "mediatek,mt7986-eth";
+ reg = <0 0x15100000 0 0x80000>;
+ interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ethsys CLK_ETH_FE_EN>,
+ <&ethsys CLK_ETH_GP2_EN>,
+ <&ethsys CLK_ETH_GP1_EN>,
+ <&ethsys CLK_ETH_WOCPU1_EN>,
+ <&ethsys CLK_ETH_WOCPU0_EN>,
+ <&sgmiisys0 CLK_SGMII_TX250M_EN>,
+ <&sgmiisys0 CLK_SGMII_RX250M_EN>,
+ <&sgmiisys0 CLK_SGMII_CDR_REF>,
+ <&sgmiisys0 CLK_SGMII_CDR_FB>,
+ <&sgmiisys1 CLK_SGMII_TX250M_EN>,
+ <&sgmiisys1 CLK_SGMII_RX250M_EN>,
+ <&sgmiisys1 CLK_SGMII_CDR_REF>,
+ <&sgmiisys1 CLK_SGMII_CDR_FB>,
+ <&topckgen CLK_TOP_NETSYS_SEL>,
+ <&topckgen CLK_TOP_NETSYS_SEL>;
+ clock-names = "fe", "gp2", "gp1", "wocpu1", "wocpu0",
+ "sgmii_tx250m", "sgmii_rx250m",
+ "sgmii_cdr_ref", "sgmii_cdr_fb",
+ "sgmii2_tx250m", "sgmii2_rx250m",
+ "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+ "netsys0", "netsys1";
+ mediatek,ethsys = <&ethsys>;
+ mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+ assigned-clocks = <&topckgen CLK_TOP_NETSYS_2X_SEL>,
+ <&topckgen CLK_TOP_SGM_325M_SEL>;
+ assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
+ <&apmixedsys CLK_APMIXED_SGMPLL>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy5: ethernet-phy@0 {
+ compatible = "ethernet-phy-id67c9.de0a";
+ phy-mode = "2500base-x";
+ reset-gpios = <&pio 6 1>;
+ reset-deassert-us = <20000>;
+ reg = <5>;
+ };
+
+ phy6: ethernet-phy@1 {
+ compatible = "ethernet-phy-id67c9.de0a";
+ phy-mode = "2500base-x";
+ reg = <6>;
+ };
+ };
+
+ mac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ phy-mode = "2500base-x";
+ phy-handle = <&phy5>;
+ reg = <0>;
+ };
+
+ mac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ phy-mode = "2500base-x";
+ phy-handle = <&phy6>;
+ reg = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index ee2ccacc39ff..acf347f3cdbe 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -45,6 +45,11 @@ properties:
- items:
- enum:
+ - renesas,etheravb-r9a09g011 # RZ/V2M
+ - const: renesas,etheravb-rzv2m # RZ/V2M compatible
+
+ - items:
+ - enum:
- renesas,r9a07g043-gbeth # RZ/G2UL
- renesas,r9a07g044-gbeth # RZ/G2{L,LC}
- renesas,r9a07g054-gbeth # RZ/V2L
@@ -160,16 +165,33 @@ allOf:
- const: arp_ns
rx-internal-delay-ps: false
else:
- properties:
- interrupts:
- minItems: 25
- maxItems: 25
- interrupt-names:
- items:
- pattern: '^ch[0-9]+$'
- required:
- - interrupt-names
- - rx-internal-delay-ps
+ if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,etheravb-rzv2m
+ then:
+ properties:
+ interrupts:
+ minItems: 29
+ maxItems: 29
+ interrupt-names:
+ items:
+ pattern: '^(ch(1?)[0-9])|ch20|ch21|dia|dib|err_a|err_b|mgmt_a|mgmt_b|line3$'
+ rx-internal-delay-ps: false
+ required:
+ - interrupt-names
+ else:
+ properties:
+ interrupts:
+ minItems: 25
+ maxItems: 25
+ interrupt-names:
+ items:
+ pattern: '^ch[0-9]+$'
+ required:
+ - interrupt-names
+ - rx-internal-delay-ps
- if:
properties:
@@ -231,17 +253,35 @@ allOf:
- const: chi
- const: refclk
else:
- properties:
- clocks:
- minItems: 1
- items:
- - description: AVB functional clock
- - description: Optional TXC reference clock
- clock-names:
- minItems: 1
- items:
- - const: fck
- - const: refclk
+ if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,etheravb-rzv2m
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Main clock
+ - description: Coherent Hub Interface clock
+ - description: gPTP reference clock
+ clock-names:
+ items:
+ - const: axi
+ - const: chi
+ - const: gptp
+ else:
+ properties:
+ clocks:
+ minItems: 1
+ items:
+ - description: AVB functional clock
+ - description: Optional TXC reference clock
+ clock-names:
+ minItems: 1
+ items:
+ - const: fck
+ - const: refclk
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
index b12bfe61c67a..0988ed8d1c12 100644
--- a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
@@ -52,6 +52,7 @@ unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/clock/toshiba,tmpv770x.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
soc {
@@ -63,7 +64,7 @@ examples:
reg = <0 0x28000000 0 0x10000>;
interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
- clocks = <&clk300mhz>, <&clk125mhz>;
+ clocks = <&pismu TMPV770X_CLK_PIETHER_BUS>, <&pismu TMPV770X_CLK_PIETHER_125M>;
clock-names = "stmmaceth", "phy_ref_clk";
snps,txpbl = <4>;
snps,rxpbl = <4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index 57b68d6c7c70..3666ac5b6518 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -33,7 +33,7 @@ patternProperties:
$ref: "/schemas/types.yaml#/definitions/string"
enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC, ESPI, ESPIALT,
- FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3,
+ FSI1, FSI2, FWQSPI, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3,
GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5,
GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16,
I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5,
@@ -58,7 +58,7 @@ patternProperties:
$ref: "/schemas/types.yaml#/definitions/string"
enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
- EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
+ EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWQSPI, FWSPIABR, FWSPID, FWSPIWP,
GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,
diff --git a/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst b/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst
index 2fde5551e756..40c92ea272af 100644
--- a/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst
+++ b/Documentation/networking/device_drivers/can/ctu/ctucanfd-driver.rst
@@ -72,7 +72,7 @@ it is reachable (on which bus it resides) and its configuration –
registers address, interrupts and so on. An example of such a device
tree is given in .
-.. code:: raw
+::
/ {
/* ... */
@@ -451,7 +451,7 @@ the FIFO is maintained, together with priority rotation, is depicted in
|
-.. figure:: fsm_txt_buffer_user.svg
+.. kernel-figure:: fsm_txt_buffer_user.svg
TX Buffer states with possible transitions
diff --git a/Documentation/networking/device_drivers/ethernet/dec/de4x5.rst b/Documentation/networking/device_drivers/ethernet/dec/de4x5.rst
deleted file mode 100644
index e03e9c631879..000000000000
--- a/Documentation/networking/device_drivers/ethernet/dec/de4x5.rst
+++ /dev/null
@@ -1,189 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-===================================
-DEC EtherWORKS Ethernet De4x5 cards
-===================================
-
- Originally, this driver was written for the Digital Equipment
- Corporation series of EtherWORKS Ethernet cards:
-
- - DE425 TP/COAX EISA
- - DE434 TP PCI
- - DE435 TP/COAX/AUI PCI
- - DE450 TP/COAX/AUI PCI
- - DE500 10/100 PCI Fasternet
-
- but it will now attempt to support all cards which conform to the
- Digital Semiconductor SROM Specification. The driver currently
- recognises the following chips:
-
- - DC21040 (no SROM)
- - DC21041[A]
- - DC21140[A]
- - DC21142
- - DC21143
-
- So far the driver is known to work with the following cards:
-
- - KINGSTON
- - Linksys
- - ZNYX342
- - SMC8432
- - SMC9332 (w/new SROM)
- - ZNYX31[45]
- - ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
-
- The driver has been tested on a relatively busy network using the DE425,
- DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
- 16M of data to a DECstation 5000/200 as follows::
-
- TCP UDP
- TX RX TX RX
- DE425 1030k 997k 1170k 1128k
- DE434 1063k 995k 1170k 1125k
- DE435 1063k 995k 1170k 1125k
- DE500 1063k 998k 1170k 1125k in 10Mb/s mode
-
- All values are typical (in kBytes/sec) from a sample of 4 for each
- measurement. Their error is +/-20k on a quiet (private) network and also
- depend on what load the CPU has.
-
-----------------------------------------------------------------------------
-
- The ability to load this driver as a loadable module has been included
- and used extensively during the driver development (to save those long
- reboot sequences). Loadable module support under PCI and EISA has been
- achieved by letting the driver autoprobe as if it were compiled into the
- kernel. Do make sure you're not sharing interrupts with anything that
- cannot accommodate interrupt sharing!
-
- To utilise this ability, you have to do 8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) for fixed autoprobes (not recommended), edit the source code near
- line 5594 to reflect the I/O address you're using, or assign these when
- loading by::
-
- insmod de4x5 io=0xghh where g = bus number
- hh = device number
-
- .. note::
-
- autoprobing for modules is now supported by default. You may just
- use::
-
- insmod de4x5
-
- to load all available boards. For a specific board, still use
- the 'io=?' above.
- 3) compile de4x5.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the de4x5 configuration turned off and reboot.
- 5) insmod de4x5 [io=0xghh]
- 6) run the net startup bits for your new eth?? interface(s) manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- To unload a module, turn off the associated interface(s)
- 'ifconfig eth?? down' then 'rmmod de4x5'.
-
- Automedia detection is included so that in principle you can disconnect
- from, e.g. TP, reconnect to BNC and things will still work (after a
- pause while the driver figures out where its media went). My tests
- using ping showed that it appears to work....
-
- By default, the driver will now autodetect any DECchip based card.
- Should you have a need to restrict the driver to DIGITAL only cards, you
- can compile with a DEC_ONLY define, or if loading as a module, use the
- 'dec_only=1' parameter.
-
- I've changed the timing routines to use the kernel timer and scheduling
- functions so that the hangs and other assorted problems that occurred
- while autosensing the media should be gone. A bonus for the DC21040
- auto media sense algorithm is that it can now use one that is more in
- line with the rest (the DC21040 chip doesn't have a hardware timer).
- The downside is the 1 'jiffies' (10ms) resolution.
-
- IEEE 802.3u MII interface code has been added in anticipation that some
- products may use it in the future.
-
- The SMC9332 card has a non-compliant SROM which needs fixing - I have
- patched this driver to detect it because the SROM format used complies
- to a previous DEC-STD format.
-
- I have removed the buffer copies needed for receive on Intels. I cannot
- remove them for Alphas since the Tulip hardware only does longword
- aligned DMA transfers and the Alphas get alignment traps with non
- longword aligned data copies (which makes them really slow). No comment.
-
- I have added SROM decoding routines to make this driver work with any
- card that supports the Digital Semiconductor SROM spec. This will help
- all cards running the dc2114x series chips in particular. Cards using
- the dc2104x chips should run correctly with the basic driver. I'm in
- debt to <mjacob@feral.com> for the testing and feedback that helped get
- this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
- (with the latest SROM complying with the SROM spec V3: their first was
- broken), ZNYX342 and LinkSys. ZNYX314 (dual 21041 MAC) and ZNYX 315
- (quad 21041 MAC) cards also appear to work despite their incorrectly
- wired IRQs.
-
- I have added a temporary fix for interrupt problems when some SCSI cards
- share the same interrupt as the DECchip based cards. The problem occurs
- because the SCSI card wants to grab the interrupt as a fast interrupt
- (runs the service routine with interrupts turned off) vs. this card
- which really needs to run the service routine with interrupts turned on.
- This driver will now add the interrupt service routine as a fast
- interrupt if it is bounced from the slow interrupt. THIS IS NOT A
- RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
- until people sort out their compatibility issues and the kernel
- interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
- INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
- run on the same interrupt. PCMCIA/CardBus is another can of worms...
-
- Finally, I think I have really fixed the module loading problem with
- more than one DECchip based card. As a side effect, I don't mess with
- the device structure any more which means that if more than 1 card in
- 2.0.x is installed (4 in 2.1.x), the user will have to edit
- linux/drivers/net/Space.c to make room for them. Hence, module loading
- is the preferred way to use this driver, since it doesn't have this
- limitation.
-
- Where SROM media detection is used and full duplex is specified in the
- SROM, the feature is ignored unless lp->params.fdx is set at compile
- time OR during a module load (insmod de4x5 args='eth??:fdx' [see
- below]). This is because there is no way to automatically detect full
- duplex links except through autonegotiation. When I include the
- autonegotiation feature in the SROM autoconf code, this detection will
- occur automatically for that case.
-
- Command line arguments are now allowed, similar to passing arguments
- through LILO. This will allow a per adapter board set up of full duplex
- and media. The only lexical constraints are: the board name (dev->name)
- appears in the list before its parameters. The list of parameters ends
- either at the end of the parameter list or with another board name. The
- following parameters are allowed:
-
- ========= ===============================================
- fdx for full duplex
- autosense to set the media/speed; with the following
- sub-parameters:
- TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
- ========= ===============================================
-
- Case sensitivity is important for the sub-parameters. They *must* be
- upper case. Examples::
-
- insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-
- For a compiled in driver, in linux/drivers/net/CONFIG, place e.g.::
-
- DE4X5_OPTS = -DDE4X5_PARM='"eth0:fdx autosense=AUI eth2:autosense=TP"'
-
- Yes, I know full duplex isn't permissible on BNC or AUI; they're just
- examples. By default, full duplex is turned off and AUTO is the default
- autosense setting. In reality, I expect only the full duplex option to
- be used. Note the use of single quotes in the two examples above and the
- lack of commas to separate items.
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 21a97703421d..4e06684d079b 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -19,7 +19,6 @@ Contents:
cirrus/cs89x0
dlink/dl2k
davicom/dm9000
- dec/de4x5
dec/dmfe
freescale/dpaa
freescale/dpaa2/index
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index a1c271fe484e..03b215bddde8 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -1,5 +1,5 @@
-Linux Networking Documentation
-==============================
+Networking
+==========
Refer to :ref:`netdev-FAQ` for a guide on netdev development process specifics.
diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 311128abb768..834945ebc4cd 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -34,10 +34,13 @@ nf_conntrack_count - INTEGER (read-only)
nf_conntrack_events - BOOLEAN
- 0 - disabled
- - not 0 - enabled (default)
+ - 1 - enabled
+ - 2 - auto (default)
If this option is enabled, the connection tracking code will
provide userspace with connection tracking events via ctnetlink.
+ The default allocates the extension if a userspace program is
+ listening to ctnetlink events.
nf_conntrack_expect_max - INTEGER
Maximum size of expectation table. Default value is
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index 6f8f36e10e8b..95999302d279 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -244,10 +244,11 @@ disclosure of a particular issue, unless requested by a response team or by
an involved disclosed party. The current ambassadors list:
============= ========================================================
- ARM Grant Likely <grant.likely@arm.com>
AMD Tom Lendacky <tom.lendacky@amd.com>
- IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
- IBM Power Anton Blanchard <anton@linux.ibm.com>
+ Ampere Darren Hart <darren@os.amperecomputing.com>
+ ARM Catalin Marinas <catalin.marinas@arm.com>
+ IBM Power Anton Blanchard <anton@linux.ibm.com>
+ IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <tsoni@codeaurora.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 69b597aa4bc7..234380c959c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4376,7 +4376,6 @@ F: drivers/net/can/
F: drivers/phy/phy-can-transceiver.c
F: include/linux/can/bittiming.h
F: include/linux/can/dev.h
-F: include/linux/can/led.h
F: include/linux/can/length.h
F: include/linux/can/platform/
F: include/linux/can/rx-offload.h
@@ -5442,6 +5441,7 @@ F: net/ax25/sysctl_net_ax25.c
DATA ACCESS MONITOR
M: SeongJae Park <sj@kernel.org>
+L: damon@lists.linux.dev
L: linux-mm@kvack.org
S: Maintained
F: Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -14399,7 +14399,6 @@ F: arch/arm/*omap*/*pm*
F: drivers/cpufreq/omap-cpufreq.c
OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M: Rajendra Nayak <rnayak@codeaurora.org>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
@@ -15502,7 +15501,8 @@ F: tools/perf/
PERFORMANCE EVENTS TOOLING ARM64
R: John Garry <john.garry@huawei.com>
R: Will Deacon <will@kernel.org>
-R: Mathieu Poirier <mathieu.poirier@linaro.org>
+R: James Clark <james.clark@arm.com>
+R: Mike Leach <mike.leach@linaro.org>
R: Leo Yan <leo.yan@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
diff --git a/Makefile b/Makefile
index 2284d1ca2503..5033c0577c6d 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Superb Owl
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
index e71ccfd1df63..ff4c07c69af1 100644
--- a/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
@@ -100,12 +100,14 @@
lm25066@40 {
compatible = "lm25066";
reg = <0x40>;
+ shunt-resistor-micro-ohms = <1000>;
};
/* 12VSB PMIC */
lm25066@41 {
compatible = "lm25066";
reg = <0x41>;
+ shunt-resistor-micro-ohms = <10000>;
};
};
@@ -196,7 +198,7 @@
gpio-line-names =
/* A */ "LOCATORLED_STATUS_N", "BMC_MAC2_INTB", "NMI_BTN_N", "BMC_NMI",
"", "", "", "",
- /* B */ "DDR_MEM_TEMP", "", "", "", "", "", "", "",
+ /* B */ "POST_COMPLETE_N", "", "", "", "", "", "", "",
/* C */ "", "", "", "", "PCIE_HP_SEL_N", "PCIE_SATA_SEL_N", "LOCATORBTN", "",
/* D */ "BMC_PSIN", "BMC_PSOUT", "BMC_RESETCON", "RESETCON",
"", "", "", "PSU_FAN_FAIL_N",
diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
index e4775bbceecc..7cd4f075e325 100644
--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
@@ -117,9 +117,9 @@
groups = "FWSPID";
};
- pinctrl_fwqspid_default: fwqspid_default {
- function = "FWSPID";
- groups = "FWQSPID";
+ pinctrl_fwqspi_default: fwqspi_default {
+ function = "FWQSPI";
+ groups = "FWQSPI";
};
pinctrl_fwspiwp_default: fwspiwp_default {
@@ -653,12 +653,12 @@
};
pinctrl_qspi1_default: qspi1_default {
- function = "QSPI1";
+ function = "SPI1";
groups = "QSPI1";
};
pinctrl_qspi2_default: qspi2_default {
- function = "QSPI2";
+ function = "SPI2";
groups = "QSPI2";
};
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index 6aa1fd5c9359..3c1011678ce6 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -393,6 +393,16 @@
reg = <0x1e6f2000 0x1000>;
};
+ video: video@1e700000 {
+ compatible = "aspeed,ast2600-video-engine";
+ reg = <0x1e700000 0x1000>;
+ clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
+ <&syscon ASPEED_CLK_GATE_ECLK>;
+ clock-names = "vclk", "eclk";
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
gpio0: gpio@1e780000 {
#gpio-cells = <2>;
gpio-controller;
diff --git a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
index f86efd0ccc40..ce543e325cd3 100644
--- a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
@@ -83,6 +83,16 @@
qca,clk-out-frequency = <125000000>;
qca,smarteee-tw-us-1g = <24>;
};
+
+ /*
+ * ADIN1300 (som rev 1.9 or later) is always at address 1. It
+ * will be enabled automatically by U-Boot if detected.
+ */
+ ethernet-phy@1 {
+ reg = <1>;
+ adi,phy-output-clock = "125mhz-free-running";
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 0c70eb688a00..2a0739a2350b 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -440,6 +440,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
#endif
/*
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index aa08bcb72db9..290702328a33 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -493,3 +493,11 @@ void __init early_ioremap_init(void)
{
early_ioremap_setup();
}
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ unsigned long pfn = PHYS_PFN(offset);
+
+ return memblock_is_map_memory(pfn);
+}
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index 1cee26479bfe..98c9a3265446 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -303,7 +303,7 @@
/* switch nodes are enabled by U-Boot if modules are present */
switch0@10 {
compatible = "marvell,mv88e6190";
- reg = <0x10 0>;
+ reg = <0x10>;
dsa,member = <0 0>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(0)>;
@@ -428,7 +428,7 @@
switch0@2 {
compatible = "marvell,mv88e6085";
- reg = <0x2 0>;
+ reg = <0x2>;
dsa,member = <0 0>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
@@ -495,7 +495,7 @@
switch1@11 {
compatible = "marvell,mv88e6190";
- reg = <0x11 0>;
+ reg = <0x11>;
dsa,member = <0 1>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(1)>;
@@ -620,7 +620,7 @@
switch1@2 {
compatible = "marvell,mv88e6085";
- reg = <0x2 0>;
+ reg = <0x2>;
dsa,member = <0 1>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
@@ -687,7 +687,7 @@
switch2@12 {
compatible = "marvell,mv88e6190";
- reg = <0x12 0>;
+ reg = <0x12>;
dsa,member = <0 2>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_PERIDOT(2)>;
@@ -803,7 +803,7 @@
switch2@2 {
compatible = "marvell,mv88e6085";
- reg = <0x2 0>;
+ reg = <0x2>;
dsa,member = <0 2>;
interrupt-parent = <&moxtet>;
interrupts = <MOXTET_IRQ_TOPAZ>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts b/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
index 21e420829572..882277a52b69 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
@@ -25,6 +25,80 @@
};
};
+&eth {
+ status = "okay";
+
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
+
+&mdio {
+ switch: switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <31>;
+ reset-gpios = <&pio 5 0>;
+ };
+};
+
+&switch {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index 694acf8f5b70..d2636a0ed152 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -222,6 +222,45 @@
#reset-cells = <1>;
};
+ eth: ethernet@15100000 {
+ compatible = "mediatek,mt7986-eth";
+ reg = <0 0x15100000 0 0x80000>;
+ interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ethsys CLK_ETH_FE_EN>,
+ <&ethsys CLK_ETH_GP2_EN>,
+ <&ethsys CLK_ETH_GP1_EN>,
+ <&ethsys CLK_ETH_WOCPU1_EN>,
+ <&ethsys CLK_ETH_WOCPU0_EN>,
+ <&sgmiisys0 CLK_SGMII0_TX250M_EN>,
+ <&sgmiisys0 CLK_SGMII0_RX250M_EN>,
+ <&sgmiisys0 CLK_SGMII0_CDR_REF>,
+ <&sgmiisys0 CLK_SGMII0_CDR_FB>,
+ <&sgmiisys1 CLK_SGMII1_TX250M_EN>,
+ <&sgmiisys1 CLK_SGMII1_RX250M_EN>,
+ <&sgmiisys1 CLK_SGMII1_CDR_REF>,
+ <&sgmiisys1 CLK_SGMII1_CDR_FB>,
+ <&topckgen CLK_TOP_NETSYS_SEL>,
+ <&topckgen CLK_TOP_NETSYS_500M_SEL>;
+ clock-names = "fe", "gp2", "gp1", "wocpu1", "wocpu0",
+ "sgmii_tx250m", "sgmii_rx250m",
+ "sgmii_cdr_ref", "sgmii_cdr_fb",
+ "sgmii2_tx250m", "sgmii2_rx250m",
+ "sgmii2_cdr_ref", "sgmii2_cdr_fb",
+ "netsys0", "netsys1";
+ assigned-clocks = <&topckgen CLK_TOP_NETSYS_2X_SEL>,
+ <&topckgen CLK_TOP_SGM_325M_SEL>;
+ assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
+ <&apmixedsys CLK_APMIXED_SGMPLL>;
+ mediatek,ethsys = <&ethsys>;
+ mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+ #reset-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts b/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
index d73467ea3641..0f49d5764ff3 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
@@ -28,3 +28,73 @@
&uart0 {
status = "okay";
};
+
+&eth {
+ status = "okay";
+
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <31>;
+ reset-gpios = <&pio 5 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index fb99cc2827c7..7ab3627cc347 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -622,6 +622,10 @@
status = "okay";
};
+&rxmacro {
+ status = "okay";
+};
+
&slpi {
status = "okay";
firmware-name = "qcom/sm8250/slpi.mbn";
@@ -773,6 +777,8 @@
};
&swr1 {
+ status = "okay";
+
wcd_rx: wcd9380-rx@0,4 {
compatible = "sdw20217010d00";
reg = <0 4>;
@@ -781,6 +787,8 @@
};
&swr2 {
+ status = "okay";
+
wcd_tx: wcd9380-tx@0,3 {
compatible = "sdw20217010d00";
reg = <0 3>;
@@ -819,6 +827,10 @@
};
};
+&txmacro {
+ status = "okay";
+};
+
&uart12 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index af8f22636436..1304b86af1a0 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -2255,6 +2255,7 @@
pinctrl-0 = <&rx_swr_active>;
compatible = "qcom,sm8250-lpass-rx-macro";
reg = <0 0x3200000 0 0x1000>;
+ status = "disabled";
clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
@@ -2273,6 +2274,7 @@
swr1: soundwire-controller@3210000 {
reg = <0 0x3210000 0 0x2000>;
compatible = "qcom,soundwire-v1.5.1";
+ status = "disabled";
interrupts = <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rxmacro>;
clock-names = "iface";
@@ -2300,6 +2302,7 @@
pinctrl-0 = <&tx_swr_active>;
compatible = "qcom,sm8250-lpass-tx-macro";
reg = <0 0x3220000 0 0x1000>;
+ status = "disabled";
clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
<&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
@@ -2323,6 +2326,7 @@
compatible = "qcom,soundwire-v1.5.1";
interrupts-extended = <&intc GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "core";
+ status = "disabled";
clocks = <&txmacro>;
clock-names = "iface";
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
index a01886b467ed..067fe4a6b178 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
@@ -16,6 +16,7 @@
aliases {
ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
mmc0 = &sdmmc0;
mmc1 = &sdhci;
};
@@ -78,7 +79,6 @@
assigned-clocks = <&cru SCLK_GMAC0_RX_TX>, <&cru SCLK_GMAC0>;
assigned-clock-parents = <&cru SCLK_GMAC0_RGMII_SPEED>, <&cru CLK_MAC0_2TOP>;
clock_in_out = "input";
- phy-handle = <&rgmii_phy0>;
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&gmac0_miim
@@ -90,8 +90,38 @@
snps,reset-active-low;
/* Reset time is 20ms, 100ms for rtl8211f */
snps,reset-delays-us = <0 20000 100000>;
+ tx_delay = <0x4f>;
+ rx_delay = <0x0f>;
+ status = "okay";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru CLK_MAC1_2TOP>;
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus>;
+
+ snps,reset-gpio = <&gpio3 RK_PB0 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ /* Reset time is 20ms, 100ms for rtl8211f */
+ snps,reset-delays-us = <0 20000 100000>;
+
tx_delay = <0x3c>;
rx_delay = <0x2f>;
+
status = "okay";
};
@@ -315,8 +345,8 @@
status = "disabled";
};
-&mdio0 {
- rgmii_phy0: ethernet-phy@0 {
+&mdio1 {
+ rgmii_phy1: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0x0>;
};
@@ -345,9 +375,9 @@
pmuio2-supply = <&vcc3v3_pmu>;
vccio1-supply = <&vccio_acodec>;
vccio3-supply = <&vccio_sd>;
- vccio4-supply = <&vcc_1v8>;
+ vccio4-supply = <&vcc_3v3>;
vccio5-supply = <&vcc_3v3>;
- vccio6-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_1v8>;
vccio7-supply = <&vcc_3v3>;
status = "okay";
};
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 7fd836bea7eb..3995652daf81 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 986837d7ec82..fa7981d0d917 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -75,6 +75,10 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso-wrap.o
obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
+# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
+$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
+$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
+
obj-y += probes/
head-y := head.o
extra-y += $(head-y) vmlinux.lds
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 4c9b5b4b7a0b..a0f3d0aaa3c5 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1286807
{
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+ /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
#endif
{},
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d72c4b4d389c..2cb9cc9e0eff 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -654,7 +654,6 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
&id_aa64isar1_override),
- ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
&id_aa64isar2_override),
@@ -810,7 +809,7 @@ static void __init sort_ftr_regs(void)
* to sys_id for subsequent binary search in get_arm64_ftr_reg()
* to work correctly.
*/
- BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
+ BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id);
}
}
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 172452f79e46..ac1964ebed1e 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -52,9 +52,6 @@ GCOV_PROFILE := n
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-# Force dependency (incbin is bad)
-$(obj)/vdso.o : $(obj)/vdso.so
-
# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold_and_vdso_check)
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index ed181bedbffc..05ba1aae1b6f 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -131,9 +131,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-# Force dependency (vdso.s includes vdso.so through incbin)
-$(obj)/vdso.o: $(obj)/vdso.so
-
include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index b7c81dacabf0..b21f91cd830d 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -99,3 +99,11 @@ void __init early_ioremap_init(void)
{
early_ioremap_setup();
}
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ unsigned long pfn = PHYS_PFN(offset);
+
+ return pfn_is_map_memory(pfn);
+}
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 0cb4d9aa14d1..4194e79b435c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -277,7 +277,6 @@ CONFIG_CHELSIO_T1=m
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
-CONFIG_DE4X5=m
CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index a4a805b87469..fb314f75ad4b 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -53,7 +53,6 @@ CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_PCNET32=y
CONFIG_NET_TULIP=y
-CONFIG_DE4X5=y
CONFIG_MV643XX_ETH=y
CONFIG_8139CP=y
CONFIG_8139TOO=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index bb549cb1c3e3..b622ecd73286 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -444,7 +444,6 @@ CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
CONFIG_TULIP_MMIO=y
-CONFIG_DE4X5=m
CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
index e3ab9df6cf19..6cfcd20d4668 100644
--- a/arch/powerpc/kvm/book3s_32_sr.S
+++ b/arch/powerpc/kvm/book3s_32_sr.S
@@ -122,11 +122,27 @@
/* 0x0 - 0xb */
- /* 'current->mm' needs to be in r4 */
- tophys(r4, r2)
- lwz r4, MM(r4)
- tophys(r4, r4)
- /* This only clobbers r0, r3, r4 and r5 */
+ /* switch_mmu_context() needs paging, let's enable it */
+ mfmsr r9
+ ori r11, r9, MSR_DR
+ mtmsr r11
+ sync
+
+ /* switch_mmu_context() clobbers r12, rescue it */
+ SAVE_GPR(12, r1)
+
+ /* Calling switch_mmu_context(<inv>, current->mm, <inv>); */
+ lwz r4, MM(r2)
bl switch_mmu_context
+ /* restore r12 */
+ REST_GPR(12, r1)
+
+ /* Disable paging again */
+ mfmsr r9
+ li r6, MSR_DR
+ andc r9, r9, r6
+ mtmsr r9
+ sync
+
.endm
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 96d34ebb20a9..e2942335d143 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -902,6 +902,8 @@ static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end
static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
+ const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
+
vmemmap_flush_unused_pmd();
/*
@@ -914,8 +916,7 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long
* Mark with PAGE_UNUSED the unused parts of the new memmap range
*/
if (!IS_ALIGNED(start, PMD_SIZE))
- memset((void *)start, PAGE_UNUSED,
- start - ALIGN_DOWN(start, PMD_SIZE));
+ memset((void *)page, PAGE_UNUSED, start - page);
/*
* We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 3ed5eaf3446a..6ed602b2f80a 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -742,6 +742,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
if (at_head) {
list_add(&rq->queuelist, &per_prio->dispatch);
+ rq->fifo_time = jiffies;
} else {
deadline_add_rq_rb(per_prio, rq);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 94d1789a233e..406a907a4cae 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -735,6 +735,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
+ struct cred *kern_cred = NULL;
+ const struct cred *old_cred;
bool nondirect = false;
int ret;
@@ -751,6 +753,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;
+ /*
+ * We are about to try to access the firmware file. Because we may have been
+ * called by a driver when serving an unrelated request from userland, we use
+ * the kernel credentials to read the file.
+ */
+ kern_cred = prepare_kernel_cred(NULL);
+ if (!kern_cred) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ old_cred = override_creds(kern_cred);
+
ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
/* Only full reads can support decompression, platform, and sysfs. */
@@ -776,6 +790,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
} else
ret = assign_fw(fw, device);
+ revert_creds(old_cred);
+ put_cred(kern_cred);
+
out:
if (ret < 0) {
fw_abort_batch_reqs(fw);
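
A minimal sketch of the override_creds()/revert_creds() pattern the hunk above adopts around the firmware lookup, assuming a hypothetical open_as_kernel() helper and path argument; prepare_kernel_cred(), override_creds(), revert_creds(), put_cred() and the VFS calls are the real kernel APIs involved.

#include <linux/cred.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/fs.h>

/* open_as_kernel() is a made-up helper; the four cred calls are the real API. */
static int open_as_kernel(const char *path)
{
	struct cred *kern_cred;
	const struct cred *old_cred;
	struct file *filp;

	kern_cred = prepare_kernel_cred(NULL);	/* fresh creds modelled on init_cred */
	if (!kern_cred)
		return -ENOMEM;

	old_cred = override_creds(kern_cred);	/* current task now acts with kernel creds */
	filp = filp_open(path, O_RDONLY, 0);	/* permission checks see the kernel creds */
	revert_creds(old_cred);			/* restore the caller's credentials */
	put_cred(kern_cred);			/* drop the reference from prepare_kernel_cred() */

	if (IS_ERR(filp))
		return PTR_ERR(filp);
	filp_close(filp, NULL);
	return 0;
}
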
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 1e74ec1c7f23..fac8ff983aec 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -11,6 +11,8 @@
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/export.h>
+#include <linux/property.h>
+
#include <linux/bcma/bcma.h>
#include "bcma_private.h"
@@ -182,9 +184,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->parent = bus->dev;
-#if IS_BUILTIN(CONFIG_OF)
- chip->of_node = cc->core->dev.of_node;
-#endif
+ chip->fwnode = dev_fwnode(&cc->core->dev);
+
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357:
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index df23239b04fc..b1e25ae98302 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -543,10 +543,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
- ret = dma_buf_stats_setup(dmabuf);
- if (ret)
- goto err_sysfs;
-
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
@@ -554,6 +550,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
+ ret = dma_buf_stats_setup(dmabuf);
+ if (ret)
+ goto err_sysfs;
+
return dmabuf;
err_sysfs:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 8f0e6d93bb9c..c317078d1afd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -296,6 +296,7 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
{
struct amdgpu_device *adev = ctx->adev;
enum amd_dpm_forced_level level;
+ u32 current_stable_pstate;
int r;
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
@@ -304,6 +305,10 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
goto done;
}
+ r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+ if (r || (stable_pstate == current_stable_pstate))
+ goto done;
+
switch (stable_pstate) {
case AMDGPU_CTX_STABLE_PSTATE_NONE:
level = AMD_DPM_FORCED_LEVEL_AUTO;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 039b90cdc3bc..45f0188c4273 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,6 +81,10 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
@@ -1134,13 +1138,24 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
+static bool aspm_support_quirk_check(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+ return true;
+#endif
+}
+
static void vi_program_aspm(struct amdgpu_device *adev)
{
u32 data, data1, orig;
bool bL1SS = false;
bool bClkReqSupport = true;
- if (!amdgpu_device_should_use_aspm(adev))
+ if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
return;
if (adev->flags & AMD_IS_APU ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index d7559e5a99ce..e708f07fe75a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
dc->hwss.init_hw = dcn20_fpga_init_hw;
dc->hwseq->funcs.init_pipes = NULL;
}
- if (dc->debug.disable_z10) {
- /*hw not support z10 or sw disable it*/
- dc->hwss.z10_restore = NULL;
- dc->hwss.z10_save_init = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index f1544755d8b4..f10a0256413e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1351,14 +1351,8 @@ static int smu_disable_dpms(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /*
- * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
- * the workaround which always reset the asic in suspend.
- * It's likely that workaround will be dropped in the future.
- * Then the change here should be dropped together.
- */
bool use_baco = !smu->is_apu &&
- (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
+ ((amdgpu_in_reset(adev) &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 94fcdb7bd21d..eeaa8d0d0407 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1605,17 +1605,17 @@ void i915_vma_close(struct i915_vma *vma)
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
- struct intel_gt *gt = vma->vm->gt;
-
- spin_lock_irq(&gt->closed_lock);
list_del_init(&vma->closed_link);
- spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_reopen(struct i915_vma *vma)
{
+ struct intel_gt *gt = vma->vm->gt;
+
+ spin_lock_irq(&gt->closed_lock);
if (i915_vma_is_closed(vma))
__i915_vma_remove_closed(vma);
+ spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_release(struct kref *ref)
@@ -1641,6 +1641,7 @@ static void force_unbind(struct i915_vma *vma)
static void release_references(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
+ struct intel_gt *gt = vma->vm->gt;
GEM_BUG_ON(i915_vma_is_active(vma));
@@ -1650,7 +1651,9 @@ static void release_references(struct i915_vma *vma)
rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
+ spin_lock_irq(&gt->closed_lock);
__i915_vma_remove_closed(vma);
+ spin_unlock_irq(&gt->closed_lock);
__i915_vma_put(vma);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index daf9f87477ba..a2141d3d9b1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -46,8 +46,9 @@ static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
struct nouveau_backlight *bl)
{
- const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
- if (nb < 0 || nb >= 100)
+ const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+ if (nb < 0)
return false;
if (nb > 0)
snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -414,7 +415,7 @@ nouveau_backlight_init(struct drm_connector *connector)
nv_encoder, ops, &props);
if (IS_ERR(bl->dev)) {
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
ret = PTR_ERR(bl->dev);
goto fail_alloc;
}
@@ -442,7 +443,7 @@ nouveau_backlight_fini(struct drm_connector *connector)
return;
if (bl->id >= 0)
- ida_simple_remove(&bl_ida, bl->id);
+ ida_free(&bl_ida, bl->id);
backlight_device_unregister(bl->dev);
nv_conn->backlight = NULL;
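
A minimal sketch of the ida_alloc_max()/ida_free() pairing that replaces ida_simple_get()/ida_simple_remove() in the hunks above; demo_ida and the demo_* helpers are hypothetical. The upper bound is inclusive, which is why the separate "nb >= 100" range check could be dropped and only a negative return needs handling.

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(demo_ida);			/* hypothetical IDA, analogous to bl_ida */

static int demo_get_id(void)
{
	/* Lowest free id in [0, 99]; negative errno (e.g. -ENOSPC) when exhausted. */
	return ida_alloc_max(&demo_ida, 99, GFP_KERNEL);
}

static void demo_put_id(int id)
{
	if (id >= 0)
		ida_free(&demo_ida, id);	/* return the id for reuse */
}
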
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 992cc285f2fe..2ed528c065fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
mutex_init(&tdev->iommu.mutex);
- if (iommu_present(&platform_bus_type)) {
+ if (device_iommu_mapped(dev)) {
tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
if (!tdev->iommu.domain)
goto error;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6c58b0fd13fb..98b78ec6b37d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -38,6 +38,7 @@
#include <drm/drm_scdc_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index a3bfbb6c3e14..162dfeb1cc5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -528,7 +528,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
} while (*seqno == 0);
- if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
+ if (!vmw_has_fences(dev_priv)) {
/*
* Don't request hardware to send a fence. The
@@ -675,11 +675,14 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
*/
bool vmw_cmd_supported(struct vmw_private *vmw)
{
- if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
- SVGA_CAP_CMD_BUFFERS_2)) != 0)
- return true;
+ bool has_cmdbufs =
+ (vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+ SVGA_CAP_CMD_BUFFERS_2)) != 0;
+ if (vmw_is_svga_v3(vmw))
+ return (has_cmdbufs &&
+ (vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
/*
* We have FIFO cmd's
*/
- return vmw->fifo_mem != NULL;
+ return has_cmdbufs || vmw->fifo_mem != NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index ea3ecdda561d..6de0b9ef5c77 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1679,4 +1679,12 @@ static inline void vmw_irq_status_write(struct vmw_private *vmw,
outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
}
+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+ if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+ SVGA_CAP_CMD_BUFFERS_2)) != 0)
+ return true;
+ return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 8ee34576c7d0..adf17c740656 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -483,7 +483,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 59d6a2dd4c2e..66cc35dc223e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -82,6 +82,22 @@ fman_from_fence(struct vmw_fence_obj *fence)
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
+static u32 vmw_fence_goal_read(struct vmw_private *vmw)
+{
+ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+ return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
+ else
+ return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
+}
+
+static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
+{
+ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+ vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
+ else
+ vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
+}
+
/*
* Note on fencing subsystem usage of irqs:
* Typically the vmw_fences_update function is called
@@ -392,7 +408,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
if (likely(!fman->seqno_valid))
return false;
- goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fence_goal_read(fman->dev_priv);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;
@@ -400,9 +416,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
- vmw_fifo_mem_write(fman->dev_priv,
- SVGA_FIFO_FENCE_GOAL,
- fence->base.seqno);
+ vmw_fence_goal_write(fman->dev_priv,
+ fence->base.seqno);
break;
}
}
@@ -434,13 +449,12 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
if (dma_fence_is_signaled_locked(&fence->base))
return false;
- goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_fence_goal_read(fman->dev_priv);
if (likely(fman->seqno_valid &&
goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;
- vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
- fence->base.seqno);
+ vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
fman->seqno_valid = true;
return true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index c5191de365ca..fe4732bf2c9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -32,6 +32,14 @@
#define VMW_FENCE_WRAP (1 << 24)
+static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
+{
+ if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+ return SVGA_IRQFLAG_REG_FENCE_GOAL;
+ else
+ return SVGA_IRQFLAG_FENCE_GOAL;
+}
+
/**
* vmw_thread_fn - Deferred (process context) irq handler
*
@@ -96,7 +104,7 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
wake_up_all(&dev_priv->fifo_queue);
if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
- SVGA_IRQFLAG_FENCE_GOAL)) &&
+ vmw_irqflag_fence_goal(dev_priv))) &&
!test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
ret = IRQ_WAKE_THREAD;
@@ -137,8 +145,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
- if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
- vmw_fifo_idle(dev_priv, seqno))
+ if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
return true;
/**
@@ -160,6 +167,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
unsigned long timeout)
{
struct vmw_fifo_state *fifo_state = dev_priv->fifo;
+ bool fifo_down = false;
uint32_t count = 0;
uint32_t signal_seq;
@@ -176,12 +184,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
*/
if (fifo_idle) {
- down_read(&fifo_state->rwsem);
if (dev_priv->cman) {
ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
10*HZ);
if (ret)
goto out_err;
+ } else if (fifo_state) {
+ down_read(&fifo_state->rwsem);
+ fifo_down = true;
}
}
@@ -218,12 +228,12 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
}
finish_wait(&dev_priv->fence_queue, &__wait);
- if (ret == 0 && fifo_idle)
+ if (ret == 0 && fifo_idle && fifo_state)
vmw_fence_write(dev_priv, signal_seq);
wake_up_all(&dev_priv->fence_queue);
out_err:
- if (fifo_idle)
+ if (fifo_down)
up_read(&fifo_state->rwsem);
return ret;
@@ -266,13 +276,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+ vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
&dev_priv->goal_queue_waiters);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+ vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
&dev_priv->goal_queue_waiters);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbd2f4ec08ec..93431e8f6606 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1344,7 +1344,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
mode_cmd,
is_bo_proxy);
-
/*
* vmw_create_bo_proxy() adds a reference that is no longer
* needed
@@ -1385,13 +1384,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
ret = vmw_user_lookup_handle(dev_priv, file_priv,
mode_cmd->handles[0],
&surface, &bo);
- if (ret)
+ if (ret) {
+ DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
+ mode_cmd->handles[0], mode_cmd->handles[0]);
goto err_out;
+ }
if (!bo &&
!vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
- DRM_ERROR("Surface size cannot exceed %dx%d",
+ DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
dev_priv->texture_max_height);
goto err_out;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 68a8a27ab3b7..f2b038fa3b84 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -960,7 +960,7 @@ config SENSORS_LTC4261
config SENSORS_LTQ_CPUTEMP
bool "Lantiq cpu temperature sensor driver"
- depends on LANTIQ
+ depends on SOC_XWAY
help
If you say yes here you get support for the temperature
sensor inside your CPU.
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index b86d9df7105d..52c9e7d3f2ae 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -708,10 +708,21 @@ static int tmp401_probe(struct i2c_client *client)
return 0;
}
+static const struct of_device_id __maybe_unused tmp4xx_of_match[] = {
+ { .compatible = "ti,tmp401", },
+ { .compatible = "ti,tmp411", },
+ { .compatible = "ti,tmp431", },
+ { .compatible = "ti,tmp432", },
+ { .compatible = "ti,tmp435", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tmp4xx_of_match);
+
static struct i2c_driver tmp401_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "tmp401",
+ .of_match_table = of_match_ptr(tmp4xx_of_match),
},
.probe_new = tmp401_probe,
.id_table = tmp401_id,
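
A minimal, self-contained sketch of wiring an of_device_id table into a driver, as the tmp401 hunk above does for I2C; the "vendor,demo-sensor" compatible and the demo_* names are hypothetical, and a platform driver is used here only for brevity.

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical compatible string and driver names. */
static const struct of_device_id __maybe_unused demo_of_match[] = {
	{ .compatible = "vendor,demo-sensor" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-sensor",
		/* of_match_ptr() compiles the table reference out when CONFIG_OF=n */
		.of_match_table = of_match_ptr(demo_of_match),
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");
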
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 9050ca1f4285..808f6e7a8048 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -1087,9 +1087,15 @@ static int of_count_icc_providers(struct device_node *np)
{
struct device_node *child;
int count = 0;
+ const struct of_device_id __maybe_unused ignore_list[] = {
+ { .compatible = "qcom,sc7180-ipa-virt" },
+ { .compatible = "qcom,sdx55-ipa-virt" },
+ {}
+ };
for_each_available_child_of_node(np, child) {
- if (of_property_read_bool(child, "#interconnect-cells"))
+ if (of_property_read_bool(child, "#interconnect-cells") &&
+ likely(!of_match_node(ignore_list, child)))
count++;
count += of_count_icc_providers(child);
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index ac760fd39282..b2dcc1e5a388 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -71,23 +71,6 @@ config CAN_CALC_BITTIMING
arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
If unsure, say Y.
-config CAN_LEDS
- bool "Enable LED triggers for Netlink based drivers"
- depends on LEDS_CLASS
- # The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do
- # everything that this driver is doing. This is marked as broken
- # because it uses stuff that is intended to be changed or removed.
- # Please consider switching to the netdev trigger and confirm it
- # fulfills your needs instead of fixing this driver.
- depends on BROKEN
- select LEDS_TRIGGERS
- help
- This option adds two LED triggers for packet receive and transmit
- events on each supported CAN device.
-
- Say Y here if you are working on a system with led-class supported
- LEDs and you want to use them as canbus activity indicators.
-
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index a00655ccda02..29ed0d3cd171 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -23,7 +23,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#define AT91_MB_MASK(i) ((1 << (i)) - 1)
@@ -618,8 +617,6 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
stats->rx_bytes += cf->len;
netif_receive_skb(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
/**
@@ -854,7 +851,6 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
mb - get_mb_tx_first(priv),
NULL);
dev->stats.tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
}
}
@@ -1101,8 +1097,6 @@ static int at91_open(struct net_device *dev)
goto out_close;
}
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
/* start chip and queuing */
at91_chip_start(dev);
napi_enable(&priv->napi);
@@ -1133,8 +1127,6 @@ static int at91_close(struct net_device *dev)
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1317,7 +1309,7 @@ static int at91_can_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(&pdev->dev);
priv->mb0_id = 0x7ff;
- netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
+ netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
if (at91_is_sam9263(priv))
dev->sysfs_groups[0] = &at91_sysfs_attr_group;
@@ -1331,8 +1323,6 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_free;
}
- devm_can_led_init(dev);
-
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->reg_base, dev->irq);
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index faa217f26771..a7362af0babb 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -40,7 +40,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include "c_can.h"
@@ -759,7 +758,6 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_bytes += bytes;
stats->tx_packets += pkts;
- can_led_event(dev, CAN_LED_EVENT_TX);
tail = c_can_get_tx_tail(tx_ring);
@@ -906,9 +904,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
quota -= n;
}
- if (pkts)
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -1182,8 +1177,6 @@ static int c_can_open(struct net_device *dev)
if (err)
goto exit_start_fail;
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
napi_enable(&priv->napi);
/* enable status change, error and module interrupts */
c_can_irq_control(priv, true);
@@ -1214,8 +1207,6 @@ static int c_can_close(struct net_device *dev)
c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1246,7 +1237,8 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
priv->tx.tail = 0;
priv->tx.obj_num = msg_obj_tx_num;
- netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);
+ netif_napi_add_weight(dev, &priv->napi, c_can_poll,
+ priv->msg_obj_rx_num);
priv->dev = dev;
priv->can.bittiming_const = &c_can_bittiming_const;
@@ -1364,8 +1356,6 @@ static const struct net_device_ops c_can_netdev_ops = {
int register_c_can_dev(struct net_device *dev)
{
- int err;
-
/* Deactivate pins to prevent DRA7 DCAN IP from being
* stuck in transition when module is disabled.
* Pins are activated in c_can_start() and deactivated
@@ -1377,10 +1367,7 @@ int register_c_can_dev(struct net_device *dev)
dev->netdev_ops = &c_can_netdev_ops;
c_can_set_ethtool_ops(dev);
- err = register_candev(dev);
- if (!err)
- devm_can_led_init(dev);
- return err;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_c_can_dev);
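
A minimal sketch of netif_napi_add_weight(), which the CAN drivers in this series switch to when they need a non-default NAPI poll weight; the demo_* names and DEMO_NAPI_WEIGHT value are hypothetical.

#include <linux/netdevice.h>

#define DEMO_NAPI_WEIGHT	16	/* hypothetical budget, like the rx object counts above */

struct demo_priv {
	struct napi_struct napi;
};

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... receive up to 'budget' frames here ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static void demo_napi_setup(struct net_device *dev, struct demo_priv *priv)
{
	/* Same as netif_napi_add(), but with an explicit (non-default) poll weight. */
	netif_napi_add_weight(dev, &priv->napi, demo_poll, DEMO_NAPI_WEIGHT);
	napi_enable(&priv->napi);
}
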
diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig
index 48963efc7f19..3c383612eb17 100644
--- a/drivers/net/can/ctucanfd/Kconfig
+++ b/drivers/net/can/ctucanfd/Kconfig
@@ -1,5 +1,5 @@
config CAN_CTUCANFD
- tristate "CTU CAN-FD IP core"
+ tristate "CTU CAN-FD IP core" if COMPILE_TEST
help
This driver adds support for the CTU CAN FD open-source IP core.
More documentation and core sources at project page
@@ -13,8 +13,8 @@ config CAN_CTUCANFD
config CAN_CTUCANFD_PCI
tristate "CTU CAN-FD IP core PCI/PCIe driver"
- depends on CAN_CTUCANFD
depends on PCI
+ select CAN_CTUCANFD
help
This driver adds PCI/PCIe support for CTU CAN-FD IP core.
The project providing FPGA design for Intel EP4CGX15 based DB4CGX15
@@ -23,8 +23,8 @@ config CAN_CTUCANFD_PCI
config CAN_CTUCANFD_PLATFORM
tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver"
- depends on CAN_CTUCANFD
depends on OF || COMPILE_TEST
+ select CAN_CTUCANFD
help
The core has been tested together with OpenCores SJA1000
modified to be CAN FD frames tolerant on MicroZed Zynq based
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 2ada097d1ede..64990bf20fdc 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -29,7 +29,6 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/pm_runtime.h>
#include "ctucanfd.h"
@@ -957,9 +956,6 @@ static int ctucan_rx_poll(struct napi_struct *napi, int quota)
ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
}
- if (work_done)
- can_led_event(ndev, CAN_LED_EVENT_RX);
-
if (!framecnt && res != 0) {
if (napi_complete_done(napi, work_done)) {
/* Clear and enable RBNEI. It is level-triggered, so
@@ -1079,8 +1075,6 @@ clear:
}
} while (some_buffers_processed);
- can_led_event(ndev, CAN_LED_EVENT_TX);
-
spin_lock_irqsave(&priv->tx_lock, flags);
/* Check if at least one TX buffer is free */
@@ -1236,7 +1230,6 @@ static int ctucan_open(struct net_device *ndev)
}
netdev_info(ndev, "ctu_can_fd device registered\n");
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -1269,7 +1262,6 @@ static int ctucan_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
pm_runtime_put(priv->dev);
return 0;
@@ -1434,8 +1426,6 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
goto err_deviceoff;
}
- devm_can_led_init(ndev);
-
pm_runtime_put(dev);
netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n",
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index 3e2e207861fc..af2901db473c 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -7,5 +7,3 @@ can-dev-y += length.o
can-dev-y += netlink.o
can-dev-y += rx-offload.o
can-dev-y += skb.o
-
-can-dev-$(CONFIG_CAN_LEDS) += led.o
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index e7ab45f1c43b..96c9d9db00cf 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -14,7 +14,6 @@
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
-#include <linux/can/led.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
@@ -512,8 +511,6 @@ static __init int can_dev_init(void)
{
int err;
- can_led_notifier_init();
-
err = can_netlink_register();
if (!err)
pr_info(MOD_DESC "\n");
@@ -525,8 +522,6 @@ module_init(can_dev_init);
static __exit void can_dev_exit(void)
{
can_netlink_unregister();
-
- can_led_notifier_exit();
}
module_exit(can_dev_exit);
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 6d0dc18c03e7..a32a01c172d4 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -70,8 +70,6 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
napi_reschedule(&offload->napi);
}
- can_led_event(offload->dev, CAN_LED_EVENT_RX);
-
return work_done;
}
@@ -337,7 +335,8 @@ static int can_rx_offload_init_queue(struct net_device *dev,
skb_queue_head_init(&offload->skb_queue);
__skb_queue_head_init(&offload->skb_irq_queue);
- netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+ netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
+ weight);
dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
__func__, offload->skb_queue_len_max);
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index fe9bda0f5ec4..d060088047f1 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -14,7 +14,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware/imx/sci.h>
@@ -1081,7 +1080,6 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
can_rx_offload_get_echo_skb(&priv->offload, 0,
reg_ctrl << 16, NULL);
stats->tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
@@ -1738,8 +1736,6 @@ static int flexcan_open(struct net_device *dev)
flexcan_chip_interrupts_enable(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
netif_start_queue(dev);
return 0;
@@ -1785,8 +1781,6 @@ static int flexcan_close(struct net_device *dev)
pm_runtime_put(priv->dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -2189,7 +2183,6 @@ static int flexcan_probe(struct platform_device *pdev)
}
of_can_transceiver(dev);
- devm_can_led_init(dev);
return 0;
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 5215bd9b2c80..76df4807d366 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1609,7 +1609,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0);
}
- netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
+ netif_napi_add_weight(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
SET_NETDEV_DEV(dev, &ofdev->dev);
dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index b0a3473f211d..968ed6d7316b 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -345,9 +345,6 @@ static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota)
rxst = readl(priv->base + IFI_CANFD_RXSTCMD);
}
- if (pkts)
- can_led_event(ndev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -626,7 +623,6 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
if (isr & tx_irq_mask)
@@ -830,7 +826,6 @@ static int ifi_canfd_open(struct net_device *ndev)
ifi_canfd_start(ndev);
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -853,8 +848,6 @@ static int ifi_canfd_close(struct net_device *ndev)
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1004,8 +997,6 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
goto err_reg;
}
- devm_can_led_init(ndev);
-
dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n",
priv->base, ndev->irq, priv->can.clock.freq);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 808c105cf8f7..35bfb82d6929 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1910,7 +1910,7 @@ static int ican3_probe(struct platform_device *pdev)
mod = netdev_priv(ndev);
mod->ndev = ndev;
mod->num = pdata->modno;
- netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ netif_napi_add_weight(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
skb_queue_head_init(&mod->echoq);
spin_lock_init(&mod->lock);
init_completion(&mod->termination_comp);
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
deleted file mode 100644
index db14897f8e16..000000000000
--- a/drivers/net/can/led.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- * Copyright 2012, Kurt Van Dijck <kurt.van.dijck@eia.be>
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/can/dev.h>
-
-#include <linux/can/led.h>
-
-static unsigned long led_delay = 50;
-module_param(led_delay, ulong, 0644);
-MODULE_PARM_DESC(led_delay,
- "blink delay time for activity leds (msecs, default: 50).");
-
-/* Trigger a LED event in response to a CAN device event */
-void can_led_event(struct net_device *netdev, enum can_led_event event)
-{
- struct can_priv *priv = netdev_priv(netdev);
-
- switch (event) {
- case CAN_LED_EVENT_OPEN:
- led_trigger_event(priv->tx_led_trig, LED_FULL);
- led_trigger_event(priv->rx_led_trig, LED_FULL);
- led_trigger_event(priv->rxtx_led_trig, LED_FULL);
- break;
- case CAN_LED_EVENT_STOP:
- led_trigger_event(priv->tx_led_trig, LED_OFF);
- led_trigger_event(priv->rx_led_trig, LED_OFF);
- led_trigger_event(priv->rxtx_led_trig, LED_OFF);
- break;
- case CAN_LED_EVENT_TX:
- if (led_delay) {
- led_trigger_blink_oneshot(priv->tx_led_trig,
- &led_delay, &led_delay, 1);
- led_trigger_blink_oneshot(priv->rxtx_led_trig,
- &led_delay, &led_delay, 1);
- }
- break;
- case CAN_LED_EVENT_RX:
- if (led_delay) {
- led_trigger_blink_oneshot(priv->rx_led_trig,
- &led_delay, &led_delay, 1);
- led_trigger_blink_oneshot(priv->rxtx_led_trig,
- &led_delay, &led_delay, 1);
- }
- break;
- }
-}
-EXPORT_SYMBOL_GPL(can_led_event);
-
-static void can_led_release(struct device *gendev, void *res)
-{
- struct can_priv *priv = netdev_priv(to_net_dev(gendev));
-
- led_trigger_unregister_simple(priv->tx_led_trig);
- led_trigger_unregister_simple(priv->rx_led_trig);
- led_trigger_unregister_simple(priv->rxtx_led_trig);
-}
-
-/* Register CAN LED triggers for a CAN device
- *
- * This is normally called from a driver's probe function
- */
-void devm_can_led_init(struct net_device *netdev)
-{
- struct can_priv *priv = netdev_priv(netdev);
- void *res;
-
- res = devres_alloc(can_led_release, 0, GFP_KERNEL);
- if (!res) {
- netdev_err(netdev, "cannot register LED triggers\n");
- return;
- }
-
- snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name),
- "%s-tx", netdev->name);
- snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name),
- "%s-rx", netdev->name);
- snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name),
- "%s-rxtx", netdev->name);
-
- led_trigger_register_simple(priv->tx_led_trig_name,
- &priv->tx_led_trig);
- led_trigger_register_simple(priv->rx_led_trig_name,
- &priv->rx_led_trig);
- led_trigger_register_simple(priv->rxtx_led_trig_name,
- &priv->rxtx_led_trig);
-
- devres_add(&netdev->dev, res);
-}
-EXPORT_SYMBOL_GPL(devm_can_led_init);
-
-/* NETDEV rename notifier to rename the associated led triggers too */
-static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
-{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct can_priv *priv = safe_candev_priv(netdev);
- char name[CAN_LED_NAME_SZ];
-
- if (!priv)
- return NOTIFY_DONE;
-
- if (!priv->tx_led_trig || !priv->rx_led_trig || !priv->rxtx_led_trig)
- return NOTIFY_DONE;
-
- if (msg == NETDEV_CHANGENAME) {
- snprintf(name, sizeof(name), "%s-tx", netdev->name);
- led_trigger_rename_static(name, priv->tx_led_trig);
-
- snprintf(name, sizeof(name), "%s-rx", netdev->name);
- led_trigger_rename_static(name, priv->rx_led_trig);
-
- snprintf(name, sizeof(name), "%s-rxtx", netdev->name);
- led_trigger_rename_static(name, priv->rxtx_led_trig);
- }
-
- return NOTIFY_DONE;
-}
-
-/* notifier block for netdevice event */
-static struct notifier_block can_netdev_notifier __read_mostly = {
- .notifier_call = can_led_notifier,
-};
-
-int __init can_led_notifier_init(void)
-{
- return register_netdevice_notifier(&can_netdev_notifier);
-}
-
-void __exit can_led_notifier_exit(void)
-{
- unregister_netdevice_notifier(&can_netdev_notifier);
-}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index e6d2da4a9f41..5d0c82d8b9a9 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -565,9 +565,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
rxfs = m_can_read(cdev, M_CAN_RXF0S);
}
- if (pkts)
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -1087,8 +1084,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
m_can_tx_update_stats(cdev, 0, timestamp);
-
- can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
}
} else {
@@ -1097,7 +1092,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (m_can_echo_tx_event(dev) != 0)
goto out_fail;
- can_led_event(dev, CAN_LED_EVENT_TX);
if (netif_queue_stopped(dev) &&
!m_can_tx_fifo_full(cdev))
netif_wake_queue(dev);
@@ -1492,34 +1486,22 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_30X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_30X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_30X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
if (err)
return err;
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
@@ -1574,7 +1556,6 @@ static int m_can_close(struct net_device *dev)
can_rx_offload_disable(&cdev->offload);
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
phy_power_off(cdev->transceiver);
@@ -1818,8 +1799,6 @@ static int m_can_open(struct net_device *dev)
/* start the m_can controller */
m_can_start(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
if (!cdev->is_peripheral)
napi_enable(&cdev->napi);
@@ -2007,8 +1986,6 @@ int m_can_class_register(struct m_can_classdev *cdev)
goto rx_offload_del;
}
- devm_can_led_init(cdev->net);
-
of_can_transceiver(cdev->net);
dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 2c5d40997168..4c0267f9f297 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -7,7 +7,6 @@
#define _CAN_M_CAN_H_
#include <linux/can/core.h>
-#include <linux/can/led.h>
#include <linux/can/rx-offload.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -85,9 +84,6 @@ struct m_can_classdev {
struct sk_buff *tx_skb;
struct phy *transceiver;
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
-
struct m_can_ops *ops;
int version;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index b56a54d6c5a9..8f184a852a0a 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -18,14 +18,9 @@
#define M_CAN_PCI_MMIO_BAR 0
+#define M_CAN_CLOCK_FREQ_EHL 200000000
#define CTL_CSR_INT_CTL_OFFSET 0x508
-struct m_can_pci_config {
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
- unsigned int clock_freq;
-};
-
struct m_can_pci_priv {
struct m_can_classdev cdev;
@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
.read_fifo = iomap_read_fifo,
};
-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 128,
- .sjw_max = 128,
- .brp_min = 1,
- .brp_max = 512,
- .brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 16,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
- .bit_timing = &m_can_bittiming_const_ehl,
- .data_timing = &m_can_data_bittiming_const_ehl,
- .clock_freq = 200000000,
-};
-
static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
- const struct m_can_pci_config *cfg;
struct m_can_classdev *mcan_class;
struct m_can_pci_priv *priv;
void __iomem *base;
@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (!mcan_class)
return -ENOMEM;
- cfg = (const struct m_can_pci_config *)id->driver_data;
-
priv = cdev_to_priv(mcan_class);
priv->base = base;
@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
- mcan_class->bit_timing = cfg->bit_timing;
- mcan_class->data_timing = cfg->data_timing;
- mcan_class->can.clock.freq = cfg->clock_freq;
+ mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
m_can_pci_suspend, m_can_pci_resume);
static const struct pci_device_id m_can_pci_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
- { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+ { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+ { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 5b5802fac772..78a21ab63601 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -679,7 +679,7 @@ struct net_device *alloc_mscandev(void)
dev->flags |= IFF_ECHO; /* we support local echo */
- netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
+ netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8);
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 888bef03de09..fde3ac516d26 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1189,7 +1189,7 @@ static int pch_can_probe(struct pci_dev *pdev,
ndev->netdev_ops = &pch_can_netdev_ops;
priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
- netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
+ netif_napi_add_weight(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
rc = pci_enable_msi(priv->dev);
if (rc) {
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 33e37395379d..d45762f1cf6b 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
-#include <linux/can/led.h>
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -389,7 +388,6 @@ static void rcar_can_tx_done(struct net_device *ndev)
/* Clear interrupt */
isr = readb(&priv->regs->isr);
writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
@@ -531,7 +529,6 @@ static int rcar_can_open(struct net_device *ndev)
ndev->irq, err);
goto out_close;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
rcar_can_start(ndev);
netif_start_queue(ndev);
return 0;
@@ -581,7 +578,6 @@ static int rcar_can_close(struct net_device *ndev)
clk_disable_unprepare(priv->can_clk);
clk_disable_unprepare(priv->clk);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -666,8 +662,6 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
}
stats->rx_packets++;
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
-
netif_receive_skb(skb);
}
@@ -803,8 +797,8 @@ static int rcar_can_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
- RCAR_CAN_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll,
+ RCAR_CAN_NAPI_WEIGHT);
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed, error %d\n",
@@ -812,8 +806,6 @@ static int rcar_can_probe(struct platform_device *pdev)
goto fail_candev;
}
- devm_can_led_init(ndev);
-
dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 1e121e04208c..40a11445d021 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -29,7 +29,6 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
-#include <linux/can/led.h>
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -1128,7 +1127,6 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
/* Clear interrupt */
rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFTXIF);
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch)
@@ -1419,7 +1417,6 @@ static int rcar_canfd_open(struct net_device *ndev)
if (err)
goto out_close;
netif_start_queue(ndev);
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
return 0;
out_close:
napi_disable(&priv->napi);
@@ -1469,7 +1466,6 @@ static int rcar_canfd_close(struct net_device *ndev)
napi_disable(&priv->napi);
clk_disable_unprepare(gpriv->can_clk);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -1619,8 +1615,6 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
*/
rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff);
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
-
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
stats->rx_packets++;
@@ -1789,10 +1783,9 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->gpriv = gpriv;
SET_NETDEV_DEV(ndev, &pdev->dev);
- netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
- RCANFD_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
+ RCANFD_NAPI_WEIGHT);
spin_lock_init(&priv->tx_lock);
- devm_can_led_init(ndev);
gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 966316479485..2e7638f98cf1 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,7 +60,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include "sja1000.h"
@@ -383,8 +382,6 @@ static void sja1000_rx(struct net_device *dev)
sja1000_write_cmdreg(priv, CMD_RRB);
netif_rx(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
@@ -531,7 +528,6 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
stats->tx_packets++;
}
netif_wake_queue(dev);
- can_led_event(dev, CAN_LED_EVENT_TX);
}
if (isrc & IRQ_RI) {
/* receive interrupt */
@@ -587,8 +583,6 @@ static int sja1000_open(struct net_device *dev)
/* init and start chi */
sja1000_start(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
netif_start_queue(dev);
return 0;
@@ -606,8 +600,6 @@ static int sja1000_close(struct net_device *dev)
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -673,9 +665,6 @@ int register_sja1000dev(struct net_device *dev)
ret = register_candev(dev);
- if (!ret)
- devm_can_led_init(dev);
-
return ret;
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index ec294d0c5722..64a3aee8a7da 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -359,8 +359,8 @@ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
- if (skb->len != CAN_MTU)
- goto out;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
spin_lock(&sl->lock);
if (!netif_running(dev)) {
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index a5b2952b8d0f..ebc4ebb44c98 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -16,7 +16,6 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -354,8 +353,6 @@ static void hi3110_hw_rx(struct spi_device *spi)
}
priv->net->stats.rx_packets++;
- can_led_event(priv->net, CAN_LED_EVENT_RX);
-
netif_rx(skb);
}
@@ -567,8 +564,6 @@ static int hi3110_stop(struct net_device *net)
mutex_unlock(&priv->hi3110_lock);
- can_led_event(net, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -725,7 +720,6 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
if (priv->tx_busy && statf & HI3110_STAT_TXMTY) {
net->stats.tx_packets++;
net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL);
- can_led_event(net, CAN_LED_EVENT_TX);
priv->tx_busy = false;
netif_wake_queue(net);
}
@@ -783,7 +777,6 @@ static int hi3110_open(struct net_device *net)
if (ret)
goto out_free_wq;
- can_led_event(net, CAN_LED_EVENT_OPEN);
netif_wake_queue(net);
mutex_unlock(&priv->hi3110_lock);
@@ -931,7 +924,6 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto error_probe;
- devm_can_led_init(net);
netdev_info(net, "%x successfully initialized.\n", priv->model);
return 0;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index fc747bff5eeb..666a4505a55a 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -22,7 +22,6 @@
#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -738,8 +737,6 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
}
priv->net->stats.rx_packets++;
- can_led_event(priv->net, CAN_LED_EVENT_RX);
-
netif_rx(skb);
}
@@ -973,8 +970,6 @@ static int mcp251x_stop(struct net_device *net)
mutex_unlock(&priv->mcp_lock);
- can_led_event(net, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1177,7 +1172,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
break;
if (intf & CANINTF_TX) {
- can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_busy) {
net->stats.tx_packets++;
net->stats.tx_bytes += can_get_echo_skb(net, 0,
@@ -1232,8 +1226,6 @@ static int mcp251x_open(struct net_device *net)
if (ret)
goto out_free_irq;
- can_led_event(net, CAN_LED_EVENT_OPEN);
-
netif_wake_queue(net);
mutex_unlock(&priv->mcp_lock);
@@ -1403,8 +1395,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto error_probe;
- devm_can_led_init(net);
-
ret = mcp251x_gpio_setup(priv);
if (ret)
goto error_probe;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 1d43bccc29bf..2b0309fedfac 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -441,7 +441,7 @@ struct mcp251xfd_hw_tef_obj {
/* The tx_obj_raw version is used in spi async, i.e. without
* regmap. We have to take care of endianness ourselves.
*/
-struct mcp251xfd_hw_tx_obj_raw {
+struct __packed mcp251xfd_hw_tx_obj_raw {
__le32 id;
__le32 flags;
u8 data[sizeof_field(struct canfd_frame, data)];
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 25d6d81ab4f4..155b90f6c767 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -51,7 +51,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -516,8 +515,6 @@ static void sun4i_can_rx(struct net_device *dev)
sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF);
netif_rx(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
@@ -664,7 +661,6 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
netif_wake_queue(dev);
- can_led_event(dev, CAN_LED_EVENT_TX);
}
if ((isrc & SUN4I_INT_RBUF_VLD) &&
!(isrc & SUN4I_INT_DATA_OR)) {
@@ -729,7 +725,6 @@ static int sun4ican_open(struct net_device *dev)
goto exit_can_start;
}
- can_led_event(dev, CAN_LED_EVENT_OPEN);
netif_start_queue(dev);
return 0;
@@ -756,7 +751,6 @@ static int sun4ican_close(struct net_device *dev)
free_irq(dev->irq, dev);
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -883,7 +877,6 @@ static int sun4ican_probe(struct platform_device *pdev)
DRV_NAME, err);
goto exit_free;
}
- devm_can_led_init(dev);
dev_info(&pdev->dev, "device registered (base=%p, irq=%d)\n",
priv->base, dev->irq);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index bb3f2e3b004c..debe17bfd0f0 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -34,7 +34,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/can/rx-offload.h>
#define DRV_NAME "ti_hecc"
@@ -759,7 +758,6 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
can_rx_offload_get_echo_skb(&priv->offload,
mbxno, stamp, NULL);
stats->tx_packets++;
- can_led_event(ndev, CAN_LED_EVENT_TX);
--priv->tx_tail;
}
@@ -814,8 +812,6 @@ static int ti_hecc_open(struct net_device *ndev)
return err;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
-
ti_hecc_start(ndev);
can_rx_offload_enable(&priv->offload);
netif_start_queue(ndev);
@@ -834,8 +830,6 @@ static int ti_hecc_close(struct net_device *ndev)
close_candev(ndev);
ti_hecc_transceiver_switch(priv, 0);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -954,8 +948,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_offload;
}
- devm_can_led_init(ndev);
-
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
priv->base, (u32)ndev->irq);
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index c45a814e1de2..792ab9da317d 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -10,7 +10,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -232,8 +231,6 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx,
NULL);
-
- can_led_event(netdev, CAN_LED_EVENT_TX);
}
if (urb->status)
@@ -452,7 +449,6 @@ static void mcba_usb_process_can(struct mcba_priv *priv,
}
stats->rx_packets++;
- can_led_event(priv->netdev, CAN_LED_EVENT_RX);
netif_rx(skb);
}
@@ -700,7 +696,6 @@ static int mcba_usb_open(struct net_device *netdev)
priv->can_speed_check = true;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- can_led_event(netdev, CAN_LED_EVENT_OPEN);
netif_start_queue(netdev);
return 0;
@@ -732,7 +727,6 @@ static int mcba_usb_close(struct net_device *netdev)
mcba_urb_unlink(priv);
close_candev(netdev);
- can_led_event(netdev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -857,8 +851,6 @@ static int mcba_usb_probe(struct usb_interface *intf,
priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress);
priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress);
- devm_can_led_init(netdev);
-
/* Start USB dev only if we have successfully registered CAN device */
err = mcba_usb_start(priv);
if (err) {
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index b638604bf1ee..f3363575bf32 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -21,7 +21,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
/* driver constants */
#define MAX_RX_URBS 20
@@ -480,8 +479,6 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
stats->rx_packets++;
netif_rx(skb);
-
- can_led_event(priv->netdev, CAN_LED_EVENT_RX);
} else {
netdev_warn(priv->netdev, "frame type %d unknown",
msg->type);
@@ -582,8 +579,6 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL);
- can_led_event(netdev, CAN_LED_EVENT_TX);
-
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -807,8 +802,6 @@ static int usb_8dev_open(struct net_device *netdev)
if (err)
return err;
- can_led_event(netdev, CAN_LED_EVENT_OPEN);
-
/* finally start device */
err = usb_8dev_start(priv);
if (err) {
@@ -865,8 +858,6 @@ static int usb_8dev_close(struct net_device *netdev)
close_candev(netdev);
- can_led_event(netdev, CAN_LED_EVENT_STOP);
-
return err;
}
@@ -974,8 +965,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
(version>>8) & 0xff, version & 0xff);
}
- devm_can_led_init(netdev);
-
return 0;
cleanup_unregister_candev:
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 43f0c6a064ba..8a3b7b103ca4 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -26,7 +26,6 @@
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/pm_runtime.h>
#define DRIVER_NAME "xilinx_can"
@@ -1209,10 +1208,8 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
XCAN_IXR_RXNEMP_MASK);
}
- if (work_done) {
- can_led_event(ndev, CAN_LED_EVENT_RX);
+ if (work_done)
xcan_update_error_state_after_rxtx(ndev);
- }
if (work_done < quota) {
if (napi_complete_done(napi, work_done)) {
@@ -1298,7 +1295,6 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
spin_unlock_irqrestore(&priv->tx_lock, flags);
- can_led_event(ndev, CAN_LED_EVENT_TX);
xcan_update_error_state_after_rxtx(ndev);
}
@@ -1420,7 +1416,6 @@ static int xcan_open(struct net_device *ndev)
goto err_candev;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -1452,7 +1447,6 @@ static int xcan_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
pm_runtime_put(priv->dev);
return 0;
@@ -1804,7 +1798,7 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(priv->can_clk);
- netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
+ netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);
ret = register_candev(ndev);
if (ret) {
@@ -1812,8 +1806,6 @@ static int xcan_probe(struct platform_device *pdev)
goto err_disableclks;
}
- devm_can_led_init(ndev);
-
pm_runtime_put(&pdev->dev);
if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 12c15da55664..8af4def38a98 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1360,7 +1360,7 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
+ unsigned int max_ports = priv->hw_info->max_ports;
int fid = -1;
int i;
int err;
@@ -1368,7 +1368,7 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
if (!bridge)
return -EINVAL;
- for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
fid = priv->vlans[i].fid;
break;
@@ -1426,8 +1426,9 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
err = gswip_pce_table_entry_read(priv, &mac_bridge);
if (err) {
- dev_err(priv->dev, "failed to write mac bridge: %d\n",
- err);
+ dev_err(priv->dev,
+ "failed to read mac bridge entry %d: %d\n",
+ i, err);
return err;
}
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index f91deea9368e..12a599d5e61a 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -126,86 +126,6 @@ static u8 ksz8863_shifts[] = {
[DYNAMIC_MAC_SRC_PORT] = 20,
};
-struct mib_names {
- char string[ETH_GSTRING_LEN];
-};
-
-static const struct mib_names ksz87xx_mib_names[] = {
- { "rx_hi" },
- { "rx_undersize" },
- { "rx_fragments" },
- { "rx_oversize" },
- { "rx_jabbers" },
- { "rx_symbol_err" },
- { "rx_crc_err" },
- { "rx_align_err" },
- { "rx_mac_ctrl" },
- { "rx_pause" },
- { "rx_bcast" },
- { "rx_mcast" },
- { "rx_ucast" },
- { "rx_64_or_less" },
- { "rx_65_127" },
- { "rx_128_255" },
- { "rx_256_511" },
- { "rx_512_1023" },
- { "rx_1024_1522" },
- { "rx_1523_2000" },
- { "rx_2001" },
- { "tx_hi" },
- { "tx_late_col" },
- { "tx_pause" },
- { "tx_bcast" },
- { "tx_mcast" },
- { "tx_ucast" },
- { "tx_deferred" },
- { "tx_total_col" },
- { "tx_exc_col" },
- { "tx_single_col" },
- { "tx_mult_col" },
- { "rx_total" },
- { "tx_total" },
- { "rx_discards" },
- { "tx_discards" },
-};
-
-static const struct mib_names ksz88xx_mib_names[] = {
- { "rx" },
- { "rx_hi" },
- { "rx_undersize" },
- { "rx_fragments" },
- { "rx_oversize" },
- { "rx_jabbers" },
- { "rx_symbol_err" },
- { "rx_crc_err" },
- { "rx_align_err" },
- { "rx_mac_ctrl" },
- { "rx_pause" },
- { "rx_bcast" },
- { "rx_mcast" },
- { "rx_ucast" },
- { "rx_64_or_less" },
- { "rx_65_127" },
- { "rx_128_255" },
- { "rx_256_511" },
- { "rx_512_1023" },
- { "rx_1024_1522" },
- { "tx" },
- { "tx_hi" },
- { "tx_late_col" },
- { "tx_pause" },
- { "tx_bcast" },
- { "tx_mcast" },
- { "tx_ucast" },
- { "tx_deferred" },
- { "tx_total_col" },
- { "tx_exc_col" },
- { "tx_single_col" },
- { "tx_mult_col" },
- { "rx_discards" },
- { "tx_discards" },
-};
-
static bool ksz_is_ksz88x3(struct ksz_device *dev)
{
return dev->chip_id == 0x8830;
@@ -306,7 +226,7 @@ static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
masks = ksz8->masks;
regs = ksz8->regs;
- ctrl_addr = addr + dev->reg_mib_cnt * port;
+ ctrl_addr = addr + dev->info->reg_mib_cnt * port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
@@ -343,7 +263,7 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
masks = ksz8->masks;
regs = ksz8->regs;
- addr -= dev->reg_mib_cnt;
+ addr -= dev->info->reg_mib_cnt;
ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -392,7 +312,7 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u32 data;
u32 cur;
- addr -= dev->reg_mib_cnt;
+ addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
ctrl_addr += port;
@@ -453,23 +373,21 @@ static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
mib->cnt_ptr = 0;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
/* last one in storage */
- dropped = &mib->counters[dev->mib_cnt];
+ dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->mib_cnt) {
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
- mib->cnt_ptr = 0;
- memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
}
static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
@@ -1003,18 +921,6 @@ static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
return 0;
}
-static void ksz8_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
-{
- struct ksz_device *dev = ds->priv;
- int i;
-
- for (i = 0; i < dev->mib_cnt; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN,
- dev->mib_names[i].string, ETH_GSTRING_LEN);
- }
-}
-
static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
u8 data;
@@ -1036,13 +942,13 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
int first, index, cnt;
struct ksz_port *p;
- if ((uint)port < dev->port_cnt) {
+ if ((uint)port < dev->info->port_cnt) {
first = port;
cnt = port + 1;
} else {
/* Flush all ports. */
first = 0;
- cnt = dev->port_cnt;
+ cnt = dev->info->port_cnt;
}
for (index = first; index < cnt; index++) {
p = &dev->ports[index];
@@ -1118,7 +1024,7 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
* Remove Tag flag to be changed, unless there are no
* other VLANs currently configured.
*/
- for (vid = 1; vid < dev->num_vlans; ++vid) {
+ for (vid = 1; vid < dev->info->num_vlans; ++vid) {
/* Skip the VID we are going to add or reconfigure */
if (vid == vlan->vid)
continue;
@@ -1389,7 +1295,7 @@ static int ksz8_handle_global_errata(struct dsa_switch *ds)
* KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
* the link dropping.
*/
- if (dev->ksz87xx_eee_link_erratum)
+ if (dev->info->ksz87xx_eee_link_erratum)
ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
return ret;
@@ -1402,7 +1308,7 @@ static int ksz8_setup(struct dsa_switch *ds)
int i, ret = 0;
dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
+ dev->info->num_vlans, GFP_KERNEL);
if (!dev->vlan_cache)
return -ENOMEM;
@@ -1446,7 +1352,7 @@ static int ksz8_setup(struct dsa_switch *ds)
(BROADCAST_STORM_VALUE *
BROADCAST_STORM_PROT_RATE) / 100);
- for (i = 0; i < (dev->num_vlans / 4); i++)
+ for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
/* Setup STP address for STP operation. */
@@ -1454,7 +1360,7 @@ static int ksz8_setup(struct dsa_switch *ds)
ether_addr_copy(alu.mac, eth_stp_addr);
alu.is_static = true;
alu.is_override = true;
- alu.port_forward = dev->host_mask;
+ alu.port_forward = dev->info->cpu_ports;
ksz8_w_sta_mac_table(dev, 0, &alu);
@@ -1470,15 +1376,7 @@ static void ksz8_get_caps(struct dsa_switch *ds, int port,
{
struct ksz_device *dev = ds->priv;
- if (port == dev->cpu_port) {
- __set_bit(PHY_INTERFACE_MODE_RMII,
- config->supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_MII,
- config->supported_interfaces);
- } else {
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- config->supported_interfaces);
- }
+ ksz_phylink_get_caps(ds, port, config);
config->mac_capabilities = MAC_10 | MAC_100;
@@ -1504,7 +1402,7 @@ static const struct dsa_switch_ops ksz8_switch_ops = {
.phylink_get_caps = ksz8_get_caps,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
- .get_strings = ksz8_get_strings,
+ .get_strings = ksz_get_strings,
.get_ethtool_stats = ksz_get_ethtool_stats,
.get_sset_count = ksz_sset_count,
.port_bridge_join = ksz_port_bridge_join,
@@ -1571,140 +1469,26 @@ static int ksz8_switch_detect(struct ksz_device *dev)
return 0;
}
-struct ksz_chip_data {
- u16 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
- bool ksz87xx_eee_link_erratum;
-};
-
-static const struct ksz_chip_data ksz8_switch_chips[] = {
- {
- .chip_id = 0x8795,
- .dev_name = "KSZ8795",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 5, /* total cpu and user ports */
- .ksz87xx_eee_link_erratum = true,
- },
- {
- /*
- * WARNING
- * =======
- * KSZ8794 is similar to KSZ8795, except the port map
- * contains a gap between external and CPU ports, the
- * port map is NOT continuous. The per-port register
- * map is shifted accordingly too, i.e. registers at
- * offset 0x40 are NOT used on KSZ8794 and they ARE
- * used on KSZ8795 for external port 3.
- * external cpu
- * KSZ8794 0,1,2 4
- * KSZ8795 0,1,2,3 4
- * KSZ8765 0,1,2,3 4
- */
- .chip_id = 0x8794,
- .dev_name = "KSZ8794",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 4, /* total cpu and user ports */
- .ksz87xx_eee_link_erratum = true,
- },
- {
- .chip_id = 0x8765,
- .dev_name = "KSZ8765",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 5, /* total cpu and user ports */
- .ksz87xx_eee_link_erratum = true,
- },
- {
- .chip_id = 0x8830,
- .dev_name = "KSZ8863/KSZ8873",
- .num_vlans = 16,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x4, /* can be configured as cpu port */
- .port_cnt = 3,
- },
-};
-
static int ksz8_switch_init(struct ksz_device *dev)
{
struct ksz8 *ksz8 = dev->priv;
- int i;
dev->ds->ops = &ksz8_switch_ops;
- for (i = 0; i < ARRAY_SIZE(ksz8_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz8_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = fls(chip->cpu_ports);
- dev->cpu_port = fls(chip->cpu_ports) - 1;
- dev->phy_port_cnt = dev->port_cnt - 1;
- dev->cpu_ports = chip->cpu_ports;
- dev->host_mask = chip->cpu_ports;
- dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
- chip->cpu_ports;
- dev->ksz87xx_eee_link_erratum =
- chip->ksz87xx_eee_link_erratum;
- break;
- }
- }
-
- /* no switch found */
- if (!dev->cpu_ports)
- return -ENODEV;
+ dev->cpu_port = fls(dev->info->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->info->port_cnt - 1;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
if (ksz_is_ksz88x3(dev)) {
ksz8->regs = ksz8863_regs;
ksz8->masks = ksz8863_masks;
ksz8->shifts = ksz8863_shifts;
- dev->mib_cnt = ARRAY_SIZE(ksz88xx_mib_names);
- dev->mib_names = ksz88xx_mib_names;
} else {
ksz8->regs = ksz8795_regs;
ksz8->masks = ksz8795_masks;
ksz8->shifts = ksz8795_shifts;
- dev->mib_cnt = ARRAY_SIZE(ksz87xx_mib_names);
- dev->mib_names = ksz87xx_mib_names;
- }
-
- dev->reg_mib_cnt = MIB_COUNTER_NUM;
-
- dev->ports = devm_kzalloc(dev->dev,
- dev->port_cnt * sizeof(struct ksz_port),
- GFP_KERNEL);
- if (!dev->ports)
- return -ENOMEM;
- for (i = 0; i < dev->port_cnt; i++) {
- mutex_init(&dev->ports[i].mib.cnt_mutex);
- dev->ports[i].mib.counters =
- devm_kzalloc(dev->dev,
- sizeof(u64) *
- (dev->mib_cnt + 1),
- GFP_KERNEL);
- if (!dev->ports[i].mib.counters)
- return -ENOMEM;
}
- /* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
-
/* We rely on software untagging on the CPU port, so that we
* can support both tagged and untagged VLANs
*/
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index 5f8d94aee774..961a74c359a8 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -34,6 +34,7 @@ KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
static int ksz8795_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config;
+ const struct ksz_chip_data *chip;
struct device *ddev = &spi->dev;
struct regmap_config rc;
struct ksz_device *dev;
@@ -50,10 +51,15 @@ static int ksz8795_spi_probe(struct spi_device *spi)
if (!dev)
return -ENOMEM;
- regmap_config = device_get_match_data(ddev);
- if (!regmap_config)
+ chip = device_get_match_data(ddev);
+ if (!chip)
return -EINVAL;
+ if (chip->chip_id == KSZ8830_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
+ else
+ regmap_config = ksz8795_regmap_config;
+
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
rc = regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
@@ -113,11 +119,26 @@ static void ksz8795_spi_shutdown(struct spi_device *spi)
}
static const struct of_device_id ksz8795_dt_ids[] = {
- { .compatible = "microchip,ksz8765", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8794", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8795", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8863", .data = &ksz8863_regmap_config },
- { .compatible = "microchip,ksz8873", .data = &ksz8863_regmap_config },
+ {
+ .compatible = "microchip,ksz8765",
+ .data = &ksz_switch_chips[KSZ8765]
+ },
+ {
+ .compatible = "microchip,ksz8794",
+ .data = &ksz_switch_chips[KSZ8794]
+ },
+ {
+ .compatible = "microchip,ksz8795",
+ .data = &ksz_switch_chips[KSZ8795]
+ },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5883fa7edda2..b6f99e641dca 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -206,8 +206,14 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
}
static const struct of_device_id ksz8863_dt_ids[] = {
- { .compatible = "microchip,ksz8863" },
- { .compatible = "microchip,ksz8873" },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
{ },
};
MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 61dd0fa97748..ab40b700cf1a 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -23,48 +23,6 @@
#define NEW_XMII BIT(1)
#define IS_9893 BIT(2)
-static const struct {
- int index;
- char string[ETH_GSTRING_LEN];
-} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
- { 0x00, "rx_hi" },
- { 0x01, "rx_undersize" },
- { 0x02, "rx_fragments" },
- { 0x03, "rx_oversize" },
- { 0x04, "rx_jabbers" },
- { 0x05, "rx_symbol_err" },
- { 0x06, "rx_crc_err" },
- { 0x07, "rx_align_err" },
- { 0x08, "rx_mac_ctrl" },
- { 0x09, "rx_pause" },
- { 0x0A, "rx_bcast" },
- { 0x0B, "rx_mcast" },
- { 0x0C, "rx_ucast" },
- { 0x0D, "rx_64_or_less" },
- { 0x0E, "rx_65_127" },
- { 0x0F, "rx_128_255" },
- { 0x10, "rx_256_511" },
- { 0x11, "rx_512_1023" },
- { 0x12, "rx_1024_1522" },
- { 0x13, "rx_1523_2000" },
- { 0x14, "rx_2001" },
- { 0x15, "tx_hi" },
- { 0x16, "tx_late_col" },
- { 0x17, "tx_pause" },
- { 0x18, "tx_bcast" },
- { 0x19, "tx_mcast" },
- { 0x1A, "tx_ucast" },
- { 0x1B, "tx_deferred" },
- { 0x1C, "tx_total_col" },
- { 0x1D, "tx_exc_col" },
- { 0x1E, "tx_single_col" },
- { 0x1F, "tx_mult_col" },
- { 0x80, "rx_total" },
- { 0x81, "tx_total" },
- { 0x82, "rx_discards" },
- { 0x83, "tx_discards" },
-};
-
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -100,7 +58,7 @@ static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu)
/* Cache the per-port MTU setting */
dev->ports[port].max_frame = frame_size;
- for (i = 0; i < dev->port_cnt; i++)
+ for (i = 0; i < dev->info->port_cnt; i++)
max_frame = max(max_frame, dev->ports[i].max_frame);
return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
@@ -287,7 +245,7 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- addr = ksz9477_mib_names[addr].index;
+ addr = dev->info->mib_names[addr].index;
ksz9477_r_mib_cnt(dev, port, addr, cnt);
}
@@ -316,9 +274,6 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
mutex_unlock(&mib->cnt_mutex);
-
- mib->cnt_ptr = 0;
- memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
}
static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
@@ -400,20 +355,6 @@ static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
return 0;
}
-static void ksz9477_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
-{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string,
- ETH_GSTRING_LEN);
- }
-}
-
static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
u8 member)
{
@@ -434,7 +375,7 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
- if (port < dev->port_cnt) {
+ if (port < dev->info->port_cnt) {
/* flush individual port */
ksz_pread8(dev, port, P_STP_CTRL, &data);
if (!(data & PORT_LEARN_DISABLE))
@@ -756,7 +697,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
mutex_lock(&dev->alu_mutex);
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
data = (index << ALU_STAT_INDEX_S) |
ALU_STAT_READ | ALU_STAT_START;
@@ -787,7 +728,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics) {
+ if (index == dev->info->num_statics) {
err = -ENOSPC;
goto exit;
}
@@ -832,7 +773,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
mutex_lock(&dev->alu_mutex);
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
data = (index << ALU_STAT_INDEX_S) |
ALU_STAT_READ | ALU_STAT_START;
@@ -861,7 +802,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics)
+ if (index == dev->info->num_statics)
goto exit;
/* clear port */
@@ -903,7 +844,7 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
* Check if any of the port is already set for sniffing
* If yes, instruct the user to remove the previous entry & exit
*/
- for (p = 0; p < dev->port_cnt; p++) {
+ for (p = 0; p < dev->info->port_cnt; p++) {
/* Skip the current sniffing port */
if (p == mirror->to_local_port)
continue;
@@ -946,7 +887,7 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
/* Check if any of the port is still referring to sniffer port */
- for (p = 0; p < dev->port_cnt; p++) {
+ for (p = 0; p < dev->info->port_cnt; p++) {
ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
@@ -1156,6 +1097,15 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}
+static void ksz9477_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ ksz_phylink_get_caps(ds, port, config);
+
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
+}
+
static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct ksz_port *p = &dev->ports[port];
@@ -1194,7 +1144,7 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
false);
- if (dev->phy_errata_9477)
+ if (dev->info->phy_errata_9477)
ksz9477_phy_errata_setup(dev, port);
} else {
/* force flow control */
@@ -1259,8 +1209,9 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
struct ksz_port *p;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
- if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (dsa_is_cpu_port(ds, i) &&
+ (dev->info->cpu_ports & (1 << i))) {
phy_interface_t interface;
const char *prev_msg;
const char *prev_mode;
@@ -1304,7 +1255,7 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
}
}
- for (i = 0; i < dev->port_cnt; i++) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
p = &dev->ports[i];
@@ -1328,7 +1279,7 @@ static int ksz9477_setup(struct dsa_switch *ds)
int ret = 0;
dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
+ dev->info->num_vlans, GFP_KERNEL);
if (!dev->vlan_cache)
return -ENOMEM;
@@ -1380,8 +1331,9 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.phy_read = ksz9477_phy_read16,
.phy_write = ksz9477_phy_write16,
.phylink_mac_link_down = ksz_mac_link_down,
+ .phylink_get_caps = ksz9477_get_caps,
.port_enable = ksz_enable_port,
- .get_strings = ksz9477_get_strings,
+ .get_strings = ksz_get_strings,
.get_ethtool_stats = ksz_get_ethtool_stats,
.get_sset_count = ksz_sset_count,
.port_bridge_join = ksz_port_bridge_join,
@@ -1470,109 +1422,11 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
return 0;
}
-struct ksz_chip_data {
- u32 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
- bool phy_errata_9477;
-};
-
-static const struct ksz_chip_data ksz9477_switch_chips[] = {
- {
- .chip_id = 0x00947700,
- .dev_name = "KSZ9477",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
- {
- .chip_id = 0x00989700,
- .dev_name = "KSZ9897",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
- {
- .chip_id = 0x00989300,
- .dev_name = "KSZ9893",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x07, /* can be configured as cpu port */
- .port_cnt = 3, /* total port count */
- },
- {
- .chip_id = 0x00956700,
- .dev_name = "KSZ9567",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
-};
-
static int ksz9477_switch_init(struct ksz_device *dev)
{
- int i;
-
dev->ds->ops = &ksz9477_switch_ops;
- for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz9477_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
- dev->cpu_ports = chip->cpu_ports;
- dev->phy_errata_9477 = chip->phy_errata_9477;
-
- break;
- }
- }
-
- /* no switch found */
- if (!dev->port_cnt)
- return -ENODEV;
-
- dev->port_mask = (1 << dev->port_cnt) - 1;
-
- dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
- dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
-
- dev->ports = devm_kzalloc(dev->dev,
- dev->port_cnt * sizeof(struct ksz_port),
- GFP_KERNEL);
- if (!dev->ports)
- return -ENOMEM;
- for (i = 0; i < dev->port_cnt; i++) {
- spin_lock_init(&dev->ports[i].mib.stats64_lock);
- mutex_init(&dev->ports[i].mib.cnt_mutex);
- dev->ports[i].mib.counters =
- devm_kzalloc(dev->dev,
- sizeof(u64) *
- (TOTAL_SWITCH_COUNTER_NUM + 1),
- GFP_KERNEL);
- if (!dev->ports[i].mib.counters)
- return -ENOMEM;
- }
-
- /* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index cbc0b20e7e1b..faa3163c86b0 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -87,12 +87,30 @@ static const struct i2c_device_id ksz9477_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
static const struct of_device_id ksz9477_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- { .compatible = "microchip,ksz9893" },
- { .compatible = "microchip,ksz9563" },
- { .compatible = "microchip,ksz9567" },
- { .compatible = "microchip,ksz8563" },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 87ca464dad32..1bc8b0cbe458 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -86,12 +86,30 @@ static void ksz9477_spi_shutdown(struct spi_device *spi)
}
static const struct of_device_id ksz9477_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- { .compatible = "microchip,ksz9893" },
- { .compatible = "microchip,ksz9563" },
- { .compatible = "microchip,ksz8563" },
- { .compatible = "microchip,ksz9567" },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 10f127b09e58..9ca8c8d7740f 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -14,12 +14,15 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#define MIB_COUNTER_NUM 0x20
+
struct ksz_stats_raw {
u64 rx_hi;
u64 rx_undersize;
@@ -59,6 +62,403 @@ struct ksz_stats_raw {
u64 tx_discards;
};
+static const struct ksz_mib_names ksz88xx_mib_names[] = {
+ { 0x00, "rx" },
+ { 0x01, "rx_hi" },
+ { 0x02, "rx_undersize" },
+ { 0x03, "rx_fragments" },
+ { 0x04, "rx_oversize" },
+ { 0x05, "rx_jabbers" },
+ { 0x06, "rx_symbol_err" },
+ { 0x07, "rx_crc_err" },
+ { 0x08, "rx_align_err" },
+ { 0x09, "rx_mac_ctrl" },
+ { 0x0a, "rx_pause" },
+ { 0x0b, "rx_bcast" },
+ { 0x0c, "rx_mcast" },
+ { 0x0d, "rx_ucast" },
+ { 0x0e, "rx_64_or_less" },
+ { 0x0f, "rx_65_127" },
+ { 0x10, "rx_128_255" },
+ { 0x11, "rx_256_511" },
+ { 0x12, "rx_512_1023" },
+ { 0x13, "rx_1024_1522" },
+ { 0x14, "tx" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1a, "tx_ucast" },
+ { 0x1b, "tx_deferred" },
+ { 0x1c, "tx_total_col" },
+ { 0x1d, "tx_exc_col" },
+ { 0x1e, "tx_single_col" },
+ { 0x1f, "tx_mult_col" },
+ { 0x100, "rx_discards" },
+ { 0x101, "tx_discards" },
+};
+
+static const struct ksz_mib_names ksz9477_mib_names[] = {
+ { 0x00, "rx_hi" },
+ { 0x01, "rx_undersize" },
+ { 0x02, "rx_fragments" },
+ { 0x03, "rx_oversize" },
+ { 0x04, "rx_jabbers" },
+ { 0x05, "rx_symbol_err" },
+ { 0x06, "rx_crc_err" },
+ { 0x07, "rx_align_err" },
+ { 0x08, "rx_mac_ctrl" },
+ { 0x09, "rx_pause" },
+ { 0x0A, "rx_bcast" },
+ { 0x0B, "rx_mcast" },
+ { 0x0C, "rx_ucast" },
+ { 0x0D, "rx_64_or_less" },
+ { 0x0E, "rx_65_127" },
+ { 0x0F, "rx_128_255" },
+ { 0x10, "rx_256_511" },
+ { 0x11, "rx_512_1023" },
+ { 0x12, "rx_1024_1522" },
+ { 0x13, "rx_1523_2000" },
+ { 0x14, "rx_2001" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1A, "tx_ucast" },
+ { 0x1B, "tx_deferred" },
+ { 0x1C, "tx_total_col" },
+ { 0x1D, "tx_exc_col" },
+ { 0x1E, "tx_single_col" },
+ { 0x1F, "tx_mult_col" },
+ { 0x80, "rx_total" },
+ { 0x81, "tx_total" },
+ { 0x82, "rx_discards" },
+ { 0x83, "tx_discards" },
+};
+
+const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8795] = {
+ .chip_id = KSZ8795_CHIP_ID,
+ .dev_name = "KSZ8795",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8794] = {
+ /* WARNING
+ * =======
+ * KSZ8794 is similar to KSZ8795, except the port map
+ * contains a gap between external and CPU ports, the
+ * port map is NOT continuous. The per-port register
+ * map is shifted accordingly too, i.e. registers at
+ * offset 0x40 are NOT used on KSZ8794 and they ARE
+ * used on KSZ8795 for external port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ * port_cnt is configured as 5, even though it is 4
+ */
+ .chip_id = KSZ8794_CHIP_ID,
+ .dev_name = "KSZ8794",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, false, false},
+ },
+
+ [KSZ8765] = {
+ .chip_id = KSZ8765_CHIP_ID,
+ .dev_name = "KSZ8765",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8830] = {
+ .chip_id = KSZ8830_CHIP_ID,
+ .dev_name = "KSZ8863/KSZ8873",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
+ [KSZ9477] = {
+ .chip_id = KSZ9477_CHIP_ID,
+ .dev_name = "KSZ9477",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false,
+ false, true, false},
+ .supports_rmii = {false, false, false, false,
+ false, true, false},
+ .supports_rgmii = {false, false, false, false,
+ false, true, false},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ },
+
+ [KSZ9897] = {
+ .chip_id = KSZ9897_CHIP_ID,
+ .dev_name = "KSZ9897",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ },
+
+ [KSZ9893] = {
+ .chip_id = KSZ9893_CHIP_ID,
+ .dev_name = "KSZ9893",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
+ [KSZ9567] = {
+ .chip_id = KSZ9567_CHIP_ID,
+ .dev_name = "KSZ9567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ },
+
+ [LAN9370] = {
+ .chip_id = LAN9370_CHIP_ID,
+ .dev_name = "LAN9370",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [LAN9371] = {
+ .chip_id = LAN9371_CHIP_ID,
+ .dev_name = "LAN9371",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false, true, true},
+ .supports_rmii = {false, false, false, false, true, true},
+ .supports_rgmii = {false, false, false, false, true, true},
+ .internal_phy = {true, true, true, true, false, false},
+ },
+
+ [LAN9372] = {
+ .chip_id = LAN9372_CHIP_ID,
+ .dev_name = "LAN9372",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ },
+
+ [LAN9373] = {
+ .chip_id = LAN9373_CHIP_ID,
+ .dev_name = "LAN9373",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x38, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, false,
+ false, false, true, true},
+ },
+
+ [LAN9374] = {
+ .chip_id = LAN9374_CHIP_ID,
+ .dev_name = "LAN9374",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ },
+};
+EXPORT_SYMBOL_GPL(ksz_switch_chips);
+
+static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz_switch_chips[i];
+
+ if (chip->chip_id == prod_num)
+ return chip;
+ }
+
+ return NULL;
+}
+
+static int ksz_check_device_id(struct ksz_device *dev)
+{
+ const struct ksz_chip_data *dt_chip_data;
+
+ dt_chip_data = of_device_get_match_data(dev->dev);
+
+ /* Check for Device Tree and Chip ID */
+ if (dt_chip_data->chip_id != dev->chip_id) {
+ dev_err(dev->dev,
+ "Device tree specifies chip %s but found %s, please fix it!\n",
+ dt_chip_data->dev_name, dev->info->dev_name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ksz_device *dev = ds->priv;
+
+ config->legacy_pre_march2020 = false;
+
+ if (dev->info->supports_mii[port])
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+
+ if (dev->info->supports_rmii[port])
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+
+ if (dev->info->supports_rgmii[port])
+ phy_interface_set_rgmii(config->supported_interfaces);
+
+ if (dev->info->internal_phy[port])
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+}
+EXPORT_SYMBOL_GPL(ksz_phylink_get_caps);
+
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
{
struct rtnl_link_stats64 *stats;
@@ -116,6 +516,22 @@ void ksz_get_stats64(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL_GPL(ksz_get_stats64);
+void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < dev->info->mib_cnt; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN,
+ dev->info->mib_names[i].string, ETH_GSTRING_LEN);
+ }
+}
+EXPORT_SYMBOL_GPL(ksz_get_strings);
+
void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
@@ -181,17 +597,17 @@ static void port_r_cnt(struct ksz_device *dev, int port)
u64 *dropped;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
/* last one in storage */
- dropped = &mib->counters[dev->mib_cnt];
+ dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->mib_cnt) {
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
@@ -207,7 +623,7 @@ static void ksz_mib_read_work(struct work_struct *work)
struct ksz_port *p;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
if (dsa_is_unused_port(dev->ds, i))
continue;
@@ -222,7 +638,7 @@ static void ksz_mib_read_work(struct work_struct *work)
const struct dsa_port *dp = dsa_to_port(dev->ds, i);
if (!netif_carrier_ok(dp->slave))
- mib->cnt_ptr = dev->reg_mib_cnt;
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
}
port_r_cnt(dev, i);
p->read = false;
@@ -242,8 +658,14 @@ void ksz_init_mib_timer(struct ksz_device *dev)
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
- for (i = 0; i < dev->port_cnt; i++)
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ struct ksz_port_mib *mib = &dev->ports[i].mib;
+
dev->dev_ops->port_init_cnt(dev, i);
+
+ mib->cnt_ptr = 0;
+ memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64));
+ }
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
@@ -289,7 +711,7 @@ int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
if (sset != ETH_SS_STATS)
return 0;
- return dev->mib_cnt;
+ return dev->info->mib_cnt;
}
EXPORT_SYMBOL_GPL(ksz_sset_count);
@@ -304,9 +726,9 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
/* Only read dropped counters if no link. */
if (!netif_carrier_ok(dp->slave))
- mib->cnt_ptr = dev->reg_mib_cnt;
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
port_r_cnt(dev, port);
- memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
+ memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
@@ -382,7 +804,7 @@ int ksz_port_mdb_add(struct dsa_switch *ds, int port,
int empty = 0;
alu.port_forward = 0;
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
/* Found one already in static MAC table. */
if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
@@ -395,11 +817,11 @@ int ksz_port_mdb_add(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics && !empty)
+ if (index == dev->info->num_statics && !empty)
return -ENOSPC;
/* add entry */
- if (index == dev->num_statics) {
+ if (index == dev->info->num_statics) {
index = empty - 1;
memset(&alu, 0, sizeof(alu));
memcpy(alu.mac, mdb->addr, ETH_ALEN);
@@ -426,7 +848,7 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port,
struct alu_struct alu;
int index;
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
/* Found one already in static MAC table. */
if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
@@ -436,7 +858,7 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics)
+ if (index == dev->info->num_statics)
goto exit;
/* clear port */
@@ -537,10 +959,12 @@ EXPORT_SYMBOL(ksz_switch_alloc);
int ksz_switch_register(struct ksz_device *dev,
const struct ksz_dev_ops *ops)
{
+ const struct ksz_chip_data *info;
struct device_node *port, *ports;
phy_interface_t interface;
unsigned int port_num;
int ret;
+ int i;
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
@@ -567,14 +991,45 @@ int ksz_switch_register(struct ksz_device *dev,
if (dev->dev_ops->detect(dev))
return -EINVAL;
+ info = ksz_lookup_info(dev->chip_id);
+ if (!info)
+ return -ENODEV;
+
+ /* Update the compatible info with the probed one */
+ dev->info = info;
+
+ ret = ksz_check_device_id(dev);
+ if (ret)
+ return ret;
+
ret = dev->dev_ops->init(dev);
if (ret)
return ret;
+ dev->ports = devm_kzalloc(dev->dev,
+ dev->info->port_cnt * sizeof(struct ksz_port),
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ spin_lock_init(&dev->ports[i].mib.stats64_lock);
+ mutex_init(&dev->ports[i].mib.cnt_mutex);
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) * (dev->info->mib_cnt + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+ }
+
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->info->port_cnt;
+
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
- for (port_num = 0; port_num < dev->port_cnt; ++port_num)
+ for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
ret = of_get_phy_mode(dev->dev->of_node, &interface);
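For readers following the ksz_common.c rework above: the new ksz_switch_chips[] table plus ksz_lookup_info() replace a pile of per-device fields with one const, chip_id-keyed entry that the rest of the driver reads through dev->info. A minimal standalone sketch of that table-driven lookup pattern (the demo_* types below are simplified illustrations, not the driver's real structures) could look like:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct ksz_chip_data (illustration only). */
struct demo_chip_data {
	unsigned int chip_id;
	const char *dev_name;
	int port_cnt;
	bool internal_phy[8];
};

static const struct demo_chip_data demo_chips[] = {
	{ .chip_id = 0x8795, .dev_name = "KSZ8795", .port_cnt = 5,
	  .internal_phy = { true, true, true, true, false } },
	{ .chip_id = 0x00989300, .dev_name = "KSZ9893", .port_cnt = 3,
	  .internal_phy = { true, true, false } },
};

/* Same linear search idea as ksz_lookup_info(): match the probed product ID. */
static const struct demo_chip_data *demo_lookup(unsigned int prod_num)
{
	size_t i;

	for (i = 0; i < sizeof(demo_chips) / sizeof(demo_chips[0]); i++)
		if (demo_chips[i].chip_id == prod_num)
			return &demo_chips[i];
	return NULL;
}

int main(void)
{
	const struct demo_chip_data *info = demo_lookup(0x8795);
	int port;

	if (!info)
		return 1;
	for (port = 0; port < info->port_cnt; port++)
		printf("%s port %d: %s PHY\n", info->dev_name, port,
		       info->internal_phy[port] ? "internal" : "external");
	return 0;
}

ksz_phylink_get_caps() above then only consults dev->info->supports_mii/rmii/rgmii and internal_phy for the given port, which is the same per-port-flag idea shown in the sketch.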
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 28cda79b090f..8500eaedad67 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -14,6 +14,8 @@
#include <linux/regmap.h>
#include <net/dsa.h>
+#define KSZ_MAX_NUM_PORTS 8
+
struct vlan_table {
u32 table[3];
};
@@ -26,6 +28,30 @@ struct ksz_port_mib {
struct spinlock stats64_lock;
};
+struct ksz_mib_names {
+ int index;
+ char string[ETH_GSTRING_LEN];
+};
+
+struct ksz_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+ bool phy_errata_9477;
+ bool ksz87xx_eee_link_erratum;
+ const struct ksz_mib_names *mib_names;
+ int mib_cnt;
+ u8 reg_mib_cnt;
+ bool supports_mii[KSZ_MAX_NUM_PORTS];
+ bool supports_rmii[KSZ_MAX_NUM_PORTS];
+ bool supports_rgmii[KSZ_MAX_NUM_PORTS];
+ bool internal_phy[KSZ_MAX_NUM_PORTS];
+};
+
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
int stp_state;
@@ -47,7 +73,7 @@ struct ksz_port {
struct ksz_device {
struct dsa_switch *ds;
struct ksz_platform_data *pdata;
- const char *name;
+ const struct ksz_chip_data *info;
struct mutex dev_mutex; /* device access */
struct mutex regmap_mutex; /* regmap access */
@@ -64,20 +90,9 @@ struct ksz_device {
/* chip specific data */
u32 chip_id;
- int num_vlans;
- int num_alus;
- int num_statics;
int cpu_port; /* port connected to CPU */
- int cpu_ports; /* port bitmap can be cpu port */
int phy_port_cnt;
- int port_cnt;
- u8 reg_mib_cnt;
- int mib_cnt;
- const struct mib_names *mib_names;
phy_interface_t compat_interface;
- u32 regs_size;
- bool phy_errata_9477;
- bool ksz87xx_eee_link_erratum;
bool synclko_125;
bool synclko_disable;
@@ -89,11 +104,42 @@ struct ksz_device {
u16 mirror_rx;
u16 mirror_tx;
u32 features; /* chip specific features */
- u32 overrides; /* chip functions set by user */
- u16 host_mask;
u16 port_mask;
};
+/* List of supported models */
+enum ksz_model {
+ KSZ8795,
+ KSZ8794,
+ KSZ8765,
+ KSZ8830,
+ KSZ9477,
+ KSZ9897,
+ KSZ9893,
+ KSZ9567,
+ LAN9370,
+ LAN9371,
+ LAN9372,
+ LAN9373,
+ LAN9374,
+};
+
+enum ksz_chip_id {
+ KSZ8795_CHIP_ID = 0x8795,
+ KSZ8794_CHIP_ID = 0x8794,
+ KSZ8765_CHIP_ID = 0x8765,
+ KSZ8830_CHIP_ID = 0x8830,
+ KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9897_CHIP_ID = 0x00989700,
+ KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9567_CHIP_ID = 0x00956700,
+ LAN9370_CHIP_ID = 0x00937000,
+ LAN9371_CHIP_ID = 0x00937100,
+ LAN9372_CHIP_ID = 0x00937200,
+ LAN9373_CHIP_ID = 0x00937300,
+ LAN9374_CHIP_ID = 0x00937400,
+};
+
struct alu_struct {
/* entry 1 */
u8 is_static:1;
@@ -154,6 +200,9 @@ void ksz_init_mib_timer(struct ksz_device *dev);
void ksz_r_mib_stats64(struct ksz_device *dev, int port);
void ksz_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s);
+void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+extern const struct ksz_chip_data ksz_switch_chips[];
/* Common DSA access functions */
@@ -180,6 +229,8 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db);
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf);
/* Common register access functions */
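The new ksz_get_strings()/ksz_sset_count() pair declared above follows the usual ethtool contract: get_sset_count() reports dev->info->mib_cnt and get_strings() packs that many fixed-width names into one flat buffer. A rough standalone sketch of that packing, using a 32-byte width to mirror ETH_GSTRING_LEN and a few counter names from the table this patch adds (demo_* names are illustrative only):

#include <stdio.h>
#include <string.h>

#define DEMO_GSTRING_LEN 32	/* mirrors ETH_GSTRING_LEN */

static const char * const demo_mib_names[] = { "rx_hi", "rx_crc_err", "tx_pause" };
#define DEMO_MIB_CNT (sizeof(demo_mib_names) / sizeof(demo_mib_names[0]))

/* Pack fixed-width names the way ksz_get_strings() fills ethtool's buffer. */
static void demo_get_strings(char *buf)
{
	size_t i;

	for (i = 0; i < DEMO_MIB_CNT; i++)
		strncpy(buf + i * DEMO_GSTRING_LEN, demo_mib_names[i],
			DEMO_GSTRING_LEN - 1);
}

int main(void)
{
	char buf[DEMO_MIB_CNT * DEMO_GSTRING_LEN] = { 0 };
	size_t i;

	demo_get_strings(buf);
	for (i = 0; i < DEMO_MIB_CNT; i++)
		printf("stat %zu: %s\n", i, buf + i * DEMO_GSTRING_LEN);
	return 0;
}

The matching read path is ksz_get_ethtool_stats(), which copies dev->info->mib_cnt u64 counters in the same order.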
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index d38258a39d07..3e07dc39007a 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -45,24 +45,26 @@ static struct net_device *felix_classify_db(struct dsa_db db)
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
-static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
{
struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int key_length, upstream, err;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int key_length, err;
key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
- upstream = dsa_upstream_port(ds, port);
outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
GFP_KERNEL);
if (!outer_tagging_rule)
return -ENOMEM;
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
- outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port);
+ outer_tagging_rule->id.cookie = cookie;
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -83,16 +85,19 @@ static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid)
return err;
}
-static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot_vcap_block *block_vcap_es0;
- struct ocelot *ocelot = &felix->ocelot;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
block_vcap_es0 = &ocelot->block[VCAP_ES0];
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
- port, false);
+ cookie, false);
if (!outer_tagging_rule)
return -ENOENT;
@@ -102,12 +107,14 @@ static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid)
/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
* rules for steering those tagged packets towards the correct destination port
*/
-static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
+ u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int upstream, err;
+ unsigned long cpu_ports = dsa_cpu_ports(ds);
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int err;
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!untagging_rule)
@@ -119,14 +126,14 @@ static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid)
return -ENOMEM;
}
- upstream = dsa_upstream_port(ds, port);
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
- untagging_rule->ingress_port_mask = BIT(upstream);
+ untagging_rule->ingress_port_mask = cpu_ports;
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
- untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
+ untagging_rule->id.cookie = cookie;
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -143,11 +150,13 @@ static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid)
return err;
}
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
+
redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = BIT(upstream);
+ redirect_rule->ingress_port_mask = cpu_ports;
redirect_rule->pag = port;
redirect_rule->prio = 1;
- redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
+ redirect_rule->id.cookie = cookie;
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -165,19 +174,21 @@ static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid)
return 0;
}
-static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
int err;
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- port, false);
+ cookie, false);
if (!untagging_rule)
return -ENOENT;
@@ -185,8 +196,9 @@ static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid)
if (err)
return err;
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- port, false);
+ cookie, false);
if (!redirect_rule)
return -ENOENT;
@@ -196,7 +208,7 @@ static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid)
static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
u16 flags)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
int err;
/* tag_8021q.c assumes we are implementing this via port VLAN
@@ -206,74 +218,50 @@ static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
if (!dsa_is_user_port(ds, port))
return 0;
- err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
- if (err)
- return err;
-
- err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid);
- if (err) {
- felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
- return err;
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
}
+ err = felix_tag_8021q_vlan_add_tx(ds, port, vid);
+ if (err)
+ goto add_tx_failed;
+
return 0;
+
+add_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+
+ return err;
}
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
int err;
if (!dsa_is_user_port(ds, port))
return 0;
- err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
- if (err)
- return err;
-
- err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid);
- if (err) {
- felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
- return err;
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
}
- return 0;
-}
-
-/* Alternatively to using the NPI functionality, that same hardware MAC
- * connected internally to the enetc or fman DSA master can be configured to
- * use the software-defined tag_8021q frame format. As far as the hardware is
- * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
- * module are now disconnected from it, but can still be accessed through
- * register-based MMIO.
- */
-static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
-{
- mutex_lock(&ocelot->fwd_domain_lock);
-
- ocelot_port_set_dsa_8021q_cpu(ocelot, port);
-
- /* Overwrite PGID_CPU with the non-tagging port */
- ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
-
- ocelot_apply_bridge_fwd_mask(ocelot, true);
-
- mutex_unlock(&ocelot->fwd_domain_lock);
-}
-
-static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
-{
- mutex_lock(&ocelot->fwd_domain_lock);
-
- ocelot_port_unset_dsa_8021q_cpu(ocelot, port);
+ err = felix_tag_8021q_vlan_del_tx(ds, port, vid);
+ if (err)
+ goto del_tx_failed;
- /* Restore PGID_CPU */
- ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
- PGID_CPU);
+ return 0;
- ocelot_apply_bridge_fwd_mask(ocelot, true);
+del_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
- mutex_unlock(&ocelot->fwd_domain_lock);
+ return err;
}
static int felix_trap_get_cpu_port(struct dsa_switch *ds,
@@ -434,6 +422,13 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
return BIT(ocelot->num_phys_ports);
}
+/* Alternatively to using the NPI functionality, that same hardware MAC
+ * connected internally to the enetc or fman DSA master can be configured to
+ * use the software-defined tag_8021q frame format. As far as the hardware is
+ * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
+ * module are now disconnected from it, but can still be accessed through
+ * register-based MMIO.
+ */
static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
@@ -443,21 +438,18 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
static int felix_tag_8021q_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
- struct dsa_port *dp, *cpu_dp;
+ struct dsa_port *dp;
int err;
err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
return err;
- dsa_switch_for_each_cpu_port(cpu_dp, ds) {
- felix_8021q_cpu_port_init(ocelot, cpu_dp->index);
-
- /* TODO we could support multiple CPU ports in tag_8021q mode */
- break;
- }
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
+ dp->cpu_dp->index);
- dsa_switch_for_each_available_port(dp, ds) {
+ dsa_switch_for_each_available_port(dp, ds)
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
@@ -471,7 +463,6 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
- }
/* The ownership of the CPU port module's queues might have just been
* transferred to the tag_8021q tagger from the NPI-based tagger.
@@ -488,9 +479,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
static void felix_tag_8021q_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
- struct dsa_port *dp, *cpu_dp;
+ struct dsa_port *dp;
- dsa_switch_for_each_available_port(dp, ds) {
+ dsa_switch_for_each_available_port(dp, ds)
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
@@ -498,14 +489,9 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds)
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
dp->index);
- }
- dsa_switch_for_each_cpu_port(cpu_dp, ds) {
- felix_8021q_cpu_port_deinit(ocelot, cpu_dp->index);
-
- /* TODO we could support multiple CPU ports in tag_8021q mode */
- break;
- }
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
dsa_tag_8021q_unregister(ds);
}
@@ -534,6 +520,9 @@ static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);
+
+ val = bc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC);
}
static void
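The felix.c changes above make the tag_8021q VCAP rules per {user port, CPU port} pair: the RX rule cookie now takes the upstream port as an extra argument, so each assigned CPU port gets its own ES0 entry instead of a single hard-coded upstream. A hedged sketch of why that extra argument matters; the encoding below is only an assumed example, not the real OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN() definition from the ocelot/felix headers:

#include <stdio.h>

#define DEMO_NUM_PHYS_PORTS 6

/*
 * Assumed encoding: one distinct cookie per (user port, upstream CPU port)
 * pair, so rules installed towards different CPU ports never collide.
 * The real macro may encode this differently.
 */
static unsigned long demo_rxvlan_cookie(int port, int upstream)
{
	return (unsigned long)upstream * DEMO_NUM_PHYS_PORTS + port;
}

int main(void)
{
	int cpu_ports[] = { 4, 5 };	/* e.g. two CPU ports handling user traffic */
	int user_port = 1;
	size_t i;

	for (i = 0; i < sizeof(cpu_ports) / sizeof(cpu_ports[0]); i++)
		printf("user port %d via cpu port %d -> cookie %lu\n",
		       user_port, cpu_ports[i],
		       demo_rxvlan_cookie(user_port, cpu_ports[i]));
	return 0;
}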
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 98caca4317d7..570d0204b7be 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2162,7 +2162,8 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
if (ocelot->npi >= 0)
mask |= BIT(ocelot->npi);
else
- mask |= ocelot_get_dsa_8021q_cpu_mask(ocelot);
+ mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
}
/* Calculate the minimum link speed, among the ports that are
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 1a3406b9e64c..25f88022b9e4 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -1653,29 +1653,37 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(priv->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ mutex_lock(&priv->map_lock);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
if (ret)
- return ret;
+ goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- ret = regmap_write(priv->map, reg, 0);
+ ret = regmap_write(priv->map_nolock, reg, 0);
if (ret) {
dev_err(priv->dev,
"failed to write PHY%d reg %04x @ %04x, ret %d\n",
phy, regnum, reg, ret);
- return ret;
+ goto out;
}
- ret = regmap_read(priv->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+ ret = regmap_read(priv->map_nolock, RTL8366RB_PHY_ACCESS_DATA_REG,
+ &val);
if (ret)
- return ret;
+ goto out;
+
+ ret = val;
dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
phy, regnum, reg, val);
- return val;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
}
static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
@@ -1687,21 +1695,26 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(priv->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ mutex_lock(&priv->map_lock);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
if (ret)
- return ret;
+ goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
phy, regnum, reg, val);
- ret = regmap_write(priv->map, reg, val);
+ ret = regmap_write(priv->map_nolock, reg, val);
if (ret)
- return ret;
+ goto out;
- return 0;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
}
static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
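The rtl8366rb change above moves the indirect PHY access onto a non-locking regmap and serializes the whole multi-register sequence under priv->map_lock, releasing it through a single out: label. A simplified standalone sketch of that lock-around-sequence pattern; the register numbers and demo_* helpers are placeholders, not the RTL8366RB register layout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_map_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int demo_regs[256];

/* Stand-ins for the regmap_write()/regmap_read() steps of the sequence. */
static int demo_write(unsigned int reg, unsigned int val)
{
	demo_regs[reg & 0xff] = val;
	return 0;
}

static int demo_read(unsigned int reg, unsigned int *val)
{
	*val = demo_regs[reg & 0xff];
	return 0;
}

/*
 * Indirect PHY read: select "read" mode, write the address register, then
 * fetch the data register -- all under one lock, released via one "out"
 * label, as the patch does with priv->map_lock and priv->map_nolock.
 */
static int demo_phy_read(int phy, int regnum)
{
	unsigned int val;
	int ret;

	pthread_mutex_lock(&demo_map_lock);

	ret = demo_write(0x00, 0x0);		/* access-ctrl: read mode */
	if (ret)
		goto out;

	ret = demo_write(0x80 | (phy << 4) | regnum, 0);
	if (ret)
		goto out;

	ret = demo_read(0x01, &val);		/* access-data register */
	if (ret)
		goto out;

	ret = (int)val;
out:
	pthread_mutex_unlock(&demo_map_lock);
	return ret;
}

int main(void)
{
	demo_regs[0x01] = 0x1234;
	printf("PHY read -> 0x%x\n", demo_phy_read(0, 2));
	return 0;
}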
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 607a2c90513b..d9547552ceef 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -151,7 +151,8 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
/* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+#define XGBE_TX_MAX_SPLIT \
+ ((GSO_LEGACY_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for an SKB:
* - Maximum number of SKB frags
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 0ddfb5b5d53c..2e6c5f258a1f 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -17,3 +17,8 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
+
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_tg3.o += -Wno-array-bounds
+endif
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a4ce96bb3903..3272aca496dc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
- if (IS_ERR(priv->wol_clk))
- return PTR_ERR(priv->wol_clk);
+ if (IS_ERR(priv->wol_clk)) {
+ ret = PTR_ERR(priv->wol_clk);
+ goto err_deregister_fixed_link;
+ }
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 082518e68579..56b46b8206a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2040,7 +2040,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
- RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
+ RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
u64 ns, ts;
@@ -7659,7 +7659,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags, flags_ext;
+ u32 flags, flags_ext, flags_ext2;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -7704,6 +7704,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ flags_ext2 = le32_to_cpu(resp->flags_ext2);
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
BNXT_FW_MAJ(bp) > 217)
@@ -10508,6 +10512,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
bnxt_ptp_init_rtc(bp, true);
+ bnxt_ptp_cfg_tstamp_filters(bp);
return 0;
open_err_irq:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a498ee297946..a1dca8c58f54 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1968,6 +1968,7 @@ struct bnxt {
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000
#define BNXT_FW_CAP_PTP_RTC 0x00400000
+ #define BNXT_FW_CAP_RX_ALL_PKT_TS 0x00800000
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
@@ -2131,6 +2132,7 @@ struct bnxt {
struct bpf_prog *xdp_prog;
struct bnxt_ptp_cfg *ptp_cfg;
+ u8 ptp_all_rx_tstamp;
/* devlink interface and vf-rep structs */
struct devlink *dl;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 0c17f90d44a2..3528ce9849e6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -45,7 +45,7 @@ bnxt_dl_flash_update(struct devlink *dl,
}
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
- rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0);
+ rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b3a48d6675fe..7191e5d74208 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -23,6 +23,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -34,6 +35,13 @@
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
+#define BNXT_NVM_ERR_MSG(dev, extack, msg) \
+ do { \
+ if (extack) \
+ NL_SET_ERR_MSG_MOD(extack, msg); \
+ netdev_err(dev, "%s\n", msg); \
+ } while (0)
+
static u32 bnxt_get_msglevel(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -2499,12 +2507,65 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
return rc;
}
+#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
+#define MSG_INVALID_PKG "PKG install error : Invalid package"
+#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
+#define MSG_INVALID_DEV "PKG install error : Invalid device"
+#define MSG_INTERNAL_ERR "PKG install error : Internal error"
+#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
+#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
+#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
+#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
+
+static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
+ struct netlink_ext_ack *extack)
+{
+ switch (result) {
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
+ return -EINVAL;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
+ return -ENOPKG;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
+ return -EPERM;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
+ return -EOPNOTSUPP;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
+ return -EIO;
+ }
+}
+
#define BNXT_PKG_DMA_SIZE 0x40000
#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
- u32 install_type)
+ u32 install_type, struct netlink_ext_ack *extack)
{
struct hwrm_nvm_install_update_input *install;
struct hwrm_nvm_install_update_output *resp;
@@ -2567,12 +2628,11 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_EXT_NONE,
&index, &item_len, NULL);
if (rc) {
- netdev_err(dev, "PKG update area not created in nvram\n");
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
break;
}
if (fw->size > item_len) {
- netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
- (unsigned long)fw->size);
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
rc = -EFBIG;
break;
}
@@ -2613,7 +2673,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
switch (cmd_err) {
case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure Anti-rollback detected\n");
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
rc = -EALREADY;
break;
case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
@@ -2641,8 +2701,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
}
fallthrough;
default:
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x cmd_err :%x\n",
- rc, cmd_err);
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
}
} while (defrag_attempted && !rc);
@@ -2653,7 +2712,7 @@ pkg_abort:
if (resp->result) {
netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
(s8)resp->result, (int)resp->problem_item);
- rc = -ENOPKG;
+ rc = nvm_update_err_to_stderr(dev, resp->result, extack);
}
if (rc == -EACCES)
bnxt_print_admin_err(bp);
@@ -2661,7 +2720,7 @@ pkg_abort:
}
static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
- u32 install_type)
+ u32 install_type, struct netlink_ext_ack *extack)
{
const struct firmware *fw;
int rc;
@@ -2673,7 +2732,7 @@ static int bnxt_flash_package_from_file(struct net_device *dev, const char *file
return rc;
}
- rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type);
+ rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
release_firmware(fw);
@@ -2691,7 +2750,7 @@ static int bnxt_flash_device(struct net_device *dev,
if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
flash->region > 0xffff)
return bnxt_flash_package_from_file(dev, flash->data,
- flash->region);
+ flash->region, NULL);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
@@ -3759,6 +3818,9 @@ static int bnxt_get_ts_info(struct net_device *dev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
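The bnxt ethtool changes above report NVM flashing failures through the netlink extended ack when the request arrives via devlink, while still logging with netdev_err() for the legacy ethtool path (which passes a NULL extack). A small sketch of that "set extack if present, always log" pattern together with the result-code-to-errno idea of nvm_update_err_to_stderr(); the demo_* types and the result values are stand-ins, not the firmware's real codes:

#include <stdio.h>

/* Minimal stand-in for struct netlink_ext_ack (illustration only). */
struct demo_ext_ack {
	const char *msg;
};

/* Report to the "extack" if the caller provided one, and always log. */
#define DEMO_NVM_ERR_MSG(extack, msg)		\
	do {					\
		if (extack)			\
			(extack)->msg = (msg);	\
		fprintf(stderr, "%s\n", (msg));	\
	} while (0)

static int demo_result_to_errno(int result, struct demo_ext_ack *extack)
{
	switch (result) {
	case 1:	/* stand-in for an integrity/checksum class of results */
		DEMO_NVM_ERR_MSG(extack, "PKG install error : Data integrity on NVM");
		return -22;	/* -EINVAL */
	case 2:	/* stand-in for an authentication failure */
		DEMO_NVM_ERR_MSG(extack, "PKG install error : Authentication error");
		return -1;	/* -EPERM */
	default:
		DEMO_NVM_ERR_MSG(extack, "PKG install error : Internal error");
		return -5;	/* -EIO */
	}
}

int main(void)
{
	struct demo_ext_ack ack = { 0 };
	int err = demo_result_to_errno(1, &ack);

	printf("errno=%d extack=\"%s\"\n", err, ack.msg ? ack.msg : "(none)");
	return 0;
}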
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 6aa44840f13a..a59284215e78 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -54,7 +54,7 @@ int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
u8 self_reset, u8 flags);
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
- u32 install_type);
+ u32 install_type, struct netlink_ext_ack *extack);
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index b7100edbd6dd..b753032a1047 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2021 Broadcom Inc.
+ * Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -311,6 +311,8 @@ struct cmd_nums {
#define HWRM_CFA_TFLIB 0x125UL
#define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
#define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -375,6 +377,8 @@ struct cmd_nums {
#define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
#define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -399,6 +403,7 @@ struct cmd_nums {
#define HWRM_MFG_PSOC_QSTATUS 0x215UL
#define HWRM_MFG_SELFTEST_QLIST 0x216UL
#define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -541,8 +546,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 73
-#define HWRM_VERSION_STR "1.10.2.73"
+#define HWRM_VERSION_RSVD 95
+#define HWRM_VERSION_STR "1.10.2.95"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -770,7 +775,9 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
#define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1259,7 +1266,8 @@ struct hwrm_async_event_cmpl_error_report_base {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1365,6 +1373,8 @@ struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
#define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
};
/* hwrm_func_reset_input (size:192b/24B) */
@@ -1600,36 +1610,38 @@ struct hwrm_func_qcaps_output {
__le16 max_sp_tx_rings;
__le16 max_msix_vfs;
__le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1638,7 +1650,23 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
__le16 max_key_ctxs_alloc;
- u8 unused_1[7];
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ u8 unused_1;
u8 valid;
};
@@ -1802,11 +1830,17 @@ struct hwrm_func_qcfg_output {
__le16 host_mtu;
__le16 alloc_tx_key_ctxs;
__le16 alloc_rx_key_ctxs;
- u8 unused_3[5];
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_3;
u8 valid;
};
-/* hwrm_func_cfg_input (size:896b/112B) */
+/* hwrm_func_cfg_input (size:960b/120B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1986,7 +2020,13 @@ struct hwrm_func_cfg_input {
__le16 host_mtu;
__le16 num_tx_key_ctxs;
__le16 num_rx_key_ctxs;
- u8 unused_0[4];
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 unused_0[7];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -3355,20 +3395,26 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
__le32 flags;
#define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
@@ -3416,20 +3462,26 @@ struct hwrm_func_backing_store_qcfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
u8 rsvd[4];
};
@@ -3453,6 +3505,8 @@ struct hwrm_func_backing_store_qcfg_v2_output {
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
@@ -3528,20 +3582,26 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
};
@@ -3552,24 +3612,31 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
__le32 instance_bit_map;
u8 ctx_init_value;
u8 ctx_init_offset;
@@ -4108,6 +4175,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -6390,6 +6459,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6404,7 +6474,12 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
- u8 unused0[5];
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ u8 unused0[4];
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -6437,25 +6512,31 @@ struct hwrm_vnic_qcaps_output {
__le16 mru;
u8 unused_0[2];
__le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
- #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
- #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_TOEPLITZ_CAP 0x8000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_XOR_CAP 0x10000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_CHKSM_CAP 0x20000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -6576,6 +6657,10 @@ struct hwrm_vnic_rss_cfg_input {
#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
__le16 vnic_id;
u8 ring_table_pair_index;
u8 hash_mode_flags;
@@ -6590,11 +6675,11 @@ struct hwrm_vnic_rss_cfg_input {
u8 flags;
#define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
#define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
- u8 rss_hash_function;
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_TOEPLITZ 0x0UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_XOR 0x1UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM 0x2UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_LAST VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
u8 unused_1[4];
};
@@ -6739,7 +6824,9 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
__le16 flags;
- #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -7923,12 +8010,17 @@ struct hwrm_cfa_flow_info_input {
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
u8 unused_0[6];
__le64 ext_flow_handle;
};
@@ -8017,7 +8109,8 @@ struct hwrm_cfa_flow_stats_output {
__le64 byte_7;
__le64 byte_8;
__le64 byte_9;
- u8 unused_0[7];
+ __le16 flow_hits;
+ u8 unused_0[5];
u8 valid;
};
@@ -8243,6 +8336,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
u8 unused_0[3];
u8 valid;
};
@@ -8583,6 +8677,56 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER 0x0UL
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_LAST STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1216b/152B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+};
+
/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
__le16 req_type;
@@ -9811,11 +9955,12 @@ struct hwrm_nvm_install_update_output {
/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
u8 unused_0[7];
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 00f2f80c0073..562f8f68a47d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -295,6 +295,40 @@ static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
return hwrm_req_send(bp, req);
}
+void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct hwrm_port_mac_cfg_input *req;
+
+ if (!ptp || !ptp->tstamp_filters)
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG))
+ goto out;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
+	    (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+	     PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
+		ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+					 PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
+ netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
+ }
+
+ req->flags = cpu_to_le32(ptp->tstamp_filters);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+
+ if (!hwrm_req_send(bp, req)) {
+ bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters &
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE);
+ return;
+ }
+ ptp->tstamp_filters = 0;
+out:
+ bp->ptp_all_rx_tstamp = 0;
+ netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n");
+}
+
void bnxt_ptp_reapply_pps(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -435,27 +469,41 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- struct hwrm_port_mac_cfg_input *req;
u32 flags = 0;
- int rc;
+ int rc = 0;
- rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
- if (rc)
- return rc;
+ switch (ptp->rx_filter) {
+ case HWTSTAMP_FILTER_ALL:
+ flags = PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE;
+ break;
+ case HWTSTAMP_FILTER_NONE:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ break;
+ }
- if (ptp->rx_filter)
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
- else
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
if (ptp->tx_tstamp_en)
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
else
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
- req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
- return hwrm_req_send(bp, req);
+ ptp->tstamp_filters = flags;
+
+ if (netif_running(bp->dev)) {
+ rc = bnxt_close_nic(bp, false, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, false, false);
+ if (!rc && !ptp->tstamp_filters)
+ rc = -EIO;
+ }
+
+ return rc;
}
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -486,6 +534,12 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
ptp->rxctl = 0;
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
break;
+ case HWTSTAMP_FILTER_ALL:
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) {
+ ptp->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ return -EOPNOTSUPP;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 530b9922608c..4ce0a14c1e23 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -113,6 +113,7 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
int rx_filter;
+ u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
@@ -133,6 +134,7 @@ do { \
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
+void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index fde0c3e8ac57..2e54bf4fc7a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -25,7 +25,7 @@
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
-static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_register_dev(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_ulp_ops *ulp_ops, void *handle)
{
struct net_device *dev = edev->net;
@@ -62,7 +62,7 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
return 0;
}
-static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
+static int bnxt_unregister_dev(struct bnxt_en_dev *edev, unsigned int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -115,7 +115,7 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
}
}
-static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_msix_entry *ent, int num_msix)
{
struct net_device *dev = edev->net;
@@ -179,7 +179,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
return avail_msix;
}
-static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -233,7 +233,7 @@ int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
return 0;
}
-static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
@@ -447,7 +447,7 @@ void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
rcu_read_unlock();
}
-static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
+static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp_id,
unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 54d59f681b86..42b50abc3e91 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -77,15 +77,15 @@ struct bnxt_en_dev {
};
struct bnxt_en_ops {
- int (*bnxt_register_device)(struct bnxt_en_dev *, int,
+ int (*bnxt_register_device)(struct bnxt_en_dev *, unsigned int,
struct bnxt_ulp_ops *, void *);
- int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
- int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
+ int (*bnxt_unregister_device)(struct bnxt_en_dev *, unsigned int);
+ int (*bnxt_request_msix)(struct bnxt_en_dev *, unsigned int,
struct bnxt_msix_entry *, int);
- int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
- int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
+ int (*bnxt_free_msix)(struct bnxt_en_dev *, unsigned int);
+ int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, unsigned int,
struct bnxt_fw_msg *);
- int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
+ int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, unsigned int,
unsigned long *, u16);
};
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e993616308f8..d6cdb97bfb38 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1213,7 +1213,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1252,6 +1251,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a04aa206fddc..768ea426d49f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2024,9 +2024,6 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
u8 mode;
struct xcast_addr_list *mc;
- if (!vf_work)
- return;
-
/* Save message data locally to prevent them from
* being overwritten by next ndo_set_rx_mode call().
*/
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 4a97aa8e1387..06a0c00af99c 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -985,7 +985,7 @@ release_irq:
if (result == DETECTED_NONE) {
pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
- result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */
+ result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */
}
break;
case A_CNF_MEDIA_10B_2:
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 79dc336ce709..078a12f07e96 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -104,21 +104,6 @@ config TULIP_DM910X
def_bool y
depends on TULIP && SPARC
-config DE4X5
- tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
- depends on (PCI || EISA)
- depends on VIRT_TO_BUS || ALPHA || PPC || SPARC
- select CRC32
- help
- This is support for the DIGITAL series of PCI/EISA Ethernet cards.
- These include the DE425, DE434, DE435, DE450 and DE500 models. If
- you have a network card of this type, say Y. More specific
- information is contained in
- <file:Documentation/networking/device_drivers/ethernet/dec/de4x5.rst>.
-
- To compile this driver as a module, choose M here. The module will
- be called de4x5.
-
config WINBOND_840
tristate "Winbond W89c840 Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile
index 8aab37564d5d..d4f1d21d29a0 100644
--- a/drivers/net/ethernet/dec/tulip/Makefile
+++ b/drivers/net/ethernet/dec/tulip/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_DM9102) += dmfe.o
obj-$(CONFIG_WINBOND_840) += winbond-840.o
obj-$(CONFIG_DE2104X) += de2104x.o
obj-$(CONFIG_TULIP) += tulip.o
-obj-$(CONFIG_DE4X5) += de4x5.o
obj-$(CONFIG_ULI526X) += uli526x.o
# Declare multi-part drivers.
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
deleted file mode 100644
index 71730ef4cd57..000000000000
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ /dev/null
@@ -1,5591 +0,0 @@
-/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
- ethernet driver for Linux.
-
- Copyright 1994, 1995 Digital Equipment Corporation.
-
- Testing resources for this driver have been made available
- in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
-
- The author may be reached at davies@maniac.ultranet.com.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 2 of the License, or (at your
- option) any later version.
-
- THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Originally, this driver was written for the Digital Equipment
- Corporation series of EtherWORKS ethernet cards:
-
- DE425 TP/COAX EISA
- DE434 TP PCI
- DE435 TP/COAX/AUI PCI
- DE450 TP/COAX/AUI PCI
- DE500 10/100 PCI Fasternet
-
- but it will now attempt to support all cards which conform to the
- Digital Semiconductor SROM Specification. The driver currently
- recognises the following chips:
-
- DC21040 (no SROM)
- DC21041[A]
- DC21140[A]
- DC21142
- DC21143
-
- So far the driver is known to work with the following cards:
-
- KINGSTON
- Linksys
- ZNYX342
- SMC8432
- SMC9332 (w/new SROM)
- ZNYX31[45]
- ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
-
- The driver has been tested on a relatively busy network using the DE425,
- DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
- 16M of data to a DECstation 5000/200 as follows:
-
- TCP UDP
- TX RX TX RX
- DE425 1030k 997k 1170k 1128k
- DE434 1063k 995k 1170k 1125k
- DE435 1063k 995k 1170k 1125k
- DE500 1063k 998k 1170k 1125k in 10Mb/s mode
-
- All values are typical (in kBytes/sec) from a sample of 4 for each
- measurement. Their error is +/-20k on a quiet (private) network and also
- depend on what load the CPU has.
-
- =========================================================================
- This driver has been written substantially from scratch, although its
- inheritance of style and stack interface from 'ewrk3.c' and in turn from
- Donald Becker's 'lance.c' should be obvious. With the module autoload of
- every usable DECchip board, I pinched Donald's 'next_module' field to
- link my modules together.
-
- Up to 15 EISA cards can be supported under this driver, limited primarily
- by the available IRQ lines. I have checked different configurations of
- multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
- problem yet (provided you have at least depca.c v0.38) ...
-
- PCI support has been added to allow the driver to work with the DE434,
- DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
- to the differences in the EISA and PCI CSR address offsets from the base
- address.
-
- The ability to load this driver as a loadable module has been included
- and used extensively during the driver development (to save those long
- reboot sequences). Loadable module support under PCI and EISA has been
- achieved by letting the driver autoprobe as if it were compiled into the
- kernel. Do make sure you're not sharing interrupts with anything that
- cannot accommodate interrupt sharing!
-
- To utilise this ability, you have to do 8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) for fixed autoprobes (not recommended), edit the source code near
- line 5594 to reflect the I/O address you're using, or assign these when
- loading by:
-
- insmod de4x5 io=0xghh where g = bus number
- hh = device number
-
- NB: autoprobing for modules is now supported by default. You may just
- use:
-
- insmod de4x5
-
- to load all available boards. For a specific board, still use
- the 'io=?' above.
- 3) compile de4x5.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the de4x5 configuration turned off and reboot.
- 5) insmod de4x5 [io=0xghh]
- 6) run the net startup bits for your new eth?? interface(s) manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- To unload a module, turn off the associated interface(s)
- 'ifconfig eth?? down' then 'rmmod de4x5'.
-
- Automedia detection is included so that in principal you can disconnect
- from, e.g. TP, reconnect to BNC and things will still work (after a
- pause whilst the driver figures out where its media went). My tests
- using ping showed that it appears to work....
-
- By default, the driver will now autodetect any DECchip based card.
- Should you have a need to restrict the driver to DIGITAL only cards, you
- can compile with a DEC_ONLY define, or if loading as a module, use the
- 'dec_only=1' parameter.
-
- I've changed the timing routines to use the kernel timer and scheduling
- functions so that the hangs and other assorted problems that occurred
- while autosensing the media should be gone. A bonus for the DC21040
- auto media sense algorithm is that it can now use one that is more in
- line with the rest (the DC21040 chip doesn't have a hardware timer).
- The downside is the 1 'jiffies' (10ms) resolution.
-
- IEEE 802.3u MII interface code has been added in anticipation that some
- products may use it in the future.
-
- The SMC9332 card has a non-compliant SROM which needs fixing - I have
- patched this driver to detect it because the SROM format used complies
- to a previous DEC-STD format.
-
- I have removed the buffer copies needed for receive on Intels. I cannot
- remove them for Alphas since the Tulip hardware only does longword
- aligned DMA transfers and the Alphas get alignment traps with non
- longword aligned data copies (which makes them really slow). No comment.
-
- I have added SROM decoding routines to make this driver work with any
- card that supports the Digital Semiconductor SROM spec. This will help
- all cards running the dc2114x series chips in particular. Cards using
- the dc2104x chips should run correctly with the basic driver. I'm in
- debt to <mjacob@feral.com> for the testing and feedback that helped get
- this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
- (with the latest SROM complying with the SROM spec V3: their first was
- broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315
- (quad 21041 MAC) cards also appear to work despite their incorrectly
- wired IRQs.
-
- I have added a temporary fix for interrupt problems when some SCSI cards
- share the same interrupt as the DECchip based cards. The problem occurs
- because the SCSI card wants to grab the interrupt as a fast interrupt
- (runs the service routine with interrupts turned off) vs. this card
- which really needs to run the service routine with interrupts turned on.
- This driver will now add the interrupt service routine as a fast
- interrupt if it is bounced from the slow interrupt. THIS IS NOT A
- RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
- until people sort out their compatibility issues and the kernel
- interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
- INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
- run on the same interrupt. PCMCIA/CardBus is another can of worms...
-
- Finally, I think I have really fixed the module loading problem with
- more than one DECchip based card. As a side effect, I don't mess with
- the device structure any more which means that if more than 1 card in
- 2.0.x is installed (4 in 2.1.x), the user will have to edit
- linux/drivers/net/Space.c to make room for them. Hence, module loading
- is the preferred way to use this driver, since it doesn't have this
- limitation.
-
- Where SROM media detection is used and full duplex is specified in the
- SROM, the feature is ignored unless lp->params.fdx is set at compile
- time OR during a module load (insmod de4x5 args='eth??:fdx' [see
- below]). This is because there is no way to automatically detect full
- duplex links except through autonegotiation. When I include the
- autonegotiation feature in the SROM autoconf code, this detection will
- occur automatically for that case.
-
- Command line arguments are now allowed, similar to passing arguments
- through LILO. This will allow a per adapter board set up of full duplex
- and media. The only lexical constraints are: the board name (dev->name)
- appears in the list before its parameters. The list of parameters ends
- either at the end of the parameter list or with another board name. The
- following parameters are allowed:
-
- fdx for full duplex
- autosense to set the media/speed; with the following
- sub-parameters:
- TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
-
- Case sensitivity is important for the sub-parameters. They *must* be
- upper case. Examples:
-
- insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-
- For a compiled in driver, at or above line 548, place e.g.
- #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
-
- Yes, I know full duplex isn't permissible on BNC or AUI; they're just
- examples. By default, full duplex is turned off and AUTO is the default
- autosense setting. In reality, I expect only the full duplex option to
- be used. Note the use of single quotes in the two examples above and the
- lack of commas to separate items. ALSO, you must get the requested media
- correct in relation to what the adapter SROM says it has. There's no way
- to determine this in advance other than by trial and error and common
- sense, e.g. call a BNC connectored port 'BNC', not '10Mb'.
-
- Changed the bus probing. EISA used to be done first, followed by PCI.
- Most people probably don't even know what a de425 is today and the EISA
- probe has messed up some SCSI cards in the past, so now PCI is always
- probed first followed by EISA if a) the architecture allows EISA and
- either b) there have been no PCI cards detected or c) an EISA probe is
- forced by the user. To force a probe include "force_eisa" in your
- insmod "args" line; for built-in kernels either change the driver to do
- this automatically or include #define DE4X5_FORCE_EISA on or before
- line 1040 in the driver.
-
- TO DO:
- ------
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 17-Nov-94 Initial writing. ALPHA code release.
- 0.2 13-Jan-95 Added PCI support for DE435's.
- 0.21 19-Jan-95 Added auto media detection.
- 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
- Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- Add request/release_region code.
- Add loadable modules support for PCI.
- Clean up loadable modules support.
- 0.23 28-Feb-95 Added DC21041 and DC21140 support.
- Fix missed frame counter value and initialisation.
- Fixed EISA probe.
- 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
- Change TX_BUFFS_AVAIL macro.
- Change media autodetection to allow manual setting.
- Completed DE500 (DC21140) support.
- 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
- 0.242 10-May-95 Minor changes.
- 0.30 12-Jun-95 Timer fix for DC21140.
- Portability changes.
- Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
- Add DE500 semi automatic autosense.
- Add Link Fail interrupt TP failure detection.
- Add timer based link change detection.
- Plugged a memory leak in de4x5_queue_pkt().
- 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
- 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
- suggestion by <heiko@colossus.escape.de>.
- 0.33 8-Aug-95 Add shared interrupt support (not released yet).
- 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
- Fix de4x5_interrupt().
- Fix dc21140_autoconf() mess.
- No shared interrupt support.
- 0.332 11-Sep-95 Added MII management interface routines.
- 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
- Add kernel timer code (h/w is too flaky).
- Add MII based PHY autosense.
- Add new multicasting code.
- Add new autosense algorithms for media/mode
- selection using kernel scheduling/timing.
- Re-formatted.
- Made changes suggested by <jeff@router.patch.net>:
- Change driver to detect all DECchip based cards
- with DEC_ONLY restriction a special case.
- Changed driver to autoprobe as a module. No irq
- checking is done now - assume BIOS is good!
- Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
- 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
- only <niles@axp745gsfc.nasa.gov>
- Fix for multiple PCI cards reported by <jos@xos.nl>
- Duh, put the IRQF_SHARED flag into request_interrupt().
- Fix SMC ethernet address in enet_det[].
- Print chip name instead of "UNKNOWN" during boot.
- 0.42 26-Apr-96 Fix MII write TA bit error.
- Fix bug in dc21040 and dc21041 autosense code.
- Remove buffer copies on receive for Intels.
- Change sk_buff handling during media disconnects to
- eliminate DUP packets.
- Add dynamic TX thresholding.
- Change all chips to use perfect multicast filtering.
- Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.43 21-Jun-96 Fix unconnected media TX retry bug.
- Add Accton to the list of broken cards.
- Fix TX under-run bug for non DC21140 chips.
- Fix boot command probe bug in alloc_device() as
- reported by <koen.gadeyne@barco.com> and
- <orava@nether.tky.hut.fi>.
- Add cache locks to prevent a race condition as
- reported by <csd@microplex.com> and
- <baba@beckman.uiuc.edu>.
- Upgraded alloc_device() code.
- 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
- with <csd@microplex.com>
- 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
- Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
- and <michael@compurex.com>.
- 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
- with a loopback packet.
- 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
- by <bhat@mundook.cs.mu.OZ.AU>
- 0.45 8-Dec-96 Include endian functions for PPC use, from work
- by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
- 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
- suggestion from <mjacob@feral.com>.
- 0.5 30-Jan-97 Added SROM decoding functions.
- Updated debug flags.
- Fix sleep/wakeup calls for PCI cards, bug reported
- by <cross@gweep.lkg.dec.com>.
- Added multi-MAC, one SROM feature from discussion
- with <mjacob@feral.com>.
- Added full module autoprobe capability.
- Added attempt to use an SMC9332 with broken SROM.
- Added fix for ZYNX multi-mac cards that didn't
- get their IRQs wired correctly.
- 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
- <paubert@iram.es>
- Fix init_connection() to remove extra device reset.
- Fix MAC/PHY reset ordering in dc21140m_autoconf().
- Fix initialisation problem with lp->timeout in
- typeX_infoblock() from <paubert@iram.es>.
- Fix MII PHY reset problem from work done by
- <paubert@iram.es>.
- 0.52 26-Apr-97 Some changes may not credit the right people -
- a disk crash meant I lost some mail.
- Change RX interrupt routine to drop rather than
- defer packets to avoid hang reported by
- <g.thomas@opengroup.org>.
- Fix srom_exec() to return for COMPACT and type 1
- infoblocks.
- Added DC21142 and DC21143 functions.
- Added byte counters from <phil@tazenda.demon.co.uk>
- Added IRQF_DISABLED temporary fix from
- <mjacob@feral.com>.
- 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
- module load: bug reported by
- <Piete.Brooks@cl.cam.ac.uk>
- Fix multi-MAC, one SROM, to work with 2114x chips:
- bug reported by <cmetz@inner.net>.
- Make above search independent of BIOS device scan
- direction.
- Completed DC2114[23] autosense functions.
- 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
- <robin@intercore.com
- Fix type1_infoblock() bug introduced in 0.53, from
- problem reports by
- <parmee@postecss.ncrfran.france.ncr.com> and
- <jo@ice.dillingen.baynet.de>.
- Added argument list to set up each board from either
- a module's command line or a compiled in #define.
- Added generic MII PHY functionality to deal with
- newer PHY chips.
- Fix the mess in 2.1.67.
- 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
- <redhat@cococo.net>.
- Fix bug in pci_probe() for 64 bit systems reported
- by <belliott@accessone.com>.
- 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
- 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org>
- 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
- 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure.
- **Incompatible with 2.0.x from here.**
- 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP
- from <lma@varesearch.com>
- Add TP, AUI and BNC cases to 21140m_autoconf() for
- case where a 21140 under SROM control uses, e.g. AUI
- from problem report by <delchini@lpnp09.in2p3.fr>
- Add MII parallel detection to 2114x_autoconf() for
- case where no autonegotiation partner exists from
- problem report by <mlapsley@ndirect.co.uk>.
- Add ability to force connection type directly even
- when using SROM control from problem report by
- <earl@exis.net>.
- Updated the PCI interface to conform with the latest
- version. I hope nothing is broken...
- Add TX done interrupt modification from suggestion
- by <Austin.Donnelly@cl.cam.ac.uk>.
- Fix is_anc_capable() bug reported by
- <Austin.Donnelly@cl.cam.ac.uk>.
- Fix type[13]_infoblock() bug: during MII search, PHY
- lp->rst not run because lp->ibn not initialised -
- from report & fix by <paubert@iram.es>.
- Fix probe bug with EISA & PCI cards present from
- report by <eirik@netcom.com>.
- 0.541 24-Aug-98 Fix compiler problems associated with i386-string
- ops from multiple bug reports and temporary fix
- from <paubert@iram.es>.
- Fix pci_probe() to correctly emulate the old
- pcibios_find_class() function.
- Add an_exception() for old ZYNX346 and fix compile
- warning on PPC & SPARC, from <ecd@skynet.be>.
- Fix lastPCI to correctly work with compiled in
- kernels and modules from bug report by
- <Zlatko.Calusic@CARNet.hr> et al.
- 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
- when media is unconnected.
- Change dev->interrupt to lp->interrupt to ensure
- alignment for Alpha's and avoid their unaligned
- access traps. This flag is merely for log messages:
- should do something more definitive though...
- 0.543 30-Dec-98 Add SMP spin locking.
- 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
- a 21143 by <mmporter@home.com>.
- Change PCI/EISA bus probing order.
- 0.545 28-Nov-99 Further Moto SROM bug fix from
- <mporter@eng.mcd.mot.com>
- Remove double checking for DEBUG_RX in de4x5_dbg_rx()
- from report by <geert@linux-m68k.org>
- 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function
- was causing a page fault when initializing the
- variable 'pb', on a non de4x5 PCI device, in this
- case a PCI bridge (DEC chip 21152). The value of
- 'pb' is now only initialized if a de4x5 chip is
- present.
- <france@handhelds.org>
- 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
- 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
- generic DMA APIs. Fixed DE425 support on Alpha.
- <maz@wild-wind.fr.eu.org>
- =========================================================================
-*/
-
-#include <linux/compat.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/eisa.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/dma-mapping.h>
-#include <linux/moduleparam.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-#include <linux/uaccess.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/machdep.h>
-#endif /* CONFIG_PPC_PMAC */
-
-#include "de4x5.h"
-
-static const char version[] =
- KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
-
-#define c_char const char
-
-/*
-** MII Information
-*/
-struct phy_table {
- int reset; /* Hard reset required? */
- int id; /* IEEE OUI */
- int ta; /* One cycle TA time - 802.3u is confusing here */
- struct { /* Non autonegotiation (parallel) speed det. */
- int reg;
- int mask;
- int value;
- } spd;
-};
-
-struct mii_phy {
- int reset; /* Hard reset required? */
- int id; /* IEEE OUI */
- int ta; /* One cycle TA time */
- struct { /* Non autonegotiation (parallel) speed det. */
- int reg;
- int mask;
- int value;
- } spd;
- int addr; /* MII address for the PHY */
- u_char *gep; /* Start of GEP sequence block in SROM */
- u_char *rst; /* Start of reset sequence in SROM */
- u_int mc; /* Media Capabilities */
- u_int ana; /* NWay Advertisement */
- u_int fdx; /* Full DupleX capabilities for each media */
- u_int ttm; /* Transmit Threshold Mode for each media */
- u_int mci; /* 21142 MII Connector Interrupt info */
-};
-
-#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
-
-struct sia_phy {
- u_char mc; /* Media Code */
- u_char ext; /* csr13-15 valid when set */
- int csr13; /* SIA Connectivity Register */
- int csr14; /* SIA TX/RX Register */
- int csr15; /* SIA General Register */
- int gepc; /* SIA GEP Control Information */
- int gep; /* SIA GEP Data */
-};
-
-/*
-** Define the know universe of PHY devices that can be
-** recognised by this driver.
-*/
-static struct phy_table phy_info[] = {
- {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
- {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
- {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
- {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
- {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */
-};
-
-/*
-** These GENERIC values assumes that the PHY devices follow 802.3u and
-** allow parallel detection to set the link partner ability register.
-** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
-*/
-#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
-#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
-#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
-
-/*
-** Define special SROM detection cases
-*/
-static c_char enet_det[][ETH_ALEN] = {
- {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
-};
-
-#define SMC 1
-#define ACCTON 2
-
-/*
-** SROM Repair definitions. If a broken SROM is detected a card may
-** use this information to help figure out what to do. This is a
-** "stab in the dark" and so far for SMC9332's only.
-*/
-static c_char srom_repair_info[][100] = {
- {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
- 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
- 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
- 0x00,0x18,}
-};
-
-
-#ifdef DE4X5_DEBUG
-static int de4x5_debug = DE4X5_DEBUG;
-#else
-/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
-static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
-#endif
-
-/*
-** Allow per adapter set up. For modules this is simply a command line
-** parameter, e.g.:
-** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-**
-** For a compiled in driver, place e.g.
-** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
-** here
-*/
-#ifdef DE4X5_PARM
-static char *args = DE4X5_PARM;
-#else
-static char *args;
-#endif
-
-struct parameters {
- bool fdx;
- int autosense;
-};
-
-#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
-
-#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
-
-/*
-** Ethernet PROM defines
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** Ethernet Info
-*/
-#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
-#define IEEE802_3_SZ 1518 /* Packet + CRC */
-#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
-#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
-#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
-#define PKT_HDR_LEN 14 /* Addresses and data length info */
-#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
-#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
-
-
-/*
-** EISA bus defines
-*/
-#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
-
-#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
-
-#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
-#define DE4X5_NAME_LENGTH 8
-
-static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
-
-/*
-** Ethernet PROM defines for DC21040
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** PCI Bus defines
-*/
-#define PCI_MAX_BUS_NUM 8
-#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
-#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
-
-/*
-** Memory Alignment. Each descriptor is 4 longwords long. To force a
-** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
-** DESC_ALIGN. ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
-*/
-#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
-#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
-#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */
-#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */
-#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */
-#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */
-
-#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */
-#define DE4X5_CACHE_ALIGN CAL_16LONG
-#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
-/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
-#define DESC_ALIGN
-
-#ifndef DEC_ONLY /* See README.de4x5 for using this */
-static int dec_only;
-#else
-static int dec_only = 1;
-#endif
-
-/*
-** DE4X5 IRQ ENABLE/DISABLE
-*/
-#define ENABLE_IRQs { \
- imr |= lp->irq_en;\
- outl(imr, DE4X5_IMR); /* Enable the IRQs */\
-}
-
-#define DISABLE_IRQs {\
- imr = inl(DE4X5_IMR);\
- imr &= ~lp->irq_en;\
- outl(imr, DE4X5_IMR); /* Disable the IRQs */\
-}
-
-#define UNMASK_IRQs {\
- imr |= lp->irq_mask;\
- outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
-}
-
-#define MASK_IRQs {\
- imr = inl(DE4X5_IMR);\
- imr &= ~lp->irq_mask;\
- outl(imr, DE4X5_IMR); /* Mask the IRQs */\
-}
-
-/*
-** DE4X5 START/STOP
-*/
-#define START_DE4X5 {\
- omr = inl(DE4X5_OMR);\
- omr |= OMR_ST | OMR_SR;\
- outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
-}
-
-#define STOP_DE4X5 {\
- omr = inl(DE4X5_OMR);\
- omr &= ~(OMR_ST|OMR_SR);\
- outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
-}
-
-/*
-** DE4X5 SIA RESET
-*/
-#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
-
-/*
-** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
-*/
-#define DE4X5_AUTOSENSE_MS 250
-
-/*
-** SROM Structure
-*/
-struct de4x5_srom {
- char sub_vendor_id[2];
- char sub_system_id[2];
- char reserved[12];
- char id_block_crc;
- char reserved2;
- char version;
- char num_controllers;
- char ieee_addr[6];
- char info[100];
- short chksum;
-};
-#define SUB_VENDOR_ID 0x500a
-
-/*
-** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
-** and have sizes that are both a power of 2 and a multiple of 4.
-** A size of 256 bytes for each buffer could be chosen because over 90% of
-** all packets in our network are <256 bytes long and 64 longword alignment
-** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
-** descriptors are needed for machines with an ALPHA CPU.
-*/
-#define NUM_RX_DESC 8 /* Number of RX descriptors */
-#define NUM_TX_DESC 32 /* Number of TX descriptors */
-#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
- /* Multiple of 4 for DC21040 */
- /* Allows 512 byte alignment */
-struct de4x5_desc {
- volatile __le32 status;
- __le32 des1;
- __le32 buf;
- __le32 next;
- DESC_ALIGN
-};
-
-/*
-** The DE4X5 private structure
-*/
-#define DE4X5_PKT_STAT_SZ 16
-#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase DE4X5_PKT_STAT_SZ */
-
-struct pkt_stats {
- u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
- u_int unicast;
- u_int multicast;
- u_int broadcast;
- u_int excessive_collisions;
- u_int tx_underruns;
- u_int excessive_underruns;
- u_int rx_runt_frames;
- u_int rx_collision;
- u_int rx_dribble;
- u_int rx_overflow;
-};
-
-struct de4x5_private {
- char adapter_name[80]; /* Adapter name */
- u_long interrupt; /* Aligned ISR flag */
- struct de4x5_desc *rx_ring; /* RX descriptor ring */
- struct de4x5_desc *tx_ring; /* TX descriptor ring */
- struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
- struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
- int rx_new, rx_old; /* RX descriptor ring pointers */
- int tx_new, tx_old; /* TX descriptor ring pointers */
- char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
- char frame[64]; /* Min sized packet for loopback*/
- spinlock_t lock; /* Adapter specific spinlock */
- struct net_device_stats stats; /* Public stats */
- struct pkt_stats pktStats; /* Private stats counters */
- char rxRingSize;
- char txRingSize;
- int bus; /* EISA or PCI */
- int bus_num; /* PCI Bus number */
- int device; /* Device number on PCI bus */
- int state; /* Adapter OPENED or CLOSED */
- int chipset; /* DC21040, DC21041 or DC21140 */
- s32 irq_mask; /* Interrupt Mask (Enable) bits */
- s32 irq_en; /* Summary interrupt bits */
- int media; /* Media (eg TP), mode (eg 100B)*/
- int c_media; /* Remember the last media conn */
- bool fdx; /* media full duplex flag */
- int linkOK; /* Link is OK */
- int autosense; /* Allow/disallow autosensing */
- bool tx_enable; /* Enable descriptor polling */
- int setup_f; /* Setup frame filtering type */
- int local_state; /* State within a 'media' state */
- struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
- struct sia_phy sia; /* SIA PHY Information */
- int active; /* Index to active PHY device */
- int mii_cnt; /* Number of attached PHY's */
- int timeout; /* Scheduling counter */
- struct timer_list timer; /* Timer info for kernel */
- int tmp; /* Temporary global per card */
- struct {
- u_long lock; /* Lock the cache accesses */
- s32 csr0; /* Saved Bus Mode Register */
- s32 csr6; /* Saved Operating Mode Reg. */
- s32 csr7; /* Saved IRQ Mask Register */
- s32 gep; /* Saved General Purpose Reg. */
- s32 gepc; /* Control info for GEP */
- s32 csr13; /* Saved SIA Connectivity Reg. */
- s32 csr14; /* Saved SIA TX/RX Register */
- s32 csr15; /* Saved SIA General Register */
- int save_cnt; /* Flag if state already saved */
- struct sk_buff_head queue; /* Save the (re-ordered) skb's */
- } cache;
- struct de4x5_srom srom; /* A copy of the SROM */
- int cfrv; /* Card CFRV copy */
- int rx_ovf; /* Check for 'RX overflow' tag */
- bool useSROM; /* For non-DEC card use SROM */
- bool useMII; /* Infoblock using the MII */
- int asBitValid; /* Autosense bits in GEP? */
- int asPolarity; /* 0 => asserted high */
- int asBit; /* Autosense bit number in GEP */
- int defMedium; /* SROM default medium */
- int tcount; /* Last infoblock number */
- int infoblock_init; /* Initialised this infoblock? */
- int infoleaf_offset; /* SROM infoleaf for controller */
- s32 infoblock_csr6; /* csr6 value in SROM infoblock */
- int infoblock_media; /* infoblock media */
- int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */
- u_char *rst; /* Pointer to Type 5 reset info */
- u_char ibn; /* Infoblock number */
- struct parameters params; /* Command line/ #defined params */
- struct device *gendev; /* Generic device */
- dma_addr_t dma_rings; /* DMA handle for rings */
- int dma_size; /* Size of the DMA area */
- char *rx_bufs; /* rx bufs on alpha, sparc, ... */
-};
-
-/*
-** To get around certain poxy cards that don't provide an SROM
-** for the second and subsequent DECchips, I have to key off the first
-** chip's address. I'll assume there's not a bad SROM iff:
-**
-** o the chipset is the same
-** o the bus number is the same and > 0
-** o the sum of all the returned hw address bytes is 0 or 0x5fa
-**
-** Also have to save the irq for those cards whose hardware designers
-** can't follow the PCI to PCI Bridge Architecture spec.
-*/
-static struct {
- int chipset;
- int bus;
- int irq;
- u_char addr[ETH_ALEN];
-} last = {0,};
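-
-/*
-** For reference, 0x5fa == 6 * 0xff: the address sum test above matches a
-** returned hardware address of all zero or all 0xff bytes (srom_search()
-** below makes the same check as j != 0 && j != 6 * 0xff).
-*/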
-
-/*
-** The transmit ring full condition is described, in terms of the tx_old
-** and tx_new pointers, by:
-** tx_old = tx_new Empty ring
-** tx_old = tx_new+1 Full ring
-** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
-*/
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->txRingSize-lp->tx_new-1:\
- lp->tx_old -lp->tx_new-1)
-
-#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
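-
-/*
-** Worked example of the ring arithmetic (illustrative), with
-** txRingSize == 32:
-**
-**     tx_old == 0, tx_new == 0    ->  0 + 32 - 0 - 1  == 31 free (empty)
-**     tx_old == 0, tx_new == 31   ->  0 + 32 - 31 - 1 ==  0 (full, wrapped)
-**     tx_old == 5, tx_new == 4    ->  5 - 4 - 1       ==  0 (full)
-**
-** i.e. one descriptor is always left unused so a full ring can be told
-** apart from an empty one.
-*/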
-
-/*
-** Public Functions
-*/
-static int de4x5_open(struct net_device *dev);
-static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
-static int de4x5_close(struct net_device *dev);
-static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
-static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
-static void set_multicast_list(struct net_device *dev);
-static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq,
- void __user *data, int cmd);
-
-/*
-** Private functions
-*/
-static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
-static int de4x5_init(struct net_device *dev);
-static int de4x5_sw_reset(struct net_device *dev);
-static int de4x5_rx(struct net_device *dev);
-static int de4x5_tx(struct net_device *dev);
-static void de4x5_ast(struct timer_list *t);
-static int de4x5_txur(struct net_device *dev);
-static int de4x5_rx_ovfc(struct net_device *dev);
-
-static int autoconf_media(struct net_device *dev);
-static void create_packet(struct net_device *dev, char *frame, int len);
-static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
-static int dc21040_autoconf(struct net_device *dev);
-static int dc21041_autoconf(struct net_device *dev);
-static int dc21140m_autoconf(struct net_device *dev);
-static int dc2114x_autoconf(struct net_device *dev);
-static int srom_autoconf(struct net_device *dev);
-static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
-static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
-static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
-static int test_for_100Mb(struct net_device *dev, int msec);
-static int wait_for_link(struct net_device *dev);
-static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
-static int is_spd_100(struct net_device *dev);
-static int is_100_up(struct net_device *dev);
-static int is_10_up(struct net_device *dev);
-static int is_anc_capable(struct net_device *dev);
-static int ping_media(struct net_device *dev, int msec);
-static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
-static void de4x5_free_rx_buffs(struct net_device *dev);
-static void de4x5_free_tx_buffs(struct net_device *dev);
-static void de4x5_save_skbs(struct net_device *dev);
-static void de4x5_rst_desc_ring(struct net_device *dev);
-static void de4x5_cache_state(struct net_device *dev, int flag);
-static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
-static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
-static struct sk_buff *de4x5_get_cache(struct net_device *dev);
-static void de4x5_setup_intr(struct net_device *dev);
-static void de4x5_init_connection(struct net_device *dev);
-static int de4x5_reset_phy(struct net_device *dev);
-static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
-static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
-static int test_tp(struct net_device *dev, s32 msec);
-static int EISA_signature(char *name, struct device *device);
-static void PCI_signature(char *name, struct de4x5_private *lp);
-static void DevicePresent(struct net_device *dev, u_long iobase);
-static void enet_addr_rst(u_long aprom_addr);
-static int de4x5_bad_srom(struct de4x5_private *lp);
-static short srom_rd(u_long address, u_char offset);
-static void srom_latch(u_int command, u_long address);
-static void srom_command(u_int command, u_long address);
-static void srom_address(u_int command, u_long address, u_char offset);
-static short srom_data(u_int command, u_long address);
-/*static void srom_busy(u_int command, u_long address);*/
-static void sendto_srom(u_int command, u_long addr);
-static int getfrom_srom(u_long addr);
-static int srom_map_media(struct net_device *dev);
-static int srom_infoleaf_info(struct net_device *dev);
-static void srom_init(struct net_device *dev);
-static void srom_exec(struct net_device *dev, u_char *p);
-static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
-static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
-static int mii_rdata(u_long ioaddr);
-static void mii_wdata(int data, int len, u_long ioaddr);
-static void mii_ta(u_long rw, u_long ioaddr);
-static int mii_swap(int data, int len);
-static void mii_address(u_char addr, u_long ioaddr);
-static void sendto_mii(u32 command, int data, u_long ioaddr);
-static int getfrom_mii(u32 command, u_long ioaddr);
-static int mii_get_oui(u_char phyaddr, u_long ioaddr);
-static int mii_get_phy(struct net_device *dev);
-static void SetMulticastFilter(struct net_device *dev);
-static int get_hw_addr(struct net_device *dev);
-static void srom_repair(struct net_device *dev, int card);
-static int test_bad_enet(struct net_device *dev, int status);
-static int an_exception(struct de4x5_private *lp);
-static char *build_setup_frame(struct net_device *dev, int mode);
-static void disable_ast(struct net_device *dev);
-static long de4x5_switch_mac_port(struct net_device *dev);
-static int gep_rd(struct net_device *dev);
-static void gep_wr(s32 data, struct net_device *dev);
-static void yawn(struct net_device *dev, int state);
-static void de4x5_parse_params(struct net_device *dev);
-static void de4x5_dbg_open(struct net_device *dev);
-static void de4x5_dbg_mii(struct net_device *dev, int k);
-static void de4x5_dbg_media(struct net_device *dev);
-static void de4x5_dbg_srom(struct de4x5_srom *p);
-static void de4x5_dbg_rx(struct sk_buff *skb, int len);
-static int dc21041_infoleaf(struct net_device *dev);
-static int dc21140_infoleaf(struct net_device *dev);
-static int dc21142_infoleaf(struct net_device *dev);
-static int dc21143_infoleaf(struct net_device *dev);
-static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
-
-/*
-** Note now that module autoprobing is allowed under EISA and PCI. The
-** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
-** to "do the right thing".
-*/
-
-static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
-
-module_param_hw(io, int, ioport, 0);
-module_param(de4x5_debug, int, 0);
-module_param(dec_only, int, 0);
-module_param(args, charp, 0);
-
-MODULE_PARM_DESC(io, "de4x5 I/O base address");
-MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
-MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
-MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
-MODULE_LICENSE("GPL");
-
-/*
-** List the SROM infoleaf functions and chipsets
-*/
-struct InfoLeaf {
- int chipset;
- int (*fn)(struct net_device *);
-};
-static struct InfoLeaf infoleaf_array[] = {
- {DC21041, dc21041_infoleaf},
- {DC21140, dc21140_infoleaf},
- {DC21142, dc21142_infoleaf},
- {DC21143, dc21143_infoleaf}
-};
-#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
-
-/*
-** List the SROM info block functions
-*/
-static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
- type0_infoblock,
- type1_infoblock,
- type2_infoblock,
- type3_infoblock,
- type4_infoblock,
- type5_infoblock,
- compact_infoblock
-};
-
-#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
-
-/*
-** Miscellaneous defines...
-*/
-#define RESET_DE4X5 {\
- int i;\
- i=inl(DE4X5_BMR);\
- mdelay(1);\
- outl(i | BMR_SWR, DE4X5_BMR);\
- mdelay(1);\
- outl(i, DE4X5_BMR);\
- mdelay(1);\
- for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
- mdelay(1);\
-}
-
-#define PHY_HARD_RESET {\
- outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
- mdelay(1); /* Assert for 1ms */\
- outl(0x00, DE4X5_GEP);\
- mdelay(2); /* Wait for 2ms */\
-}
-
-static const struct net_device_ops de4x5_netdev_ops = {
- .ndo_open = de4x5_open,
- .ndo_stop = de4x5_close,
- .ndo_start_xmit = de4x5_queue_pkt,
- .ndo_get_stats = de4x5_get_stats,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_siocdevprivate = de4x5_siocdevprivate,
- .ndo_set_mac_address= eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static int
-de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
-{
- char name[DE4X5_NAME_LENGTH + 1];
- struct de4x5_private *lp = netdev_priv(dev);
- struct pci_dev *pdev = NULL;
- int i, status=0;
-
- dev_set_drvdata(gendev, dev);
-
- /* Ensure we're not sleeping */
- if (lp->bus == EISA) {
- outb(WAKEUP, PCI_CFPM);
- } else {
- pdev = to_pci_dev (gendev);
- pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
- }
- mdelay(10);
-
- RESET_DE4X5;
-
- if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
- return -ENXIO; /* Hardware could not reset */
- }
-
- /*
- ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
- */
- lp->useSROM = false;
- if (lp->bus == PCI) {
- PCI_signature(name, lp);
- } else {
- EISA_signature(name, gendev);
- }
-
- if (*name == '\0') { /* Not found a board signature */
- return -ENXIO;
- }
-
- dev->base_addr = iobase;
- printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
-
- status = get_hw_addr(dev);
- printk(", h/w address %pM\n", dev->dev_addr);
-
- if (status != 0) {
- printk(" which has an Ethernet PROM CRC error.\n");
- return -ENXIO;
- } else {
- skb_queue_head_init(&lp->cache.queue);
- lp->cache.gepc = GEP_INIT;
- lp->asBit = GEP_SLNK;
- lp->asPolarity = GEP_SLNK;
- lp->asBitValid = ~0;
- lp->timeout = -1;
- lp->gendev = gendev;
- spin_lock_init(&lp->lock);
- timer_setup(&lp->timer, de4x5_ast, 0);
- de4x5_parse_params(dev);
-
- /*
- ** Choose correct autosensing in case someone messed up
- */
- lp->autosense = lp->params.autosense;
- if (lp->chipset != DC21140) {
- if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
- lp->params.autosense = TP;
- }
- if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
- lp->params.autosense = BNC;
- }
- }
- lp->fdx = lp->params.fdx;
- sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
-
- lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
-#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
- lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
-#endif
- lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
- &lp->dma_rings, GFP_ATOMIC);
- if (lp->rx_ring == NULL) {
- return -ENOMEM;
- }
-
- lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
-
- /*
- ** Set up the RX descriptor ring (Intels)
- ** Allocate contiguous receive buffers, long word aligned (Alphas)
- */
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
- for (i=0; i<NUM_RX_DESC; i++) {
- lp->rx_ring[i].status = 0;
- lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
- lp->rx_ring[i].buf = 0;
- lp->rx_ring[i].next = 0;
- lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
- }
-
-#else
- {
- dma_addr_t dma_rx_bufs;
-
- dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
- * sizeof(struct de4x5_desc);
- dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
- lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
- + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
- for (i=0; i<NUM_RX_DESC; i++) {
- lp->rx_ring[i].status = 0;
- lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
- lp->rx_ring[i].buf =
- cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
- lp->rx_ring[i].next = 0;
- lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
- }
-
- }
-#endif
-
- barrier();
-
- lp->rxRingSize = NUM_RX_DESC;
- lp->txRingSize = NUM_TX_DESC;
-
- /* Write the end of list marker to the descriptor lists */
- lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
- lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
-
- /* Tell the adapter where the TX/RX rings are located. */
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- /* Initialise the IRQ mask and Enable/Disable */
- lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
- lp->irq_en = IMR_NIM | IMR_AIM;
-
- /* Create a loopback packet frame for later media probing */
- create_packet(dev, lp->frame, sizeof(lp->frame));
-
- /* Check if the RX overflow bug needs testing for */
- i = lp->cfrv & 0x000000fe;
- if ((lp->chipset == DC21140) && (i == 0x20)) {
- lp->rx_ovf = 1;
- }
-
- /* Initialise the SROM pointers if possible */
- if (lp->useSROM) {
- lp->state = INITIALISED;
- if (srom_infoleaf_info(dev)) {
- dma_free_coherent (gendev, lp->dma_size,
- lp->rx_ring, lp->dma_rings);
- return -ENXIO;
- }
- srom_init(dev);
- }
-
- lp->state = CLOSED;
-
- /*
- ** Check for an MII interface
- */
- if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
- mii_get_phy(dev);
- }
-
- printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
- ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
- }
-
- if (de4x5_debug & DEBUG_VERSION) {
- printk(version);
- }
-
- /* The DE4X5-specific entries in the device structure. */
- SET_NETDEV_DEV(dev, gendev);
- dev->netdev_ops = &de4x5_netdev_ops;
- dev->mem_start = 0;
-
- /* Fill in the generic fields of the device structure. */
- if ((status = register_netdev (dev))) {
- dma_free_coherent (gendev, lp->dma_size,
- lp->rx_ring, lp->dma_rings);
- return status;
- }
-
- /* Let the adapter sleep to save power */
- yawn(dev, SLEEP);
-
- return status;
-}
-
-
-static int
-de4x5_open(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, status = 0;
- s32 omr;
-
- /* Allocate the RX buffers */
- for (i=0; i<lp->rxRingSize; i++) {
- if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
- de4x5_free_rx_buffs(dev);
- return -EAGAIN;
- }
- }
-
- /*
- ** Wake up the adapter
- */
- yawn(dev, WAKEUP);
-
- /*
- ** Re-initialize the DE4X5...
- */
- status = de4x5_init(dev);
- spin_lock_init(&lp->lock);
- lp->state = OPEN;
- de4x5_dbg_open(dev);
-
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
- lp->adapter_name, dev)) {
- printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
- lp->adapter_name, dev)) {
- printk("\n Cannot get IRQ- reconfigure your hardware.\n");
- disable_ast(dev);
- de4x5_free_rx_buffs(dev);
- de4x5_free_tx_buffs(dev);
- yawn(dev, SLEEP);
- lp->state = CLOSED;
- return -EAGAIN;
- } else {
- printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
- printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
- }
- }
-
- lp->interrupt = UNMASK_INTERRUPTS;
- netif_trans_update(dev); /* prevent tx timeout */
-
- START_DE4X5;
-
- de4x5_setup_intr(dev);
-
- if (de4x5_debug & DEBUG_OPEN) {
- printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
- printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
- printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
- printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
- printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
- printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
- printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
- printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
- }
-
- return status;
-}
-
-/*
-** Initialize the DE4X5 operating conditions. NB: a chip problem with the
-** DC21140 requires using perfect filtering mode for that chip. Since I can't
-** see why I'd want > 14 multicast addresses, I have changed all chips to use
-** the perfect filtering mode. Keep the DMA burst length at 8: there seem
-** to be data corruption problems if it is larger (UDP errors seen from a
-** ttcp source).
-*/
-static int
-de4x5_init(struct net_device *dev)
-{
- /* Lock out other processes whilst setting up the hardware */
- netif_stop_queue(dev);
-
- de4x5_sw_reset(dev);
-
- /* Autoconfigure the connected port */
- autoconf_media(dev);
-
- return 0;
-}
-
-static int
-de4x5_sw_reset(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- s32 bmr, omr;
-
- /* Select the MII or SRL port now and RESET the MAC */
- if (!lp->useSROM) {
- if (lp->phy[lp->active].id != 0) {
- lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
- } else {
- lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
- }
- de4x5_switch_mac_port(dev);
- }
-
- /*
- ** Set the programmable burst length to 8 longwords for all the DC21140
- ** Fasternet chips and 4 longwords for all others: DMA errors result
- ** without these values. Cache align 16 long.
- */
- bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
- bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
- outl(bmr, DE4X5_BMR);
-
- omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
- if (lp->chipset == DC21140) {
- omr |= (OMR_SDP | OMR_SB);
- }
- lp->setup_f = PERFECT;
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- lp->rx_new = lp->rx_old = 0;
- lp->tx_new = lp->tx_old = 0;
-
- for (i = 0; i < lp->rxRingSize; i++) {
- lp->rx_ring[i].status = cpu_to_le32(R_OWN);
- }
-
- for (i = 0; i < lp->txRingSize; i++) {
- lp->tx_ring[i].status = cpu_to_le32(0);
- }
-
- barrier();
-
- /* Build the setup frame depending on filtering mode */
- SetMulticastFilter(dev);
-
- load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
- outl(omr|OMR_ST, DE4X5_OMR);
-
- /* Poll for setup frame completion (adapter interrupts are disabled now) */
-
- for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
- mdelay(1);
- if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
- }
- outl(omr, DE4X5_OMR); /* Stop everything! */
-
- if (j == 0) {
- printk("%s: Setup frame timed out, status %08x\n", dev->name,
- inl(DE4X5_STS));
- status = -EIO;
- }
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- lp->tx_old = lp->tx_new;
-
- return status;
-}
-
-/*
-** Writes a socket buffer address to the next available transmit descriptor.
-*/
-static netdev_tx_t
-de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_long flags = 0;
-
- netif_stop_queue(dev);
- if (!lp->tx_enable) /* Cannot send for now */
- goto tx_err;
-
- /*
- ** Clean out the TX ring asynchronously to interrupts - sometimes the
- ** interrupts are lost by delayed descriptor status updates relative to
- ** the irq assertion, especially with a busy PCI bus.
- */
- spin_lock_irqsave(&lp->lock, flags);
- de4x5_tx(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- /* Test if cache is already locked - requeue skb if so */
- if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
- goto tx_err;
-
- /* Transmit descriptor ring full or stale skb */
- if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
- if (lp->interrupt) {
- de4x5_putb_cache(dev, skb); /* Requeue the buffer */
- } else {
- de4x5_put_cache(dev, skb);
- }
- if (de4x5_debug & DEBUG_TX) {
- printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
- }
- } else if (skb->len > 0) {
- /* If we already have stuff queued locally, use that first */
- if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
- de4x5_put_cache(dev, skb);
- skb = de4x5_get_cache(dev);
- }
-
- while (skb && !netif_queue_stopped(dev) &&
- (u_long) lp->tx_skb[lp->tx_new] <= 1) {
- spin_lock_irqsave(&lp->lock, flags);
- netif_stop_queue(dev);
- load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
- lp->stats.tx_bytes += skb->len;
- outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
-
- if (TX_BUFFS_AVAIL) {
- netif_start_queue(dev); /* Another pkt may be queued */
- }
- skb = de4x5_get_cache(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- if (skb) de4x5_putb_cache(dev, skb);
- }
-
- lp->cache.lock = 0;
-
- return NETDEV_TX_OK;
-tx_err:
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
-/*
-** The DE4X5 interrupt handler.
-**
-** I/O Read/Writes through intermediate PCI bridges are never 'posted',
-** so that the asserted interrupt always has some real data to work with -
-** if these I/O accesses are ever changed to memory accesses, ensure the
-** STS write is read immediately to complete the transaction if the adapter
-** is not on bus 0. Lost interrupts can still occur when the PCI bus load
-** is high and descriptor status bits cannot be set before the associated
-** interrupt is asserted and this routine entered.
-*/
-static irqreturn_t
-de4x5_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct de4x5_private *lp;
- s32 imr, omr, sts, limit;
- u_long iobase;
- unsigned int handled = 0;
-
- lp = netdev_priv(dev);
- spin_lock(&lp->lock);
- iobase = dev->base_addr;
-
- DISABLE_IRQs; /* Ensure non re-entrancy */
-
- if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
- printk("%s: Re-entering the interrupt handler.\n", dev->name);
-
- synchronize_irq(dev->irq);
-
- for (limit=0; limit<8; limit++) {
- sts = inl(DE4X5_STS); /* Read IRQ status */
- outl(sts, DE4X5_STS); /* Reset the board interrupts */
-
- if (!(sts & lp->irq_mask)) break;/* All done */
- handled = 1;
-
- if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
- de4x5_rx(dev);
-
- if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
- de4x5_tx(dev);
-
- if (sts & STS_LNF) { /* TP Link has failed */
- lp->irq_mask &= ~IMR_LFM;
- }
-
- if (sts & STS_UNF) { /* Transmit underrun */
- de4x5_txur(dev);
- }
-
- if (sts & STS_SE) { /* Bus Error */
- STOP_DE4X5;
- printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
- dev->name, sts);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
- }
- }
-
- /* Load the TX ring with any locally stored packets */
- if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
- while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
- de4x5_queue_pkt(de4x5_get_cache(dev), dev);
- }
- lp->cache.lock = 0;
- }
-
- lp->interrupt = UNMASK_INTERRUPTS;
- ENABLE_IRQs;
- spin_unlock(&lp->lock);
-
- return IRQ_RETVAL(handled);
-}
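-
-/*
-** Illustrative sketch only: the STS write in the loop above goes through
-** port I/O and is therefore never posted.  If it were ever converted to a
-** memory mapped write, as the header comment warns, a dummy read back
-** would be needed to force completion before the handler continues, e.g.
-**
-**     writel(sts, sts_reg);
-**     (void) readl(sts_reg);
-**
-** where sts_reg stands for a hypothetical ioremap()ed status register.
-*/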
-
-static int
-de4x5_rx(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int entry;
- s32 status;
-
- for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
- entry=lp->rx_new) {
- status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
-
- if (lp->rx_ovf) {
- if (inl(DE4X5_MFC) & MFC_FOCM) {
- de4x5_rx_ovfc(dev);
- break;
- }
- }
-
- if (status & RD_FS) { /* Remember the start of frame */
- lp->rx_old = entry;
- }
-
- if (status & RD_LS) { /* Valid frame status */
- if (lp->tx_enable) lp->linkOK++;
- if (status & RD_ES) { /* There was an error. */
- lp->stats.rx_errors++; /* Update the error stats. */
- if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
- if (status & RD_CE) lp->stats.rx_crc_errors++;
- if (status & RD_OF) lp->stats.rx_fifo_errors++;
- if (status & RD_TL) lp->stats.rx_length_errors++;
- if (status & RD_RF) lp->pktStats.rx_runt_frames++;
- if (status & RD_CS) lp->pktStats.rx_collision++;
- if (status & RD_DB) lp->pktStats.rx_dribble++;
- if (status & RD_OF) lp->pktStats.rx_overflow++;
- } else { /* A valid frame received */
- struct sk_buff *skb;
- short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
- >> 16) - 4;
-
- if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
- printk("%s: Insufficient memory; nuking packet.\n",
- dev->name);
- lp->stats.rx_dropped++;
- } else {
- de4x5_dbg_rx(skb, pkt_len);
-
- /* Push up the protocol stack */
- skb->protocol=eth_type_trans(skb,dev);
- de4x5_local_stats(dev, skb->data, pkt_len);
- netif_rx(skb);
-
- /* Update stats */
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
- }
- }
-
- /* Change buffer ownership for this frame, back to the adapter */
- for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
- lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
- barrier();
- }
- lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
- barrier();
- }
-
- /*
- ** Update entry information
- */
- lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
- }
-
- return 0;
-}
-
-static inline void
-de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
-{
- dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
- le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
- DMA_TO_DEVICE);
- if ((u_long) lp->tx_skb[entry] > 1)
- dev_kfree_skb_irq(lp->tx_skb[entry]);
- lp->tx_skb[entry] = NULL;
-}
-
-/*
-** Buffer sent - check for TX buffer errors.
-*/
-static int
-de4x5_tx(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int entry;
- s32 status;
-
- for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
- status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
- if (status < 0) { /* Buffer not sent yet */
- break;
- } else if (status != 0x7fffffff) { /* Not setup frame */
- if (status & TD_ES) { /* An error happened */
- lp->stats.tx_errors++;
- if (status & TD_NC) lp->stats.tx_carrier_errors++;
- if (status & TD_LC) lp->stats.tx_window_errors++;
- if (status & TD_UF) lp->stats.tx_fifo_errors++;
- if (status & TD_EC) lp->pktStats.excessive_collisions++;
- if (status & TD_DE) lp->stats.tx_aborted_errors++;
-
- if (TX_PKT_PENDING) {
- outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
- }
- } else { /* Packet sent */
- lp->stats.tx_packets++;
- if (lp->tx_enable) lp->linkOK++;
- }
- /* Update the collision counter */
- lp->stats.collisions += ((status & TD_EC) ? 16 :
- ((status & TD_CC) >> 3));
-
- /* Free the buffer. */
- if (lp->tx_skb[entry] != NULL)
- de4x5_free_tx_buff(lp, entry);
- }
-
- /* Update all the pointers */
- lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
- }
-
- /* Any resources available? */
- if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
- if (lp->interrupt)
- netif_wake_queue(dev);
- else
- netif_start_queue(dev);
- }
-
- return 0;
-}
-
-static void
-de4x5_ast(struct timer_list *t)
-{
- struct de4x5_private *lp = from_timer(lp, t, timer);
- struct net_device *dev = dev_get_drvdata(lp->gendev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int dt;
-
- if (lp->useSROM)
- next_tick = srom_autoconf(dev);
- else if (lp->chipset == DC21140)
- next_tick = dc21140m_autoconf(dev);
- else if (lp->chipset == DC21041)
- next_tick = dc21041_autoconf(dev);
- else if (lp->chipset == DC21040)
- next_tick = dc21040_autoconf(dev);
- lp->linkOK = 0;
-
- dt = (next_tick * HZ) / 1000;
-
- if (!dt)
- dt = 1;
-
- mod_timer(&lp->timer, jiffies + dt);
-}
-
-static int
-de4x5_txur(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int omr;
-
- omr = inl(DE4X5_OMR);
- if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
- omr &= ~(OMR_ST|OMR_SR);
- outl(omr, DE4X5_OMR);
- while (inl(DE4X5_STS) & STS_TS);
- if ((omr & OMR_TR) < OMR_TR) {
- omr += 0x4000;
- } else {
- omr |= OMR_SF;
- }
- outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
- }
-
- return 0;
-}
-
-static int
-de4x5_rx_ovfc(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int omr;
-
- omr = inl(DE4X5_OMR);
- outl(omr & ~OMR_SR, DE4X5_OMR);
- while (inl(DE4X5_STS) & STS_RS);
-
- for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
- lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
- lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
- }
-
- outl(omr, DE4X5_OMR);
-
- return 0;
-}
-
-static int
-de4x5_close(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 imr, omr;
-
- disable_ast(dev);
-
- netif_stop_queue(dev);
-
- if (de4x5_debug & DEBUG_CLOSE) {
- printk("%s: Shutting down ethercard, status was %8.8x.\n",
- dev->name, inl(DE4X5_STS));
- }
-
- /*
- ** We stop the DE4X5 here... mask interrupts and stop TX & RX
- */
- DISABLE_IRQs;
- STOP_DE4X5;
-
- /* Free the associated irq */
- free_irq(dev->irq, dev);
- lp->state = CLOSED;
-
- /* Free any socket buffers */
- de4x5_free_rx_buffs(dev);
- de4x5_free_tx_buffs(dev);
-
- /* Put the adapter to sleep to save power */
- yawn(dev, SLEEP);
-
- return 0;
-}
-
-static struct net_device_stats *
-de4x5_get_stats(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
-
- return &lp->stats;
-}
-
-static void
-de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
- if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
- lp->pktStats.bins[i]++;
- i = DE4X5_PKT_STAT_SZ;
- }
- }
- if (is_multicast_ether_addr(buf)) {
- if (is_broadcast_ether_addr(buf)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(buf, dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
-
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
- }
-}
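-
-/*
-** Worked example (illustrative): with DE4X5_PKT_BIN_SZ == 128 a 300 byte
-** packet fails the i == 1 (128) and i == 2 (256) tests, passes i == 3
-** (300 < 384) and increments pktStats.bins[3].  bins[0] counts every
-** packet and, when it wraps to zero, resets the whole private stats block.
-*/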
-
-/*
-** Removes the TD_IC flag from previous descriptor to improve TX performance.
-** If the flag is changed on a descriptor that is being read by the hardware,
-** I assume PCI transaction ordering will mean you are either successful or
-** just miss asserting the change to the hardware. Anyway you're messing with
-** a descriptor you don't own, but this shouldn't kill the chip provided
-** the hardware only ever reads, and never writes, that part of the descriptor.
-*/
-static void
-load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
- dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
-
- lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
- lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
- lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
- lp->tx_skb[lp->tx_new] = skb;
- lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
- barrier();
-
- lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
- barrier();
-}
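-
-/*
-** For illustration: 'entry' above indexes the descriptor filled in on the
-** previous call, wrapping at the ring boundary (with txRingSize == 32 and
-** tx_new == 0, entry == 31).  Clearing TD_IC there means at most the most
-** recently queued descriptor raises a completion interrupt.
-*/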
-
-/*
-** Set or clear the multicast filter for this adaptor.
-*/
-static void
-set_multicast_list(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- /* First, double check that the adapter is open */
- if (lp->state == OPEN) {
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- u32 omr;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PR;
- outl(omr, DE4X5_OMR);
- } else {
- SetMulticastFilter(dev);
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
- SETUP_FRAME_LEN, (struct sk_buff *)1);
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_trans_update(dev); /* prevent tx timeout */
- }
- }
-}
-
-/*
-** Calculate the hash code and update the logical address filter
-** from a list of ethernet multicast addresses.
-** Little endian crc one liner from Matt Thomas, DEC.
-*/
-static void
-SetMulticastFilter(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- u_long iobase = dev->base_addr;
- int i, bit, byte;
- u16 hashcode;
- u32 omr, crc;
- char *pa;
- unsigned char *addrs;
-
- omr = inl(DE4X5_OMR);
- omr &= ~(OMR_PR | OMR_PM);
- pa = build_setup_frame(dev, ALL); /* Build the basic frame */
-
- if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
- omr |= OMR_PM; /* Pass all multicasts */
- } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */
-
- byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
- bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
-
- byte <<= 1; /* calc offset into setup frame */
- if (byte & 0x02) {
- byte -= 1;
- }
- lp->setup_frame[byte] |= bit;
- }
- } else { /* Perfect filtering */
- netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
- for (i=0; i<ETH_ALEN; i++) {
- *(pa + (i&1)) = *addrs++;
- if (i & 0x01) pa += 4;
- }
- }
- }
- outl(omr, DE4X5_OMR);
-}
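-
-/*
-** Worked example of the hash mapping above (illustrative): an address
-** whose little endian CRC gives hashcode == 0x00b yields
-**
-**     byte = 0x00b >> 3        ==  1
-**     bit  = 1 << (0x00b & 7)  ==  0x08
-**     byte <<= 1               ==  2; (2 & 0x02) so byte becomes 1
-**
-** and setup_frame[1] |= 0x08.  The doubling and adjustment pack the
-** 512 bit hash table into the low 16 bits of each setup frame longword.
-*/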
-
-#ifdef CONFIG_EISA
-
-static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
-
-static int de4x5_eisa_probe(struct device *gendev)
-{
- struct eisa_device *edev;
- u_long iobase;
- u_char irq, regval;
- u_short vendor;
- u32 cfid;
- int status, device;
- struct net_device *dev;
- struct de4x5_private *lp;
-
- edev = to_eisa_device (gendev);
- iobase = edev->base_addr;
-
- if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
- return -EBUSY;
-
- if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
- DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
- status = -EBUSY;
- goto release_reg_1;
- }
-
- if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
- status = -ENOMEM;
- goto release_reg_2;
- }
- lp = netdev_priv(dev);
-
- cfid = (u32) inl(PCI_CFID);
- lp->cfrv = (u_short) inl(PCI_CFRV);
- device = (cfid >> 8) & 0x00ffff00;
- vendor = (u_short) cfid;
-
- /* Read the EISA Configuration Registers */
- regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
-#ifdef CONFIG_ALPHA
- /* Looks like the Jensen firmware (rev 2.2) doesn't really
- * care about the EISA configuration, and thus doesn't
- * configure the PLX bridge properly. Oh well... Simply mimic
- * the EISA config file to sort it out. */
-
- /* EISA REG1: Assert DecChip 21040 HW Reset */
- outb (ER1_IAM | 1, EISA_REG1);
- mdelay (1);
-
- /* EISA REG1: Deassert DecChip 21040 HW Reset */
- outb (ER1_IAM, EISA_REG1);
- mdelay (1);
-
- /* EISA REG3: R/W Burst Transfer Enable */
- outb (ER3_BWE | ER3_BRE, EISA_REG3);
-
- /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
- outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
-#endif
- irq = de4x5_irq[(regval >> 1) & 0x03];
-
- if (is_DC2114x) {
- device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
- }
- lp->chipset = device;
- lp->bus = EISA;
-
- /* Write the PCI Configuration Registers */
- outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
- outl(0x00006000, PCI_CFLT);
- outl(iobase, PCI_CBIO);
-
- DevicePresent(dev, EISA_APROM);
-
- dev->irq = irq;
-
- if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
- return 0;
- }
-
- free_netdev (dev);
- release_reg_2:
- release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
- release_reg_1:
- release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
-
- return status;
-}
-
-static int de4x5_eisa_remove(struct device *device)
-{
- struct net_device *dev;
- u_long iobase;
-
- dev = dev_get_drvdata(device);
- iobase = dev->base_addr;
-
- unregister_netdev (dev);
- free_netdev (dev);
- release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
- release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
-
- return 0;
-}
-
-static const struct eisa_device_id de4x5_eisa_ids[] = {
- { "DEC4250", 0 }, /* 0 is the board name index... */
- { "" }
-};
-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
-
-static struct eisa_driver de4x5_eisa_driver = {
- .id_table = de4x5_eisa_ids,
- .driver = {
- .name = "de4x5",
- .probe = de4x5_eisa_probe,
- .remove = de4x5_eisa_remove,
- }
-};
-#endif
-
-#ifdef CONFIG_PCI
-
-/*
-** This function searches the current bus (which is >0) for a DECchip with an
-** SROM, so that in multiport cards that have one SROM shared between multiple
-** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
-** For single port cards this is a time waster...
-*/
-static void
-srom_search(struct net_device *dev, struct pci_dev *pdev)
-{
- u_char pb;
- u_short vendor, status;
- u_int irq = 0, device;
- u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int i, j;
- struct de4x5_private *lp = netdev_priv(dev);
- struct pci_dev *this_dev;
-
- list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
- vendor = this_dev->vendor;
- device = this_dev->device << 8;
- if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
-
- /* Get the chip configuration revision register */
- pb = this_dev->bus->number;
-
- /* Set the device number information */
- lp->device = PCI_SLOT(this_dev->devfn);
- lp->bus_num = pb;
-
- /* Set the chipset information */
- if (is_DC2114x) {
- device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
- ? DC21142 : DC21143);
- }
- lp->chipset = device;
-
- /* Get the board I/O address (64 bits on sparc64) */
- iobase = pci_resource_start(this_dev, 0);
-
- /* Fetch the IRQ to be used */
- irq = this_dev->irq;
- if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
-
- /* Check if I/O accesses are enabled */
- pci_read_config_word(this_dev, PCI_COMMAND, &status);
- if (!(status & PCI_COMMAND_IO)) continue;
-
- /* Search for a valid SROM attached to this DECchip */
- DevicePresent(dev, DE4X5_APROM);
- for (j=0, i=0; i<ETH_ALEN; i++) {
- j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
- }
- if (j != 0 && j != 6 * 0xff) {
- last.chipset = device;
- last.bus = pb;
- last.irq = irq;
- for (i=0; i<ETH_ALEN; i++) {
- last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
- }
- return;
- }
- }
-}
-
-/*
-** PCI bus I/O device probe
-** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
-** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be
-** enabled by the user first in the set up utility. Hence we just check for
-** enabled features and silently ignore the card if they're not.
-**
-** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering
-** bit. Here, check for I/O accesses and then set BM. If you put the card in
-** a non BM slot, you're on your own (and complain to the PC vendor that your
-** PC doesn't conform to the PCI standard)!
-**
-** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x
-** kernels use the V0.535[n] drivers.
-*/
-
-static int de4x5_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- u_char pb, pbus = 0, dev_num, dnum = 0, timer;
- u_short vendor, status;
- u_int irq = 0, device;
- u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int error;
- struct net_device *dev;
- struct de4x5_private *lp;
-
- dev_num = PCI_SLOT(pdev->devfn);
- pb = pdev->bus->number;
-
- if (io) { /* probe a single PCI device */
- pbus = (u_short)(io >> 8);
- dnum = (u_short)(io & 0xff);
- if ((pbus != pb) || (dnum != dev_num))
- return -ENODEV;
- }
-
- vendor = pdev->vendor;
- device = pdev->device << 8;
- if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
- return -ENODEV;
-
- /* Ok, the device seems to be for us. */
- if ((error = pci_enable_device (pdev)))
- return error;
-
- if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
- error = -ENOMEM;
- goto disable_dev;
- }
-
- lp = netdev_priv(dev);
- lp->bus = PCI;
- lp->bus_num = 0;
-
- /* Search for an SROM on this bus */
- if (lp->bus_num != pb) {
- lp->bus_num = pb;
- srom_search(dev, pdev);
- }
-
- /* Get the chip configuration revision register */
- lp->cfrv = pdev->revision;
-
- /* Set the device number information */
- lp->device = dev_num;
- lp->bus_num = pb;
-
- /* Set the chipset information */
- if (is_DC2114x) {
- device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
- }
- lp->chipset = device;
-
- /* Get the board I/O address (64 bits on sparc64) */
- iobase = pci_resource_start(pdev, 0);
-
- /* Fetch the IRQ to be used */
- irq = pdev->irq;
- if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- /* Check if I/O accesses and Bus Mastering are enabled */
- pci_read_config_word(pdev, PCI_COMMAND, &status);
-#ifdef __powerpc__
- if (!(status & PCI_COMMAND_IO)) {
- status |= PCI_COMMAND_IO;
- pci_write_config_word(pdev, PCI_COMMAND, status);
- pci_read_config_word(pdev, PCI_COMMAND, &status);
- }
-#endif /* __powerpc__ */
- if (!(status & PCI_COMMAND_IO)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- if (!(status & PCI_COMMAND_MASTER)) {
- status |= PCI_COMMAND_MASTER;
- pci_write_config_word(pdev, PCI_COMMAND, status);
- pci_read_config_word(pdev, PCI_COMMAND, &status);
- }
- if (!(status & PCI_COMMAND_MASTER)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- /* Check the latency timer for values >= 0x60 */
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
- if (timer < 0x60) {
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
- }
-
- DevicePresent(dev, DE4X5_APROM);
-
- if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
- error = -EBUSY;
- goto free_dev;
- }
-
- dev->irq = irq;
-
- if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
- goto release;
- }
-
- return 0;
-
- release:
- release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
- free_dev:
- free_netdev (dev);
- disable_dev:
- pci_disable_device (pdev);
- return error;
-}
-
-static void de4x5_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev;
- u_long iobase;
-
- dev = pci_get_drvdata(pdev);
- iobase = dev->base_addr;
-
- unregister_netdev (dev);
- free_netdev (dev);
- release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
- pci_disable_device (pdev);
-}
-
-static const struct pci_device_id de4x5_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { },
-};
-
-static struct pci_driver de4x5_pci_driver = {
- .name = "de4x5",
- .id_table = de4x5_pci_tbl,
- .probe = de4x5_pci_probe,
- .remove = de4x5_pci_remove,
-};
-
-#endif
-
-/*
-** Auto configure the media here rather than setting the port at compile
-** time. This routine is called by de4x5_init() and when a loss of media is
-** detected (excessive collisions, loss of carrier, no carrier or link fail
-** [TP] or no recent receive activity) to check whether the user has been
-** sneaky and changed the port on us.
-*/
-static int
-autoconf_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- disable_ast(dev);
-
- lp->c_media = AUTO; /* Bogus last media */
- inl(DE4X5_MFC); /* Zero the lost frames counter */
- lp->media = INIT;
- lp->tcount = 0;
-
- de4x5_ast(&lp->timer);
-
- return lp->media;
-}
-
-/*
-** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
-** from BNC as the port has a jumper to set thick or thin wire. When set for
-** BNC, the BNC port will indicate activity if it's not terminated correctly.
-** The only way to test for that is to place a loopback packet onto the
-** network and watch for errors. Since we're messing with the interrupt mask
-** register, disable the board interrupts and do not allow any more packets to
-** be queued to the hardware. Re-enable everything only when the media is
-** found.
-** I may have to "age out" locally queued packets so that the higher layer
-** timeouts don't effectively duplicate packets on the network.
-*/
-static int
-dc21040_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int next_tick = DE4X5_AUTOSENSE_MS;
- s32 imr;
-
- switch (lp->media) {
- case INIT:
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->timeout = -1;
- de4x5_save_skbs(dev);
- if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
- lp->media = TP;
- } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
- lp->media = BNC_AUI;
- } else if (lp->autosense == EXT_SIA) {
- lp->media = EXT_SIA;
- } else {
- lp->media = NC;
- }
- lp->local_state = 0;
- next_tick = dc21040_autoconf(dev);
- break;
-
- case TP:
- next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
- TP_SUSPECT, test_tp);
- break;
-
- case TP_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
- break;
-
- case BNC:
- case AUI:
- case BNC_AUI:
- next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
- BNC_AUI_SUSPECT, ping_media);
- break;
-
- case BNC_AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
- break;
-
- case EXT_SIA:
- next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
- NC, EXT_SIA_SUSPECT, ping_media);
- break;
-
- case EXT_SIA_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
- break;
-
- case NC:
- /* default to TP for all */
- reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
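-
-/*
-** Illustrative trace of the state machine above: with autosense == AUTO
-** the media cycle is
-**
-**     INIT -> TP -> BNC_AUI -> EXT_SIA -> NC -> INIT -> ...
-**
-** each state handing on to the next via dc21040_state() when its probe
-** function (test_tp or ping_media) reports a bad link, while the
-** *_SUSPECT states re-test an established medium and fall back to INIT
-** if it has really gone away.
-*/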
-
-static int
-dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
- int next_state, int suspect_state,
- int (*fn)(struct net_device *, int))
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int linkBad;
-
- switch (lp->local_state) {
- case 0:
- reset_init_sia(dev, csr13, csr14, csr15);
- lp->local_state++;
- next_tick = 500;
- break;
-
- case 1:
- if (!lp->tx_enable) {
- linkBad = fn(dev, timeout);
- if (linkBad < 0) {
- next_tick = linkBad & ~TIMER_CB;
- } else {
- if (linkBad && (lp->autosense == AUTO)) {
- lp->local_state = 0;
- lp->media = next_state;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = suspect_state;
- next_tick = 3000;
- }
- break;
- }
-
- return next_tick;
-}
-
-static int
-de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
- int (*fn)(struct net_device *, int),
- int (*asfn)(struct net_device *))
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int linkBad;
-
- switch (lp->local_state) {
- case 1:
- if (lp->linkOK) {
- lp->media = prev_state;
- } else {
- lp->local_state++;
- next_tick = asfn(dev);
- }
- break;
-
- case 2:
- linkBad = fn(dev, timeout);
- if (linkBad < 0) {
- next_tick = linkBad & ~TIMER_CB;
- } else if (!linkBad) {
- lp->local_state--;
- lp->media = prev_state;
- } else {
- lp->media = INIT;
- lp->tcount++;
- }
- }
-
- return next_tick;
-}
-
-/*
-** Autoconfigure the media when using the DC21041. AUI needs to be tested
-** before BNC, because the BNC port will indicate activity if it's not
-** terminated correctly. The only way to test for that is to place a loopback
-** packet onto the network and watch for errors. Since we're messing with
-** the interrupt mask register, disable the board interrupts and do not allow
-** any more packets to be queued to the hardware. Re-enable everything only
-** when the media is found.
-*/
-static int
-dc21041_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, irqs, irq_mask, imr, omr;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- switch (lp->media) {
- case INIT:
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->timeout = -1;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
- lp->media = TP; /* On chip auto negotiation is broken */
- } else if (lp->autosense == TP) {
- lp->media = TP;
- } else if (lp->autosense == BNC) {
- lp->media = BNC;
- } else if (lp->autosense == AUI) {
- lp->media = AUI;
- } else {
- lp->media = NC;
- }
- lp->local_state = 0;
- next_tick = dc21041_autoconf(dev);
- break;
-
- case TP_NW:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
- outl(omr | OMR_FDX, DE4X5_OMR);
- }
- irqs = STS_LNF | STS_LNP;
- irq_mask = IMR_LFM | IMR_LPM;
- sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts & STS_LNP) {
- lp->media = ANS;
- } else {
- lp->media = AUI;
- }
- next_tick = dc21041_autoconf(dev);
- }
- break;
-
- case ANS:
- if (!lp->tx_enable) {
- irqs = STS_LNP;
- irq_mask = IMR_LPM;
- sts = test_ans(dev, irqs, irq_mask, 3000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
- lp->media = TP;
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = ANS_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case ANS_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
- break;
-
- case TP:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = STS_LNF | STS_LNP;
- irq_mask = IMR_LFM | IMR_LPM;
- sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
- if (inl(DE4X5_SISR) & SISR_NRA) {
- lp->media = AUI; /* Non selected port activity */
- } else {
- lp->media = BNC;
- }
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = TP_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case TP_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
- break;
-
- case AUI:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
- lp->media = BNC;
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = AUI_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
- break;
-
- case BNC:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- lp->local_state++; /* Ensure media connected */
- next_tick = dc21041_autoconf(dev);
- }
- break;
-
- case 1:
- if (!lp->tx_enable) {
- if ((sts = ping_media(dev, 3000)) < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts) {
- lp->local_state = 0;
- lp->media = NC;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = BNC_SUSPECT;
- next_tick = 3000;
- }
- break;
- }
- break;
-
- case BNC_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
- break;
-
- case NC:
- omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
- outl(omr | OMR_FDX, DE4X5_OMR);
- reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
-/*
-** Some autonegotiation chips are broken in that they do not return the
-** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
-** register, except at the first power up negotiation.
-*/
-static int
-dc21140m_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int ana, anlpa, cap, cr, slnk, sr;
- int next_tick = DE4X5_AUTOSENSE_MS;
- u_long imr, omr, iobase = dev->base_addr;
-
- switch(lp->media) {
- case INIT:
- if (lp->timeout < 0) {
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->linkOK = 0;
- de4x5_save_skbs(dev); /* Save non-transmitted skbs */
- }
- if ((next_tick = de4x5_reset_phy(dev)) < 0) {
- next_tick &= ~TIMER_CB;
- } else {
- if (lp->useSROM) {
- if (srom_map_media(dev) < 0) {
- lp->tcount++;
- return next_tick;
- }
- srom_exec(dev, lp->phy[lp->active].gep);
- if (lp->infoblock_media == ANS) {
- ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- }
- } else {
- lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
- SET_10Mb;
- if (lp->autosense == _100Mb) {
- lp->media = _100Mb;
- } else if (lp->autosense == _10Mb) {
- lp->media = _10Mb;
- } else if ((lp->autosense == AUTO) &&
- ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
- ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
- ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- lp->media = ANS;
- } else if (lp->autosense == AUTO) {
- lp->media = SPD_DET;
- } else if (is_spd_100(dev) && is_100_up(dev)) {
- lp->media = _100Mb;
- } else {
- lp->media = NC;
- }
- }
- lp->local_state = 0;
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case ANS:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
- if (cr < 0) {
- next_tick = cr & ~TIMER_CB;
- } else {
- if (cr) {
- lp->local_state = 0;
- lp->media = SPD_DET;
- } else {
- lp->local_state++;
- }
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case 1:
- if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
- next_tick = sr & ~TIMER_CB;
- } else {
- lp->media = SPD_DET;
- lp->local_state = 0;
- if (sr) { /* Success! */
- lp->tmp = MII_SR_ASSC;
- anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
- ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
- (cap = anlpa & MII_ANLPA_TAF & ana)) {
- if (cap & MII_ANA_100M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
- lp->media = _100Mb;
- } else if (cap & MII_ANA_10M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
-
- lp->media = _10Mb;
- }
- }
- } /* Auto Negotiation failed to finish */
- next_tick = dc21140m_autoconf(dev);
- } /* Auto Negotiation failed to start */
- break;
- }
- break;
-
- case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
- if (lp->timeout < 0) {
- lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
- (~gep_rd(dev) & GEP_LNP));
- SET_100Mb_PDET;
- }
- if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
- next_tick = slnk & ~TIMER_CB;
- } else {
- if (is_spd_100(dev) && is_100_up(dev)) {
- lp->media = _100Mb;
- } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
- lp->media = _10Mb;
- } else {
- lp->media = NC;
- }
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case _100Mb: /* Set 100Mb/s */
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_100Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case BNC:
- case AUI:
- case _10Mb: /* Set 10Mb/s */
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_10Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case NC:
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
-/*
-** This routine may be merged into dc21140m_autoconf() sometime as I'm
-** changing how I figure out the media - but trying to keep it backwards
-** compatible with the de500-xa and de500-aa.
-** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
-** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
-** This routine just has to figure out whether 10Mb/s or 100Mb/s is
-** active.
-** When autonegotiation is working, the ANS part searches the SROM for
-** the highest common speed (TP) link that both can run and if that can
-** be full duplex. That infoblock is executed and then the link speed set.
-**
-** Only _10Mb and _100Mb are tested here.
-*/
-static int
-dc2114x_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- switch (lp->media) {
- case INIT:
- if (lp->timeout < 0) {
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->linkOK = 0;
- lp->timeout = -1;
- de4x5_save_skbs(dev); /* Save non-transmitted skbs */
- if (lp->params.autosense & ~AUTO) {
- srom_map_media(dev); /* Fixed media requested */
- if (lp->media != lp->params.autosense) {
- lp->tcount++;
- lp->media = INIT;
- return next_tick;
- }
- lp->media = INIT;
- }
- }
- if ((next_tick = de4x5_reset_phy(dev)) < 0) {
- next_tick &= ~TIMER_CB;
- } else {
- if (lp->autosense == _100Mb) {
- lp->media = _100Mb;
- } else if (lp->autosense == _10Mb) {
- lp->media = _10Mb;
- } else if (lp->autosense == TP) {
- lp->media = TP;
- } else if (lp->autosense == BNC) {
- lp->media = BNC;
- } else if (lp->autosense == AUI) {
- lp->media = AUI;
- } else {
- lp->media = SPD_DET;
- if ((lp->infoblock_media == ANS) &&
- ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
- ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
- ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- lp->media = ANS;
- }
- }
- lp->local_state = 0;
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case ANS:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
- if (cr < 0) {
- next_tick = cr & ~TIMER_CB;
- } else {
- if (cr) {
- lp->local_state = 0;
- lp->media = SPD_DET;
- } else {
- lp->local_state++;
- }
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case 1:
- sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
- if (sr < 0) {
- next_tick = sr & ~TIMER_CB;
- } else {
- lp->media = SPD_DET;
- lp->local_state = 0;
- if (sr) { /* Success! */
- lp->tmp = MII_SR_ASSC;
- anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
- ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
- (cap = anlpa & MII_ANLPA_TAF & ana)) {
- if (cap & MII_ANA_100M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
- lp->media = _100Mb;
- } else if (cap & MII_ANA_10M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
- lp->media = _10Mb;
- }
- }
- } /* Auto Negotiation failed to finish */
- next_tick = dc2114x_autoconf(dev);
- } /* Auto Negotiation failed to start */
- break;
- }
- break;
-
- case AUI:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
- lp->media = BNC;
- next_tick = dc2114x_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = AUI_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
- break;
-
- case BNC:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- lp->local_state++; /* Ensure media connected */
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case 1:
- if (!lp->tx_enable) {
- if ((sts = ping_media(dev, 3000)) < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts) {
- lp->local_state = 0;
- lp->tcount++;
- lp->media = INIT;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = BNC_SUSPECT;
- next_tick = 3000;
- }
- break;
- }
- break;
-
- case BNC_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
- break;
-
- case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
- if (srom_map_media(dev) < 0) {
- lp->tcount++;
- lp->media = INIT;
- return next_tick;
- }
- if (lp->media == _100Mb) {
- if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
- lp->media = SPD_DET;
- return slnk & ~TIMER_CB;
- }
- } else {
- if (wait_for_link(dev) < 0) {
- lp->media = SPD_DET;
- return PDET_LINK_WAIT;
- }
- }
- if (lp->media == ANS) { /* Do MII parallel detection */
- if (is_spd_100(dev)) {
- lp->media = _100Mb;
- } else {
- lp->media = _10Mb;
- }
- next_tick = dc2114x_autoconf(dev);
- } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
- (((lp->media == _10Mb) || (lp->media == TP) ||
- (lp->media == BNC) || (lp->media == AUI)) &&
- is_10_up(dev))) {
- next_tick = dc2114x_autoconf(dev);
- } else {
- lp->tcount++;
- lp->media = INIT;
- }
- break;
-
- case _10Mb:
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_10Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case _100Mb:
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_100Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- default:
- lp->tcount++;
- printk("Huh?: media:%02x\n", lp->media);
- lp->media = INIT;
- break;
- }
-
- return next_tick;
-}
-
-static int
-srom_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- return lp->infoleaf_fn(dev);
-}
-
-/*
-** This mapping keeps the original media codes and FDX flag unchanged.
-** While it isn't strictly necessary, it helps me for the moment...
-** The early return avoids a media state / SROM media space clash.
-*/
-static int
-srom_map_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- lp->fdx = false;
- if (lp->infoblock_media == lp->media)
- return 0;
-
- switch(lp->infoblock_media) {
- case SROM_10BASETF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_10BASET:
- if (lp->params.fdx && !lp->fdx) return -1;
- if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
- lp->media = _10Mb;
- } else {
- lp->media = TP;
- }
- break;
-
- case SROM_10BASE2:
- lp->media = BNC;
- break;
-
- case SROM_10BASE5:
- lp->media = AUI;
- break;
-
- case SROM_100BASETF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_100BASET:
- if (lp->params.fdx && !lp->fdx) return -1;
- lp->media = _100Mb;
- break;
-
- case SROM_100BASET4:
- lp->media = _100Mb;
- break;
-
- case SROM_100BASEFF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_100BASEF:
- if (lp->params.fdx && !lp->fdx) return -1;
- lp->media = _100Mb;
- break;
-
- case ANS:
- lp->media = ANS;
- lp->fdx = lp->params.fdx;
- break;
-
- default:
- printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
- lp->infoblock_media);
- return -1;
- }
-
- return 0;
-}
-
-static void
-de4x5_init_connection(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_long flags = 0;
-
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media; /* Stop scrolling media messages */
- }
-
- spin_lock_irqsave(&lp->lock, flags);
- de4x5_rst_desc_ring(dev);
- de4x5_setup_intr(dev);
- lp->tx_enable = true;
- spin_unlock_irqrestore(&lp->lock, flags);
- outl(POLL_DEMAND, DE4X5_TPD);
-
- netif_wake_queue(dev);
-}
-
-/*
-** General PHY reset function. Some MII devices don't reset correctly
-** since their MII address pins can float at voltages that are dependent
-** on the signal pin use. Do a double reset to ensure a reset.
-*/
-static int
-de4x5_reset_phy(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int next_tick = 0;
-
- if ((lp->useSROM) || (lp->phy[lp->active].id)) {
- if (lp->timeout < 0) {
- if (lp->useSROM) {
- if (lp->phy[lp->active].rst) {
- srom_exec(dev, lp->phy[lp->active].rst);
- srom_exec(dev, lp->phy[lp->active].rst);
- } else if (lp->rst) { /* Type 5 infoblock reset */
- srom_exec(dev, lp->rst);
- srom_exec(dev, lp->rst);
- }
- } else {
- PHY_HARD_RESET;
- }
- if (lp->useMII) {
- mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- }
- if (lp->useMII) {
- next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
- }
- } else if (lp->chipset == DC21140) {
- PHY_HARD_RESET;
- }
-
- return next_tick;
-}
-
-static int
-test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, csr12;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
- reset_init_sia(dev, csr13, csr14, csr15);
- }
-
- /* set up the interrupt mask */
- outl(irq_mask, DE4X5_IMR);
-
- /* clear all pending interrupts */
- sts = inl(DE4X5_STS);
- outl(sts, DE4X5_STS);
-
- /* clear csr12 NRA and SRA bits */
- if ((lp->chipset == DC21041) || lp->useSROM) {
- csr12 = inl(DE4X5_SISR);
- outl(csr12, DE4X5_SISR);
- }
- }
-
- sts = inl(DE4X5_STS) & ~TIMER_CB;
-
- if (!(sts & irqs) && --lp->timeout) {
- sts = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sts;
-}
-
-static int
-test_tp(struct net_device *dev, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int sisr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- }
-
- sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
-
- if (sisr && --lp->timeout) {
- sisr = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sisr;
-}
-
-/*
-** Samples the 100Mb Link State Signal. The sample interval is important
-** because too fast a rate can give erroneous results and confuse the
-** speed sense algorithm.
-*/
-#define SAMPLE_INTERVAL 500 /* ms */
-#define SAMPLE_DELAY 2000 /* ms */
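As a rough worked example of the timing above (illustrative only; 6500 ms is the budget the SPD_DET callers in this file pass in): the first call reserves one SAMPLE_DELAY of 2000 ms, leaving

	(6500 - 2000) / 500 = 9

further samples, i.e. the link state is then polled at most nine times at 500 ms intervals before the probe gives up.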
-static int
-test_for_100Mb(struct net_device *dev, int msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
-
- if (lp->timeout < 0) {
- if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
- if (msec > SAMPLE_DELAY) {
- lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
- gep = SAMPLE_DELAY | TIMER_CB;
- return gep;
- } else {
- lp->timeout = msec/SAMPLE_INTERVAL;
- }
- }
-
- if (lp->phy[lp->active].id || lp->useSROM) {
- gep = is_100_up(dev) | is_spd_100(dev);
- } else {
- gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
- }
- if (!(gep & ret) && --lp->timeout) {
- gep = SAMPLE_INTERVAL | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return gep;
-}
-
-static int
-wait_for_link(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->timeout < 0) {
- lp->timeout = 1;
- }
-
- if (lp->timeout--) {
- return TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return 0;
-}
-
-/*
-** Poll a MII register until the bits selected by 'mask' are all set
-** (pol == true) or all cleared (pol == false), or the timeout expires.
-*/
-static int
-test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int test;
- u_long iobase = dev->base_addr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- }
-
- reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
- test = (reg ^ (pol ? ~0 : 0)) & mask;
-
- if (test && --lp->timeout) {
- reg = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return reg;
-}
-
-static int
-is_spd_100(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int spd;
-
- if (lp->useMII) {
- spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
- spd = ~(spd ^ lp->phy[lp->active].spd.value);
- spd &= lp->phy[lp->active].spd.mask;
- } else if (!lp->useSROM) { /* de500-xa */
- spd = ((~gep_rd(dev)) & GEP_SLNK);
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
-
- spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-
- return spd;
-}
-
-static int
-is_100_up(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->useMII) {
- /* Double read for sticky bits & temporary drops */
- mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
- } else if (!lp->useSROM) { /* de500-xa */
- return (~gep_rd(dev)) & GEP_SLNK;
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
-
- return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-}
-
-static int
-is_10_up(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->useMII) {
- /* Double read for sticky bits & temporary drops */
- mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
- } else if (!lp->useSROM) { /* de500-xa */
- return (~gep_rd(dev)) & GEP_LNP;
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return ((lp->chipset & ~0x00ff) == DC2114x) ?
- (~inl(DE4X5_SISR)&SISR_LS10):
- 0;
-
- return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-}
-
-static int
-is_anc_capable(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
- } else {
- return 0;
- }
-}
-
-/*
-** Send a packet onto the media and watch for send errors that indicate the
-** media is bad or unconnected.
-*/
-static int
-ping_media(struct net_device *dev, int msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int sisr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
-
- lp->tmp = lp->tx_new; /* Remember the ring position */
- load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD);
- }
-
- sisr = inl(DE4X5_SISR);
-
- if ((!(sisr & SISR_NCR)) &&
- ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
- (--lp->timeout)) {
- sisr = 100 | TIMER_CB;
- } else {
- if ((!(sisr & SISR_NCR)) &&
- !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
- lp->timeout) {
- sisr = 0;
- } else {
- sisr = 1;
- }
- lp->timeout = -1;
- }
-
- return sisr;
-}
-
-/*
-** This function does two things: on Intels it kmallocs another buffer to
-** replace the one about to be passed up. On Alphas it kmallocs a buffer
-** into which the packet is copied.
-*/
-static struct sk_buff *
-de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct sk_buff *p;
-
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
- struct sk_buff *ret;
- u_long i=0, tmp;
-
- p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
- if (!p) return NULL;
-
- tmp = virt_to_bus(p->data);
- i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
- skb_reserve(p, i);
- lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
-
- ret = lp->rx_skb[index];
- lp->rx_skb[index] = p;
-
- if ((u_long) ret > 1) {
- skb_put(ret, len);
- }
-
- return ret;
-
-#else
- if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
-
- p = netdev_alloc_skb(dev, len + 2);
- if (!p) return NULL;
-
- skb_reserve(p, 2); /* Align */
- if (index < lp->rx_old) { /* Wrapped buffer */
- short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
- skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
- skb_put_data(p, lp->rx_bufs, len - tlen);
- } else { /* Linear buffer */
- skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
- }
-
- return p;
-#endif
-}
-
-static void
-de4x5_free_rx_buffs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=0; i<lp->rxRingSize; i++) {
- if ((u_long) lp->rx_skb[i] > 1) {
- dev_kfree_skb(lp->rx_skb[i]);
- }
- lp->rx_ring[i].status = 0;
- lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
- }
-}
-
-static void
-de4x5_free_tx_buffs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=0; i<lp->txRingSize; i++) {
- if (lp->tx_skb[i])
- de4x5_free_tx_buff(lp, i);
- lp->tx_ring[i].status = 0;
- }
-
- /* Unload the locally queued packets */
- __skb_queue_purge(&lp->cache.queue);
-}
-
-/*
-** When a user pulls a connection, the DECchip can end up in a
-** 'running - waiting for end of transmission' state. This means that we
-** have to perform a chip soft reset to ensure that we can synchronize
-** the hardware and software and make any media probes using a loopback
-** packet meaningful.
-*/
-static void
-de4x5_save_skbs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 omr;
-
- if (!lp->cache.save_cnt) {
- STOP_DE4X5;
- de4x5_tx(dev); /* Flush any sent skb's */
- de4x5_free_tx_buffs(dev);
- de4x5_cache_state(dev, DE4X5_SAVE_STATE);
- de4x5_sw_reset(dev);
- de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
- lp->cache.save_cnt++;
- START_DE4X5;
- }
-}
-
-static void
-de4x5_rst_desc_ring(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i;
- s32 omr;
-
- if (lp->cache.save_cnt) {
- STOP_DE4X5;
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- lp->rx_new = lp->rx_old = 0;
- lp->tx_new = lp->tx_old = 0;
-
- for (i = 0; i < lp->rxRingSize; i++) {
- lp->rx_ring[i].status = cpu_to_le32(R_OWN);
- }
-
- for (i = 0; i < lp->txRingSize; i++) {
- lp->tx_ring[i].status = cpu_to_le32(0);
- }
-
- barrier();
- lp->cache.save_cnt--;
- START_DE4X5;
- }
-}
-
-static void
-de4x5_cache_state(struct net_device *dev, int flag)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- switch(flag) {
- case DE4X5_SAVE_STATE:
- lp->cache.csr0 = inl(DE4X5_BMR);
- lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
- lp->cache.csr7 = inl(DE4X5_IMR);
- break;
-
- case DE4X5_RESTORE_STATE:
- outl(lp->cache.csr0, DE4X5_BMR);
- outl(lp->cache.csr6, DE4X5_OMR);
- outl(lp->cache.csr7, DE4X5_IMR);
- if (lp->chipset == DC21140) {
- gep_wr(lp->cache.gepc, dev);
- gep_wr(lp->cache.gep, dev);
- } else {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
- lp->cache.csr15);
- }
- break;
- }
-}
-
-static void
-de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- __skb_queue_tail(&lp->cache.queue, skb);
-}
-
-static void
-de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- __skb_queue_head(&lp->cache.queue, skb);
-}
-
-static struct sk_buff *
-de4x5_get_cache(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- return __skb_dequeue(&lp->cache.queue);
-}
-
-/*
-** Check the Auto Negotiation State. Return OK when a link pass interrupt
-** is received and the auto-negotiation status is NWAY OK.
-*/
-static int
-test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, ans;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- outl(irq_mask, DE4X5_IMR);
-
- /* clear all pending interrupts */
- sts = inl(DE4X5_STS);
- outl(sts, DE4X5_STS);
- }
-
- ans = inl(DE4X5_SISR) & SISR_ANS;
- sts = inl(DE4X5_STS) & ~TIMER_CB;
-
- if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
- sts = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sts;
-}
-
-static void
-de4x5_setup_intr(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 imr, sts;
-
- if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
- imr = 0;
- UNMASK_IRQs;
- sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
- outl(sts, DE4X5_STS);
- ENABLE_IRQs;
- }
-}
-
-/*
-** Reset the SIA and load CSR13, CSR14 and CSR15, either from the SROM
-** cached values or from the values supplied by the caller.
-*/
-static void
-reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- RESET_SIA;
- if (lp->useSROM) {
- if (lp->ibn == 3) {
- srom_exec(dev, lp->phy[lp->active].rst);
- srom_exec(dev, lp->phy[lp->active].gep);
- outl(1, DE4X5_SICR);
- return;
- } else {
- csr15 = lp->cache.csr15;
- csr14 = lp->cache.csr14;
- csr13 = lp->cache.csr13;
- outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
- outl(csr15 | lp->cache.gep, DE4X5_SIGR);
- }
- } else {
- outl(csr15, DE4X5_SIGR);
- }
- outl(csr14, DE4X5_STRR);
- outl(csr13, DE4X5_SICR);
-
- mdelay(10);
-}
-
-/*
-** Create a loopback ethernet packet
-*/
-static void
-create_packet(struct net_device *dev, char *frame, int len)
-{
- int i;
- char *buf = frame;
-
- for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
- *buf++ = dev->dev_addr[i];
- }
- for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
- *buf++ = dev->dev_addr[i];
- }
-
- *buf++ = 0; /* Packet length (2 bytes) */
- *buf++ = 1;
-}
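A minimal sketch of the loopback frame laid out above (illustrative only; the offsets follow directly from the writes in create_packet()):

	/* bytes 0-5   destination = dev->dev_addr (the card's own MAC)
	 * bytes 6-11  source      = dev->dev_addr
	 * bytes 12-13 802.3 length field, written as 0x00 0x01 (length 1)
	 * remainder   left as whatever the caller keeps in its frame buffer
	 */

Since destination equals source, the frame is only useful as a self-addressed media probe (see ping_media() above).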
-
-/*
-** Look for a particular board name in the EISA configuration space
-*/
-static int
-EISA_signature(char *name, struct device *device)
-{
- int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
- struct eisa_device *edev;
-
- *name = '\0';
- edev = to_eisa_device (device);
- i = edev->id.driver_data;
-
- if (i >= 0 && i < siglen) {
- strcpy (name, de4x5_signatures[i]);
- status = 1;
- }
-
- return status; /* 1 if a board name was found, 0 otherwise */
-}
-
-/*
-** Look for a particular board name in the PCI configuration space
-*/
-static void
-PCI_signature(char *name, struct de4x5_private *lp)
-{
- int i, siglen = ARRAY_SIZE(de4x5_signatures);
-
- if (lp->chipset == DC21040) {
- strcpy(name, "DE434/5");
- return;
- } else { /* Search for a DEC name in the SROM */
- int tmp = *((char *)&lp->srom + 19) * 3;
- strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
- }
- name[8] = '\0';
- for (i=0; i<siglen; i++) {
- if (strstr(name,de4x5_signatures[i])!=NULL) break;
- }
- if (i == siglen) {
- if (dec_only) {
- *name = '\0';
- } else { /* Use chip name to avoid confusion */
- strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
- ((lp->chipset == DC21041) ? "DC21041" :
- ((lp->chipset == DC21140) ? "DC21140" :
- ((lp->chipset == DC21142) ? "DC21142" :
- ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
- )))))));
- }
- if (lp->chipset != DC21041) {
- lp->useSROM = true; /* card is not recognisably DEC */
- }
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- lp->useSROM = true;
- }
-}
-
-/*
-** Set up the Ethernet PROM counter to the start of the Ethernet address on
-** the DC21040, else read the SROM for the other chips.
-** The SROM may not be present in a multi-MAC card, so first read the
-** MAC address and check for a bad address. If there is a bad one then exit
-** immediately with the prior srom contents intact (the h/w address will
-** be fixed up later).
-*/
-static void
-DevicePresent(struct net_device *dev, u_long aprom_addr)
-{
- int i, j=0;
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->chipset == DC21040) {
- if (lp->bus == EISA) {
- enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
- } else {
- outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
- }
- } else { /* Read new srom */
- u_short tmp;
- __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
- for (i=0; i<(ETH_ALEN>>1); i++) {
- tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
- j += tmp; /* to check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */
- *p = cpu_to_le16(tmp);
- }
- if (j == 0 || j == 3 * 0xffff) {
- /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */
- return;
- }
-
- p = (__le16 *)&lp->srom;
- for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
- tmp = srom_rd(aprom_addr, i);
- *p++ = cpu_to_le16(tmp);
- }
- de4x5_dbg_srom(&lp->srom);
- }
-}
-
-/*
-** Since the write on the Enet PROM register doesn't seem to reset the PROM
-** pointer correctly (at least on my DE425 EISA card), this routine should do
-** it...from depca.c.
-*/
-static void
-enet_addr_rst(u_long aprom_addr)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- } dev;
- short sigLength=0;
- s8 data;
- int i, j;
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
- data = inb(aprom_addr);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) { /* rare case.... */
- j=1;
- } else {
- j=0;
- }
- }
- }
-}
-
-/*
-** For the bad status case with no SROM, add one to the previous address.
-** However, the increment needs to carry backwards in case one or more of
-** the bytes is 0xff. Only the last 3 bytes should be checked,
-** as the first three are invariant - assigned to an organisation.
-*/
-static int
-get_hw_addr(struct net_device *dev)
-{
- u_long iobase = dev->base_addr;
- int broken, i, k, tmp, status = 0;
- u_short j,chksum;
- struct de4x5_private *lp = netdev_priv(dev);
- u8 addr[ETH_ALEN];
-
- broken = de4x5_bad_srom(lp);
-
- for (i=0,k=0,j=0;j<3;j++) {
- k <<= 1;
- if (k > 0xffff) k-=0xffff;
-
- if (lp->bus == PCI) {
- if (lp->chipset == DC21040) {
- while ((tmp = inl(DE4X5_APROM)) < 0);
- k += (u_char) tmp;
- addr[i++] = (u_char) tmp;
- while ((tmp = inl(DE4X5_APROM)) < 0);
- k += (u_short) (tmp << 8);
- addr[i++] = (u_char) tmp;
- } else if (!broken) {
- addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- } else if ((broken == SMC) || (broken == ACCTON)) {
- addr[i] = *((u_char *)&lp->srom + i); i++;
- addr[i] = *((u_char *)&lp->srom + i); i++;
- }
- } else {
- k += (u_char) (tmp = inb(EISA_APROM));
- addr[i++] = (u_char) tmp;
- k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
- addr[i++] = (u_char) tmp;
- }
-
- if (k > 0xffff) k-=0xffff;
- }
- if (k == 0xffff) k=0;
-
- eth_hw_addr_set(dev, addr);
-
- if (lp->bus == PCI) {
- if (lp->chipset == DC21040) {
- while ((tmp = inl(DE4X5_APROM)) < 0);
- chksum = (u_char) tmp;
- while ((tmp = inl(DE4X5_APROM)) < 0);
- chksum |= (u_short) (tmp << 8);
- if ((k != chksum) && (dec_only)) status = -1;
- }
- } else {
- chksum = (u_char) inb(EISA_APROM);
- chksum |= (u_short) (inb(EISA_APROM) << 8);
- if ((k != chksum) && (dec_only)) status = -1;
- }
-
- /* If possible, try to fix a broken card - SMC only so far */
- srom_repair(dev, broken);
-
-#ifdef CONFIG_PPC_PMAC
- /*
- ** If the address starts with 00 a0, we have to bit-reverse
- ** each byte of the address.
- */
- if ( machine_is(powermac) &&
- (dev->dev_addr[0] == 0) &&
- (dev->dev_addr[1] == 0xa0) )
- {
- for (i = 0; i < ETH_ALEN; ++i)
- {
- int x = dev->dev_addr[i];
- x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
- x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
- addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
- }
- eth_hw_addr_set(dev, addr);
- }
-#endif /* CONFIG_PPC_PMAC */
-
- /* Test for a bad enet address */
- status = test_bad_enet(dev, status);
-
- return status;
-}
-
-/*
-** Test for enet addresses in the first 32 bytes.
-*/
-static int
-de4x5_bad_srom(struct de4x5_private *lp)
-{
- int i, status = 0;
-
- for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
- if (!memcmp(&lp->srom, &enet_det[i], 3) &&
- !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
- if (i == 0) {
- status = SMC;
- } else if (i == 1) {
- status = ACCTON;
- }
- break;
- }
- }
-
- return status;
-}
-
-static void
-srom_repair(struct net_device *dev, int card)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- switch(card) {
- case SMC:
- memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
- memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
- memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
- lp->useSROM = true;
- break;
- }
-}
-
-/*
-** Assume that the irqs do not follow the PCI spec - this seems
-** to be true so far (2 for 2).
-*/
-static int
-test_bad_enet(struct net_device *dev, int status)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i, tmp;
-
- for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
- if ((tmp == 0) || (tmp == 0x5fa)) {
- if ((lp->chipset == last.chipset) &&
- (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
- eth_addr_inc(last.addr);
- eth_hw_addr_set(dev, last.addr);
-
- if (!an_exception(lp)) {
- dev->irq = last.irq;
- }
-
- status = 0;
- }
- } else if (!status) {
- last.chipset = lp->chipset;
- last.bus = lp->bus_num;
- last.irq = dev->irq;
- for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
- }
-
- return status;
-}
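The "add one backwards" carry described before get_hw_addr() is performed here by the generic eth_addr_inc() call; a minimal sketch of that idea, limited to the three NIC-specific octets the comment mentions, might look like the following (hypothetical helper name, not part of the driver; assumes ETH_ALEN and u8 from the usual kernel headers):

	/* Bump a MAC address starting from its last octet, carrying into the
	 * next octet whenever 0xff wraps to 0x00; the OUI bytes are untouched.
	 */
	static void example_oui_preserving_inc(u8 addr[ETH_ALEN])
	{
		int i;

		for (i = ETH_ALEN - 1; i >= ETH_ALEN / 2; i--)
			if (++addr[i] != 0)	/* no wrap, so no further carry */
				break;
	}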
-
-/*
-** List of board exceptions with correctly wired IRQs
-*/
-static int
-an_exception(struct de4x5_private *lp)
-{
- if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
- (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
- return -1;
- }
-
- return 0;
-}
-
-/*
-** SROM Read
-*/
-static short
-srom_rd(u_long addr, u_char offset)
-{
- sendto_srom(SROM_RD | SROM_SR, addr);
-
- srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
- srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
- srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
-
- return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
-}
-
-static void
-srom_latch(u_int command, u_long addr)
-{
- sendto_srom(command, addr);
- sendto_srom(command | DT_CLK, addr);
- sendto_srom(command, addr);
-}
-
-static void
-srom_command(u_int command, u_long addr)
-{
- srom_latch(command, addr);
- srom_latch(command, addr);
- srom_latch((command & 0x0000ff00) | DT_CS, addr);
-}
-
-static void
-srom_address(u_int command, u_long addr, u_char offset)
-{
- int i, a;
-
- a = offset << 2;
- for (i=0; i<6; i++, a <<= 1) {
- srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
- }
- udelay(1);
-
- i = (getfrom_srom(addr) >> 3) & 0x01;
-}
-
-static short
-srom_data(u_int command, u_long addr)
-{
- int i;
- short word = 0;
- s32 tmp;
-
- for (i=0; i<16; i++) {
- sendto_srom(command | DT_CLK, addr);
- tmp = getfrom_srom(addr);
- sendto_srom(command, addr);
-
- word = (word << 1) | ((tmp >> 3) & 0x01);
- }
-
- sendto_srom(command & 0x0000ff00, addr);
-
- return word;
-}
-
-/*
-static void
-srom_busy(u_int command, u_long addr)
-{
- sendto_srom((command & 0x0000ff00) | DT_CS, addr);
-
- while (!((getfrom_srom(addr) >> 3) & 0x01)) {
- mdelay(1);
- }
-
- sendto_srom(command & 0x0000ff00, addr);
-}
-*/
-
-static void
-sendto_srom(u_int command, u_long addr)
-{
- outl(command, addr);
- udelay(1);
-}
-
-static int
-getfrom_srom(u_long addr)
-{
- s32 tmp;
-
- tmp = inl(addr);
- udelay(1);
-
- return tmp;
-}
-
-static int
-srom_infoleaf_info(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i, count;
- u_char *p;
-
- /* Find the infoleaf decoder function that matches this chipset */
- for (i=0; i<INFOLEAF_SIZE; i++) {
- if (lp->chipset == infoleaf_array[i].chipset) break;
- }
- if (i == INFOLEAF_SIZE) {
- lp->useSROM = false;
- printk("%s: Cannot find correct chipset for SROM decoding!\n",
- dev->name);
- return -ENXIO;
- }
-
- lp->infoleaf_fn = infoleaf_array[i].fn;
-
- /* Find the information offset that this function should use */
- count = *((u_char *)&lp->srom + 19);
- p = (u_char *)&lp->srom + 26;
-
- if (count > 1) {
- for (i=count; i; --i, p+=3) {
- if (lp->device == *p) break;
- }
- if (i == 0) {
- lp->useSROM = false;
- printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
- dev->name, lp->device);
- return -ENXIO;
- }
- }
-
- lp->infoleaf_offset = get_unaligned_le16(p + 1);
-
- return 0;
-}
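For reference, the SROM header layout that the parser above assumes, inferred purely from the offsets it reads (a sketch, not a normative description of the SROM format):

	/* byte  19      controller count for this SROM image
	 * bytes 26...   one 3-byte entry per controller:
	 *                 +0  PCI device number to match against lp->device
	 *                 +1  16-bit little-endian offset of that device's
	 *                     info leaf within the SROM
	 */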
-
-/*
-** This routine loads any type 1 or 3 MII info into the mii device
-** struct and executes any type 5 code to reset PHY devices for this
-** controller.
-** The info for the MII devices will be valid since the index used
-** will follow the discovery process from MII address 1-31 then 0.
-*/
-static void
-srom_init(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- u_char count;
-
- p+=2;
- if (lp->chipset == DC21140) {
- lp->cache.gepc = (*p++ | GEP_CTRL);
- gep_wr(lp->cache.gepc, dev);
- }
-
- /* Block count */
- count = *p++;
-
- /* Jump the infoblocks to find types */
- for (;count; --count) {
- if (*p < 128) {
- p += COMPACT_LEN;
- } else if (*(p+1) == 5) {
- type5_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 4) {
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 3) {
- type3_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 2) {
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 1) {
- type1_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else {
- p += ((*p & BLOCK_LEN) + 1);
- }
- }
-}
-
-/*
-** A generic routine that writes GEP control, data and reset information
-** to the GEP register (21140) or csr15 GEP portion (2114[23]).
-*/
-static void
-srom_exec(struct net_device *dev, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char count = (p ? *p++ : 0);
- u_short *w = (u_short *)p;
-
- if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
-
- if (lp->chipset != DC21140) RESET_SIA;
-
- while (count--) {
- gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
- *p++ : get_unaligned_le16(w++)), dev);
- mdelay(2); /* 2ms per action */
- }
-
- if (lp->chipset != DC21140) {
- outl(lp->cache.csr14, DE4X5_STRR);
- outl(lp->cache.csr13, DE4X5_SICR);
- }
-}
-
-/*
-** Basically this function is a NOP since it will never be called,
-** unless I implement the DC21041 SROM functions. There's no need
-** since the existing code will be satisfactory for all boards.
-*/
-static int
-dc21041_infoleaf(struct net_device *dev)
-{
- return DE4X5_AUTOSENSE_MS;
-}
-
-static int
-dc21140_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* GEP control */
- lp->cache.gepc = (*p++ | GEP_CTRL);
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
-
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-static int
-dc21142_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
-
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-static int
-dc21143_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-/*
-** The compact infoblock is only designed for DC21140[A] chips, so
-** we'll reuse the dc21140m_autoconf function. Non MII media only.
-*/
-static int
-compact_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+COMPACT_LEN) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
- } else {
- return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = COMPACT;
- lp->active = 0;
- gep_wr(lp->cache.gepc, dev);
- lp->infoblock_media = (*p++) & COMPACT_MC;
- lp->cache.gep = *p++;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-/*
-** This block describes non MII media for the DC21140[A] only.
-*/
-static int
-type0_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 0;
- lp->active = 0;
- gep_wr(lp->cache.gepc, dev);
- p+=2;
- lp->infoblock_media = (*p++) & BLOCK0_MC;
- lp->cache.gep = *p++;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-/* These functions are under construction! */
-
-static int
-type1_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- p += 2;
- if (lp->state == INITIALISED) {
- lp->ibn = 1;
- lp->active = *p++;
- lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
- lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
- lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ttm = get_unaligned_le16(p);
- return 0;
- } else if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 1;
- lp->active = *p;
- lp->infoblock_csr6 = OMR_MII_100;
- lp->useMII = true;
- lp->infoblock_media = ANS;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-static int
-type2_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 2;
- lp->active = 0;
- p += 2;
- lp->infoblock_media = (*p) & MEDIA_CODE;
-
- if ((*p++) & EXT_FIELD) {
- lp->cache.csr13 = get_unaligned_le16(p); p += 2;
- lp->cache.csr14 = get_unaligned_le16(p); p += 2;
- lp->cache.csr15 = get_unaligned_le16(p); p += 2;
- } else {
- lp->cache.csr13 = CSR13;
- lp->cache.csr14 = CSR14;
- lp->cache.csr15 = CSR15;
- }
- lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
- lp->infoblock_csr6 = OMR_SIA;
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-static int
-type3_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- p += 2;
- if (lp->state == INITIALISED) {
- lp->ibn = 3;
- lp->active = *p++;
- if (MOTO_SROM_BUG) lp->active = 0;
- /* if (MOTO_SROM_BUG) statement indicates lp->active could
- * be 8 (i.e. the size of array lp->phy) */
- if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
- return -EINVAL;
- lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
- lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
- lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].mci = *p;
- return 0;
- } else if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 3;
- lp->active = *p;
- if (MOTO_SROM_BUG) lp->active = 0;
- lp->infoblock_csr6 = OMR_MII_100;
- lp->useMII = true;
- lp->infoblock_media = ANS;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-static int
-type4_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 4;
- lp->active = 0;
- p+=2;
- lp->infoblock_media = (*p++) & MEDIA_CODE;
- lp->cache.csr13 = CSR13; /* Hard coded defaults */
- lp->cache.csr14 = CSR14;
- lp->cache.csr15 = CSR15;
- lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-/*
-** This block type provides information for resetting external devices
-** (chips) through the General Purpose Register.
-*/
-static int
-type5_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- /* Must be initializing to run this code */
- if ((lp->state == INITIALISED) || (lp->media == INIT)) {
- p+=2;
- lp->rst = p;
- srom_exec(dev, lp->rst);
- }
-
- return DE4X5_AUTOSENSE_MS;
-}
-
-/*
-** MII Read/Write
-*/
-
-static int
-mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
-{
- mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
- mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
- mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
- mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
- mii_address(phyreg, ioaddr); /* PHY Register to read */
- mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
-
- return mii_rdata(ioaddr); /* Read data */
-}
-
-static void
-mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
-{
- mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
- mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
- mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
- mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
- mii_address(phyreg, ioaddr); /* PHY Register to write */
- mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
- data = mii_swap(data, 16); /* Swap data bit ordering */
- mii_wdata(data, 16, ioaddr); /* Write data */
-}
-
-static int
-mii_rdata(u_long ioaddr)
-{
- int i;
- s32 tmp = 0;
-
- for (i=0; i<16; i++) {
- tmp <<= 1;
- tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
- }
-
- return tmp;
-}
-
-static void
-mii_wdata(int data, int len, u_long ioaddr)
-{
- int i;
-
- for (i=0; i<len; i++) {
- sendto_mii(MII_MWR | MII_WR, data, ioaddr);
- data >>= 1;
- }
-}
-
-static void
-mii_address(u_char addr, u_long ioaddr)
-{
- int i;
-
- addr = mii_swap(addr, 5);
- for (i=0; i<5; i++) {
- sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
- addr >>= 1;
- }
-}
-
-static void
-mii_ta(u_long rw, u_long ioaddr)
-{
- if (rw == MII_STWR) {
- sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
- sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
- } else {
- getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
- }
-}
-
-static int
-mii_swap(int data, int len)
-{
- int i, tmp = 0;
-
- for (i=0; i<len; i++) {
- tmp <<= 1;
- tmp |= (data & 1);
- data >>= 1;
- }
-
- return tmp;
-}
-
-static void
-sendto_mii(u32 command, int data, u_long ioaddr)
-{
- u32 j;
-
- j = (data & 1) << 17;
- outl(command | j, ioaddr);
- udelay(1);
- outl(command | MII_MDC | j, ioaddr);
- udelay(1);
-}
-
-static int
-getfrom_mii(u32 command, u_long ioaddr)
-{
- outl(command, ioaddr);
- udelay(1);
- outl(command | MII_MDC, ioaddr);
- udelay(1);
-
- return (inl(ioaddr) >> 19) & 1;
-}
-
-/*
-** Here are three ways to calculate the OUI from the ID registers.
-*/
-static int
-mii_get_oui(u_char phyaddr, u_long ioaddr)
-{
-/*
- union {
- u_short reg;
- u_char breg[2];
- } a;
- int i, r2, r3, ret=0;*/
- int r2;
-
- /* Read r2 and r3 */
- r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
- mii_rd(MII_ID1, phyaddr, ioaddr);
- /* SEEQ and Cypress way * /
- / * Shuffle r2 and r3 * /
- a.reg=0;
- r3 = ((r3>>10)|(r2<<6))&0x0ff;
- r2 = ((r2>>2)&0x3fff);
-
- / * Bit reverse r3 * /
- for (i=0;i<8;i++) {
- ret<<=1;
- ret |= (r3&1);
- r3>>=1;
- }
-
- / * Bit reverse r2 * /
- for (i=0;i<16;i++) {
- a.reg<<=1;
- a.reg |= (r2&1);
- r2>>=1;
- }
-
- / * Swap r2 bytes * /
- i=a.breg[0];
- a.breg[0]=a.breg[1];
- a.breg[1]=i;
-
- return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */
-/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
- return r2; /* (I did it) My way */
-}
-
-/*
-** The SROM spec forces us to search addresses [1-31 0]. Bummer.
-*/
-static int
-mii_get_phy(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, j, k, n, limit=ARRAY_SIZE(phy_info);
- int id;
-
- lp->active = 0;
- lp->useMII = true;
-
- /* Search the MII address space for possible PHY devices */
- for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
- lp->phy[lp->active].addr = i;
- if (i==0) n++; /* Count cycles */
- while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
- id = mii_get_oui(i, DE4X5_MII);
- if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
- for (j=0; j<limit; j++) { /* Search PHY table */
- if (id != phy_info[j].id) continue; /* ID match? */
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- if (k < DE4X5_MAX_PHY) {
- memcpy((char *)&lp->phy[k],
- (char *)&phy_info[j], sizeof(struct phy_table));
- lp->phy[k].addr = i;
- lp->mii_cnt++;
- lp->active++;
- } else {
- goto purgatory; /* Stop the search */
- }
- break;
- }
- if ((j == limit) && (i < DE4X5_MAX_MII)) {
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- if (k < DE4X5_MAX_PHY) {
- lp->phy[k].addr = i;
- lp->phy[k].id = id;
- lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
- lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
- lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
- lp->mii_cnt++;
- lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
- j = de4x5_debug;
- de4x5_debug |= DEBUG_MII;
- de4x5_dbg_mii(dev, k);
- de4x5_debug = j;
- printk("\n");
- } else {
- goto purgatory;
- }
- }
- }
- purgatory:
- lp->active = 0;
- if (lp->phy[0].id) { /* Reset the PHY devices */
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
- mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
- while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
-
- de4x5_dbg_mii(dev, k);
- }
- }
- if (!lp->mii_cnt) lp->useMII = false;
-
- return lp->mii_cnt;
-}
-
-static char *
-build_setup_frame(struct net_device *dev, int mode)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
- char *pa = lp->setup_frame;
-
- /* Initialise the setup frame */
- if (mode == ALL) {
- memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
- }
-
- if (lp->setup_f == HASH_PERF) {
- for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
- *(pa + i) = dev->dev_addr[i]; /* Host address */
- if (i & 0x01) pa += 2;
- }
- *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
- } else {
- for (i=0; i<ETH_ALEN; i++) { /* Host address */
- *(pa + (i&1)) = dev->dev_addr[i];
- if (i & 0x01) pa += 4;
- }
- for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
- *(pa + (i&1)) = (char) 0xff;
- if (i & 0x01) pa += 4;
- }
- }
-
- return pa; /* Points to the next entry */
-}
-
-static void
-disable_ast(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- del_timer_sync(&lp->timer);
-}
-
-static long
-de4x5_switch_mac_port(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 omr;
-
- STOP_DE4X5;
-
- /* Assert the OMR_PS bit in CSR6 */
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
- OMR_FDX));
- omr |= lp->infoblock_csr6;
- if (omr & OMR_PS) omr |= OMR_HBD;
- outl(omr, DE4X5_OMR);
-
- /* Soft Reset */
- RESET_DE4X5;
-
- /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
- if (lp->chipset == DC21140) {
- gep_wr(lp->cache.gepc, dev);
- gep_wr(lp->cache.gep, dev);
- } else if ((lp->chipset & ~0x0ff) == DC2114x) {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
- }
-
- /* Restore CSR6 */
- outl(omr, DE4X5_OMR);
-
- /* Reset CSR8 */
- inl(DE4X5_MFC);
-
- return omr;
-}
-
-static void
-gep_wr(s32 data, struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->chipset == DC21140) {
- outl(data, DE4X5_GEP);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
- }
-}
-
-static int
-gep_rd(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->chipset == DC21140) {
- return inl(DE4X5_GEP);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return inl(DE4X5_SIGR) & 0x000fffff;
- }
-
- return 0;
-}
-
-static void
-yawn(struct net_device *dev, int state)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
-
- if(lp->bus == EISA) {
- switch(state) {
- case WAKEUP:
- outb(WAKEUP, PCI_CFPM);
- mdelay(10);
- break;
-
- case SNOOZE:
- outb(SNOOZE, PCI_CFPM);
- break;
-
- case SLEEP:
- outl(0, DE4X5_SICR);
- outb(SLEEP, PCI_CFPM);
- break;
- }
- } else {
- struct pci_dev *pdev = to_pci_dev (lp->gendev);
- switch(state) {
- case WAKEUP:
- pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
- mdelay(10);
- break;
-
- case SNOOZE:
- pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
- break;
-
- case SLEEP:
- outl(0, DE4X5_SICR);
- pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
- break;
- }
- }
-}
-
-static void
-de4x5_parse_params(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- char *p, *q, t;
-
- lp->params.fdx = false;
- lp->params.autosense = AUTO;
-
- if (args == NULL) return;
-
- if ((p = strstr(args, dev->name))) {
- if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
- t = *q;
- *q = '\0';
-
- if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
-
- if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
- if (strstr(p, "TP_NW")) {
- lp->params.autosense = TP_NW;
- } else if (strstr(p, "TP")) {
- lp->params.autosense = TP;
- } else if (strstr(p, "BNC_AUI")) {
- lp->params.autosense = BNC;
- } else if (strstr(p, "BNC")) {
- lp->params.autosense = BNC;
- } else if (strstr(p, "AUI")) {
- lp->params.autosense = AUI;
- } else if (strstr(p, "10Mb")) {
- lp->params.autosense = _10Mb;
- } else if (strstr(p, "100Mb")) {
- lp->params.autosense = _100Mb;
- } else if (strstr(p, "AUTO")) {
- lp->params.autosense = AUTO;
- }
- }
- *q = t;
- }
-}
-
-static void
-de4x5_dbg_open(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- if (de4x5_debug & DEBUG_OPEN) {
- printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
- printk("\tphysical address: %pM\n", dev->dev_addr);
- printk("Descriptor head addresses:\n");
- printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
- printk("Descriptor addresses:\nRX: ");
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
- }
- }
- printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
- printk("TX: ");
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
- }
- }
- printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
- printk("Descriptor buffers:\nRX: ");
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
- }
- }
- printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
- printk("TX: ");
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
- }
- }
- printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size:\nRX: %d\nTX: %d\n",
- (short)lp->rxRingSize,
- (short)lp->txRingSize);
- }
-}
-
-static void
-de4x5_dbg_mii(struct net_device *dev, int k)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (de4x5_debug & DEBUG_MII) {
- printk("\nMII device address: %d\n", lp->phy[k].addr);
- printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
- printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
- printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
- printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
- if (lp->phy[k].id != BROADCOM_T4) {
- printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
- printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
- }
- printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
- if (lp->phy[k].id != BROADCOM_T4) {
- printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
- printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
- } else {
- printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
- }
- }
-}
-
-static void
-de4x5_dbg_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->media != lp->c_media) {
- if (de4x5_debug & DEBUG_MEDIA) {
- printk("%s: media is %s%s\n", dev->name,
- (lp->media == NC ? "unconnected, link down or incompatible connection" :
- (lp->media == TP ? "TP" :
- (lp->media == ANS ? "TP/Nway" :
- (lp->media == BNC ? "BNC" :
- (lp->media == AUI ? "AUI" :
- (lp->media == BNC_AUI ? "BNC/AUI" :
- (lp->media == EXT_SIA ? "EXT SIA" :
- (lp->media == _100Mb ? "100Mb/s" :
- (lp->media == _10Mb ? "10Mb/s" :
- "???"
- ))))))))), (lp->fdx?" full duplex.":"."));
- }
- lp->c_media = lp->media;
- }
-}
-
-static void
-de4x5_dbg_srom(struct de4x5_srom *p)
-{
- int i;
-
- if (de4x5_debug & DEBUG_SROM) {
- printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
- printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
- printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
- printk("SROM version: %02x\n", (u_char)(p->version));
- printk("# controllers: %02x\n", (u_char)(p->num_controllers));
-
- printk("Hardware Address: %pM\n", p->ieee_addr);
- printk("CRC checksum: %04x\n", (u_short)(p->chksum));
- for (i=0; i<64; i++) {
- printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
- }
- }
-}
-
-static void
-de4x5_dbg_rx(struct sk_buff *skb, int len)
-{
- int i, j;
-
- if (de4x5_debug & DEBUG_RX) {
- printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
- skb->data, &skb->data[6],
- (u_char)skb->data[12],
- (u_char)skb->data[13],
- len);
- for (j=0; len>0;j+=16, len-=16) {
- printk(" %03x: ",j);
- for (i=0; i<16 && i<len; i++) {
- printk("%02x ",(u_char)skb->data[i+j]);
- }
- printk("\n");
- }
- }
-}
-
-/*
-** Perform IOCTL call functions here. Some are privileged operations and the
-** effective uid is checked in those cases. In the normal course of events
-** this function is only used for my testing.
-*/
-static int
-de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- s32 omr;
- union {
- u8 addr[144];
- u16 sval[72];
- u32 lval[36];
- } tmp;
- u_long flags = 0;
-
- if (cmd != SIOCDEVPRIVATE || in_compat_syscall())
- return -EOPNOTSUPP;
-
- switch(ioc->cmd) {
- case DE4X5_GET_HWADDR: /* Get the hardware address */
- ioc->len = ETH_ALEN;
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[i] = dev->dev_addr[i];
- }
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
- case DE4X5_SET_HWADDR: /* Set the hardware address */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
- if (netif_queue_stopped(dev))
- return -EBUSY;
- netif_stop_queue(dev);
- eth_hw_addr_set(dev, tmp.addr);
- build_setup_frame(dev, PHYS_ADDR_ONLY);
- /* Set up the descriptor and give ownership to the card */
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
- SETUP_FRAME_LEN, (struct sk_buff *)1);
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_wake_queue(dev); /* Unlock the TX ring */
- break;
-
- case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- printk("%s: Boo!\n", dev->name);
- break;
-
- case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PM;
- outl(omr, DE4X5_OMR);
- break;
-
- case DE4X5_GET_STATS: /* Get the driver statistics */
- {
- struct pkt_stats statbuf;
- ioc->len = sizeof(statbuf);
- spin_lock_irqsave(&lp->lock, flags);
- memcpy(&statbuf, &lp->pktStats, ioc->len);
- spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, &statbuf, ioc->len))
- return -EFAULT;
- break;
- }
- case DE4X5_CLR_STATS: /* Zero out the driver statistics */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- spin_lock_irqsave(&lp->lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->lock, flags);
- break;
-
- case DE4X5_GET_OMR: /* Get the OMR Register contents */
- tmp.addr[0] = inl(DE4X5_OMR);
- if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
- break;
-
- case DE4X5_SET_OMR: /* Set the OMR Register contents */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
- outl(tmp.addr[0], DE4X5_OMR);
- break;
-
- case DE4X5_GET_REG: /* Get the DE4X5 Registers */
- j = 0;
- tmp.lval[0] = inl(DE4X5_STS); j+=4;
- tmp.lval[1] = inl(DE4X5_BMR); j+=4;
- tmp.lval[2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[3] = inl(DE4X5_OMR); j+=4;
- tmp.lval[4] = inl(DE4X5_SISR); j+=4;
- tmp.lval[5] = inl(DE4X5_SICR); j+=4;
- tmp.lval[6] = inl(DE4X5_STRR); j+=4;
- tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
- ioc->len = j;
- if (copy_to_user(ioc->data, tmp.lval, ioc->len))
- return -EFAULT;
- break;
-
-#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
-/*
- case DE4X5_DUMP:
- j = 0;
- tmp.addr[j++] = dev->irq;
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[j++] = dev->dev_addr[i];
- }
- tmp.addr[j++] = lp->rxRingSize;
- tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
- tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
-
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
- }
- }
- tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
- }
- }
- tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
-
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
- }
- }
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
- }
- }
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
-
- for (i=0;i<lp->rxRingSize;i++){
- tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
- }
- for (i=0;i<lp->txRingSize;i++){
- tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
- }
-
- tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[j>>2] = lp->chipset; j+=4;
- if (lp->chipset == DC21140) {
- tmp.lval[j>>2] = gep_rd(dev); j+=4;
- } else {
- tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
- }
- tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
- if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- tmp.lval[j>>2] = lp->active; j+=4;
- tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- if (lp->phy[lp->active].id != BROADCOM_T4) {
- tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- }
- tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- if (lp->phy[lp->active].id != BROADCOM_T4) {
- tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- } else {
- tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- }
- }
-
- tmp.addr[j++] = lp->txRingSize;
- tmp.addr[j++] = netif_queue_stopped(dev);
-
- ioc->len = j;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
-*/
- default:
- return -EOPNOTSUPP;
- }
-
- return status;
-}
-
-static int __init de4x5_module_init (void)
-{
- int err = 0;
-
-#ifdef CONFIG_PCI
- err = pci_register_driver(&de4x5_pci_driver);
-#endif
-#ifdef CONFIG_EISA
- err |= eisa_driver_register (&de4x5_eisa_driver);
-#endif
-
- return err;
-}
-
-static void __exit de4x5_module_exit (void)
-{
-#ifdef CONFIG_PCI
- pci_unregister_driver (&de4x5_pci_driver);
-#endif
-#ifdef CONFIG_EISA
- eisa_driver_unregister (&de4x5_eisa_driver);
-#endif
-}
-
-module_init (de4x5_module_init);
-module_exit (de4x5_module_exit);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
deleted file mode 100644
index 1bfdc9b117f6..000000000000
--- a/drivers/net/ethernet/dec/tulip/de4x5.h
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of the
- GNU General Public License, incorporated herein by reference.
-
- The author may be reached as davies@wanton.lkg.dec.com or Digital
- Equipment Corporation, 550 King Street, Littleton MA 01460.
-
- =========================================================================
-*/
-
-/*
-** DC21040 CSR<1..15> Register Address Map
-*/
-#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
-#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
-#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
-#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
-#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
-#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
-#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
-#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
-#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
-#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
-#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
-#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
-#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
-#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
-#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
-#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
-#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
-#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
-#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
-#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
-#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
-
-/*
-** EISA Register Address Map
-*/
-#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
-#define EISA_CR iobase+0x0c84 /* EISA Control Register */
-#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
-#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
-#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
-#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
-#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
-
-/*
-** PCI/EISA Configuration Registers Address Map
-*/
-#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
-#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
-#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
-#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
-#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
-#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
-#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
-#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
-#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
-#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
-#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
-
-/*
-** EISA Configuration Register 0 bit definitions
-*/
-#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
-#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
-#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
-#define ER0_ISTS 0x10 /* Interrupt Status (X) */
-#define ER0_LI 0x08 /* Latch Interrupts */
-#define ER0_INTL 0x06 /* INTerrupt Level */
-#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
-
-/*
-** EISA Configuration Register 1 bit definitions
-*/
-#define ER1_IAM 0xe0 /* ISA Address Mode */
-#define ER1_IAE 0x10 /* ISA Addressing Enable */
-#define ER1_UPIN 0x0f /* User Pins */
-
-/*
-** EISA Configuration Register 2 bit definitions
-*/
-#define ER2_BRS 0xc0 /* Boot ROM Size */
-#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
-
-/*
-** EISA Configuration Register 3 bit definitions
-*/
-#define ER3_BWE 0x40 /* Burst Write Enable */
-#define ER3_BRE 0x04 /* Burst Read Enable */
-#define ER3_LSR 0x02 /* Local Software Reset */
-
-/*
-** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
-** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
-** the configuration revision register step number.
-*/
-#define CFID_DID 0xff00 /* Device ID */
-#define CFID_VID 0x00ff /* Vendor ID */
-#define DC21040_DID 0x0200 /* Unique Device ID # */
-#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
-#define DC21041_DID 0x1400 /* Unique Device ID # */
-#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
-#define DC21140_DID 0x0900 /* Unique Device ID # */
-#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
-#define DC2114x_DID 0x1900 /* Unique Device ID # */
-#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
-
-/*
-** Chipset defines
-*/
-#define DC21040 DC21040_DID
-#define DC21041 DC21041_DID
-#define DC21140 DC21140_DID
-#define DC2114x DC2114x_DID
-#define DC21142 (DC2114x_DID | 0x0010)
-#define DC21143 (DC2114x_DID | 0x0030)
-#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */
-
-#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
-#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
-#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
-#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
-#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
-#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
-
-/*
-** PCI Configuration Command/Status Register (PCI_CFCS)
-*/
-#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
-#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
-#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
-#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
-#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
-#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
-#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
-#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
-#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
-#define CFCS_MO 0x00000004 /* Master Operation (C) */
-#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
-#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
-
-/*
-** PCI Configuration Revision Register (PCI_CFRV)
-*/
-#define CFRV_BC 0xff000000 /* Base Class */
-#define CFRV_SC 0x00ff0000 /* Subclass */
-#define CFRV_RN 0x000000f0 /* Revision Number */
-#define CFRV_SN 0x0000000f /* Step Number */
-#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
-#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
-#define STEP_NUMBER 0x00000020 /* Increments for future chips */
-#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
-#define CFRV_MASK 0xffff0000 /* Register mask */
-
-/*
-** PCI Configuration Latency Timer Register (PCI_CFLT)
-*/
-#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
-
-/*
-** PCI Configuration Base I/O Address Register (PCI_CBIO)
-*/
-#define CBIO_MASK -128 /* Base I/O Address Mask */
-#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
-
-/*
-** PCI Configuration Card Information Structure Register (PCI_CCIS)
-*/
-#define CCIS_ROMI 0xf0000000 /* ROM Image */
-#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
-#define CCIS_ASI 0x00000007 /* Address Space Indicator */
-
-/*
-** PCI Configuration Subsystem ID Register (PCI_SSID)
-*/
-#define SSID_SSID 0xffff0000 /* Subsystem ID */
-#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
-
-/*
-** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
-*/
-#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
-#define CBER_ROME 0x00000001 /* ROM Enable */
-
-/*
-** PCI Configuration Interrupt Register (PCI_CFIT)
-*/
-#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
-#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
-#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
-#define CFIT_IRQL 0x000000ff /* Interrupt Line */
-
-/*
-** PCI Configuration Power Management Area Register (PCI_CFPM)
-*/
-#define SLEEP 0x80 /* Power Saving Sleep Mode */
-#define SNOOZE 0x40 /* Power Saving Snooze Mode */
-#define WAKEUP 0x00 /* Power Saving Wakeup */
-
-#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
-#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
-
-/*
-** DC21040 Bus Mode Register (DE4X5_BMR)
-*/
-#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
-#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
-#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
-#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
-#define BMR_CAL 0x0000c000 /* Cache Alignment */
-#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
-#define BMR_BLE 0x00000080 /* Big/Little Endian */
-#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
-#define BMR_BAR 0x00000002 /* Bus ARbitration */
-#define BMR_SWR 0x00000001 /* Software Reset */
-
- /* Timings here are for 10BASE-T/AUI only*/
-#define TAP_NOPOLL 0x00000000 /* No automatic polling */
-#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
-#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
-#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
-#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
-#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
-#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
-#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
-
-#define CAL_NOUSE 0x00000000 /* Not used */
-#define CAL_8LONG 0x00004000 /* 8-longword alignment */
-#define CAL_16LONG 0x00008000 /* 16-longword alignment */
-#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
-
-#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
-#define PBL_1 0x00000100 /* 1 longword DMA burst length */
-#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
-#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
-#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
-#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
-#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
-
-#define DSL_0 0x00000000 /* 0 longword / descriptor */
-#define DSL_1 0x00000004 /* 1 longword / descriptor */
-#define DSL_2 0x00000008 /* 2 longwords / descriptor */
-#define DSL_4 0x00000010 /* 4 longwords / descriptor */
-#define DSL_8 0x00000020 /* 8 longwords / descriptor */
-#define DSL_16 0x00000040 /* 16 longwords / descriptor */
-#define DSL_32 0x00000080 /* 32 longwords / descriptor */
-
-/*
-** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
-*/
-#define TPD 0x00000001 /* Transmit Poll Demand */
-
-/*
-** DC21040 Receive Poll Demand Register (DE4X5_RPD)
-*/
-#define RPD 0x00000001 /* Receive Poll Demand */
-
-/*
-** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
-*/
-#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
-
-/*
-** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
-*/
-#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
-
-/*
-** Status Register (DE4X5_STS)
-*/
-#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
-#define STS_BE 0x03800000 /* Bus Error Bits */
-#define STS_TS 0x00700000 /* Transmit Process State */
-#define STS_RS 0x000e0000 /* Receive Process State */
-#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define STS_ER 0x00004000 /* Early Receive */
-#define STS_FBE 0x00002000 /* Fatal Bus Error */
-#define STS_SE 0x00002000 /* System Error */
-#define STS_LNF 0x00001000 /* Link Fail */
-#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
-#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
-#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define STS_AT 0x00000400 /* AUI/TP Pin */
-#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
-#define STS_RPS 0x00000100 /* Receive Process Stopped */
-#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define STS_RI 0x00000040 /* Receive Interrupt */
-#define STS_UNF 0x00000020 /* Transmit Underflow */
-#define STS_LNP 0x00000010 /* Link Pass */
-#define STS_ANC 0x00000010 /* Autonegotiation Complete */
-#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
-#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define STS_TPS 0x00000002 /* Transmit Process Stopped */
-#define STS_TI 0x00000001 /* Transmit Interrupt */
-
-#define EB_PAR 0x00000000 /* Parity Error */
-#define EB_MA 0x00800000 /* Master Abort */
-#define EB_TA 0x01000000 /* Target Abort */
-#define EB_RES0 0x01800000 /* Reserved */
-#define EB_RES1 0x02000000 /* Reserved */
-
-#define TS_STOP 0x00000000 /* Stopped */
-#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
-#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
-#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
-#define TS_RES 0x00400000 /* Reserved */
-#define TS_SPKT 0x00500000 /* Setup Packet */
-#define TS_SUSP 0x00600000 /* Suspended */
-#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
-
-#define RS_STOP 0x00000000 /* Stopped */
-#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
-#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
-#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
-#define RS_SUSP 0x00080000 /* Suspended */
-#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
-#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
-#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
-
-#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
-
-/*
-** Operation Mode Register (DE4X5_OMR)
-*/
-#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
-#define OMR_RA 0x40000000 /* Receive All */
-#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
-#define OMR_SCR 0x01000000 /* Scrambler Mode */
-#define OMR_PCS 0x00800000 /* PCS Function */
-#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
-#define OMR_SF 0x00200000 /* Store and Forward */
-#define OMR_HBD 0x00080000 /* HeartBeat Disable */
-#define OMR_PS 0x00040000 /* Port Select */
-#define OMR_CA 0x00020000 /* Capture Effect Enable */
-#define OMR_BP 0x00010000 /* Back Pressure */
-#define OMR_TR 0x0000c000 /* Threshold Control Bits */
-#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
-#define OMR_FC 0x00001000 /* Force Collision Mode */
-#define OMR_OM 0x00000c00 /* Operating Mode */
-#define OMR_FDX 0x00000200 /* Full Duplex Mode */
-#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
-#define OMR_PM 0x00000080 /* Pass All Multicast */
-#define OMR_PR 0x00000040 /* Promiscuous Mode */
-#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
-#define OMR_IF 0x00000010 /* Inverse Filtering */
-#define OMR_PB 0x00000008 /* Pass Bad Frames */
-#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
-#define OMR_SR 0x00000002 /* Start/Stop Receive */
-#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
-
-#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
-#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
-#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
-#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
-
-#define OMR_DEF (OMR_SDP)
-#define OMR_SIA (OMR_SDP | OMR_TTM)
-#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
-#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
-#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
-
-/*
-** DC21040 Interrupt Mask Register (DE4X5_IMR)
-*/
-#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
-#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
-#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
-#define IMR_ERM 0x00004000 /* Early Receive Mask */
-#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
-#define IMR_SEM 0x00002000 /* System Error Mask */
-#define IMR_LFM 0x00001000 /* Link Fail Mask */
-#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
-#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
-#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
-#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
-#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
-#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
-#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
-#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
-#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
-#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
-#define IMR_LPM 0x00000010 /* Link Pass */
-#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
-#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
-#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
-#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
-
-/*
-** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
-*/
-#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
-#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
-#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
-#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
-#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
-
-/*
-** DC21040 Ethernet Address PROM (DE4X5_APROM)
-*/
-#define APROM_DN 0x80000000 /* Data Not Valid */
-#define APROM_DT 0x000000ff /* Address Byte */
-
-/*
-** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
-*/
-#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
-#define BROM_RD 0x00004000 /* Read from Boot ROM */
-#define BROM_WR 0x00002000 /* Write to Boot ROM */
-#define BROM_BR 0x00001000 /* Select Boot ROM when set */
-#define BROM_SR 0x00000800 /* Select Serial ROM when set */
-#define BROM_REG 0x00000400 /* External Register Select */
-#define BROM_DT 0x000000ff /* Data Byte */
-
-/*
-** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
-*/
-#define MII_MDI 0x00080000 /* MII Management Data In */
-#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
-#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
-#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
-#define MII_MDT 0x00020000 /* MII Management Data Out */
-#define MII_MDC 0x00010000 /* MII Management Clock */
-#define MII_RD 0x00004000 /* Read from MII */
-#define MII_WR 0x00002000 /* Write to MII */
-#define MII_SEL 0x00000800 /* Select MII when RESET */
-
-#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
-#define SROM_RD 0x00004000 /* Read from Serial ROM */
-#define SROM_WR 0x00002000 /* Write to Serial ROM */
-#define SROM_BR 0x00001000 /* Select Boot ROM when set */
-#define SROM_SR 0x00000800 /* Select Serial ROM when set */
-#define SROM_REG 0x00000400 /* External Register Select */
-#define SROM_DT 0x000000ff /* Data Byte */
-
-#define DT_OUT 0x00000008 /* Serial Data Out */
-#define DT_IN 0x00000004 /* Serial Data In */
-#define DT_CLK 0x00000002 /* Serial ROM Clock */
-#define DT_CS 0x00000001 /* Serial ROM Chip Select */
-
-#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
-#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
-#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
-#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
-
-#define MII_CR 0x00 /* MII Management Control Register */
-#define MII_SR 0x01 /* MII Management Status Register */
-#define MII_ID0 0x02 /* PHY Identifier Register 0 */
-#define MII_ID1 0x03 /* PHY Identifier Register 1 */
-#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
-#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
-#define MII_ANE 0x06 /* Auto Negotiation Expansion */
-#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
-
-#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */
-
-/*
-** MII Management Control Register
-*/
-#define MII_CR_RST 0x8000 /* RESET the PHY chip */
-#define MII_CR_LPBK 0x4000 /* Loopback enable */
-#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
-#define MII_CR_10 0x0000 /* Set 10Mb/s */
-#define MII_CR_100 0x2000 /* Set 100Mb/s */
-#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
-#define MII_CR_PD 0x0800 /* Power Down */
-#define MII_CR_ISOL 0x0400 /* Isolate Mode */
-#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
-#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
-#define MII_CR_CTE 0x0080 /* Collision Test Enable */
-
-/*
-** MII Management Status Register
-*/
-#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
-#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
-#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
-#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
-#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
-#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/
-#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
-#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
-#define MII_SR_LKS 0x0004 /* Link Status */
-#define MII_SR_JABD 0x0002 /* Jabber Detect */
-#define MII_SR_XC 0x0001 /* Extended Capabilities */
-
-/*
-** MII Management Auto Negotiation Advertisement Register
-*/
-#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
-#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
-#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
-#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
-#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
-#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
-#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
-#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
-
-/*
-** MII Management Auto Negotiation Remote End Register
-*/
-#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
-#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
-#define MII_ANLPA_RF 0x2000 /* Remote Fault */
-#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
-#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
-#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
-#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
-#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
-#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
-#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
-#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
-
-/*
-** SROM Media Definitions (ABG SROM Section)
-*/
-#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
-#define MEDIA_MII 0x0040 /* MII Present on the adapter */
-#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
-#define MEDIA_AUI 0x0004 /* AUI Media present */
-#define MEDIA_TP 0x0002 /* TP Media present */
-#define MEDIA_BNC 0x0001 /* BNC Media present */
-
-/*
-** SROM Definitions (Digital Semiconductor Format)
-*/
-#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
-#define SROM_SSID 0x0002 /* Sub-system ID offset */
-#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
-#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
-#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/
-#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
-#define SROM_SFV 0x0012 /* SROM Format Version offset */
-#define SROM_CCNT 0x0013 /* Controller Count offset */
-#define SROM_HWADD 0x0014 /* Hardware Address offset */
-#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/
-#define SROM_CRC 0x007e /* SROM CRC offset */
-
-/*
-** SROM Media Connection Definitions
-*/
-#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
-#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
-#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
-#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
-#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
-#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
-#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
-#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
-#define SROM_100BT4 0x0006 /* 100BASE-T4 */
-#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
-#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
-#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
-#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
-#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
-#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
-#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
-#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
-#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
-#define SROM_PAO 0x8800 /* Powerup Autosense Only */
-#define SROM_NSMI 0xffff /* No Selected Media Information */
-
-/*
-** SROM Media Definitions
-*/
-#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
-#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
-#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
-#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
-#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
-#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
-#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
-#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
-#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
-
-#define BLOCK_LEN 0x7f /* Extended blocks length mask */
-#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
-#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
-
-/*
-** SROM Compact Format Block Masks
-*/
-#define COMPACT_FI 0x80 /* Format Indicator */
-#define COMPACT_LEN 0x04 /* Length */
-#define COMPACT_MC 0x3f /* Media Code */
-
-/*
-** SROM Extended Format Block Type 0 Masks
-*/
-#define BLOCK0_FI 0x80 /* Format Indicator */
-#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
-#define BLOCK0_MC 0x3f /* Media Code */
-
-/*
-** DC21040 Full Duplex Register (DE4X5_FDR)
-*/
-#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
-
-/*
-** DC21041 General Purpose Timer Register (DE4X5_GPT)
-*/
-#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
-#define GPT_VAL 0x0000ffff /* Timer Value */
-
-/*
-** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
-*/
-/* Valid ONLY for DE500 hardware */
-#define GEP_LNP 0x00000080 /* Link Pass (input) */
-#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
-#define GEP_SDET 0x00000020 /* Signal Detect (input) */
-#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
-#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
-#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
-#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
-#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
-#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
-#define GEP_CTRL 0x00000100 /* GEP control bit */
-
-/*
-** SIA Register Defaults
-*/
-#define CSR13 0x00000001
-#define CSR14 0x0003ff7f /* Autonegotiation disabled */
-#define CSR15 0x00000008
-
-/*
-** SIA Status Register (DE4X5_SISR)
-*/
-#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
-#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
-#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
-#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
-#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
-#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
-#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
-#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
-#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
-#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
-#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
-#define SISR_DAO 0x00000080 /* PLL All One */
-#define SISR_DAZ 0x00000040 /* PLL All Zero */
-#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
-#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
-#define SISR_APS 0x00000008 /* Auto Polarity State */
-#define SISR_LKF 0x00000004 /* Link Fail Status */
-#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
-#define SISR_NCR 0x00000002 /* Network Connection Error */
-#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
-#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
-#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
-
-#define ANS_NDIS 0x00000000 /* Nway disable */
-#define ANS_TDIS 0x00001000 /* Transmit Disable */
-#define ANS_ADET 0x00002000 /* Ability Detect */
-#define ANS_ACK 0x00003000 /* Acknowledge */
-#define ANS_CACK 0x00004000 /* Complete Acknowledge */
-#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
-#define ANS_LCHK 0x00006000 /* Link Check */
-
-#define SISR_RST 0x00000301 /* CSR12 reset */
-#define SISR_ANR 0x00001301 /* Autonegotiation restart */
-
-/*
-** SIA Connectivity Register (DE4X5_SICR)
-*/
-#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
-#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
-#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
-#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
-#define SICR_IE 0x00001000 /* Input Enable */
-#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
-#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
-#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/
-#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/
-#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
-#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
-#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/
-#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
-#define SICR_ASE 0x00000080 /* APLL Start Enable*/
-#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
-#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
-#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
-#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
-#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
-#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
-#define SICR_SRL 0x00000001 /* SIA Reset */
-#define SIA_RESET 0x00000000 /* SIA Reset Value */
-
-/*
-** SIA Transmit and Receive Register (DE4X5_STRR)
-*/
-#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
-#define STRR_SPP 0x00004000 /* Set Polarity Plus */
-#define STRR_APE 0x00002000 /* Auto Polarity Enable */
-#define STRR_LTE 0x00001000 /* Link Test Enable */
-#define STRR_SQE 0x00000800 /* Signal Quality Enable */
-#define STRR_CLD 0x00000400 /* Collision Detect Enable */
-#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
-#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
-#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
-#define STRR_HDE 0x00000040 /* Half Duplex Enable */
-#define STRR_CPEN 0x00000030 /* Compensation Enable */
-#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
-#define STRR_DREN 0x00000004 /* Driver Enable */
-#define STRR_LBK 0x00000002 /* Loopback Enable */
-#define STRR_ECEN 0x00000001 /* Encoder Enable */
-#define STRR_RESET 0xffffffff /* Reset value for STRR */
-
-/*
-** SIA General Register (DE4X5_SIGR)
-*/
-#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
-#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
-#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
-#define SIGR_CWE 0x08000000 /* Control Write Enable */
-#define SIGR_RME 0x04000000 /* Receive Match Enable */
-#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
-#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
-#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
-#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
-#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
-#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
-#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
-#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
-#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
-#define SIGR_FRL 0x00002000 /* Force Receiver Low */
-#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
-#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
-#define SIGR_FLF 0x00000400 /* Force Link Fail */
-#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
-#define SIGR_TSCK 0x00000100 /* Test Clock */
-#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
-#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
-#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
-#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
-#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
-#define SIGR_JCK 0x00000004 /* Jabber Clock */
-#define SIGR_HUJ 0x00000002 /* Host Unjab */
-#define SIGR_JBD 0x00000001 /* Jabber Disable */
-#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
-
-/*
-** Receive Descriptor Bit Summary
-*/
-#define R_OWN 0x80000000 /* Own Bit */
-#define RD_FF 0x40000000 /* Filtering Fail */
-#define RD_FL 0x3fff0000 /* Frame Length */
-#define RD_ES 0x00008000 /* Error Summary */
-#define RD_LE 0x00004000 /* Length Error */
-#define RD_DT 0x00003000 /* Data Type */
-#define RD_RF 0x00000800 /* Runt Frame */
-#define RD_MF 0x00000400 /* Multicast Frame */
-#define RD_FS 0x00000200 /* First Descriptor */
-#define RD_LS 0x00000100 /* Last Descriptor */
-#define RD_TL 0x00000080 /* Frame Too Long */
-#define RD_CS 0x00000040 /* Collision Seen */
-#define RD_FT 0x00000020 /* Frame Type */
-#define RD_RJ 0x00000010 /* Receive Watchdog */
-#define RD_RE 0x00000008 /* Report on MII Error */
-#define RD_DB 0x00000004 /* Dribbling Bit */
-#define RD_CE 0x00000002 /* CRC Error */
-#define RD_OF 0x00000001 /* Overflow */
-
-#define RD_RER 0x02000000 /* Receive End Of Ring */
-#define RD_RCH 0x01000000 /* Second Address Chained */
-#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
-#define RD_RBS1 0x000007ff /* Buffer 1 Size */
-
-/*
-** Transmit Descriptor Bit Summary
-*/
-#define T_OWN 0x80000000 /* Own Bit */
-#define TD_ES 0x00008000 /* Error Summary */
-#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
-#define TD_LO 0x00000800 /* Loss Of Carrier */
-#define TD_NC 0x00000400 /* No Carrier */
-#define TD_LC 0x00000200 /* Late Collision */
-#define TD_EC 0x00000100 /* Excessive Collisions */
-#define TD_HF 0x00000080 /* Heartbeat Fail */
-#define TD_CC 0x00000078 /* Collision Counter */
-#define TD_LF 0x00000004 /* Link Fail */
-#define TD_UF 0x00000002 /* Underflow Error */
-#define TD_DE 0x00000001 /* Deferred */
-
-#define TD_IC 0x80000000 /* Interrupt On Completion */
-#define TD_LS 0x40000000 /* Last Segment */
-#define TD_FS 0x20000000 /* First Segment */
-#define TD_FT1 0x10000000 /* Filtering Type */
-#define TD_SET 0x08000000 /* Setup Packet */
-#define TD_AC 0x04000000 /* Add CRC Disable */
-#define TD_TER 0x02000000 /* Transmit End Of Ring */
-#define TD_TCH 0x01000000 /* Second Address Chained */
-#define TD_DPD 0x00800000 /* Disabled Padding */
-#define TD_FT0 0x00400000 /* Filtering Type */
-#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
-#define TD_TBS1 0x000007ff /* Buffer 1 Size */
-
-#define PERFECT_F 0x00000000
-#define HASH_F TD_FT0
-#define INVERSE_F TD_FT1
-#define HASH_O_F (TD_FT1 | TD_FT0)
-
-/*
-** Media / mode state machine definitions
-** User selectable:
-*/
-#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */
-#define TP_NW 0x0002 /* 10Base-T with Nway */
-#define BNC 0x0004 /* Thinwire */
-#define AUI 0x0008 /* Thickwire */
-#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
-#define _10Mb 0x0040 /* 10Mb/s Ethernet */
-#define _100Mb 0x0080 /* 100Mb/s Ethernet */
-#define AUTO 0x4000 /* Auto sense the media or speed */
-
-/*
-** Internal states
-*/
-#define NC 0x0000 /* No Connection */
-#define ANS 0x0020 /* Intermediate AutoNegotiation State */
-#define SPD_DET 0x0100 /* Parallel speed detection */
-#define INIT 0x0200 /* Initial state */
-#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
-#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
-#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
-#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
-#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
-#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
-#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
-#define MII 0x1000 /* MII on the 21143 */
-
-#define TIMER_CB 0x80000000 /* Timer callback detection */
-
-/*
-** DE4X5 DEBUG Options
-*/
-#define DEBUG_NONE 0x0000 /* No DEBUG messages */
-#define DEBUG_VERSION 0x0001 /* Print version message */
-#define DEBUG_MEDIA 0x0002 /* Print media messages */
-#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
-#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
-#define DEBUG_SROM 0x0010 /* Print SROM messages */
-#define DEBUG_MII 0x0020 /* Print MII messages */
-#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
-#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
-#define DEBUG_PCICFG 0x0100
-#define DEBUG_ALL 0x01ff
-
-/*
-** Miscellaneous
-*/
-#define PCI 0
-#define EISA 1
-
-#define DE4X5_HASH_TABLE_LEN 512 /* Bits */
-#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */
-
-#define SETUP_FRAME_LEN 192 /* Bytes */
-#define IMPERF_PA_OFFSET 156 /* Bytes */
-
-#define POLL_DEMAND 1
-
-#define LOST_MEDIA_THRESHOLD 3
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define DE4X5_STRLEN 8
-
-#define DE4X5_INIT 0 /* Initialisation time */
-#define DE4X5_RUN 1 /* Run time */
-
-#define DE4X5_SAVE_STATE 0
-#define DE4X5_RESTORE_STATE 1
-
-/*
-** Address Filtering Modes
-*/
-#define PERFECT 0 /* 16 perfect physical addresses */
-#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
-#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
-#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
-
-#define ALL 0 /* Clear out all the setup frame */
-#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
-
-/*
-** Adapter state
-*/
-#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
-#define CLOSED 1 /* Ready for opening */
-#define OPEN 2 /* Running */
-
-/*
-** Various wait times
-*/
-#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
-#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
-
-/*
-** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
-** the vendors seem split 50-50 on how to calculate the OUI register values
-** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
-*/
-#define NATIONAL_TX 0x2000
-#define BROADCOM_T4 0x03e0
-#define SEEQ_T4 0x0016
-#define CYPRESS_T4 0x0014
-
-/*
-** Speed Selection stuff
-*/
-#define SET_10Mb {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
- if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
- mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
- outl(omr, DE4X5_OMR);\
- if (!lp->useSROM) lp->cache.gep = 0;\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
- lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-#define SET_100Mb {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- int fdx=0;\
- if (lp->phy[lp->active].id == NATIONAL_TX) {\
- mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
- 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
- sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
- if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
- if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
- mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- if (fdx) omr |= OMR_FDX;\
- outl(omr, DE4X5_OMR);\
- if (!lp->useSROM) lp->cache.gep = 0;\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
- lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-/* FIX ME so I don't jam 10Mb networks */
-#define SET_100Mb_PDET {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr, DE4X5_OMR);\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr, DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
- lp->cache.gep = (GEP_FDXD | GEP_MODE);\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-struct de4x5_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
-#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
-/* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */
-#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
-#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
-#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
-#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
-#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
-#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
-#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
-#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
-
-#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index ba0a69b363f8..d5657ff15e3c 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -117,8 +117,8 @@ static void tulip_build_fake_mediatable(struct tulip_private *tp)
0x00, 0x06 /* ttm bit map */
};
- tp->mtable = kmalloc(sizeof(struct mediatable) +
- sizeof(struct medialeaf), GFP_KERNEL);
+ tp->mtable = devm_kmalloc(&tp->pdev->dev, sizeof(struct mediatable) +
+ sizeof(struct medialeaf), GFP_KERNEL);
if (tp->mtable == NULL)
return; /* Horrible, impossible failure. */
@@ -224,7 +224,8 @@ subsequent_board:
return;
}
- mtable = kmalloc(struct_size(mtable, mleaf, count), GFP_KERNEL);
+ mtable = devm_kmalloc(&tp->pdev->dev, struct_size(mtable, mleaf, count),
+ GFP_KERNEL);
if (mtable == NULL)
return; /* Horrible, impossible failure. */
last_mediatable = tp->mtable = mtable;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 825e81f5fd22..b8e46c4849ef 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1389,7 +1389,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* And back to business
*/
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) {
pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
return i;
@@ -1398,11 +1398,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
irq = pdev->irq;
/* alloc_etherdev ensures aligned and zeroed private structures */
- dev = alloc_etherdev (sizeof (*tp));
- if (!dev) {
- pci_disable_device(pdev);
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
+ if (!dev)
return -ENOMEM;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1410,18 +1408,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_name(pdev),
(unsigned long long)pci_resource_len (pdev, 0),
(unsigned long long)pci_resource_start (pdev, 0));
- goto err_out_free_netdev;
+ return -ENODEV;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions (pdev, DRV_NAME))
- goto err_out_free_netdev;
+ if (pci_request_regions(pdev, DRV_NAME))
+ return -ENODEV;
- ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
+ ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
if (!ioaddr)
- goto err_out_free_res;
+ return -ENODEV;
/*
* initialize private data structure 'tp'
@@ -1430,12 +1428,12 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
- tp->rx_ring = dma_alloc_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- &tp->rx_ring_dma, GFP_KERNEL);
+ tp->rx_ring = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma, GFP_KERNEL);
if (!tp->rx_ring)
- goto err_out_mtable;
+ return -ENODEV;
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
@@ -1695,8 +1693,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
dev->ethtool_ops = &ops;
- if (register_netdev(dev))
- goto err_out_free_ring;
+ i = register_netdev(dev);
+ if (i)
+ return i;
pci_set_drvdata(pdev, dev);
@@ -1771,24 +1770,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tulip_set_power_state (tp, 0, 1);
return 0;
-
-err_out_free_ring:
- dma_free_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
-
-err_out_mtable:
- kfree (tp->mtable);
- pci_iounmap(pdev, ioaddr);
-
-err_out_free_res:
- pci_release_regions (pdev);
-
-err_out_free_netdev:
- free_netdev (dev);
- pci_disable_device(pdev);
- return -ENODEV;
}
@@ -1888,24 +1869,11 @@ static int __maybe_unused tulip_resume(struct device *dev_d)
static void tulip_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct tulip_private *tp;
if (!dev)
return;
- tp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
- kfree (tp->mtable);
- pci_iounmap(pdev, tp->base_addr);
- free_netdev (dev);
- pci_release_regions (pdev);
- pci_disable_device(pdev);
-
- /* pci_power_off (pdev, -1); */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
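
The tulip conversion above replaces the manual error-unwind labels with device-managed allocations. As a minimal sketch (not part of this patch; the "foo" names and private struct are placeholders), a devm-style PCI probe reduces to early returns, because every pcim_/devm_/dmam_ resource is released automatically when the driver unbinds:

#include <linux/pci.h>
#include <linux/etherdevice.h>

struct foo_priv { int dummy; };	/* placeholder private data */

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	void __iomem *ioaddr;
	int err;

	err = pcim_enable_device(pdev);		/* auto-disabled on unbind */
	if (err)
		return err;

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* managed because the device was enabled via pcim_enable_device() */
	if (pci_request_regions(pdev, "foo"))
		return -ENODEV;

	ioaddr = pcim_iomap(pdev, 0, 0);	/* auto-unmapped on unbind */
	if (!ioaddr)
		return -ENODEV;

	/* the only explicit teardown left is unregister_netdev() in remove */
	return register_netdev(dev);
}
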
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index caf48023f8ea..5231818943c6 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1928,6 +1928,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* AST2400 doesn't have working HW checksum generation */
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+ /* AST2600 tx checksum with NCSI is broken */
+ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
if (np && of_get_property(np, "no-hw-checksum", NULL))
netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 6e52f3ad182f..9d7ef993ce45 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3076,7 +3076,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
rxq = fep->rx_queue[queue];
bdp = rxq->bd.base;
for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+ skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL);
if (!skb)
goto err_alloc;
@@ -3866,9 +3866,11 @@ fec_probe(struct platform_device *pdev)
fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
/* enet_out is optional, depends on board */
- fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
- if (IS_ERR(fep->clk_enet_out))
- fep->clk_enet_out = NULL;
+ fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
+ if (IS_ERR(fep->clk_enet_out)) {
+ ret = PTR_ERR(fep->clk_enet_out);
+ goto failed_clk;
+ }
fep->ptp_clk_on = false;
mutex_init(&fep->ptp_clk_mutex);
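
The fec change above switches to devm_clk_get_optional(), which distinguishes "clock not described for this board" (NULL) from a real lookup failure. A rough sketch of the idiom, with illustrative names only:

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, "enet_out");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real failure, incl. -EPROBE_DEFER */

	*out = clk;	/* may be NULL; clk_prepare_enable(NULL) is a no-op */
	return 0;
}
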
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index ebc77771f5da..4aa1f433ed24 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -643,6 +643,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = alloc_msg_buf(pf_to_mgmt);
if (err) {
dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
@@ -650,6 +651,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
if (err) {
dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9183d480b70b..46f439641441 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -47,3 +47,8 @@ ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
+
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_ice_switch.o += -Wno-array-bounds
+endif
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 476bd1c83c87..1e71b70f0e52 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3111,36 +3111,47 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
return np->vsi->rss_table_size;
}
-/**
- * ice_get_rxfh - get the Rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
- *
- * Reads the indirection table directly from the hardware.
- */
static int
-ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- int err, i;
+ u16 qcount, offset;
+ int err, num_tc, i;
u8 *lut;
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ netdev_warn(netdev, "RSS is not supported on this VSI!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (rss_context && !ice_is_adq_active(pf)) {
+ netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n");
+ return -EINVAL;
+ }
+
+ qcount = vsi->mqprio_qopt.qopt.count[rss_context];
+ offset = vsi->mqprio_qopt.qopt.offset[rss_context];
+
+ if (rss_context && ice_is_adq_active(pf)) {
+ num_tc = vsi->mqprio_qopt.qopt.num_tc;
+ if (rss_context >= num_tc) {
+ netdev_err(netdev, "RSS context:%d > num_tc:%d\n",
+ rss_context, num_tc);
+ return -EINVAL;
+ }
+ /* Use channel VSI of given TC */
+ vsi = vsi->tc_map_vsi[rss_context];
+ }
+
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir)
return 0;
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- /* RSS not supported return error here */
- netdev_warn(netdev, "RSS is not configured on this VSI!\n");
- return -EIO;
- }
-
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -3153,8 +3164,14 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
if (err)
goto out;
+ if (ice_is_adq_active(pf)) {
+ for (i = 0; i < vsi->rss_table_size; i++)
+ indir[i] = offset + lut[i] % qcount;
+ goto out;
+ }
+
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = (u32)(lut[i]);
+ indir[i] = lut[i];
out:
kfree(lut);
@@ -3162,6 +3179,21 @@ out:
}
/**
+ * ice_get_rxfh - get the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Reads the indirection table directly from the hardware.
+ */
+static int
+ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
+}
+
+/**
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
@@ -4102,6 +4134,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
+ .get_rxfh_context = ice_get_rxfh_context,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
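
For reference, the core of ice_get_rxfh_context() above is the LUT remap for an ADQ traffic class. A distilled sketch (hypothetical helper name):

/* Fold each global LUT entry into the TC's [offset, offset + qcount) range. */
static void example_remap_lut(u32 *indir, const u8 *lut, u16 size,
			      u16 offset, u16 qcount)
{
	u16 i;

	for (i = 0; i < size; i++)
		indir[i] = offset + lut[i] % qcount;
}
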
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 6d19c58ccacd..454e01ae09b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3043,8 +3043,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- coalesce[i].itr_tx = q_vector->tx.itr_setting;
- coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ coalesce[i].itr_tx = q_vector->tx.itr_settings;
+ coalesce[i].itr_rx = q_vector->rx.itr_settings;
coalesce[i].intrl = q_vector->intrl;
if (i < vsi->num_txq)
@@ -3100,21 +3100,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
*/
if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[i].itr_rx;
+ rc->itr_settings = coalesce[i].itr_rx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_rxq) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
}
if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[i].itr_tx;
+ rc->itr_settings = coalesce[i].itr_tx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_txq) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
}
@@ -3128,12 +3128,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
for (; i < vsi->num_q_vectors; i++) {
/* transmit */
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
/* receive */
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4a5d4d971161..e1cae253412c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6189,9 +6189,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_ptp_link_change(pf, pf->hw.pf_id, true);
}
- /* clear this now, and the first stats read will be used as baseline */
- vsi->stat_offsets_loaded = false;
-
+ /* Perform an initial read of the statistics registers now to
+ * set the baseline so counters are ready when interface is up
+ */
+ ice_update_eth_stats(vsi);
ice_service_task_schedule(pf);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index da025c204577..662947c882e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -500,12 +500,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
* This function must be called periodically to ensure that the cached value
* is never more than 2 seconds old. It must also be called whenever the PHC
* time has been changed.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
*/
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
u64 systime;
int i;
+ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+ return -EAGAIN;
+
/* Read the current PHC time */
systime = ice_ptp_read_src_clk_reg(pf, NULL);
@@ -528,6 +535,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
}
}
+ clear_bit(ICE_CFG_BUSY, pf->state);
+
+ return 0;
}
/**
@@ -2330,17 +2340,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
{
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ int err;
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return;
- ice_ptp_update_cached_phctime(pf);
+ err = ice_ptp_update_cached_phctime(pf);
ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
- /* Run twice a second */
+ /* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
- msecs_to_jiffies(500));
+ msecs_to_jiffies(err ? 10 : 500));
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index f5a906c03669..ca902af54bb4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -385,9 +385,14 @@ struct ice_ring_container {
/* this matches the maximum number of ITR bits, but in usec
* values, so it is shifted left one bit (bit zero is ignored)
*/
- u16 itr_setting:13;
- u16 itr_reserved:2;
- u16 itr_mode:1;
+ union {
+ struct {
+ u16 itr_setting:13;
+ u16 itr_reserved:2;
+ u16 itr_mode:1;
+ };
+ u16 itr_settings;
+ };
enum ice_container_type type;
};
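
The ice_txrx.h hunk above wraps the ITR bitfields in an anonymous union so the rebuild path can copy all of them, including itr_mode, as one 16-bit word. A minimal sketch of the pattern, with assumed names:

#include <linux/types.h>

struct example_ring_container {
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;	/* all three fields as one word */
	};
};

static void example_save_restore(struct example_ring_container *dst,
				 const struct example_ring_container *src)
{
	/* copying itr_settings preserves itr_mode, which a plain
	 * "dst->itr_setting = src->itr_setting" would drop
	 */
	dst->itr_settings = src->itr_settings;
}
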
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 34b33b21e0dc..68be2976f539 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}
- if (adapter->link_speed != SPEED_1000)
+ if (adapter->link_speed != SPEED_1000 ||
+ !hw->phy.ops.read_reg)
goto no_wait;
/* wait for Remote receiver status OK */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 69d11ff7677d..774de63dd93a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -585,7 +585,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa rsa;
if (xs->calg) {
@@ -757,7 +757,7 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
u32 zerobuf[4] = {0, 0, 0, 0};
u16 sa_idx;
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa *rsa;
u8 ipi;
@@ -903,8 +903,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* Tx IPsec offload doesn't seem to work on this
* device, so block these requests for now.
*/
- sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
- if (sam->flags != XFRM_OFFLOAD_INBOUND) {
+ if (sam->dir != XFRM_DEV_OFFLOAD_IN) {
err = -EOPNOTSUPP;
goto err_out;
}
@@ -915,7 +914,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
- xs->xso.flags = sam->flags;
+ xs->xso.dir = sam->dir;
xs->id.spi = sam->spi;
xs->id.proto = sam->proto;
xs->props.family = sam->family;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
index d2b64ff8eb4e..809ab51a7842 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -74,7 +74,7 @@ struct ixgbe_ipsec {
struct sa_mbx_msg {
__be32 spi;
- u8 flags;
+ u8 dir;
u8 proto;
u16 family;
__be32 addr[4];
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 19cde928d9b7..77c2e70b0860 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2344,6 +2344,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
@@ -8571,57 +8572,83 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf)
{
- struct ixgbe_tx_buffer *tx_buffer;
- union ixgbe_adv_tx_desc *tx_desc;
- u32 len, cmd_type;
- dma_addr_t dma;
- u16 i;
-
- len = xdpf->len;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 i = 0, index = ring->next_to_use;
+ struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index];
+ struct ixgbe_tx_buffer *tx_buff = tx_head;
+ union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index);
+ u32 cmd_type, len = xdpf->len;
+ void *data = xdpf->data;
- if (unlikely(!ixgbe_desc_unused(ring)))
+ if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags))
return IXGBE_XDP_CONSUMED;
- dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma))
- return IXGBE_XDP_CONSUMED;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- /* record the location of the first descriptor for this packet */
- tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT);
- i = ring->next_to_use;
- tx_desc = IXGBE_TX_DESC(ring, i);
+ for (;;) {
+ dma_addr_t dma;
- dma_unmap_len_set(tx_buffer, len, len);
- dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_buffer->xdpf = xdpf;
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma))
+ goto unmap;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ dma_unmap_len_set(tx_buff, len, len);
+ dma_unmap_addr_set(tx_buff, dma, dma);
+
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS | len;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buff->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_buff = &ring->tx_buffer_info[index];
+ tx_desc = IXGBE_TX_DESC(ring, index);
+ tx_desc->read.olinfo_status = 0;
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
/* put descriptor type bits */
- cmd_type = IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_DEXT |
- IXGBE_ADVTXD_DCMD_IFCS;
- cmd_type |= len | IXGBE_TXD_CMD;
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
- tx_desc->read.olinfo_status =
- cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
/* Avoid any potential race with xdp_xmit and cleanup */
smp_wmb();
- /* set next_to_watch value indicating a packet is present */
- i++;
- if (i == ring->count)
- i = 0;
-
- tx_buffer->next_to_watch = tx_desc;
- ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ ring->next_to_use = index;
return IXGBE_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_buff = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(tx_buff, len))
+ dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma),
+ dma_unmap_len(tx_buff, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buff, len, 0);
+ if (tx_buff == tx_head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return IXGBE_XDP_CONSUMED;
}
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
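
The rewritten ixgbe_xmit_xdp_ring() above handles multi-buffer XDP frames. As an illustrative sketch (not driver code), the frame is walked head-first and then fragment by fragment via the shared info:

#include <linux/skbuff.h>
#include <net/xdp.h>

static void example_for_each_xdp_buf(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;
	void *data = xdpf->data;
	unsigned int len = xdpf->len;
	u8 i;

	/* map/queue (data, len) for the head buffer here */

	for (i = 0; i < nr_frags; i++) {
		data = skb_frag_address(&sinfo->frags[i]);
		len = skb_frag_size(&sinfo->frags[i]);
		/* map/queue (data, len) for fragment i here */
	}
}
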
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index e763cee0695e..9984ebc62d78 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -25,7 +25,7 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
/* send the important bits to the PF */
sam = (struct sa_mbx_msg *)(&msgbuf[1]);
- sam->flags = xs->xso.flags;
+ sam->dir = xs->xso.dir;
sam->spi = xs->id.spi;
sam->proto = xs->id.proto;
sam->family = xs->props.family;
@@ -280,7 +280,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
return -EINVAL;
}
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa rsa;
if (xs->calg) {
@@ -394,7 +394,7 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
if (!ipsec->rx_tbl[sa_idx].used) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h
index 3740725041c3..d22990165353 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h
@@ -57,7 +57,7 @@ struct ixgbevf_ipsec {
struct sa_mbx_msg {
__be32 spi;
- u8 flags;
+ u8 dir;
u8 proto;
u16 family;
__be32 addr[4];
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 2b7eade373be..b84128b549b4 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1869,7 +1869,7 @@ static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
* design, incremented at different moments in the chain of packet processing,
* it is very likely that incoming packets could have been dropped after being
* counted by hardware but before reaching software statistics (most probably
- * multicast packets), and in the oppposite way, during transmission, FCS bytes
+ * multicast packets), and in the opposite way, during transmission, FCS bytes
* are added in between as well as TSO skb will be split and header bytes added.
* Hence, statistics gathered from userspace with ifconfig (software) and
* ethtool (hardware) cannot be compared.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index e020c81f3455..97f080c66dd4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -202,7 +202,7 @@ static int octep_request_irqs(struct octep_device *oct)
struct msix_entry *msix_entry;
char **non_ioq_msix_names;
int num_non_ioq_msix;
- int ret, i;
+ int ret, i, j;
num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);
@@ -233,23 +233,23 @@ static int octep_request_irqs(struct octep_device *oct)
}
/* Request IRQs for Tx/Rx queues */
- for (i = 0; i < oct->num_oqs; i++) {
- ioq_vector = oct->ioq_vector[i];
- msix_entry = &oct->msix_entries[i + num_non_ioq_msix];
+ for (j = 0; j < oct->num_oqs; j++) {
+ ioq_vector = oct->ioq_vector[j];
+ msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
snprintf(ioq_vector->name, sizeof(ioq_vector->name),
- "%s-q%d", netdev->name, i);
+ "%s-q%d", netdev->name, j);
ret = request_irq(msix_entry->vector,
octep_ioq_intr_handler, 0,
ioq_vector->name, ioq_vector);
if (ret) {
netdev_err(netdev,
"request_irq failed for Q-%d; err=%d",
- i, ret);
+ j, ret);
goto ioq_irq_err;
}
- cpumask_set_cpu(i % num_online_cpus(),
+ cpumask_set_cpu(j % num_online_cpus(),
&ioq_vector->affinity_mask);
irq_set_affinity_hint(msix_entry->vector,
&ioq_vector->affinity_mask);
@@ -257,16 +257,21 @@ static int octep_request_irqs(struct octep_device *oct)
return 0;
ioq_irq_err:
- while (i > num_non_ioq_msix) {
- --i;
- irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
- free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ while (j) {
+ --j;
+ ioq_vector = oct->ioq_vector[j];
+ msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
+
+ irq_set_affinity_hint(msix_entry->vector, NULL);
+ free_irq(msix_entry->vector, ioq_vector);
}
non_ioq_irq_err:
while (i) {
--i;
free_irq(oct->msix_entries[i].vector, oct);
}
+ kfree(oct->non_ioq_irq_names);
+ oct->non_ioq_irq_names = NULL;
alloc_err:
return -1;
}
@@ -980,8 +985,7 @@ static void octep_device_cleanup(struct octep_device *oct)
dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
for (i = 0; i < OCTEP_MAX_VF; i++) {
- if (oct->mbox[i])
- vfree(oct->mbox[i]);
+ vfree(oct->mbox[i]);
oct->mbox[i] = NULL;
}
@@ -1149,6 +1153,7 @@ static int __init octep_init_module(void)
if (ret < 0) {
pr_err("%s: Failed to register PCI driver; err=%d\n",
OCTEP_DRV_NAME, ret);
+ destroy_workqueue(octep_wq);
return ret;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 945947ec7723..d9ae0937d17a 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -230,8 +230,7 @@ static int octep_free_oq(struct octep_oq *oq)
octep_oq_free_ring_buffers(oq);
- if (oq->buff_info)
- vfree(oq->buff_info);
+ vfree(oq->buff_info);
if (oq->desc_ring)
dma_free_coherent(oq->dev,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 511552bc3e87..5a520d37bea0 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -270,8 +270,7 @@ static void octep_free_iq(struct octep_iq *iq)
desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
- if (iq->buff_info)
- vfree(iq->buff_info);
+ vfree(iq->buff_info);
if (iq->desc_ring)
dma_free_coherent(iq->dev, desc_ring_size,
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 639893d87055..e1036b0eb6b1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -33,6 +33,7 @@ config OCTEONTX2_PF
select OCTEONTX2_MBOX
select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
+ select DIMLIB
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
help
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index b9d7601138ca..fb8db5888d2f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -97,11 +97,6 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
-#define OTX2_GET_RX_STATS(reg) \
- otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
-#define OTX2_GET_TX_STATS(reg) \
- otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
-
dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index c587c14ac2a3..ce2766317c0b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -18,6 +18,7 @@
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
+#include <linux/dim.h>
#include <mbox.h>
#include <npc.h>
@@ -54,6 +55,11 @@ enum arua_mapped_qtypes {
/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000
+#define OTX2_GET_RX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
+#define OTX2_GET_TX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
+
struct otx2_lmt_info {
u64 lmt_addr;
u16 lmt_id;
@@ -351,6 +357,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
u64 flags;
u64 *cq_op_addr;
@@ -408,6 +415,9 @@ struct otx2_nic {
u8 pfc_en;
u8 *queue_to_pfc_map;
#endif
+
+ /* napi event count. It is needed for adaptive irq coalescing. */
+ u32 napi_events;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index fc328de5345e..bc614a4def9e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -455,6 +455,14 @@ static int otx2_get_coalesce(struct net_device *netdev,
cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
cmd->tx_coalesce_usecs = hw->cq_time_wait;
cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
+ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
+ cmd->use_adaptive_rx_coalesce = 1;
+ cmd->use_adaptive_tx_coalesce = 1;
+ } else {
+ cmd->use_adaptive_rx_coalesce = 0;
+ cmd->use_adaptive_tx_coalesce = 0;
+ }
return 0;
}
@@ -466,11 +474,30 @@ static int otx2_set_coalesce(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct otx2_hw *hw = &pfvf->hw;
+ u8 priv_coalesce_status;
int qidx;
if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
return 0;
+ if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) {
+ netdev_err(netdev,
+ "adaptive-rx should be same as adaptive-tx");
+ return -EINVAL;
+ }
+
+ /* Check and update coalesce status */
+ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
+ priv_coalesce_status = 1;
+ if (!ec->use_adaptive_rx_coalesce)
+ pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
+ } else {
+ priv_coalesce_status = 0;
+ if (ec->use_adaptive_rx_coalesce)
+ pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
+ }
+
/* 'cq_time_wait' is 8bit and is in multiple of 100ns,
* so clamp the user given value to the range of 1 to 25usec.
*/
@@ -494,9 +521,9 @@ static int otx2_set_coalesce(struct net_device *netdev,
* so clamp the user given value to the range of 1 to 64k.
*/
ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
- 1, U16_MAX);
+ 1, NAPI_POLL_WEIGHT);
ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
- 1, U16_MAX);
+ 1, NAPI_POLL_WEIGHT);
/* Rx and Tx are mapped to same CQ, check which one
* is changed, if both then choose the min.
@@ -509,6 +536,17 @@ static int otx2_set_coalesce(struct net_device *netdev,
hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
ec->tx_max_coalesced_frames);
+ /* Reset 'cq_time_wait' and 'cq_ecount_wait' to
+ * default values if coalesce status changed from
+ * 'on' to 'off'.
+ */
+ if (priv_coalesce_status &&
+ ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) !=
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+ hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ }
+
if (netif_running(netdev)) {
for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
otx2_config_irq_coalescing(pfvf, qidx);
@@ -1230,7 +1268,8 @@ end:
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 53b2706d65a1..fe3472e04c23 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1254,6 +1254,7 @@ static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
/* Schedule NAPI */
+ pf->napi_events++;
napi_schedule_irqoff(&cq_poll->napi);
return IRQ_HANDLED;
@@ -1267,6 +1268,7 @@ static void otx2_disable_napi(struct otx2_nic *pf)
for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
cq_poll = &qset->napi[qidx];
+ cancel_work_sync(&cq_poll->dim.work);
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
}
@@ -1546,6 +1548,24 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
mutex_unlock(&pf->mbox.lock);
}
+static void otx2_dim_work(struct work_struct *w)
+{
+ struct dim_cq_moder cur_moder;
+ struct otx2_cq_poll *cq_poll;
+ struct otx2_nic *pfvf;
+ struct dim *dim;
+
+ dim = container_of(w, struct dim, work);
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ cq_poll = container_of(dim, struct otx2_cq_poll, dim);
+ pfvf = (struct otx2_nic *)cq_poll->dev;
+ pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
+ CQ_TIMER_THRESH_MAX : cur_moder.usec;
+ pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
+ NAPI_POLL_WEIGHT : cur_moder.pkts;
+ dim->state = DIM_START_MEASURE;
+}
+
int otx2_open(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1612,6 +1632,8 @@ int otx2_open(struct net_device *netdev)
cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
cq_poll->dev = (void *)pf;
+ cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
netif_napi_add(netdev, &cq_poll->napi,
otx2_napi_handler, NAPI_POLL_WEIGHT);
napi_enable(&cq_poll->napi);
@@ -1718,7 +1740,6 @@ err_free_cints:
vec = pci_irq_vector(pf->pdev,
pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
- synchronize_irq(vec);
free_irq(vec, pf);
err_disable_napi:
otx2_disable_napi(pf);
@@ -1762,7 +1783,6 @@ int otx2_stop(struct net_device *netdev)
vec = pci_irq_vector(pf->pdev,
pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
- synchronize_irq(vec);
free_irq(vec, pf);
/* Cleanup CQ NAPI and IRQ */
@@ -1796,8 +1816,7 @@ int otx2_stop(struct net_device *netdev)
kfree(qset->rq);
kfree(qset->napi);
/* Do not clear RQ/SQ ringsize settings */
- memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
- sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
+ memset_startat(qset, 0, sqe_cnt);
return 0;
}
EXPORT_SYMBOL(otx2_stop);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index c26de15b2ac3..3baeafc40807 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -484,6 +484,18 @@ process_cqe:
return 0;
}
+static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
+{
+ struct dim_sample dim_sample;
+ u64 rx_frames, rx_bytes;
+
+ rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
+ OTX2_GET_RX_STATS(RX_UCAST);
+ rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+ dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
+ net_dim(&cq_poll->dim, dim_sample);
+}
+
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
@@ -521,6 +533,17 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
return workdone;
+ /* Check for adaptive interrupt coalesce */
+ if (workdone != 0 &&
+ ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+ /* Adjust irq coalese using net_dim */
+ otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ /* Update irq coalescing */
+ for (i = 0; i < pfvf->hw.cint_cnt; i++)
+ otx2_config_irq_coalescing(pfvf, i);
+ }
+
/* Re-enable interrupts */
otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
BIT_ULL(0));
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index f1a04cf9210c..c88e8a436029 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -109,6 +109,7 @@ struct otx2_cq_poll {
#define CINT_INVALID_CQ 255
u8 cint_idx;
u8 cq_ids[CQS_PER_CINT];
+ struct dim dim;
struct napi_struct napi;
};
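
The otx2 changes above wire the queue interrupt path into the generic DIM library. A condensed sketch of the net_dim consumer pattern (names are illustrative; the device-specific register programming is omitted):

#include <linux/dim.h>
#include <linux/workqueue.h>

static void example_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder m = net_dim_get_rx_moderation(dim->mode,
							  dim->profile_ix);

	/* program m.usec / m.pkts into the coalescing registers here */
	dim->state = DIM_START_MEASURE;	/* arm the next measurement */
}

/* Call once at setup: INIT_WORK(&dim->work, example_dim_work);
 * then feed a sample from the NAPI poll loop:
 */
static void example_poll_done(struct dim *dim, u16 events,
			      u64 packets, u64 bytes)
{
	struct dim_sample sample;

	dim_update_sample(events, packets, bytes, &sample);
	net_dim(dim, sample);	/* may queue dim->work when a step is due */
}
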
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index e452cdeaf703..dc3e3ddc60bf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -102,7 +102,7 @@ struct prestera_sdma {
struct net_device napi_dev;
u32 map_addr;
u64 dma_mask;
- /* protect SDMA with concurrrent access from multiple CPUs */
+ /* protect SDMA with concurrent access from multiple CPUs */
spinlock_t tx_lock;
};
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 45ba0970504a..fe66ba8793cf 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,3 +11,8 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
+
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_mtk_ppe.o += -Wno-array-bounds
+endif
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 31c5da5d6b72..a9d4fd8945bb 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -34,6 +34,96 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+static const struct mtk_reg_map mtk_reg_map = {
+ .tx_irq_mask = 0x1a1c,
+ .tx_irq_status = 0x1a18,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x1800,
+ .rx_ptr = 0x1900,
+ .rx_cnt_cfg = 0x1904,
+ .qcrx_ptr = 0x1908,
+ .glo_cfg = 0x1a04,
+ .rst_idx = 0x1a08,
+ .delay_irq = 0x1a0c,
+ .fc_th = 0x1a10,
+ .int_grp = 0x1a20,
+ .hred = 0x1a44,
+ .ctx_ptr = 0x1b00,
+ .dtx_ptr = 0x1b04,
+ .crx_ptr = 0x1b10,
+ .drx_ptr = 0x1b14,
+ .fq_head = 0x1b20,
+ .fq_tail = 0x1b24,
+ .fq_count = 0x1b28,
+ .fq_blen = 0x1b2c,
+ },
+ .gdm1_cnt = 0x2400,
+};
+
+static const struct mtk_reg_map mt7628_reg_map = {
+ .tx_irq_mask = 0x0a28,
+ .tx_irq_status = 0x0a20,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+};
+
+static const struct mtk_reg_map mt7986_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6100,
+ .rx_cnt_cfg = 0x6104,
+ .pcrx_ptr = 0x6108,
+ .glo_cfg = 0x6204,
+ .rst_idx = 0x6208,
+ .delay_irq = 0x620c,
+ .irq_status = 0x6220,
+ .irq_mask = 0x6228,
+ .int_grp = 0x6250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ },
+ .gdm1_cnt = 0x1c00,
+};
+
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@@ -57,7 +147,7 @@ static const char * const mtk_clks_source_name[] = {
"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
- "sgmii_ck", "eth2pll",
+ "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -263,14 +353,33 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
+static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ unsigned int sid;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface)) {
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac->id;
+
+ return mtk_sgmii_select_pcs(eth->sgmii, sid);
+ }
+
+ return NULL;
+}
+
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
- u32 mcr_cur, mcr_new, sid, i;
int val, ge_mode, err = 0;
+ u32 i;
/* MT76x8 has no hardware settings between for the MAC */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
@@ -327,6 +436,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
+ /* FIXME: this is incorrect. Not only does it
+ * use state->speed (which is not guaranteed
+ * to be correct) but it also makes use of it
+ * in a code path that will only be reachable
+ * when the PHY interface mode changes, not
+ * when the speed changes. Consequently, RGMII
+ * is probably broken.
+ */
mtk_gmac0_rgmii_adjust(mac->hw,
state->interface,
state->speed);
@@ -383,38 +500,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
SYSCFG0_SGMII_MASK,
~(u32)SYSCFG0_SGMII_MASK);
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
- 0 : mac->id;
-
- /* Setup SGMIISYS with the determined property */
- if (state->interface != PHY_INTERFACE_MODE_SGMII)
- err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
- state);
- else if (phylink_autoneg_inband(mode))
- err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
-
- if (err)
- goto init_err;
-
- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
- SYSCFG0_SGMII_MASK, val);
+ /* Save the syscfg0 value for mac_finish */
+ mac->syscfg0 = val;
} else if (phylink_autoneg_inband(mode)) {
dev_err(eth->dev,
"In-band mode not supported in non SGMII mode!\n");
return;
}
- /* Setup gmac */
- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr_new = mcr_cur;
- mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
-
- /* Only update control register when needed! */
- if (mcr_new != mcr_cur)
- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
-
return;
err_phy:
@@ -427,6 +520,33 @@ init_err:
mac->id, phy_modes(state->interface), err);
}
+static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ /* Enable SGMII */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface))
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, mac->syscfg0);
+
+ /* Setup gmac */
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
static void mtk_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
@@ -459,14 +579,6 @@ static void mtk_mac_pcs_get_state(struct phylink_config *config,
state->pause |= MLO_PAUSE_TX;
}
-static void mtk_mac_an_restart(struct phylink_config *config)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
-
- mtk_sgmii_restart_an(mac->hw, mac->id);
-}
-
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -485,8 +597,9 @@ static void mtk_mac_link_up(struct phylink_config *config,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ u32 mcr;
+ mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
MAC_MCR_FORCE_RX_FC);
@@ -518,9 +631,10 @@ static void mtk_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = mtk_mac_select_pcs,
.mac_pcs_get_state = mtk_mac_pcs_get_state,
- .mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
+ .mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
};
@@ -576,8 +690,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -587,8 +701,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -598,8 +712,8 @@ static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -609,8 +723,8 @@ static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -661,39 +775,39 @@ void mtk_stats_update_mac(struct mtk_mac *mac)
hw_stats->rx_checksum_errors +=
mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
} else {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
unsigned int offs = hw_stats->reg_offset;
u64 stats;
- hw_stats->rx_bytes += mtk_r32(mac->hw,
- MTK_GDM1_RX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
if (stats)
hw_stats->rx_bytes += (stats << 32);
hw_stats->rx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
hw_stats->rx_overflow +=
- mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
hw_stats->rx_fcs_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
hw_stats->rx_short_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
hw_stats->rx_long_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
hw_stats->rx_checksum_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
hw_stats->tx_skip +=
- mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
hw_stats->tx_collisions +=
- mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
hw_stats->tx_bytes +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
}
u64_stats_update_end(&hw_stats->syncp);
@@ -767,8 +881,8 @@ static inline int mtk_max_buf_size(int frag_size)
return buf_size;
}
-static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
- struct mtk_rx_dma *dma_rxd)
+static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
+ struct mtk_rx_dma_v2 *dma_rxd)
{
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
if (!(rxd->rxd2 & RX_DMA_DONE))
@@ -777,6 +891,10 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
return true;
}
@@ -784,20 +902,20 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * sizeof(struct mtk_tx_dma),
+ cnt * soc->txrx.txd_size,
&eth->phy_scratch_ring,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
- GFP_KERNEL);
+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
if (unlikely(!eth->scratch_head))
return -ENOMEM;
@@ -807,37 +925,44 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- phy_ring_tail = eth->phy_scratch_ring +
- (sizeof(struct mtk_tx_dma) * (cnt - 1));
+ phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
- eth->scratch_ring[i].txd1 =
- (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
- ((i + 1) * sizeof(struct mtk_tx_dma)));
- eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
+ txd->txd2 = eth->phy_scratch_ring +
+ (i + 1) * soc->txrx.txd_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
- mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
- mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+ mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
+ mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
+ mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
return 0;
}
-static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
- void *ret = ring->dma;
-
- return ret + (desc - ring->phys);
+ return ring->dma + (desc - ring->phys);
}
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+ void *txd, u32 txd_size)
{
- int idx = txd - ring->dma;
+ int idx = (txd - ring->dma) / txd_size;
return &ring->buf[idx];
}
@@ -845,12 +970,12 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
struct mtk_tx_dma *dma)
{
- return ring->dma_pdma - ring->dma + dma;
+ return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
}
-static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
- return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+ return (dma - ring->dma) / txd_size;
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -918,18 +1043,108 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
+static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *desc = txd;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM;
+ /* vlan header offload */
+ if (info->vlan)
+ data |= TX_DMA_INS_VLAN | info->vlan_tci;
+ }
+ WRITE_ONCE(desc->txd4, data);
+}
+
+static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma_v2 *desc = txd;
+ struct mtk_eth *eth = mac->hw;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ if (!info->qid && mac->id)
+ info->qid = MTK_QDMA_GMAC2_QID;
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
+ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
+ WRITE_ONCE(desc->txd4, data);
+
+ data = 0;
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO_V2;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM_V2;
+ }
+ WRITE_ONCE(desc->txd5, data);
+
+ data = 0;
+ if (info->first && info->vlan)
+ data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
+ WRITE_ONCE(desc->txd6, data);
+
+ WRITE_ONCE(desc->txd7, 0);
+ WRITE_ONCE(desc->txd8, 0);
+}
+
+static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mtk_tx_set_dma_desc_v2(dev, txd, info);
+ else
+ mtk_tx_set_dma_desc_v1(dev, txd, info);
+}
+
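
A hedged sketch of the shape of this split: the caller fills one generic record and a capability flag selects the layout-specific writer, roughly what mtk_tx_map() and mtk_tx_set_dma_desc() do. The field and function names below are invented, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

struct desc_info {              /* stand-in for struct mtk_tx_dma_desc_info */
        unsigned long addr;
        unsigned int size;
        bool first, last;
};

static void fill_desc_v1(const struct desc_info *info)
{
        printf("v1: addr=%#lx len=%u%s\n", info->addr, info->size,
               info->last ? " (last)" : "");
}

static void fill_desc_v2(const struct desc_info *info)
{
        printf("v2: addr=%#lx len=%u, plus txd5..txd8 cleared\n",
               info->addr, info->size);
}

static void fill_desc(bool netsys_v2, const struct desc_info *info)
{
        if (netsys_v2)
                fill_desc_v2(info);
        else
                fill_desc_v1(info);
}

int main(void)
{
        const struct desc_info info = {
                .addr = 0x1000, .size = 64, .first = true, .last = true,
        };

        fill_desc(false, &info);        /* legacy NETSYS v1 SoC */
        fill_desc(true, &info);         /* NETSYS v2 SoC such as MT7986 */
        return 0;
}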
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = skb_headlen(skb),
+ .gso = gso,
+ .csum = skb->ip_summed == CHECKSUM_PARTIAL,
+ .vlan = skb_vlan_tag_present(skb),
+ .qid = skb->mark & MTK_QDMA_TX_MASK,
+ .vlan_tci = skb_vlan_tag_get(skb),
+ .first = true,
+ .last = !skb_is_nonlinear(skb),
+ };
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
int i, n_desc = 1;
- u32 txd4 = 0, fport;
int k = 0;
itxd = ring->next_free;
@@ -937,52 +1152,35 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
- /* set the forward port */
- fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
- txd4 |= fport;
-
- itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
memset(itx_buf, 0, sizeof(*itx_buf));
- if (gso)
- txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb))
- txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
-
- mapped_addr = dma_map_single(eth->dma_dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
return -ENOMEM;
- WRITE_ONCE(itxd->txd1, mapped_addr);
+ mtk_tx_set_dma_desc(dev, itxd, &txd_info);
+
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
/* TX SG offload */
txd = itxd;
txd_pdma = qdma_to_pdma(ring, txd);
- nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nr_frags; i++) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
- bool last_frag = false;
- unsigned int frag_map_size;
bool new_desc = true;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
(i & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
txd_pdma = qdma_to_pdma(ring, txd);
@@ -994,25 +1192,22 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
new_desc = false;
}
-
- frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = min_t(unsigned int, frag_size,
+ soc->txrx.dma_max_len);
+ txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ !(frag_size - txd_info.size);
+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
+ offset, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
goto err_dma;
- if (i == nr_frags - 1 &&
- (frag_size - frag_map_size) == 0)
- last_frag = true;
-
- WRITE_ONCE(txd->txd1, mapped_addr);
- WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
- TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0));
- WRITE_ONCE(txd->txd4, fport);
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -1020,21 +1215,18 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
- frag_map_size, k++);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
+ txd_info.size, k++);
- frag_size -= frag_map_size;
- offset += frag_map_size;
+ frag_size -= txd_info.size;
+ offset += txd_info.size;
}
}
/* store skb to cleanup */
itx_buf->skb = skb;
- WRITE_ONCE(itxd->txd4, txd4);
- WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
- (!nr_frags * TX_DMA_LS0)));
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
@@ -1052,13 +1244,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
!netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
- int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
- ring->dma_size);
+ int next_idx;
+
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1066,13 +1260,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1082,17 +1276,16 @@ err_dma:
return -ENOMEM;
}
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
+static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
{
- int i, nfrags;
+ int i, nfrags = 1;
skb_frag_t *frag;
- nfrags = 1;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- MTK_TX_DMA_BUF_LEN);
+ eth->soc->txrx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1144,7 +1337,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto drop;
- tx_num = mtk_cal_txd_req(skb);
+ tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_stop_queue(dev);
netif_err(eth, tx_queued, dev,
@@ -1195,9 +1388,12 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
return &eth->rx_ring[0];
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ struct mtk_rx_dma *rxd;
+
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
}
@@ -1233,7 +1429,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
- struct mtk_rx_dma *rxd, trxd;
+ struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
while (done < budget) {
@@ -1241,26 +1437,25 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
unsigned int pktlen;
dma_addr_t dma_addr;
u32 hash, reason;
- int mac;
+ int mac = 0;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = &ring->dma[idx];
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
data = ring->data[idx];
- if (!mtk_rx_get_desc(&trxd, rxd))
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet comes from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
- (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
- mac = 0;
- else
- mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK) - 1;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
+ else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+ mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -1303,7 +1498,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
- if (trxd.rxd4 & eth->rx_dma_l4_valid)
+ if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1321,10 +1516,25 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
mtk_ppe_check_skb(eth->ppe, skb,
trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- (trxd.rxd2 & RX_DMA_VTAG))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (trxd.rxd3 & RX_DMA_VTAG_V2)
+ __vlan_hwaccel_put_tag(skb,
+ htons(RX_DMA_VPID(trxd.rxd4)),
+ RX_DMA_VID(trxd.rxd4));
+ } else if (trxd.rxd2 & RX_DMA_VTAG) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ RX_DMA_VID(trxd.rxd3));
+ }
+
+ /* If the device is attached to a dsa switch, the special
+ * tag inserted in VLAN field by hw switch can be offloaded
+ * by RX HW VLAN offload. Clear vlan info.
+ */
+ if (netdev_uses_dsa(netdev))
+ __vlan_hwaccel_clear_tag(skb);
+ }
+
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
@@ -1336,7 +1546,7 @@ release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
@@ -1364,6 +1574,7 @@ rx_done:
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
@@ -1371,7 +1582,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
u32 cpu, dma;
cpu = ring->last_free_ptr;
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+ dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
@@ -1383,7 +1594,8 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
break;
- tx_buf = mtk_desc_to_tx_buf(ring, desc);
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+ eth->soc->txrx.txd_size);
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
@@ -1405,7 +1617,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
}
ring->last_free_ptr = cpu;
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
return budget;
}
@@ -1436,7 +1648,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
mtk_tx_unmap(eth, tx_buf, true);
- desc = &ring->dma[cpu];
+ desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -1498,24 +1710,25 @@ static void mtk_handle_status_irq(struct mtk_eth *eth)
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int tx_done = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n", tx_done,
- mtk_r32(eth, eth->tx_int_status_reg),
- mtk_r32(eth, eth->tx_int_mask_reg));
+ mtk_r32(eth, reg_map->tx_irq_status),
+ mtk_r32(eth, reg_map->tx_irq_mask));
}
if (tx_done == budget)
return budget;
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
return budget;
if (napi_complete_done(napi, tx_done))
@@ -1527,6 +1740,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int rx_done_total = 0;
mtk_handle_status_irq(eth);
@@ -1534,32 +1748,36 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done rx %d, intr 0x%08x/0x%x\n", rx_done,
- mtk_r32(eth, MTK_PDMA_INT_STATUS),
- mtk_r32(eth, MTK_PDMA_INT_MASK));
+ mtk_r32(eth, reg_map->pdma.irq_status),
+ mtk_r32(eth, reg_map->pdma.irq_mask));
}
if (rx_done_total == budget)
return budget;
- } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
return rx_done_total;
}
static int mtk_tx_alloc(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = sizeof(*ring->dma);
+ int i, sz = soc->txrx.txd_size;
+ struct mtk_tx_dma_v2 *txd;
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
GFP_KERNEL);
@@ -1567,7 +1785,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
goto no_tx_mem;
ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
- &ring->phys, GFP_ATOMIC);
+ &ring->phys, GFP_KERNEL);
if (!ring->dma)
goto no_tx_mem;
@@ -1575,18 +1793,25 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
- ring->dma[i].txd2 = next_ptr;
- ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd = ring->dma + i * sz;
+ txd->txd2 = next_ptr;
+ txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
* only as the framework. The real HW descriptors are the PDMA
* descriptors in ring->dma_pdma.
*/
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
- &ring->phys_pdma,
- GFP_ATOMIC);
+ &ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
goto no_tx_mem;
@@ -1598,8 +1823,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
- ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->next_free = ring->dma;
+ ring->last_free = (void *)txd;
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
@@ -1608,20 +1833,20 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
*/
wmb();
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
+ soc->reg_map->qdma.crx_ptr);
+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
- MTK_QTX_CFG(0));
+ soc->reg_map->qdma.qtx_cfg);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
- mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
return 0;
@@ -1632,6 +1857,7 @@ no_tx_mem:
static void mtk_tx_clean(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
int i;
@@ -1644,33 +1870,30 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->dma) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
- ring->dma_pdma,
- ring->phys_pdma);
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size;
int i;
- u32 offset = 0;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
ring = &eth->rx_ring_qdma;
- offset = 0x1000;
} else {
ring = &eth->rx_ring[ring_no];
}
@@ -1697,38 +1920,68 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
}
ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys, GFP_ATOMIC);
+ rx_dma_size * eth->soc->txrx.rxd_size,
+ &ring->phys, GFP_KERNEL);
if (!ring->dma)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
+ struct mtk_rx_dma_v2 *rxd;
+
dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
ring->data[i] + NET_SKB_PAD + eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- ring->dma[i].rxd1 = (unsigned int)dma_addr;
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd->rxd1 = (unsigned int)dma_addr;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
- ring->dma[i].rxd2 = RX_DMA_LSO;
+ rxd->rxd2 = RX_DMA_LSO;
else
- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+
+ rxd->rxd3 = 0;
+ rxd->rxd4 = 0;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+ rxd->rxd8 = 0;
+ }
}
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
- ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
+ if (rx_flag == MTK_RX_FLAGS_QDMA)
+ ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
+ else
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
- mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
- mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ mtk_w32(eth, ring->phys,
+ reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->qdma.rst_idx);
+ } else {
+ mtk_w32(eth, ring->phys,
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->pdma.rst_idx);
+ }
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
return 0;
}
@@ -1739,14 +1992,17 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
if (ring->data && ring->dma) {
for (i = 0; i < ring->dma_size; i++) {
+ struct mtk_rx_dma *rxd;
+
if (!ring->data[i])
continue;
- if (!ring->dma[i].rxd1)
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (!rxd->rxd1)
continue;
- dma_unmap_single(eth->dma_dev,
- ring->dma[i].rxd1,
- ring->buf_size,
- DMA_FROM_DEVICE);
+
+ dma_unmap_single(eth->dma_dev, rxd->rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
skb_free_frag(ring->data[i]);
}
kfree(ring->data);
@@ -1755,9 +2011,8 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
if (ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
+ ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma, ring->phys);
ring->dma = NULL;
}
}
@@ -2032,9 +2287,9 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- reg = MTK_QDMA_GLO_CFG;
+ reg = eth->soc->reg_map->qdma.glo_cfg;
else
- reg = MTK_PDMA_GLO_CFG;
+ reg = eth->soc->reg_map->pdma.glo_cfg;
ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
@@ -2092,8 +2347,8 @@ static int mtk_dma_init(struct mtk_eth *eth)
* automatically
*/
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
- FC_THRES_MIN, MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
+ mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
}
return 0;
@@ -2101,6 +2356,7 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
int i;
for (i = 0; i < MTK_MAC_COUNT; i++)
@@ -2108,9 +2364,8 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
- eth->scratch_ring,
- eth->phy_scratch_ring);
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
@@ -2145,7 +2400,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
}
return IRQ_HANDLED;
@@ -2167,13 +2422,16 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
- if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
- if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+ eth->soc->txrx.rx_irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
- if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
@@ -2187,16 +2445,17 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
}
#endif
static int mtk_start_dma(struct mtk_eth *eth)
{
- u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int err;
err = mtk_dma_init(eth);
@@ -2206,21 +2465,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- MTK_RX_BT_32DWORDS,
- MTK_QDMA_GLO_CFG);
+ val = mtk_r32(eth, reg_map->qdma.glo_cfg);
+ val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
+ MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
+ MTK_CHK_DDONE_EN;
+ else
+ val |= MTK_RX_BT_32DWORDS;
+ mtk_w32(eth, val, reg_map->qdma.glo_cfg);
mtk_w32(eth,
MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
} else {
mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
}
return 0;
@@ -2283,7 +2548,7 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -2335,7 +2600,7 @@ static int mtk_stop(struct net_device *dev)
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -2343,8 +2608,8 @@ static int mtk_stop(struct net_device *dev)
cancel_work_sync(&eth->tx_dim.work);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
- mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+ mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
+ mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
mtk_dma_free(eth);
@@ -2398,6 +2663,7 @@ static void mtk_dim_rx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
@@ -2405,7 +2671,7 @@ static void mtk_dim_rx(struct work_struct *work)
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_TX_MASK;
val |= MTK_PDMA_DELAY_RX_EN;
@@ -2415,9 +2681,9 @@ static void mtk_dim_rx(struct work_struct *work)
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
spin_unlock_bh(&eth->dim_lock);
@@ -2428,6 +2694,7 @@ static void mtk_dim_tx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
@@ -2435,7 +2702,7 @@ static void mtk_dim_tx(struct work_struct *work)
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_RX_MASK;
val |= MTK_PDMA_DELAY_TX_EN;
@@ -2445,9 +2712,9 @@ static void mtk_dim_tx(struct work_struct *work)
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
spin_unlock_bh(&eth->dim_lock);
@@ -2458,6 +2725,7 @@ static int mtk_hw_init(struct mtk_eth *eth)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
ETHSYS_DMA_AG_MAP_PPE;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2492,9 +2760,25 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
}
- /* Non-MT7628 handling... */
- ethsys_reset(eth, RSTCTRL_FE);
- ethsys_reset(eth, RSTCTRL_PPE);
+ val = RSTCTRL_FE | RSTCTRL_PPE;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+
+ val |= RSTCTRL_ETH;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+ }
+
+ ethsys_reset(eth, val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+ }
if (eth->pctl) {
/* Set GE2 driving and slew rate */
@@ -2532,12 +2816,48 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_rx_irq_disable(eth, ~0);
/* FE int grouping */
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ /* PSE should not drop port8 and port9 packets */
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
+ /* PSE Free Queue Flow Control */
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+
+ /* PSE config input queue threshold */
+ mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+ mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+ mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+ /* PSE config output queue threshold */
+ mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+ mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+ mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+ mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+ mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+ mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
+ }
+
return 0;
err_disable_pm:
@@ -2982,14 +3302,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
/* mac config is not set */
mac->interface = PHY_INTERFACE_MODE_NA;
- mac->mode = MLO_AN_PHY;
mac->speed = SPEED_UNKNOWN;
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
- /* This driver makes use of state->speed/state->duplex in
- * mac_config
- */
+ /* This driver makes use of state->speed in mac_config */
mac->phylink_config.legacy_pre_march2020 = true;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
@@ -3101,20 +3418,8 @@ static int mtk_probe(struct platform_device *pdev)
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
- } else {
- eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
- }
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
- } else {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
- }
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
@@ -3342,50 +3647,119 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7621_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7621_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7622_data = {
+ .reg_map = &mtk_reg_map,
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7623_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.offload_version = 2,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7629_data = {
+ .reg_map = &mtk_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7986_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7986_CAPS,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
};
static const struct mtk_soc_data rt5350_data = {
+ .reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
const struct of_device_id of_mtk_match[] = {
@@ -3394,6 +3768,7 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
};
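
For illustration only: the lookup this match table enables reduces to a compatible-string search over per-SoC parameter records, so the data path never branches on the SoC type for sizes or offsets. The two entries below reuse values visible in this patch but are otherwise a sketch, not the driver's tables:

#include <stdio.h>
#include <string.h>

struct soc_params {
        const char *compatible;
        unsigned int txd_size;          /* bytes per TX descriptor */
        unsigned int dma_len_offset;    /* where the length field lives */
        unsigned int dma_max_len;       /* and how wide it is */
};

static const struct soc_params params[] = {
        { "mediatek,mt7623-eth", 16, 16, 0x3fff },
        { "mediatek,mt7986-eth", 32,  8, 0xffff },
};

static const struct soc_params *lookup(const char *compat)
{
        for (size_t i = 0; i < sizeof(params) / sizeof(params[0]); i++)
                if (!strcmp(params[i].compatible, compat))
                        return &params[i];
        return NULL;
}

int main(void)
{
        const struct soc_params *p = lookup("mediatek,mt7986-eth");

        if (p)
                printf("%s: %u-byte txd, length field at bit %u\n",
                       p->compatible, p->txd_size, p->dma_len_offset);
        return 0;
}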
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index b04977fa84f6..0a632896451a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -17,12 +17,14 @@
#include <linux/phylink.h>
#include <linux/rhashtable.h>
#include <linux/dim.h>
+#include <linux/bitfield.h>
#include "mtk_ppe.h"
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_DMA_SIZE 512
#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
@@ -47,6 +49,8 @@
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_QRX_OFFSET 0x10
+
#define MTK_MAX_RX_RING_NUM 4
#define MTK_HW_LRO_DMA_SIZE 8
@@ -80,6 +84,10 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
@@ -99,25 +107,38 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
-/* PDMA RX Base Pointer Register */
-#define MTK_PRX_BASE_PTR0 0x900
-#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+/* FE global misc reg */
+#define MTK_FE_GLO_MISC 0x124
+
+/* PSE Free Queue Flow Control */
+#define PSE_FQFC_CFG1 0x100
+#define PSE_FQFC_CFG2 0x104
+#define PSE_DROP_CFG 0x108
-/* PDMA RX Maximum Count Register */
-#define MTK_PRX_MAX_CNT0 0x904
-#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+/* PSE Input Queue Reservation Register */
+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
-/* PDMA RX CPU Pointer Register */
-#define MTK_PRX_CRX_IDX0 0x908
-#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+/* PSE Output Queue Threshold Register */
+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
+
+/* GDM and CDM Threshold */
+#define MTK_GDM2_THRES 0x1530
+#define MTK_CDMW0_THRES 0x164c
+#define MTK_CDMW1_THRES 0x1650
+#define MTK_CDME0_THRES 0x1654
+#define MTK_CDME1_THRES 0x1658
+#define MTK_CDMM_THRES 0x165c
/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0 0x980
#define MTK_LRO_EN BIT(0)
#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
#define MTK_PDMA_LRO_CTRL_DW1 0x984
#define MTK_PDMA_LRO_CTRL_DW2 0x988
@@ -125,18 +146,19 @@
#define MTK_ADMA_MODE BIT(15)
#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
-/* PDMA Global Configuration Register */
-#define MTK_PDMA_GLO_CFG 0xa04
+#define MTK_RX_DMA_LRO_EN BIT(8)
#define MTK_MULTI_EN BIT(10)
#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_LRO_SDL 0x3000
+#define MTK_RX_CFG_SDL_OFFSET 16
+
/* PDMA Reset Index Register */
-#define MTK_PDMA_RST_IDX 0xa08
#define MTK_PST_DRX_IDX0 BIT(16)
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN BIT(15)
#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
@@ -150,19 +172,9 @@
#define MTK_PDMA_DELAY_PINT_MASK 0x7f
#define MTK_PDMA_DELAY_PTIME_MASK 0xff
-/* PDMA Interrupt Status Register */
-#define MTK_PDMA_INT_STATUS 0xa20
-
-/* PDMA Interrupt Mask Register */
-#define MTK_PDMA_INT_MASK 0xa28
-
/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
-/* PDMA Interrupt grouping registers */
-#define MTK_PDMA_INT_GRP1 0xa50
-#define MTK_PDMA_INT_GRP2 0xa54
-
/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
@@ -184,26 +196,9 @@
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
/* QDMA TX Queue Configuration Registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
-/* QDMA TX Queue Scheduler Registers */
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-
-/* QDMA RX Base Pointer Register */
-#define MTK_QRX_BASE_PTR0 0x1900
-
-/* QDMA RX Maximum Count Register */
-#define MTK_QRX_MAX_CNT0 0x1904
-
-/* QDMA RX CPU Pointer Register */
-#define MTK_QRX_CRX_IDX0 0x1908
-
-/* QDMA RX DMA Pointer Register */
-#define MTK_QRX_DRX_IDX0 0x190C
-
/* QDMA Global Configuration Register */
-#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
#define MTK_NDP_CO_PRO BIT(10)
@@ -215,20 +210,19 @@
#define MTK_TX_DMA_EN BIT(0)
#define MTK_DMA_BUSY_TIMEOUT_US 1000000
-/* QDMA Reset Index Register */
-#define MTK_QDMA_RST_IDX 0x1A08
-
-/* QDMA Delay Interrupt Register */
-#define MTK_QDMA_DELAY_INT 0x1A0C
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
/* QDMA Flow Control Register */
-#define MTK_QDMA_FC_THRES 0x1A10
#define FC_THRES_DROP_MODE BIT(20)
#define FC_THRES_DROP_EN (7 << 16)
#define FC_THRES_MIN 0x4444
/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
#define MTK_TX_DONE_DLY BIT(28)
#define MTK_RX_DONE_INT3 BIT(19)
@@ -242,58 +236,32 @@
#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+#define MTK_RX_DONE_INT_V2 BIT(14)
+
/* QDMA Interrupt grouping registers */
-#define MTK_QDMA_INT_GRP1 0x1a20
-#define MTK_QDMA_INT_GRP2 0x1a24
#define MTK_RLS_DONE_INT BIT(0)
-/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_MASK 0x1A1C
-
-/* QDMA Interrupt Mask Register */
-#define MTK_QDMA_HRED2 0x1A44
-
-/* QDMA TX Forward CPU Pointer Register */
-#define MTK_QTX_CTX_PTR 0x1B00
-
-/* QDMA TX Forward DMA Pointer Register */
-#define MTK_QTX_DTX_PTR 0x1B04
-
-/* QDMA TX Release CPU Pointer Register */
-#define MTK_QTX_CRX_PTR 0x1B10
-
-/* QDMA TX Release DMA Pointer Register */
-#define MTK_QTX_DRX_PTR 0x1B14
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_HEAD 0x1B20
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_TAIL 0x1B24
-
-/* QDMA FQ Free Page Counter Register */
-#define MTK_QDMA_FQ_CNT 0x1B28
-
-/* QDMA FQ Free Page Buffer Length Register */
-#define MTK_QDMA_FQ_BLEN 0x1B2C
-
-/* GMA1 counter / statics register */
-#define MTK_GDM1_RX_GBCNT_L 0x2400
-#define MTK_GDM1_RX_GBCNT_H 0x2404
-#define MTK_GDM1_RX_GPCNT 0x2408
-#define MTK_GDM1_RX_OERCNT 0x2410
-#define MTK_GDM1_RX_FERCNT 0x2414
-#define MTK_GDM1_RX_SERCNT 0x2418
-#define MTK_GDM1_RX_LENCNT 0x241c
-#define MTK_GDM1_RX_CERCNT 0x2420
-#define MTK_GDM1_RX_FCCNT 0x2424
-#define MTK_GDM1_TX_SKIPCNT 0x2428
-#define MTK_GDM1_TX_COLCNT 0x242c
-#define MTK_GDM1_TX_GBCNT_L 0x2430
-#define MTK_GDM1_TX_GBCNT_H 0x2434
-#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
+/* QDMA TX NUM */
+#define MTK_QDMA_TX_NUM 16
+#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
+#define MTK_QDMA_GMAC2_QID 8
+
+#define MTK_TX_DMA_BUF_SHIFT 8
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
+
#define MTK_WDMA0_BASE 0x2800
#define MTK_WDMA1_BASE 0x2c00
@@ -307,10 +275,9 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC BIT(14)
-#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -320,12 +287,14 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
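
TX_DMA_PLEN0() and RX_DMA_GET_PLEN0() now take their mask and shift from eth->soc->txrx because v2 hardware moved and widened the length field. A small standalone sketch of the encode/decode under both geometries, using the values from the defines above:

#include <stdio.h>

static unsigned int prep_plen(unsigned int len, unsigned int max, unsigned int off)
{
        return (len & max) << off;      /* what the PREP/PLEN0 macros do */
}

static unsigned int get_plen(unsigned int word, unsigned int max, unsigned int off)
{
        return (word >> off) & max;     /* what the GET_PLEN0 macro does */
}

int main(void)
{
        unsigned int v1 = prep_plen(1536, 0x3fff, 16);  /* MTK_TX_DMA_BUF_LEN, offset 16 */
        unsigned int v2 = prep_plen(1536, 0xffff, 8);   /* MTK_TX_DMA_BUF_LEN_V2, offset 8 */

        printf("v1 word %#x -> len %u\n", v1, get_plen(v1, 0x3fff, 16));
        printf("v2 word %#x -> len %u\n", v2, get_plen(v2, 0xffff, 8));
        return 0;
}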
/* QDMA descriptor rxd3 */
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
@@ -336,10 +305,15 @@
/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
#define RX_DMA_SPECIAL_TAG BIT(22)
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
+
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
#define PHY_IAC_ACCESS BIT(31)
@@ -463,6 +437,16 @@
#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE BIT(31)
+#define RSTCTRL_PPE1 BIT(30)
+#define RSTCTRL_ETH BIT(23)
+
+/* ethernet reset check idle register */
+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
+
+/* ethernet reset control register */
#define ETHSYS_RSTCTRL 0x34
#define RSTCTRL_FE BIT(6)
#define RSTCTRL_PPE BIT(31)
@@ -493,9 +477,10 @@
#define SGMSYS_SGMII_MODE 0x20
#define SGMII_IF_MODE_BIT0 BIT(0)
#define SGMII_SPEED_DUPLEX_AN BIT(1)
-#define SGMII_SPEED_10 0x0
-#define SGMII_SPEED_100 BIT(2)
-#define SGMII_SPEED_1000 BIT(3)
+#define SGMII_SPEED_MASK GENMASK(3, 2)
+#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
+#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1)
+#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2)
#define SGMII_DUPLEX_FULL BIT(4)
#define SGMII_IF_MODE_BIT5 BIT(5)
#define SGMII_REMOTE_FAULT_DIS BIT(8)
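
The SGMII speed values are now expressed as field values inside SGMII_SPEED_MASK rather than bare bits, so the field can be cleared and rewritten as a unit. A userspace sketch of the same FIELD_PREP-style packing, with local macros instead of linux/bitfield.h:

#include <stdio.h>

#define SPEED_MASK      0xcU                    /* GENMASK(3, 2) */
#define SPEED_PREP(x)   (((x) << 2) & SPEED_MASK)

int main(void)
{
        unsigned int mode = 0x31;                       /* some existing register value */

        mode = (mode & ~SPEED_MASK) | SPEED_PREP(2);    /* force 1000M, keep other bits */
        printf("SGMII_MODE = %#x\n", mode);
        return 0;
}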
@@ -546,6 +531,17 @@ struct mtk_rx_dma {
unsigned int rxd4;
} __packed __aligned(4);
+struct mtk_rx_dma_v2 {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+ unsigned int rxd5;
+ unsigned int rxd6;
+ unsigned int rxd7;
+ unsigned int rxd8;
+} __packed __aligned(4);
+
struct mtk_tx_dma {
unsigned int txd1;
unsigned int txd2;
@@ -553,6 +549,17 @@ struct mtk_tx_dma {
unsigned int txd4;
} __packed __aligned(4);
+struct mtk_tx_dma_v2 {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+ unsigned int txd5;
+ unsigned int txd6;
+ unsigned int txd7;
+ unsigned int txd8;
+} __packed __aligned(4);
+
struct mtk_eth;
struct mtk_mac;
@@ -620,6 +627,10 @@ enum mtk_clks_map {
MTK_CLK_SGMII2_CDR_FB,
MTK_CLK_SGMII_CK,
MTK_CLK_ETH2PLL,
+ MTK_CLK_WOCPU0,
+ MTK_CLK_WOCPU1,
+ MTK_CLK_NETSYS0,
+ MTK_CLK_NETSYS1,
MTK_CLK_MAX
};
@@ -650,6 +661,16 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII2_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB))
enum mtk_dev_state {
MTK_HW_INIT,
@@ -685,7 +706,7 @@ struct mtk_tx_buf {
* are present
*/
struct mtk_tx_ring {
- struct mtk_tx_dma *dma;
+ void *dma;
struct mtk_tx_buf *buf;
dma_addr_t phys;
struct mtk_tx_dma *next_free;
@@ -715,7 +736,7 @@ enum mtk_rx_flags {
* @calc_idx: The current head of ring
*/
struct mtk_rx_ring {
- struct mtk_rx_dma *dma;
+ void *dma;
u8 **data;
dma_addr_t phys;
u16 frag_size;
@@ -739,7 +760,9 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
+ MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
+ MTK_RSTCTRL_PPE1_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -771,7 +794,9 @@ enum mkt_eth_capabilities {
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -844,8 +869,62 @@ enum mkt_eth_capabilities {
MTK_MUX_U3_GMAC2_TO_QPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+
+struct mtk_tx_dma_desc_info {
+ dma_addr_t addr;
+ u32 size;
+ u16 vlan_tci;
+ u16 qid;
+ u8 gso:1;
+ u8 csum:1;
+ u8 vlan:1;
+ u8 first:1;
+ u8 last:1;
+};
+
+struct mtk_reg_map {
+ u32 tx_irq_mask;
+ u32 tx_irq_status;
+ struct {
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 pcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 irq_status; /* interrupt status */
+ u32 irq_mask; /* interrupt mask */
+ u32 int_grp;
+ } pdma;
+ struct {
+ u32 qtx_cfg; /* tx queue configuration */
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 qcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 fc_th; /* flow control */
+ u32 int_grp;
+ u32 hred; /* interrupt mask */
+ u32 ctx_ptr; /* tx acquire cpu pointer */
+ u32 dtx_ptr; /* tx acquire dma pointer */
+ u32 crx_ptr; /* tx release cpu pointer */
+ u32 drx_ptr; /* tx release dma pointer */
+ u32 fq_head; /* fq head pointer */
+ u32 fq_tail; /* fq tail pointer */
+ u32 fq_count; /* fq free page count */
+ u32 fq_blen; /* fq free page buffer length */
+ } qdma;
+ u32 gdm1_cnt;
+};
+
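
A rough sketch of how such a map is consumed: the offsets are bundled once per SoC and the hot paths read them through reg_map instead of compile-time constants. The v1 offsets below come from the removed MTK_QDMA_INT_STATUS/MASK defines; the v2 offsets are placeholders, not the real mt7986_reg_map values:

#include <stdint.h>
#include <stdio.h>

struct reg_map {
        uint32_t tx_irq_status;
        uint32_t tx_irq_mask;
};

static const struct reg_map v1_map = { .tx_irq_status = 0x1a18, .tx_irq_mask = 0x1a1c };
static const struct reg_map v2_map = { .tx_irq_status = 0x4000, .tx_irq_mask = 0x4004 }; /* placeholders */

static void ack_tx_irq(const struct reg_map *map)
{
        /* in the driver this would be mtk_w32(eth, MTK_TX_DONE_INT, map->tx_irq_status) */
        printf("write irq-ack to offset %#x\n", map->tx_irq_status);
}

int main(void)
{
        ack_tx_irq(&v1_map);    /* legacy SoCs */
        ack_tx_irq(&v2_map);    /* a NETSYS v2 SoC, offsets invented here */
        return 0;
}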
/* struct mtk_eth_data - This is the structure holding all differences
 * among various platforms
+ * @reg_map Soc register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
* @caps Flags shown the extra capability for the SoC
@@ -854,37 +933,53 @@ enum mkt_eth_capabilities {
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
+ * @txd_size Tx DMA descriptor size.
+ * @rxd_size Rx DMA descriptor size.
+ * @rx_irq_done_mask Rx irq done register mask.
+ * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
*/
struct mtk_soc_data {
+ const struct mtk_reg_map *reg_map;
u32 ana_rgc3;
u32 caps;
u32 required_clks;
bool required_pctl;
u8 offload_version;
netdev_features_t hw_features;
+ struct {
+ u32 txd_size;
+ u32 rxd_size;
+ u32 rx_irq_done_mask;
+ u32 rx_dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } txrx;
};
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
-#define MTK_SGMII_PHYSPEED_AN BIT(31)
-#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
-#define MTK_SGMII_PHYSPEED_1000 BIT(0)
-#define MTK_SGMII_PHYSPEED_2500 BIT(1)
-#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
-
-/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
- * characteristics
+/* struct mtk_pcs - This structure holds each sgmii regmap and associated
+ * data
* @regmap: The register map pointing at the range used to setup
* SGMII modes
- * @flags: The enum refers to which mode the sgmii wants to run on
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ * @pcs: Phylink PCS structure
*/
+struct mtk_pcs {
+ struct regmap *regmap;
+ u32 ana_rgc3;
+ struct phylink_pcs pcs;
+};
+/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
+ * characteristics
+ * @pcs Array of individual PCS structures
+ */
struct mtk_sgmii {
- struct regmap *regmap[MTK_MAX_DEVS];
- u32 flags[MTK_MAX_DEVS];
- u32 ana_rgc3;
+ struct mtk_pcs pcs[MTK_MAX_DEVS];
};
/* struct mtk_eth - This is the main data structure for holding the state
@@ -956,7 +1051,7 @@ struct mtk_eth {
struct mtk_rx_ring rx_ring_qdma;
struct napi_struct tx_napi;
struct napi_struct rx_napi;
- struct mtk_tx_dma *scratch_ring;
+ void *scratch_ring;
dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clks[MTK_CLK_MAX];
@@ -979,9 +1074,6 @@ struct mtk_eth {
u32 tx_bytes;
struct dim tx_dim;
- u32 tx_int_mask_reg;
- u32 tx_int_status_reg;
- u32 rx_dma_l4_valid;
int ip_align;
struct mtk_ppe *ppe;
@@ -999,7 +1091,6 @@ struct mtk_eth {
struct mtk_mac {
int id;
phy_interface_t interface;
- unsigned int mode;
int speed;
struct device_node *of_node;
struct phylink *phylink;
@@ -1008,6 +1099,7 @@ struct mtk_mac {
struct mtk_hw_stats *hw_stats;
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
+ unsigned int syscfg0;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
@@ -1019,12 +1111,9 @@ void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id);
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
u32 ana_rgc3);
-int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
- const struct phylink_link_state *state);
-void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 1fe31058b0f2..90e7dfd011c9 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -90,10 +90,11 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
{
struct net_device_path_ctx ctx = {
.dev = dev,
- .daddr = addr,
};
struct net_device_path path = {};
+ memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
+
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
return -1;
@@ -434,7 +435,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;
- if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
+ err = mtk_foe_entry_commit(eth->ppe, entry);
+ if (err < 0)
goto free;
err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 5897940a418b..736839c84130 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -9,119 +9,151 @@
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/phylink.h>
#include <linux/regmap.h>
#include "mtk_eth_soc.h"
-int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+static struct mtk_pcs *pcs_to_mtk_pcs(struct phylink_pcs *pcs)
{
- struct device_node *np;
- int i;
-
- ss->ana_rgc3 = ana_rgc3;
-
- for (i = 0; i < MTK_MAX_DEVS; i++) {
- np = of_parse_phandle(r, "mediatek,sgmiisys", i);
- if (!np)
- break;
-
- ss->regmap[i] = syscon_node_to_regmap(np);
- of_node_put(np);
- if (IS_ERR(ss->regmap[i]))
- return PTR_ERR(ss->regmap[i]);
- }
-
- return 0;
+ return container_of(pcs, struct mtk_pcs, pcs);
}
-int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
+/* For SGMII interface mode */
+static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs)
{
unsigned int val;
- if (!ss->regmap[id])
- return -EINVAL;
-
/* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
+ regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER,
SGMII_LINK_TIMER_DEFAULT);
- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
val |= SGMII_REMOTE_FAULT_DIS;
- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
- regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_RESTART;
- regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
- regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val &= ~SGMII_PHYA_PWD;
- regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
return 0;
+
}
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
- const struct phylink_link_state *state)
+/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a
+ * fixed speed.
+ */
+static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs,
+ phy_interface_t interface)
{
unsigned int val;
- if (!ss->regmap[id])
- return -EINVAL;
-
- regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
+ regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val);
val &= ~RG_PHY_SPEED_MASK;
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
val |= RG_PHY_SPEED_3_125G;
- regmap_write(ss->regmap[id], ss->ana_rgc3, val);
+ regmap_write(mpcs->regmap, mpcs->ana_rgc3, val);
/* Disable SGMII AN */
- regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
val &= ~SGMII_AN_ENABLE;
- regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
-
- /* SGMII force mode setting */
- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
- val &= ~SGMII_IF_MODE_MASK;
-
- switch (state->speed) {
- case SPEED_10:
- val |= SGMII_SPEED_10;
- break;
- case SPEED_100:
- val |= SGMII_SPEED_100;
- break;
- case SPEED_2500:
- case SPEED_1000:
- val |= SGMII_SPEED_1000;
- break;
- }
+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
- if (state->duplex == DUPLEX_FULL)
- val |= SGMII_DUPLEX_FULL;
-
- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
+ /* Set the speed etc but leave the duplex unchanged */
+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
+ val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK;
+ val |= SGMII_SPEED_1000;
+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
/* Release PHYA power down state */
- regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
+ regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
val &= ~SGMII_PHYA_PWD;
- regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val);
return 0;
}
-void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
+static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct mtk_sgmii *ss = eth->sgmii;
- unsigned int val, sid;
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ int err = 0;
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
- 0 : mac_id;
+ /* Setup SGMIISYS with the determined property */
+ if (interface != PHY_INTERFACE_MODE_SGMII)
+ err = mtk_pcs_setup_mode_force(mpcs, interface);
+ else if (phylink_autoneg_inband(mode))
+ err = mtk_pcs_setup_mode_an(mpcs);
- if (!ss->regmap[sid])
- return;
+ return err;
+}
+
+static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
+{
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int val;
- regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
+ regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val);
val |= SGMII_AN_RESTART;
- regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
+ regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val);
+}
+
+static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
+{
+ struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ unsigned int val;
+
+ if (!phy_interface_mode_is_8023z(interface))
+ return;
+
+ /* SGMII force duplex setting */
+ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val);
+ val &= ~SGMII_DUPLEX_FULL;
+ if (duplex == DUPLEX_FULL)
+ val |= SGMII_DUPLEX_FULL;
+
+ regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val);
+}
+
+static const struct phylink_pcs_ops mtk_pcs_ops = {
+ .pcs_config = mtk_pcs_config,
+ .pcs_an_restart = mtk_pcs_restart_an,
+ .pcs_link_up = mtk_pcs_link_up,
+};
+
+int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
+{
+ struct device_node *np;
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ np = of_parse_phandle(r, "mediatek,sgmiisys", i);
+ if (!np)
+ break;
+
+ ss->pcs[i].ana_rgc3 = ana_rgc3;
+ ss->pcs[i].regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(ss->pcs[i].regmap))
+ return PTR_ERR(ss->pcs[i].regmap);
+
+ ss->pcs[i].pcs.ops = &mtk_pcs_ops;
+ }
+
+ return 0;
+}
+
+struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id)
+{
+ if (!ss->pcs[id].regmap)
+ return NULL;
+
+ return &ss->pcs[id].pcs;
}
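The new mtk_sgmii_select_pcs() helper is meant to be consumed from the MAC driver's phylink mac_select_pcs() callback in place of the removed mtk_sgmii_setup_mode_*() calls. A rough sketch of such a callback follows; the phylink_config member of struct mtk_mac and the shared-SGMII mapping are assumptions drawn from the surrounding code, not part of this hunk.

static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	unsigned int sid;

	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(interface)) {
		/* Shared SGMII blocks map every GMAC onto PCS 0 */
		sid = MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII) ?
		      0 : mac->id;

		return mtk_sgmii_select_pcs(eth->sgmii, sid);
	}

	return NULL;
}

phylink then drives the PCS through the mtk_pcs_ops registered above (pcs_config, pcs_an_restart, pcs_link_up), so the MAC code no longer needs to touch the SGMII registers directly.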
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index c61dc7ae0c05..ca4b93a01034 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3417,6 +3417,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
+ /* supports LSOv2 packets. */
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f777151d226f..af3b2b59a2a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -43,6 +43,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
+#include <net/ipv6.h>
#include "mlx4_en.h"
@@ -634,19 +635,28 @@ static int get_real_size(const struct sk_buff *skb,
struct net_device *dev,
int *lso_header_size,
bool *inline_ok,
- void **pfrag)
+ void **pfrag,
+ int *hopbyhop)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (shinfo->gso_size) {
*inline_ok = false;
- if (skb->encapsulation)
+ *hopbyhop = 0;
+ if (skb->encapsulation) {
*lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
- else
+ } else {
+ /* Detects large IPV6 TCP packets and prepares for removal of
+ * HBH header that has been pushed by ip6_xmit(),
+ * mainly so that tcpdump can dissect them.
+ */
+ if (ipv6_has_hopopt_jumbo(skb))
+ *hopbyhop = sizeof(struct hop_jumbo_hdr);
*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ }
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
- ALIGN(*lso_header_size + 4, DS_SIZE);
+ ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
@@ -873,6 +883,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int desc_size;
int real_size;
u32 index, bf_index;
+ struct ipv6hdr *h6;
__be32 op_own;
int lso_header_size;
void *fragptr = NULL;
@@ -881,6 +892,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
+ int hopbyhop;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@@ -890,7 +902,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
- &inline_ok, &fragptr);
+ &inline_ok, &fragptr, &hopbyhop);
if (unlikely(!real_size))
goto tx_drop_count;
@@ -943,7 +955,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data = &tx_desc->data;
data_offset = offsetof(struct mlx4_en_tx_desc, data);
} else {
- int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
+ int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
data = (void *)&tx_desc->lso + lso_align;
data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@@ -1008,14 +1020,31 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+ lso_header_size -= hopbyhop;
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
shinfo->gso_size << 16 | lso_header_size);
- /* Copy headers;
- * note that we already verified that it is linear */
- memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ if (unlikely(hopbyhop)) {
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
+ h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
+ h6->nexthdr = IPPROTO_TCP;
+ /* Copy the TCP header after the IPv6 one */
+ memcpy(h6 + 1,
+ skb->data + ETH_HLEN + sizeof(*h6) +
+ sizeof(struct hop_jumbo_hdr),
+ tcp_hdrlen(skb));
+ /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+ } else {
+ /* Copy headers;
+ * note that we already verified that it is linear
+ */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ }
ring->tso_packets++;
i = shinfo->gso_segs;
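For reference, the hop-by-hop extension header that ipv6_has_hopopt_jumbo() detects and that the LSO path strips is the 8-byte RFC 2675 jumbogram option; the layout assumed here (mirroring the hop_jumbo_hdr definition introduced by the BIG TCP series) is:

struct hop_jumbo_hdr {
	u8	nexthdr;		/* protocol after the HBH header, IPPROTO_TCP here */
	u8	hdrlen;			/* 0: the extension header is 8 bytes long */
	u8	tlv_type;		/* IPV6_TLV_JUMBO (0xc2) */
	u8	tlv_len;		/* 4: size of jumbo_payload_len */
	__be32	jumbo_payload_len;	/* real payload length; the IPv6 payload_len stays 0 */
};

Dropping these 8 bytes is why lso_header_size and the inlined header copy above are reduced by *hopbyhop before being programmed into the LSO descriptor.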
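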
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 7895ed7cc285..9ea867a45764 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -39,7 +39,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o \
- en/mapping.o
+ en/mapping.o lag/mpesw.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
lib/fs_chains.o en/tc_tun.o \
esw/indir_table.o en/tc_tun_encap.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index e52b0bac09da..6aca004e88cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -213,12 +213,6 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
-{
- return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
-}
-EXPORT_SYMBOL_GPL(mlx5_db_alloc);
-
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
u32 db_per_page = PAGE_SIZE / cache_line_size();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 26ba94cb432e..0377392848d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1887,7 +1887,8 @@ out_in:
return err;
}
-static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int err)
+static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ u32 syndrome, int err)
{
struct mlx5_cmd_stats *stats;
@@ -1902,6 +1903,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int
if (err == -EREMOTEIO) {
stats->failed_mbox_status++;
stats->last_failed_mbox_status = status;
+ stats->last_failed_syndrome = syndrome;
}
spin_unlock_irq(&stats->lock);
}
@@ -1909,6 +1911,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
{
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
u8 status = MLX5_GET(mbox_out, out, status);
if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
@@ -1917,7 +1920,7 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *
if (!err && status != MLX5_CMD_STAT_OK)
err = -EREMOTEIO;
- cmd_status_log(dev, opcode, status, err);
+ cmd_status_log(dev, opcode, status, syndrome, err);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 3d3e55a5cb11..9caa1b52321b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -192,6 +192,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
&stats->last_failed_errno);
debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
&stats->last_failed_mbox_status);
+ debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
+ &stats->last_failed_syndrome);
}
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b90902db7819..65d3c4865abf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1220,6 +1220,7 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
}
+int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev);
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3c1edfa33aa7..68364484a435 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -565,8 +565,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
- bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
- MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+ bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
return ro && lro_en ?
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 2b002c6a2e73..4ac7de3f6afa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -10,6 +10,7 @@
#include "en/tc_tun_encap.h"
#include "en/tc_priv.h"
#include "en_rep.h"
+#include "lag/lag.h"
static bool
same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev)
@@ -215,6 +216,7 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct net_device *uplink_dev;
struct mlx5e_priv *out_priv;
struct mlx5_eswitch *esw;
+ bool is_uplink_rep;
int *ifindexes;
int if_count;
int err;
@@ -229,6 +231,10 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->ifindexes[if_count] = out_dev->ifindex;
parse_state->if_count++;
+ is_uplink_rep = mlx5e_eswitch_uplink_rep(out_dev);
+ err = mlx5_lag_do_mirred(priv->mdev, out_dev);
+ if (err)
+ return err;
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
@@ -268,6 +274,14 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
rpriv = out_priv->ppriv;
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+
+ /* If output device is bond master then rules are not explicit
+ * so we don't attempt to count them.
+ */
+ if (is_uplink_rep && MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
+ MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
+ attr->lag.count = true;
+
esw_attr->out_count++;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index b979826f3f6c..2b80fe73549d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -23,7 +23,7 @@ struct mlx5_ct_fs_smfs_matcher {
};
struct mlx5_ct_fs_smfs_matchers {
- struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+ struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
struct list_head used;
};
@@ -44,7 +44,8 @@ struct mlx5_ct_fs_smfs_rule {
};
static inline void
-mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
+ bool gre)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
@@ -77,7 +78,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
ntohs(MLX5_CT_TCP_FLAGS_MASK));
- } else {
+ } else if (!gre) {
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
}
@@ -87,7 +88,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
static struct mlx5dr_matcher *
mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
- bool tcp, u32 priority)
+ bool tcp, bool gre, u32 priority)
{
struct mlx5dr_matcher *dr_matcher;
struct mlx5_flow_spec *spec;
@@ -96,7 +97,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
if (!spec)
return ERR_PTR(-ENOMEM);
- mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
+ mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
@@ -108,7 +109,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
}
static struct mlx5_ct_fs_smfs_matcher *
-mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
{
struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
@@ -119,7 +120,7 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
int prio;
matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
- smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
+ smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];
if (refcount_inc_not_zero(&smfs_matcher->ref))
return smfs_matcher;
@@ -145,11 +146,11 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
}
tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
- dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+ dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
if (IS_ERR(dr_matcher)) {
netdev_warn(fs->netdev,
- "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
- nat, ipv4, tcp, PTR_ERR(dr_matcher));
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+ nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
smfs_matcher = ERR_CAST(dr_matcher);
goto out_unlock;
@@ -222,16 +223,17 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
static inline bool
mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
{
-#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
- const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
- DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
- const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
- const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
- const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
- const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
+#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+ const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
+ const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
+ const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
+ const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
- used_keys == ipv6_udp);
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
}
static bool
@@ -254,20 +256,24 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
flow_rule_match_control(flow_rule, &control);
flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
- flow_rule_match_ports(flow_rule, &ports);
- flow_rule_match_tcp(flow_rule, &tcp);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
if (basic.mask->n_proto != htons(0xFFFF) ||
(basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
basic.mask->ip_proto != 0xFF ||
- (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
basic.key->ip_proto, basic.mask->ip_proto);
return false;
}
- if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
ports.mask->src, ports.mask->dst);
return false;
@@ -291,7 +297,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5dr_action *actions[5];
struct mlx5dr_rule *rule;
int num_actions = 0, err;
- bool nat, tcp, ipv4;
+ bool nat, tcp, ipv4, gre;
if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
return ERR_PTR(-EOPNOTSUPP);
@@ -314,15 +320,17 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
tcp = MLX5_GET(fte_match_param, spec->match_value,
outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
- smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+ smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
if (IS_ERR(smfs_matcher)) {
err = PTR_ERR(smfs_matcher);
goto err_matcher;
}
rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
+ spec->flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto err_create;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 228fbd2c20d4..bceea7a1589e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -15,6 +15,7 @@
#include <linux/refcount.h>
#include <linux/xarray.h>
#include <linux/if_macvlan.h>
+#include <linux/debugfs.h>
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
@@ -47,6 +48,15 @@
#define ct_dbg(fmt, args...)\
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
+struct mlx5_tc_ct_debugfs {
+ struct {
+ atomic_t offloaded;
+ atomic_t rx_dropped;
+ } stats;
+
+ struct dentry *root;
+};
+
struct mlx5_tc_ct_priv {
struct mlx5_core_dev *dev;
const struct net_device *netdev;
@@ -66,6 +76,8 @@ struct mlx5_tc_ct_priv {
struct mlx5_ct_fs *fs;
struct mlx5_ct_fs_ops *fs_ops;
spinlock_t ht_lock; /* protects ft entries */
+
+ struct mlx5_tc_ct_debugfs debugfs;
};
struct mlx5_ct_flow {
@@ -520,6 +532,8 @@ mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
{
mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+
+ atomic_dec(&ct_priv->debugfs.stats.offloaded);
}
static struct flow_action_entry *
@@ -1040,6 +1054,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_nat;
+ atomic_inc(&ct_priv->debugfs.stats.offloaded);
return 0;
err_nat:
@@ -2064,6 +2079,29 @@ out_err:
return err;
}
+static void
+mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
+{
+ bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB;
+ struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs;
+ char dirname[16] = {};
+
+ if (snprintf(dirname, sizeof(dirname), "ct_%s", is_fdb ? "fdb" : "nic") < 0)
+ return;
+
+ ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev));
+ debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root,
+ &ct_dbgfs->stats.offloaded);
+ debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
+ &ct_dbgfs->stats.rx_dropped);
+}
+
+static void
+mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
+{
+ debugfs_remove_recursive(ct_priv->debugfs.root);
+}
+
#define INIT_ERR_PREFIX "tc ct offload init failed"
struct mlx5_tc_ct_priv *
@@ -2139,6 +2177,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (err)
goto err_init_fs;
+ mlx5_ct_tc_create_dbgfs(ct_priv);
return ct_priv;
err_init_fs:
@@ -2171,6 +2210,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
if (!ct_priv)
return;
+ mlx5_ct_tc_remove_dbgfs(ct_priv);
chains = ct_priv->chains;
ct_priv->fs_ops->destroy(ct_priv->fs);
@@ -2200,22 +2240,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
return true;
if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
- return false;
+ goto out_inc_drop;
if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
- return false;
+ goto out_inc_drop;
spin_lock(&ct_priv->ht_lock);
entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
if (!entry) {
spin_unlock(&ct_priv->ht_lock);
- return false;
+ goto out_inc_drop;
}
if (IS_ERR(entry)) {
spin_unlock(&ct_priv->ht_lock);
- return false;
+ goto out_inc_drop;
}
spin_unlock(&ct_priv->ht_lock);
@@ -2223,4 +2263,8 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
__mlx5_tc_ct_entry_put(entry);
return true;
+
+out_inc_drop:
+ atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
+ return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index a55b066746cb..857840ab1e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -14,19 +14,26 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
bool busy = false;
int work_done = 0;
+ rcu_read_lock();
+
ch_stats->poll++;
work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
busy |= work_done == budget;
busy |= rq->post_wqes(rq);
- if (busy)
- return budget;
+ if (busy) {
+ work_done = budget;
+ goto out;
+ }
if (unlikely(!napi_complete_done(napi, work_done)))
- return work_done;
+ goto out;
mlx5e_cq_arm(&rq->cq);
+
+out:
+ rcu_read_unlock();
return work_done;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 35e2bb301c26..2a8fd7020622 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -172,9 +172,9 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
}
/* action */
- attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
- MLX5_ACCEL_ESP_ACTION_ENCRYPT :
- MLX5_ACCEL_ESP_ACTION_DECRYPT;
+ attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
+ MLX5_ACCEL_ESP_ACTION_ENCRYPT :
+ MLX5_ACCEL_ESP_ACTION_DECRYPT;
/* flags */
attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
@@ -306,7 +306,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
if (err)
goto err_hw_ctx;
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
err = mlx5e_ipsec_sadb_rx_add(sa_entry);
if (err)
goto err_add_rule;
@@ -333,7 +333,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
mlx5e_ipsec_sadb_rx_del(sa_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index c0f409c195bf..43a536cb81db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -38,12 +38,11 @@
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
{
- bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
- MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
- MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read);
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index aeff1d972a46..d2f0773f95c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -155,7 +155,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
list_size = max_list_size;
}
- vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
+ vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
if (!vlans)
return -ENOMEM;
@@ -171,7 +171,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
err);
- kfree(vlans);
+ kvfree(vlans);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d27986869b8b..05c015515cce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3858,6 +3858,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_NTUPLE)
netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+ features &= ~NETIF_F_GRO_HW;
+ if (netdev->features & NETIF_F_GRO_HW)
+ netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+
return features;
}
@@ -3890,6 +3894,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
+ if (params->xdp_prog) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
+ }
+
+ if (priv->xsk.refcnt) {
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
+ priv->xsk.refcnt);
+ features &= ~NETIF_F_GRO_HW;
+ }
+ }
+
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)
@@ -4812,6 +4835,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_RXHASH;
+ netdev->vlan_features |= NETIF_F_GSO_PARTIAL;
netdev->mpls_features |= NETIF_F_SG;
netdev->mpls_features |= NETIF_F_HW_CSUM;
@@ -4838,10 +4862,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (!!MLX5_CAP_GEN(mdev, shampo) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev))
- netdev->hw_features |= NETIF_F_GRO_HW;
-
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4877,7 +4897,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_IPXIP6;
}
- netdev->hw_features |= NETIF_F_GSO_PARTIAL;
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -4920,6 +4939,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_ipsec_build_netdev(priv);
mlx5e_ktls_build_netdev(priv);
@@ -5220,6 +5240,15 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
return max_nch;
}
+int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
+{
+ /* Indirect TIRS: 2 sets of TTCs (inner + outer steering)
+ * and 1 set of direct TIRS
+ */
+ return 2 * MLX5E_NUM_INDIR_TIRS
+ + mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
+}
+
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 47f7b4c034cc..eb90e79388f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -399,7 +399,9 @@ out_err:
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
+ int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool is_uplink_rep = mlx5e_is_uplink_rep(priv);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
int n, tc, nch, num_sqs = 0;
@@ -411,9 +413,13 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
ptp_sq = !!(priv->channels.ptp &&
MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
nch = priv->channels.num + ptp_sq;
+ /* +2 for xdpsqs, they don't exist on the ptp channel but will not be
+ * counted for by num_sqs.
+ */
+ if (is_uplink_rep)
+ sqs_per_channel += 2;
- sqs = kcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs),
- GFP_KERNEL);
+ sqs = kvcalloc(nch * sqs_per_channel, sizeof(*sqs), GFP_KERNEL);
if (!sqs)
goto out;
@@ -421,6 +427,13 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
c = priv->channels.c[n];
for (tc = 0; tc < c->num_tc; tc++)
sqs[num_sqs++] = c->sq[tc].sqn;
+
+ if (is_uplink_rep) {
+ if (c->xdp)
+ sqs[num_sqs++] = c->rq_xdpsq.sqn;
+
+ sqs[num_sqs++] = c->xdpsq.sqn;
+ }
}
if (ptp_sq) {
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
@@ -430,7 +443,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
}
err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
- kfree(sqs);
+ kvfree(sqs);
out:
if (err)
@@ -604,10 +617,16 @@ bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
+/* One indirect TIR set for outer. Inner not supported in reps. */
+#define REP_NUM_INDIR_TIRS MLX5E_NUM_INDIR_TIRS
+
static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
{
- return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) /
- mlx5_eswitch_get_total_vports(mdev);
+ int max_tir_num = 1 << MLX5_CAP_GEN(mdev, log_max_tir);
+ int num_vports = mlx5_eswitch_get_total_vports(mdev);
+
+ return (max_tir_num - mlx5e_get_pf_num_tirs(mdev)
+ - (num_vports * REP_NUM_INDIR_TIRS)) / num_vports;
}
static void mlx5e_build_rep_params(struct net_device *netdev)
@@ -1269,7 +1288,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv;
int err;
- rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+ rpriv = kvzalloc(sizeof(*rpriv), GFP_KERNEL);
if (!rpriv)
return -ENOMEM;
@@ -1284,7 +1303,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
err = mlx5e_vport_vf_rep_load(dev, rep);
if (err)
- kfree(rpriv);
+ kvfree(rpriv);
return err;
}
@@ -1312,7 +1331,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
free_ppriv:
- kfree(ppriv); /* mlx5e_rep_priv */
+ kvfree(ppriv); /* mlx5e_rep_priv */
}
static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index fb11081001a0..24de37b79f5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2038,7 +2038,7 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
int nr_frags = skb_shinfo(skb)->nr_frags;
- return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
+ return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ac0f73074f7a..49dea02a12d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1740,6 +1740,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
free_flow_post_acts(flow);
+ if (flow->attr->lag.count)
+ mlx5_lag_del_mpesw_rule(esw->dev);
+
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
@@ -3788,12 +3791,25 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
same_hw_reps(priv, peer_netdev));
}
+static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
+{
+ if (mlx5e_eswitch_uplink_rep(out_dev) &&
+ MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
+ MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
+ return true;
+
+ return false;
+}
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev)
{
if (is_merged_eswitch_vfs(priv, out_dev))
return true;
+ if (is_multiport_eligible(priv, out_dev))
+ return true;
+
if (is_lag_dev(priv, out_dev))
return true;
@@ -4050,6 +4066,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_core_dev *in_mdev)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
@@ -4085,17 +4102,26 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ if (flow->attr->lag.count) {
+ err = mlx5_lag_add_mpesw_rule(esw->dev);
+ if (err)
+ goto err_free;
+ }
+
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
complete_all(&flow->init_done);
if (err) {
if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
- goto err_free;
+ goto err_lag;
add_unready_flow(flow);
}
return flow;
+err_lag:
+ if (flow->attr->lag.count)
+ mlx5_lag_del_mpesw_rule(esw->dev);
err_free:
mlx5e_flow_put(priv, flow);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index a80b00946f1b..e2a1250aeca1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -85,6 +85,13 @@ struct mlx5_flow_attr {
u32 flags;
struct list_head list;
struct mlx5e_post_act_handle *post_act_handle;
+ struct {
+ /* Indicate whether the parsed flow should be counted for lag mode decision
+ * making
+ */
+ bool count;
+ } lag;
+ /* keep this union last */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5855d8f9c509..50d14cec4894 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -40,6 +40,7 @@
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/ptp.h"
+#include <net/ipv6.h>
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
@@ -91,6 +92,13 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
return min_t(u16, hlen, skb_headlen(skb));
}
+#define MLX5_UNSAFE_MEMCPY_DISCLAIMER \
+ "This copy has been bounds-checked earlier in " \
+ "mlx5i_sq_calc_wqe_attr() and intentionally " \
+ "crosses a flex array boundary. Since it is " \
+ "performance sensitive, splitting the copy is " \
+ "undesirable."
+
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
@@ -100,7 +108,10 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->addrs, skb->data, cpy1_sz);
vhdr->h_vlan_proto = skb->vlan_proto;
vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
- memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
+ unsafe_memcpy(&vhdr->h_vlan_encapsulated_proto,
+ skb->data + cpy1_sz,
+ cpy2_sz,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
}
static inline void
@@ -130,23 +141,32 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->csum_none++;
}
+/* Returns the number of header bytes that we plan
+ * to inline later in the transmit descriptor
+ */
static inline u16
-mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
{
struct mlx5e_sq_stats *stats = sq->stats;
u16 ihs;
+ *hopbyhop = 0;
if (skb->encapsulation) {
ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
- else
+ } else {
ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (ipv6_has_hopopt_jumbo(skb)) {
+ *hopbyhop = sizeof(struct hop_jumbo_hdr);
+ ihs -= sizeof(struct hop_jumbo_hdr);
+ }
+ }
stats->tso_packets++;
- stats->tso_bytes += skb->len - ihs;
+ stats->tso_bytes += skb->len - ihs - *hopbyhop;
}
return ihs;
@@ -208,6 +228,7 @@ struct mlx5e_tx_attr {
__be16 mss;
u16 insz;
u8 opcode;
+ u8 hopbyhop;
};
struct mlx5e_tx_wqe_attr {
@@ -244,14 +265,16 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_sq_stats *stats = sq->stats;
if (skb_is_gso(skb)) {
- u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
+ int hopbyhop;
+ u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
*attr = (struct mlx5e_tx_attr) {
.opcode = MLX5_OPCODE_LSO,
.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
.ihs = ihs,
.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
- .headlen = skb_headlen(skb) - ihs,
+ .headlen = skb_headlen(skb) - ihs - hopbyhop,
+ .hopbyhop = hopbyhop,
};
stats->packets += skb_shinfo(skb)->gso_segs;
@@ -365,7 +388,8 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg;
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
-
+ u16 ihs = attr->ihs;
+ struct ipv6hdr *h6;
struct mlx5e_sq_stats *stats = sq->stats;
int num_dma;
@@ -379,21 +403,40 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = attr->mss;
- if (attr->ihs) {
- if (skb_vlan_tag_present(skb)) {
- eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
- mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
+ if (ihs) {
+ u8 *start = eseg->inline_hdr.start;
+
+ if (unlikely(attr->hopbyhop)) {
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
+ ihs += VLAN_HLEN;
+ h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
+ } else {
+ unsafe_memcpy(start, skb->data,
+ ETH_HLEN + sizeof(*h6),
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
+ h6 = (struct ipv6hdr *)(start + ETH_HLEN);
+ }
+ h6->nexthdr = IPPROTO_TCP;
+ /* Copy the TCP header after the IPv6 one */
+ memcpy(h6 + 1,
+ skb->data + ETH_HLEN + sizeof(*h6) +
+ sizeof(struct hop_jumbo_hdr),
+ tcp_hdrlen(skb));
+ /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+ } else if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(start, skb, ihs);
+ ihs += VLAN_HLEN;
stats->added_vlan_packets++;
} else {
- eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
- unsafe_memcpy(eseg->inline_hdr.start, skb->data, attr->ihs,
- /* This copy has been bounds-checked earlier in
- * mlx5i_sq_calc_wqe_attr() and intentionally
- * crosses a flex array boundary. Since it is
- * performance sensitive, splitting the copy is
- * undesirable.
- */);
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ attr->ihs,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
}
+ eseg->inline_hdr.sz |= cpu_to_be16(ihs);
dseg += wqe_attr->ds_cnt_inl;
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
@@ -404,7 +447,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
dseg += wqe_attr->ds_cnt_ids;
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
@@ -924,12 +967,34 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->mss = attr.mss;
if (attr.ihs) {
- memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
+ if (unlikely(attr.hopbyhop)) {
+ struct ipv6hdr *h6;
+
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ ETH_HLEN + sizeof(*h6),
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
+ h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
+ h6->nexthdr = IPPROTO_TCP;
+ /* Copy the TCP header after the IPv6 one */
+ unsafe_memcpy(h6 + 1,
+ skb->data + ETH_HLEN + sizeof(*h6) +
+ sizeof(struct hop_jumbo_hdr),
+ tcp_hdrlen(skb),
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
+ /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+ } else {
+ unsafe_memcpy(eseg->inline_hdr.start, skb->data,
+ attr.ihs,
+ MLX5_UNSAFE_MEMCPY_DISCLAIMER);
+ }
eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
dseg += wqe_attr.ds_cnt_inl;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 3b151332e2f8..217cac29057f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -49,6 +49,7 @@
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
+#include "lag/lag.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -418,6 +419,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (mlx5_lag_mpesw_is_activated(esw->dev))
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
if (pkt_reformat) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index fb8175672478..84caffe4c278 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2673,28 +2673,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
-
- cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->fdb_root_ns);
- steering->fdb_root_ns = NULL;
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
- cleanup_root_ns(steering->port_sel_root_ns);
- cleanup_root_ns(steering->sniffer_rx_root_ns);
- cleanup_root_ns(steering->sniffer_tx_root_ns);
- cleanup_root_ns(steering->rdma_rx_root_ns);
- cleanup_root_ns(steering->rdma_tx_root_ns);
- cleanup_root_ns(steering->egress_root_ns);
- mlx5_cleanup_fc_stats(dev);
- kmem_cache_destroy(steering->ftes_cache);
- kmem_cache_destroy(steering->fgs_cache);
- mlx5_ft_pool_destroy(dev);
- kfree(steering);
-}
-
static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
@@ -3096,42 +3074,27 @@ cleanup:
return err;
}
-int mlx5_init_fs(struct mlx5_core_dev *dev)
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
- struct mlx5_flow_steering *steering;
- int err = 0;
-
- err = mlx5_init_fc_stats(dev);
- if (err)
- return err;
-
- err = mlx5_ft_pool_init(dev);
- if (err)
- return err;
-
- steering = kzalloc(sizeof(*steering), GFP_KERNEL);
- if (!steering) {
- err = -ENOMEM;
- goto err;
- }
-
- steering->dev = dev;
- dev->priv.steering = steering;
+ struct mlx5_flow_steering *steering = dev->priv.steering;
- if (mlx5_fs_dr_is_supported(dev))
- steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
- else
- steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+ cleanup_root_ns(steering->root_ns);
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
+ cleanup_root_ns(steering->port_sel_root_ns);
+ cleanup_root_ns(steering->sniffer_rx_root_ns);
+ cleanup_root_ns(steering->sniffer_tx_root_ns);
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ cleanup_root_ns(steering->rdma_tx_root_ns);
+ cleanup_root_ns(steering->egress_root_ns);
+}
- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
- sizeof(struct mlx5_flow_group), 0,
- 0, NULL);
- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
- 0, NULL);
- if (!steering->ftes_cache || !steering->fgs_cache) {
- err = -ENOMEM;
- goto err;
- }
+int mlx5_fs_core_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err = 0;
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
@@ -3189,8 +3152,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
}
return 0;
+
+err:
+ mlx5_fs_core_cleanup(dev);
+ return err;
+}
+
+void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ kmem_cache_destroy(steering->ftes_cache);
+ kmem_cache_destroy(steering->fgs_cache);
+ kfree(steering);
+ mlx5_ft_pool_destroy(dev);
+ mlx5_cleanup_fc_stats(dev);
+}
+
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering;
+ int err = 0;
+
+ err = mlx5_init_fc_stats(dev);
+ if (err)
+ return err;
+
+ err = mlx5_ft_pool_init(dev);
+ if (err)
+ goto err;
+
+ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+ if (!steering) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ steering->dev = dev;
+ dev->priv.steering = steering;
+
+ if (mlx5_fs_dr_is_supported(dev))
+ steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
+ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
err:
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_free(dev);
return err;
}
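The former mlx5_init_fs()/mlx5_cleanup_fs() pair is thus split into an alloc/free half (software state: fc stats, FT pool, fgs/ftes caches) and an init/cleanup half (root namespaces), so the namespaces can be torn down and rebuilt without reallocating the software state. A sketch of how the four calls are expected to pair up; the call sites are assumptions, the actual wiring lives in main.c outside this hunk.

	/* once per device: allocate steering software state */
	err = mlx5_fs_core_alloc(dev);
	if (err)
		return err;

	/* on every load/recovery cycle: create the root namespaces */
	err = mlx5_fs_core_init(dev);
	if (err)
		goto err_fs_free;

	/* ... device is up ... */

	/* on unload: destroy the namespaces, keep caches and FT pool */
	mlx5_fs_core_cleanup(dev);

err_fs_free:
	/* once, on removal: free caches, FT pool and fc stats */
	mlx5_fs_core_free(dev);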
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 67cad7a6d836..3af50fd04d28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -299,8 +299,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
enum mlx5_flow_steering_mode mode);
-int mlx5_init_fs(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
+void mlx5_fs_core_free(struct mlx5_core_dev *dev);
+int mlx5_fs_core_init(struct mlx5_core_dev *dev);
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 84df0d56a2b6..052af4901c0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -8,7 +8,8 @@
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
- MLX5_FW_RESET_FLAGS_PENDING_COMP
+ MLX5_FW_RESET_FLAGS_PENDING_COMP,
+ MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
};
struct mlx5_fw_reset {
@@ -208,7 +209,10 @@ static void poll_sync_reset(struct timer_list *t)
if (fatal_error) {
mlx5_core_warn(dev, "Got Device Reset\n");
- queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+ queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+ else
+ mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
return;
}
@@ -433,9 +437,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
struct mlx5_eqe *eqe = data;
+ if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+ return NOTIFY_DONE;
+
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
- queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+ queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
break;
case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
mlx5_sync_reset_events_handle(fw_reset, eqe);
@@ -479,6 +486,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
}
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
+ cancel_work_sync(&fw_reset->fw_live_patch_work);
+ cancel_work_sync(&fw_reset->reset_request_work);
+ cancel_work_sync(&fw_reset->reset_reload_work);
+ cancel_work_sync(&fw_reset->reset_now_work);
+ cancel_work_sync(&fw_reset->reset_abort_work);
+}
+
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index 694fc7cb2684..dc141c7e641a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -16,6 +16,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index f4f7eaf16446..8da73ef5680f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -221,7 +221,6 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_MLX5_EN_RXNFC
static u32 mlx5i_flow_type_mask(u32 flow_type)
{
return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
@@ -243,9 +242,18 @@ static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
+ * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
+ * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
+ * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
+ */
+ if (info->cmd == ETHTOOL_GRXRINGS) {
+ info->data = priv->channels.params.num_channels;
+ return 0;
+ }
+
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
-#endif
const struct ethtool_ops mlx5i_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -263,10 +271,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
-#ifdef CONFIG_MLX5_EN_RXNFC
.get_rxnfc = mlx5i_get_rxnfc,
.set_rxnfc = mlx5i_set_rxnfc,
-#endif
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index 443daf6e3d4b..15e41dc84d53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -5,12 +5,13 @@
static char *get_str_mode_type(struct mlx5_lag *ldev)
{
- if (ldev->flags & MLX5_LAG_FLAG_ROCE)
- return "roce";
- if (ldev->flags & MLX5_LAG_FLAG_SRIOV)
- return "switchdev";
- if (ldev->flags & MLX5_LAG_FLAG_MULTIPATH)
- return "multipath";
+ switch (ldev->mode) {
+ case MLX5_LAG_MODE_ROCE: return "roce";
+ case MLX5_LAG_MODE_SRIOV: return "switchdev";
+ case MLX5_LAG_MODE_MULTIPATH: return "multipath";
+ case MLX5_LAG_MODE_MPESW: return "multiport_eswitch";
+ default: return "invalid";
+ }
return NULL;
}
@@ -43,11 +44,11 @@ static int port_sel_mode_show(struct seq_file *file, void *priv)
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
- mode = get_str_port_sel_mode(ldev->flags);
+ mode = mlx5_get_str_port_sel_mode(ldev);
else
ret = -EINVAL;
mutex_unlock(&ldev->lock);
- if (ret || !mode)
+ if (ret)
return ret;
seq_printf(file, "%s\n", mode);
@@ -79,7 +80,7 @@ static int flags_show(struct seq_file *file, void *priv)
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active)
- shared_fdb = ldev->shared_fdb;
+ shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
mutex_unlock(&ldev->lock);
if (!lag_active)
@@ -103,7 +104,7 @@ static int mapping_show(struct seq_file *file, void *priv)
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
- if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED) {
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
&num_ports);
hash = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index b6dd9043061f..552b6e26e701 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -41,6 +41,7 @@
#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
+#include "mpesw.h"
enum {
MLX5_LAG_EGRESS_PORT_1 = 1,
@@ -53,21 +54,33 @@ enum {
*/
static DEFINE_SPINLOCK(lag_lock);
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, bool shared_fdb, u8 flags)
+static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT;
+
+ if (mode == MLX5_LAG_MODE_MPESW)
+ return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW;
+
+ return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY;
+}
+
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
+ unsigned long flags)
+{
+ bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
+ int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
- void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
+ void *lag_ctx;
+ lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
-
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
- if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
+ if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) {
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
- } else {
- MLX5_SET(lagc, lag_ctx, port_select_mode,
- MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
}
+ MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode);
return mlx5_cmd_exec_in(dev, create_lag, in);
}
@@ -139,7 +152,7 @@ void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- u8 flags)
+ unsigned long flags)
{
char buf[MLX5_MAX_PORTS * 10 + 1] = {};
u8 enabled_ports[MLX5_MAX_PORTS] = {};
@@ -150,7 +163,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
int i;
int j;
- if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
&num_enabled);
for (i = 0; i < num_enabled; i++) {
@@ -187,7 +200,8 @@ static void mlx5_ldev_free(struct kref *ref)
if (ldev->nb.notifier_call)
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
- cancel_delayed_work_sync(&ldev->bond_work);
+ mlx5_lag_mpesw_cleanup(ldev);
+ cancel_work_sync(&ldev->mpesw_work);
destroy_workqueue(ldev->wq);
mutex_destroy(&ldev->lock);
kfree(ldev);
@@ -227,11 +241,14 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
+ ldev->mode = MLX5_LAG_MODE_NONE;
err = mlx5_lag_mp_init(ldev);
if (err)
mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
err);
+
+ mlx5_lag_mpesw_init(ldev);
ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports);
ldev->buckets = 1;
@@ -252,12 +269,12 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
+ return ldev->mode == MLX5_LAG_MODE_ROCE;
}
static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
+ return ldev->mode == MLX5_LAG_MODE_SRIOV;
}
/* Create a mapping between steering slots and active ports.
@@ -372,7 +389,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags))
return mlx5_lag_port_sel_modify(ldev, ports);
return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
@@ -404,19 +421,19 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
memcpy(ldev->v2p_map, ports, sizeof(ports));
mlx5_lag_print_mapping(dev0, ldev, tracker,
- ldev->flags);
+ ldev->mode_flags);
break;
}
}
if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
- !(ldev->flags & MLX5_LAG_FLAG_ROCE))
+ !(ldev->mode == MLX5_LAG_MODE_ROCE))
mlx5_lag_drop_rule_setup(ldev, tracker);
}
#define MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED 4
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
- struct lag_tracker *tracker, u8 *flags)
+ unsigned long *flags)
{
struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
@@ -424,7 +441,7 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
/* Four ports are supported only in hash mode */
if (!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table))
return -EINVAL;
- *flags |= MLX5_LAG_FLAG_HASH_BASED;
+ set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
if (ldev->ports > 2)
ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
}
@@ -432,49 +449,67 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
return 0;
}
-static int mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
- struct lag_tracker *tracker, u8 *flags)
+static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ enum mlx5_lag_mode mode,
+ unsigned long *flags)
{
struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+ if (mode == MLX5_LAG_MODE_MPESW)
+ return;
+
if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)
- *flags |= MLX5_LAG_FLAG_HASH_BASED;
-
- return 0;
+ set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
}
-static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
- struct lag_tracker *tracker, u8 *flags)
+static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
+ struct lag_tracker *tracker, bool shared_fdb,
+ unsigned long *flags)
{
- bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
+ bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
+
+ *flags = 0;
+ if (shared_fdb)
+ set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
if (roce_lag)
- return mlx5_lag_set_port_sel_mode_roce(ldev, tracker, flags);
- return mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, flags);
+ return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
+
+ mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
+ return 0;
}
-char *get_str_port_sel_mode(u8 flags)
+char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev)
{
- if (flags & MLX5_LAG_FLAG_HASH_BASED)
- return "hash";
- return "queue_affinity";
+ int port_sel_mode = get_port_sel_mode(ldev->mode, ldev->mode_flags);
+
+ switch (port_sel_mode) {
+ case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity";
+ case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: return "hash";
+ case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW: return "mpesw";
+ default: return "invalid";
+ }
}
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- bool shared_fdb, u8 flags)
+ enum mlx5_lag_mode mode,
+ unsigned long flags)
{
+ bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
- mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
+ if (tracker)
+ mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
- shared_fdb, get_str_port_sel_mode(flags));
+ shared_fdb, mlx5_get_str_port_sel_mode(ldev));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, shared_fdb, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -503,33 +538,35 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
int mlx5_activate_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- u8 flags,
+ enum mlx5_lag_mode mode,
bool shared_fdb)
{
- bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
+ bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ unsigned long flags = 0;
int err;
- err = mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
+ err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
if (err)
return err;
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
-
- if (flags & MLX5_LAG_FLAG_HASH_BASED) {
- err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
- ldev->v2p_map);
- if (err) {
- mlx5_core_err(dev0,
- "Failed to create LAG port selection(%d)\n",
- err);
- return err;
+ if (mode != MLX5_LAG_MODE_MPESW) {
+ mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
+ err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
+ ldev->v2p_map);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to create LAG port selection(%d)\n",
+ err);
+ return err;
+ }
}
}
- err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
+ err = mlx5_create_lag(ldev, tracker, mode, flags);
if (err) {
- if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
mlx5_lag_port_sel_destroy(ldev);
if (roce_lag)
mlx5_core_err(dev0,
@@ -541,12 +578,12 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
return err;
}
- if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ if (tracker && tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
!roce_lag)
mlx5_lag_drop_rule_setup(ldev, tracker);
- ldev->flags |= flags;
- ldev->shared_fdb = shared_fdb;
+ ldev->mode = mode;
+ ldev->mode_flags = flags;
return 0;
}
@@ -556,16 +593,17 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
- u8 flags = ldev->flags;
+ unsigned long flags = ldev->mode_flags;
int err;
- ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
+ ldev->mode = MLX5_LAG_MODE_NONE;
+ ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
- if (ldev->shared_fdb) {
+ if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
dev1->priv.eswitch);
- ldev->shared_fdb = false;
+ clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
}
MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
@@ -582,7 +620,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}
- if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
mlx5_lag_port_sel_destroy(ldev);
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
@@ -656,11 +694,11 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
}
}
-static void mlx5_disable_lag(struct mlx5_lag *ldev)
+void mlx5_disable_lag(struct mlx5_lag *ldev)
{
+ bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
- bool shared_fdb = ldev->shared_fdb;
bool roce_lag;
int err;
int i;
@@ -693,7 +731,7 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev)
}
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
@@ -729,6 +767,18 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
return roce_lag;
}
+static bool mlx5_lag_should_modify_lag(struct mlx5_lag *ldev, bool do_bond)
+{
+ return do_bond && __mlx5_lag_is_active(ldev) &&
+ ldev->mode != MLX5_LAG_MODE_MPESW;
+}
+
+static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
+{
+ return !do_bond && __mlx5_lag_is_active(ldev) &&
+ ldev->mode != MLX5_LAG_MODE_MPESW;
+}
+
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
@@ -759,8 +809,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_lag_remove_devices(ldev);
err = mlx5_activate_lag(ldev, &tracker,
- roce_lag ? MLX5_LAG_FLAG_ROCE :
- MLX5_LAG_FLAG_SRIOV,
+ roce_lag ? MLX5_LAG_MODE_ROCE :
+ MLX5_LAG_MODE_SRIOV,
shared_fdb);
if (err) {
if (shared_fdb || roce_lag)
@@ -791,9 +841,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
return;
}
}
- } else if (do_bond && __mlx5_lag_is_active(ldev)) {
+ } else if (mlx5_lag_should_modify_lag(ldev, do_bond)) {
mlx5_modify_lag(ldev, &tracker);
- } else if (!do_bond && __mlx5_lag_is_active(ldev)) {
+ } else if (mlx5_lag_should_disable_lag(ldev, do_bond)) {
mlx5_disable_lag(ldev);
}
}
@@ -831,7 +881,6 @@ static void mlx5_do_bond_work(struct work_struct *work)
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- struct net_device *ndev,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
@@ -968,6 +1017,7 @@ static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
return 1;
}
+/* this handler is always registered to netdev events */
static int mlx5_lag_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -987,8 +1037,7 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
switch (event) {
case NETDEV_CHANGEUPPER:
- changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
- ptr);
+ changed = mlx5_handle_changeupper_event(ldev, &tracker, ptr);
break;
case NETDEV_CHANGELOWERSTATE:
changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
@@ -1156,7 +1205,7 @@ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock);
mlx5_ldev_remove_netdev(ldev, netdev);
- ldev->flags &= ~MLX5_LAG_FLAG_READY;
+ clear_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
lag_is_active = __mlx5_lag_is_active(ldev);
mutex_unlock(&ldev->lock);
@@ -1183,7 +1232,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
break;
if (i >= ldev->ports)
- ldev->flags |= MLX5_LAG_FLAG_READY;
+ set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
@@ -1252,7 +1301,8 @@ bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
spin_lock(&lag_lock);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb;
+ res = ldev && __mlx5_lag_is_sriov(ldev) &&
+ test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
spin_unlock(&lag_lock);
return res;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 46683b84ff84..72f70fad4641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -10,6 +10,7 @@
#include "mlx5_core.h"
#include "mp.h"
#include "port_sel.h"
+#include "mpesw.h"
enum {
MLX5_LAG_P1,
@@ -17,16 +18,21 @@ enum {
};
enum {
- MLX5_LAG_FLAG_ROCE = 1 << 0,
- MLX5_LAG_FLAG_SRIOV = 1 << 1,
- MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
- MLX5_LAG_FLAG_READY = 1 << 3,
- MLX5_LAG_FLAG_HASH_BASED = 1 << 4,
+ MLX5_LAG_FLAG_NDEVS_READY,
};
-#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
- MLX5_LAG_FLAG_MULTIPATH | \
- MLX5_LAG_FLAG_HASH_BASED)
+enum {
+ MLX5_LAG_MODE_FLAG_HASH_BASED,
+ MLX5_LAG_MODE_FLAG_SHARED_FDB,
+};
+
+enum mlx5_lag_mode {
+ MLX5_LAG_MODE_NONE,
+ MLX5_LAG_MODE_ROCE,
+ MLX5_LAG_MODE_SRIOV,
+ MLX5_LAG_MODE_MULTIPATH,
+ MLX5_LAG_MODE_MPESW,
+};
struct lag_func {
struct mlx5_core_dev *dev;
@@ -47,22 +53,25 @@ struct lag_tracker {
* It serves both its phys functions.
*/
struct mlx5_lag {
- u8 flags;
+ enum mlx5_lag_mode mode;
+ unsigned long mode_flags;
+ unsigned long state_flags;
u8 ports;
u8 buckets;
int mode_changes_in_progress;
- bool shared_fdb;
u8 v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
struct kref ref;
struct lag_func pf[MLX5_MAX_PORTS];
struct lag_tracker tracker;
struct workqueue_struct *wq;
struct delayed_work bond_work;
+ struct work_struct mpesw_work;
struct notifier_block nb;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
/* Protect lag fields/state changes */
struct mutex lock;
+ struct lag_mpesw lag_mpesw;
};
static inline struct mlx5_lag *
@@ -74,29 +83,33 @@ mlx5_lag_dev(struct mlx5_core_dev *dev)
static inline bool
__mlx5_lag_is_active(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
+ return ldev->mode != MLX5_LAG_MODE_NONE;
}
static inline bool
mlx5_lag_is_ready(struct mlx5_lag *ldev)
{
- return ldev->flags & MLX5_LAG_FLAG_READY;
+ return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- u8 flags,
+ enum mlx5_lag_mode mode,
bool shared_fdb);
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
+bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
-char *get_str_port_sel_mode(u8 flags);
+char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
void mlx5_ldev_remove_debugfs(struct dentry *dbg);
+void mlx5_disable_lag(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
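lag.h above replaces the single u8 flags word with an explicit mode enum plus a separate mode_flags bitmap manipulated with set_bit()/test_bit(). A small, self-contained sketch of that split, using simplified non-atomic stand-ins for the kernel bit helpers (enum and flag names mirror the ones added here, everything else is hypothetical):

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the lag.h refactor: mode becomes an enum, while independent
 * attributes live in an unsigned long bitmap. set_bit()/test_bit() are
 * simplified, non-atomic stand-ins for the kernel helpers.
 */
enum lag_mode { LAG_MODE_NONE, LAG_MODE_ROCE, LAG_MODE_SRIOV,
		LAG_MODE_MULTIPATH, LAG_MODE_MPESW };

enum { LAG_MODE_FLAG_HASH_BASED, LAG_MODE_FLAG_SHARED_FDB };

static void set_bit(int nr, unsigned long *addr) { *addr |= 1UL << nr; }
static bool test_bit(int nr, const unsigned long *addr) { return *addr & (1UL << nr); }

/* Mirrors get_port_sel_mode() + mlx5_get_str_port_sel_mode(). */
static const char *port_sel_mode_str(enum lag_mode mode, unsigned long flags)
{
	if (test_bit(LAG_MODE_FLAG_HASH_BASED, &flags))
		return "hash";
	if (mode == LAG_MODE_MPESW)
		return "mpesw";
	return "queue_affinity";
}

int main(void)
{
	enum lag_mode mode = LAG_MODE_SRIOV;
	unsigned long flags = 0;

	set_bit(LAG_MODE_FLAG_SHARED_FDB, &flags);
	printf("mode=%d shared_fdb=%d port_sel=%s\n",
	       (int)mode, (int)test_bit(LAG_MODE_FLAG_SHARED_FDB, &flags),
	       port_sel_mode_str(mode, flags));
	return 0;
}
```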
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index d6c3e6dfd71f..0259a149a64c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -11,7 +11,7 @@
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+ return ldev->mode == MLX5_LAG_MODE_MULTIPATH;
}
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
@@ -179,7 +179,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct lag_tracker tracker;
tracker = ldev->tracker;
- mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH, false);
+ mlx5_activate_lag(ldev, &tracker, MLX5_LAG_MODE_MULTIPATH, false);
}
mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
new file mode 100644
index 000000000000..ee4b25a50315
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/netdevice.h>
+#include <net/nexthop.h>
+#include "lag/lag.h"
+#include "eswitch.h"
+#include "lib/mlx5.h"
+
+void mlx5_mpesw_work(struct work_struct *work)
+{
+ struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);
+
+ mutex_lock(&ldev->lock);
+ mlx5_disable_lag(ldev);
+ mutex_unlock(&ldev->lock);
+}
+
+static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev = dev->priv.lag;
+
+ if (!queue_work(ldev->wq, &ldev->mpesw_work))
+ mlx5_core_warn(dev, "failed to queue work\n");
+}
+
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev = dev->priv.lag;
+
+ if (!ldev)
+ return;
+
+ mutex_lock(&ldev->lock);
+ if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
+ ldev->mode == MLX5_LAG_MODE_MPESW)
+ mlx5_lag_disable_mpesw(dev);
+ mutex_unlock(&ldev->lock);
+}
+
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev = dev->priv.lag;
+ bool shared_fdb;
+ int err = 0;
+
+ if (!ldev)
+ return 0;
+
+ mutex_lock(&ldev->lock);
+ if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
+ goto out;
+
+ if (ldev->mode != MLX5_LAG_MODE_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
+ shared_fdb = mlx5_shared_fdb_supported(ldev);
+ err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, shared_fdb);
+ if (err)
+ mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
+
+out:
+ mutex_unlock(&ldev->lock);
+ return err;
+}
+
+int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
+{
+ struct mlx5_lag *ldev = mdev->priv.lag;
+
+ if (!netif_is_bond_master(out_dev) || !ldev)
+ return 0;
+
+ mutex_lock(&ldev->lock);
+ if (ldev->mode == MLX5_LAG_MODE_MPESW) {
+ mutex_unlock(&ldev->lock);
+ return -EOPNOTSUPP;
+ }
+ mutex_unlock(&ldev->lock);
+ return 0;
+}
+
+bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
+{
+ bool ret;
+
+ ret = dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW;
+ return ret;
+}
+
+void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
+{
+ INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
+ atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
+}
+
+void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
+{
+ cancel_delayed_work_sync(&ldev->bond_work);
+}
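mpesw.c activates MPESW LAG when the first rule is added and schedules teardown when the last rule is deleted. A plain, single-threaded sketch of that reference-count pattern (the driver does this with an atomic_t under ldev->lock; the names below are illustrative):

```c
#include <stdio.h>

/* Sketch of the rule refcount in mpesw.c: the first add activates the mode,
 * removal of the last rule disables it. Illustrative only.
 */
struct mpesw_state {
	int rule_count;
	int active;
};

static void add_rule(struct mpesw_state *st)
{
	if (++st->rule_count != 1)
		return;			/* mode already active */
	st->active = 1;			/* first rule: activate MPESW */
	puts("activate MPESW LAG");
}

static void del_rule(struct mpesw_state *st)
{
	if (--st->rule_count)
		return;			/* other rules still present */
	st->active = 0;			/* last rule gone: tear down */
	puts("disable MPESW LAG");
}

int main(void)
{
	struct mpesw_state st = { 0 };

	add_rule(&st);
	add_rule(&st);
	del_rule(&st);
	del_rule(&st);
	return 0;
}
```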
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
new file mode 100644
index 000000000000..be4abcb8fcd5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LAG_MPESW_H__
+#define __MLX5_LAG_MPESW_H__
+
+#include "lag.h"
+#include "mlx5_core.h"
+
+struct lag_mpesw {
+ struct work_struct mpesw_work;
+ atomic_t mpesw_rule_count;
+};
+
+void mlx5_mpesw_work(struct work_struct *work);
+int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
+bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
+void mlx5_lag_mpesw_init(struct mlx5_lag *ldev);
+void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev);
+#else
+static inline void mlx5_lag_mpesw_init(struct mlx5_lag *ldev) {}
+static inline void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev) {}
+#endif
+
+#endif /* __MLX5_LAG_MPESW_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 84f75aa25214..c9b4e50a593e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -936,6 +936,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_sf_table_cleanup;
}
+ err = mlx5_fs_core_alloc(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc flow steering\n");
+ goto err_fs;
+ }
+
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -946,6 +952,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
return 0;
+err_fs:
+ mlx5_sf_table_cleanup(dev);
err_sf_table_cleanup:
mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
@@ -983,6 +991,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
+ mlx5_fs_core_free(dev);
mlx5_sf_table_cleanup(dev);
mlx5_sf_hw_table_cleanup(dev);
mlx5_vhca_event_cleanup(dev);
@@ -1181,7 +1190,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start;
}
- err = mlx5_init_fs(dev);
+ err = mlx5_fs_core_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init flow steering\n");
goto err_fs;
@@ -1226,7 +1235,7 @@ err_ec:
err_vhca:
mlx5_vhca_event_stop(dev);
err_set_hca:
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_cleanup(dev);
err_fs:
mlx5_fpga_device_stop(dev);
err_fpga_start:
@@ -1252,7 +1261,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
mlx5_vhca_event_stop(dev);
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
@@ -1608,6 +1617,10 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
+ /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
+ * fw_reset before unregistering the devlink.
+ */
+ mlx5_drain_fw_reset(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev);
@@ -1756,7 +1769,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
}
/* Panic tear down fw command will stop the PCI bus communication
- * with the HCA, so the health polll is no longer needed.
+ * with the HCA, so the health poll is no longer needed.
*/
mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
@@ -1886,7 +1899,6 @@ static struct pci_driver mlx5_core_driver = {
* Return: Pointer to the associated mlx5_core_dev or NULL.
*/
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev)
- __acquires(&mdev->intf_state_mutex)
{
struct mlx5_core_dev *mdev;
@@ -1912,7 +1924,6 @@ EXPORT_SYMBOL(mlx5_vf_get_core_dev);
* access the mdev any more.
*/
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev)
- __releases(&mdev->intf_state_mutex)
{
mutex_unlock(&mdev->intf_state_mutex);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index db77f1d2eeb4..662f1d55e30e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -94,8 +94,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (msix_vec_count > max_msix)
return -EOVERFLOW;
- query_cap = kzalloc(query_sz, GFP_KERNEL);
- hca_cap = kzalloc(set_sz, GFP_KERNEL);
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
if (!hca_cap || !query_cap) {
ret = -ENOMEM;
goto out;
@@ -118,8 +118,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
out:
- kfree(hca_cap);
- kfree(query_cap);
+ kvfree(hca_cap);
+ kvfree(query_cap);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 850937cd8bf9..1383550f44c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
return 0;
}
+static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
+ struct mlx5dr_ste_actions_attr *attr,
+ bool rx_rule,
+ bool *recalc_cs_required)
+{
+ *recalc_cs_required = false;
+
+ /* if device supports csum recalculation - no adjustment needed */
+ if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
+ return;
+
+ /* no adjustment needed on TX rules */
+ if (!rx_rule)
+ return;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
+ /* Ignore the modify TTL action.
+ * It is always kept as last HW action.
+ */
+ attr->modify_actions--;
+ return;
+ }
+
+ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+ /* Due to a HW bug on some devices, modifying TTL on RX flows
+ * will cause an incorrect checksum calculation. In such cases
+ * we will use a FW table to recalculate the checksum.
+ */
+ *recalc_cs_required = true;
+}
+
static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
struct mlx5dr_action *actions[],
int last_idx)
@@ -650,8 +681,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_MODIFY_HDR:
attr.modify_index = action->rewrite->index;
attr.modify_actions = action->rewrite->num_of_actions;
- recalc_cs_required = action->rewrite->modify_ttl &&
- !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
+ if (action->rewrite->modify_ttl)
+ dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
+ &recalc_cs_required);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
@@ -732,12 +764,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
*new_hw_ste_arr_sz = nic_matcher->num_of_builders;
last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
- /* Due to a HW bug in some devices, modifying TTL on RX flows will
- * cause an incorrect checksum calculation. In this case we will
- * use a FW table to recalculate.
- */
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
- rx_rule && recalc_cs_required && dest_action) {
+ if (recalc_cs_required && dest_action) {
ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
if (ret) {
mlx5dr_err(dmn,
@@ -842,7 +869,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
- bool ignore_flow_level)
+ bool ignore_flow_level,
+ u32 flow_source)
{
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
@@ -914,7 +942,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
reformat_req,
&action->dest_tbl->fw_tbl.id,
&action->dest_tbl->fw_tbl.group_id,
- ignore_flow_level);
+ ignore_flow_level,
+ flow_source);
if (ret)
goto free_action;
@@ -1556,12 +1585,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
-static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
-{
- return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
- !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
-}
-
static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
@@ -1573,6 +1596,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite->dmn;
+ __be64 *modify_ttl_sw_action = NULL;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
@@ -1585,8 +1609,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
action->rewrite->allow_rx = 1;
action->rewrite->allow_tx = 1;
- for (i = 0; i < num_sw_actions; i++) {
- sw_action = &sw_actions[i];
+ for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
+ /* modify TTL is handled separately, as a last action */
+ if (i == num_sw_actions) {
+ sw_action = modify_ttl_sw_action;
+ modify_ttl_sw_action = NULL;
+ } else {
+ sw_action = &sw_actions[i];
+ }
ret = dr_action_modify_check_field_limitation(action,
sw_action);
@@ -1595,10 +1625,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
if (!(*modify_ttl) &&
dr_action_modify_check_is_ttl_modify(sw_action)) {
- if (dr_action_modify_ttl_ignore(dmn))
- continue;
-
+ modify_ttl_sw_action = sw_action;
*modify_ttl = true;
+ continue;
}
/* Convert SW action to HW action */
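The dr_action change stops dropping the modify-TTL action and instead remembers it so it is always converted as the last HW action. A standalone sketch of that defer-to-the-end loop, with plain ints standing in for the __be64 SW actions (the value 42 is an arbitrary marker, not a real action opcode):

```c
#include <stdio.h>

/* Sketch of the loop restructuring in dr_actions_convert_modify_header():
 * the special action is remembered when first seen and emitted again as the
 * final iteration, so it always ends up last. Illustrative only.
 */
static int is_ttl_modify(int action) { return action == 42; }

int main(void)
{
	int sw_actions[] = { 1, 42, 3 };
	int num = 3, deferred = 0, have_deferred = 0, i, action;

	for (i = 0; i < num || have_deferred; i++) {
		if (i == num) {			/* emit the deferred action last */
			action = deferred;
			have_deferred = 0;
		} else {
			action = sw_actions[i];
			if (!have_deferred && is_ttl_modify(action)) {
				deferred = action;	/* handle separately, at the end */
				have_deferred = 1;
				continue;
			}
		}
		printf("convert action %d\n", action);
	}
	return 0;
}
```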
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 68a4c32d5f34..f05ef0cd54ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
- bool ignore_flow_level)
+ bool ignore_flow_level,
+ u32 flow_source)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
fte_info.val = val;
fte_info.dest_arr = dest;
fte_info.ignore_flow_level = ignore_flow_level;
+ fte_info.flow_context.flow_source = flow_source;
ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 5a322335f204..2010d4ac6519 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -420,7 +420,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
* encapsulation. The reason for that is that we support
* modify headers for outer headers only
*/
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
dr_ste_v0_set_rewrite_actions(last_ste,
attr->modify_actions,
@@ -513,7 +513,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
}
}
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
dr_ste_v0_arr_init_next(&last_ste,
added_stes,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 46866a5fc5ca..98320e3945ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1461,7 +1461,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
bool reformat_req,
u32 *tbl_id,
u32 *group_id,
- bool ignore_flow_level);
+ bool ignore_flow_level,
+ u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
#endif /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 045b0cf90063..728f81882589 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ u32 flow_source = fte->flow_context.flow_source;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions,
- ignore_flow_level);
+ ignore_flow_level,
+ flow_source);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index ec5cbec0d455..7626c85643b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -99,7 +99,8 @@ struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
u32 num_of_dests,
- bool ignore_flow_level);
+ bool ignore_flow_level,
+ u32 flow_source);
struct mlx5dr_action *mlx5dr_action_create_drop(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 8846d30a380a..ac020cb78072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -280,7 +280,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- out = kzalloc(out_sz, GFP_KERNEL);
+ out = kvzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -307,7 +307,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
ether_addr_copy(addr_list[i], mac_addr);
}
out:
- kfree(out);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
@@ -335,7 +335,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
- in = kzalloc(in_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -360,7 +360,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
}
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- kfree(in);
+ kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
@@ -386,7 +386,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
list_size * MLX5_ST_SZ_BYTES(vlan_layout);
memset(out, 0, sizeof(out));
- in = kzalloc(in_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -411,7 +411,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
}
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
- kfree(in);
+ kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
@@ -542,8 +542,8 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
out_sz += nout * sizeof(*gid);
- in = kzalloc(in_sz, GFP_KERNEL);
- out = kzalloc(out_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
+ out = kvzalloc(out_sz, GFP_KERNEL);
if (!in || !out) {
err = -ENOMEM;
goto out;
@@ -573,8 +573,8 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
gid->global.interface_id = tmp->global.interface_id;
out:
- kfree(in);
- kfree(out);
+ kvfree(in);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
@@ -607,8 +607,8 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
- in = kzalloc(in_sz, GFP_KERNEL);
- out = kzalloc(out_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
+ out = kvzalloc(out_sz, GFP_KERNEL);
if (!in || !out) {
err = -ENOMEM;
goto out;
@@ -638,8 +638,8 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
out:
- kfree(in);
- kfree(out);
+ kvfree(in);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
@@ -658,7 +658,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- out = kzalloc(out_sz, GFP_KERNEL);
+ out = kvzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -717,7 +717,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
system_image_guid);
ex:
- kfree(out);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
@@ -728,7 +728,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep;
int err;
- rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep)
return -ENOMEM;
@@ -736,7 +736,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
if (!err)
*sys_image_guid = rep->sys_image_guid;
- kfree(rep);
+ kvfree(rep);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
@@ -747,7 +747,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *rep;
int err;
- rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep)
return -ENOMEM;
@@ -755,7 +755,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
if (!err)
*node_guid = rep->node_guid;
- kfree(rep);
+ kvfree(rep);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
@@ -770,7 +770,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
int err;
- out = kzalloc(outlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -786,7 +786,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
nic_vport_context.promisc_all);
out:
- kfree(out);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
@@ -874,7 +874,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
int value;
int err;
- out = kzalloc(outlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -891,7 +891,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
*status = !value;
out:
- kfree(out);
+ kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
@@ -1033,7 +1033,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "vf %d\n", vf);
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- in = kzalloc(in_sz, GFP_KERNEL);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1065,7 +1065,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
req->cap_mask1_perm);
err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
- kfree(in);
+ kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 66ef0090755e..84621b4cb15b 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -69,7 +69,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
u8 mac[ETH_ALEN];
u64 local_mac;
- memset(mac, 0, ETH_ALEN);
+ eth_zero_addr(mac);
mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
&local_mac);
u64_to_ether_addr(local_mac, mac);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 138718f33dbd..6ad68b422129 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -101,6 +101,24 @@ static int lan966x_create_targets(struct platform_device *pdev,
return 0;
}
+static bool lan966x_port_unique_address(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port || port->dev == dev)
+ continue;
+
+ if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
+ return false;
+ }
+
+ return true;
+}
+
static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
{
struct lan966x_port *port = netdev_priv(dev);
@@ -108,16 +126,26 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
const struct sockaddr *addr = p;
int ret;
+ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+ return 0;
+
/* Learn the new net device MAC address in the mac table. */
ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
if (ret)
return ret;
+ /* If there is another port with the same address as the dev, then don't
+ * delete it from the MAC table
+ */
+ if (!lan966x_port_unique_address(dev))
+ goto out;
+
/* Then forget the previous one. */
ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
if (ret)
return ret;
+out:
eth_hw_addr_set(dev, addr->sa_data);
return ret;
}
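The lan966x change only forgets the old MAC from the switch table when no other front port still uses that address. A trivial sketch of the uniqueness check over an array of addresses (everything below is illustrative, not driver code):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Sketch of lan966x_port_unique_address(): before the old MAC is removed
 * from the MAC table, check that no other port still uses it.
 */
static bool addr_unique(unsigned char (*ports)[ETH_ALEN], int nports,
			int self, const unsigned char *addr)
{
	for (int p = 0; p < nports; p++) {
		if (p == self)
			continue;
		if (!memcmp(ports[p], addr, ETH_ALEN))
			return false;	/* another port still uses it */
	}
	return true;
}

int main(void)
{
	unsigned char ports[2][ETH_ALEN] = {
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	printf("safe to forget old MAC: %d\n",
	       addr_unique(ports, 2, 0, ports[0]));
	return 0;
}
```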
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 189a6a0a2e08..32709d21ab2f 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -742,7 +742,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
if (err)
return -EINVAL;
} else {
- sgmii = true; /* Phy is connnected to the MAC */
+ sgmii = true; /* Phy is connected to the MAC */
}
/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index e0d1d5b59981..8da7e25a47c9 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -2046,57 +2046,68 @@ static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
return __ffs(bond_mask);
}
-u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
+static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
+ struct ocelot_port *cpu)
{
- struct ocelot_port *ocelot_port = ocelot->ports[src_port];
- const struct net_device *bridge;
u32 mask = 0;
int port;
- if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING)
- return 0;
-
- bridge = ocelot_port->bridge;
- if (!bridge)
- return 0;
-
for (port = 0; port < ocelot->num_phys_ports; port++) {
- ocelot_port = ocelot->ports[port];
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
- if (ocelot_port->stp_state == BR_STATE_FORWARDING &&
- ocelot_port->bridge == bridge)
+ if (ocelot_port->dsa_8021q_cpu == cpu)
mask |= BIT(port);
}
return mask;
}
-EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
-u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
+u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu;
+
+ if (!cpu_port)
+ return 0;
+
+ return BIT(cpu_port->index);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask);
+
+u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[src_port];
+ const struct net_device *bridge;
u32 mask = 0;
int port;
+ if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
+ bridge = ocelot_port->bridge;
+ if (!bridge)
+ return 0;
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct ocelot_port *ocelot_port = ocelot->ports[port];
+ ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
- if (ocelot_port->is_dsa_8021q_cpu)
+ if (ocelot_port->stp_state == BR_STATE_FORWARDING &&
+ ocelot_port->bridge == bridge)
mask |= BIT(port);
}
return mask;
}
-EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask);
+EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
-void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
+static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
{
- unsigned long cpu_fwd_mask;
int port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
@@ -2108,15 +2119,6 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
if (joining && ocelot->ops->cut_through_fwd)
ocelot->ops->cut_through_fwd(ocelot);
- /* If a DSA tag_8021q CPU exists, it needs to be included in the
- * regular forwarding path of the front ports regardless of whether
- * those are bridged or standalone.
- * If DSA tag_8021q is not used, this returns 0, which is fine because
- * the hardware-based CPU port module can be a destination for packets
- * even if it isn't part of PGID_SRC.
- */
- cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot);
-
/* Apply FWD mask. The loop is needed to add/remove the current port as
* a source for the other ports.
*/
@@ -2129,17 +2131,19 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
mask = 0;
} else if (ocelot_port->is_dsa_8021q_cpu) {
/* The DSA tag_8021q CPU ports need to be able to
- * forward packets to all other ports except for
- * themselves
+ * forward packets to all ports assigned to them.
*/
- mask = GENMASK(ocelot->num_phys_ports - 1, 0);
- mask &= ~cpu_fwd_mask;
+ mask = ocelot_dsa_8021q_cpu_assigned_ports(ocelot,
+ ocelot_port);
} else if (ocelot_port->bridge) {
struct net_device *bond = ocelot_port->bond;
mask = ocelot_get_bridge_fwd_mask(ocelot, port);
- mask |= cpu_fwd_mask;
mask &= ~BIT(port);
+
+ mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
+
if (bond)
mask &= ~ocelot_get_bond_mask(ocelot, bond);
} else {
@@ -2147,7 +2151,8 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
* ports (if those exist), or to the hardware CPU port
* module otherwise.
*/
- mask = cpu_fwd_mask;
+ mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
}
ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
@@ -2163,29 +2168,94 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
if (!joining && ocelot->ops->cut_through_fwd)
ocelot->ops->cut_through_fwd(ocelot);
}
-EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
-void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+/* Update PGID_CPU which is the destination port mask used for whitelisting
+ * unicast addresses filtered towards the host. In the normal and NPI modes,
+ * this points to the analyzer entry for the CPU port module, while in DSA
+ * tag_8021q mode, it is a bit mask of all active CPU ports.
+ * PGID_SRC will take care of forwarding a packet from one user port to
+ * no more than a single CPU port.
+ */
+static void ocelot_update_pgid_cpu(struct ocelot *ocelot)
+{
+ int pgid_cpu = 0;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu)
+ continue;
+
+ pgid_cpu |= BIT(port);
+ }
+
+ if (!pgid_cpu)
+ pgid_cpu = BIT(ocelot->num_phys_ports);
+
+ ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU);
+}
+
+void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
+ int cpu)
{
+ struct ocelot_port *cpu_port = ocelot->ports[cpu];
u16 vid;
- ocelot->ports[port]->is_dsa_8021q_cpu = true;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
+
+ if (!cpu_port->is_dsa_8021q_cpu) {
+ cpu_port->is_dsa_8021q_cpu = true;
- for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_add(ocelot, port, vid, true);
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_add(ocelot, cpu, vid, true);
+
+ ocelot_update_pgid_cpu(ocelot);
+ }
+
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
-EXPORT_SYMBOL_GPL(ocelot_port_set_dsa_8021q_cpu);
+EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
-void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
{
+ struct ocelot_port *cpu_port = ocelot->ports[port]->dsa_8021q_cpu;
+ bool keep = false;
u16 vid;
+ int p;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot->ports[port]->dsa_8021q_cpu = NULL;
- ocelot->ports[port]->is_dsa_8021q_cpu = false;
+ for (p = 0; p < ocelot->num_phys_ports; p++) {
+ if (!ocelot->ports[p])
+ continue;
- for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_del(ocelot, port, vid);
+ if (ocelot->ports[p]->dsa_8021q_cpu == cpu_port) {
+ keep = true;
+ break;
+ }
+ }
+
+ if (!keep) {
+ cpu_port->is_dsa_8021q_cpu = false;
+
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
+
+ ocelot_update_pgid_cpu(ocelot);
+ }
+
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
-EXPORT_SYMBOL_GPL(ocelot_port_unset_dsa_8021q_cpu);
+EXPORT_SYMBOL_GPL(ocelot_port_unassign_dsa_8021q_cpu);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
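ocelot_update_pgid_cpu() above recomputes PGID_CPU as the OR of all DSA tag_8021q CPU ports and falls back to the hardware CPU port module when none is assigned. A small sketch of that mask computation (BIT() is redefined locally here, not the kernel macro):

```c
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Sketch of ocelot_update_pgid_cpu(): OR together the tag_8021q CPU ports,
 * or fall back to the CPU port module bit (num_phys_ports) if there are none.
 */
static unsigned int pgid_cpu_mask(const int *is_8021q_cpu, int num_phys_ports)
{
	unsigned int mask = 0;

	for (int port = 0; port < num_phys_ports; port++)
		if (is_8021q_cpu[port])
			mask |= BIT(port);

	return mask ? mask : BIT(num_phys_ports);
}

int main(void)
{
	int cpu_ports[4] = { 0, 0, 1, 0 };

	printf("PGID_CPU = 0x%x\n", pgid_cpu_mask(cpu_ports, 4));
	printf("fallback = 0x%x\n", pgid_cpu_mask((int[4]){ 0 }, 4));
	return 0;
}
```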
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 51cf241ff7d0..7c0897e779dc 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -279,6 +279,22 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.pol_ix = OCELOT_POLICER_DISCARD;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
+ case FLOW_ACTION_ACCEPT:
+ if (filter->block_id != VCAP_ES0 &&
+ filter->block_id != VCAP_IS1 &&
+ filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Accept action can only be offloaded to VCAP chains");
+ return -EOPNOTSUPP;
+ }
+ if (filter->block_id != VCAP_ES0 &&
+ filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
case FLOW_ACTION_TRAP:
if (filter->block_id != VCAP_IS2 ||
filter->lookup != 0) {
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index be168a372498..5e6136e80282 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -775,7 +775,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index d2de8ac44f72..fa5d4ddf429b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2405,7 +2405,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
intr_cnt++) {
if (vdev->vxge_entries[intr_cnt].in_use) {
- synchronize_irq(vdev->entries[intr_cnt].vector);
free_irq(vdev->entries[intr_cnt].vector,
vdev->vxge_entries[intr_cnt].arg);
vdev->vxge_entries[intr_cnt].in_use = 0;
@@ -2427,7 +2426,6 @@ static void vxge_rem_isr(struct vxgedev *vdev)
vdev->config.intr_type == MSI_X) {
vxge_rem_msix_isr(vdev);
} else if (vdev->config.intr_type == INTA) {
- synchronize_irq(vdev->pdev->irq);
free_irq(vdev->pdev->irq, vdev);
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 1edcd9f86c9c..443a5d6eb57b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -76,12 +76,119 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
return false;
}
+/**
+ * get_mangled_key() - Mangle the key if mangle act exists
+ * @rule: rule that carries the actions
+ * @buf: pointer to key to be mangled
+ * @offset: used to adjust mangled offset in L2/L3/L4 header
+ * @key_sz: key size
+ * @htype: mangling type
+ *
+ * Return: buf where the mangled key is stored.
+ */
+static void *get_mangled_key(struct flow_rule *rule, void *buf,
+ u32 offset, size_t key_sz,
+ enum flow_action_mangle_base htype)
+{
+ struct flow_action_entry *act;
+ u32 *val = (u32 *)buf;
+ u32 off, msk, key;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == FLOW_ACTION_MANGLE &&
+ act->mangle.htype == htype) {
+ off = act->mangle.offset - offset;
+ msk = act->mangle.mask;
+ key = act->mangle.val;
+
+ /* Mangling is supposed to be u32 aligned */
+ if (off % 4 || off >= key_sz)
+ continue;
+
+ val[off >> 2] &= msk;
+ val[off >> 2] |= key;
+ }
+ }
+
+ return buf;
+}
+
+/* Only tos and ttl are present in struct flow_match_ip, which does
+ * not follow the layout of the IPv4/IPv6 header. They therefore need
+ * special handling here: place them at their offsets within the
+ * ip/ipv6 header, so that mangle actions can be applied directly.
+ */
+#define NFP_IPV4_TOS_MASK GENMASK(23, 16)
+#define NFP_IPV4_TTL_MASK GENMASK(31, 24)
+#define NFP_IPV6_TCLASS_MASK GENMASK(27, 20)
+#define NFP_IPV6_HLIMIT_MASK GENMASK(7, 0)
+static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,
+ bool is_v6)
+{
+ struct flow_match_ip match;
+ /* IPv4's ttl field is in the third dword. */
+ __be32 ip_hdr[3];
+ u32 tmp, hdr_len;
+
+ flow_rule_match_ip(rule, &match);
+
+ if (is_v6) {
+ tmp = FIELD_PREP(NFP_IPV6_TCLASS_MASK, match.key->tos);
+ ip_hdr[0] = cpu_to_be32(tmp);
+ tmp = FIELD_PREP(NFP_IPV6_HLIMIT_MASK, match.key->ttl);
+ ip_hdr[1] = cpu_to_be32(tmp);
+ hdr_len = 2 * sizeof(__be32);
+ } else {
+ tmp = FIELD_PREP(NFP_IPV4_TOS_MASK, match.key->tos);
+ ip_hdr[0] = cpu_to_be32(tmp);
+ tmp = FIELD_PREP(NFP_IPV4_TTL_MASK, match.key->ttl);
+ ip_hdr[2] = cpu_to_be32(tmp);
+ hdr_len = 3 * sizeof(__be32);
+ }
+
+ get_mangled_key(rule, ip_hdr, 0, hdr_len,
+ is_v6 ? FLOW_ACT_MANGLE_HDR_TYPE_IP6 :
+ FLOW_ACT_MANGLE_HDR_TYPE_IP4);
+
+ match.key = buf;
+
+ if (is_v6) {
+ tmp = be32_to_cpu(ip_hdr[0]);
+ match.key->tos = FIELD_GET(NFP_IPV6_TCLASS_MASK, tmp);
+ tmp = be32_to_cpu(ip_hdr[1]);
+ match.key->ttl = FIELD_GET(NFP_IPV6_HLIMIT_MASK, tmp);
+ } else {
+ tmp = be32_to_cpu(ip_hdr[0]);
+ match.key->tos = FIELD_GET(NFP_IPV4_TOS_MASK, tmp);
+ tmp = be32_to_cpu(ip_hdr[2]);
+ match.key->ttl = FIELD_GET(NFP_IPV4_TTL_MASK, tmp);
+ }
+
+ return buf;
+}
+
+/* Note that entry1 and entry2 are not interchangeable: entry1 must be
+ * the earlier flow, whose mangle actions (if any) are taken into
+ * account, while entry2 is the later flow, whose actions are ignored.
+ */
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
struct nfp_fl_ct_flow_entry *entry2)
{
unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
entry2->rule->match.dissector->used_keys;
- bool out;
+ bool out, is_v6 = false;
+ u8 ip_proto = 0;
+ /* Temporary buffer for mangled keys; 64 bytes is enough to cover the
+ * largest key struct among the fields that may be mangled.
+ * Supported fields to mangle:
+ * mac_src/mac_dst (struct flow_match_eth_addrs, 12B)
+ * nw_tos/nw_ttl (struct flow_match_ip, 2B)
+ * nw_src/nw_dst (struct flow_match_ipv4/6_addrs, 32B)
+ * tp_src/tp_dst (struct flow_match_ports, 4B)
+ */
+ char buf[64];
if (entry1->netdev && entry2->netdev &&
entry1->netdev != entry2->netdev)
@@ -105,6 +212,14 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
flow_rule_match_basic(entry1->rule, &match1);
flow_rule_match_basic(entry2->rule, &match2);
+
+ /* The n_proto field is mandatory in ct-related flows and must be
+ * either IPv4 or IPv6.
+ */
+ is_v6 = match1.key->n_proto == htons(ETH_P_IPV6);
+ /* The ip_proto field is mandatory whenever the ports field is matched */
+ ip_proto = match1.key->ip_proto;
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
@@ -115,6 +230,13 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
flow_rule_match_ipv4_addrs(entry1->rule, &match1);
flow_rule_match_ipv4_addrs(entry2->rule, &match2);
+
+ memcpy(buf, match1.key, sizeof(*match1.key));
+ match1.key = get_mangled_key(entry1->rule, buf,
+ offsetof(struct iphdr, saddr),
+ sizeof(*match1.key),
+ FLOW_ACT_MANGLE_HDR_TYPE_IP4);
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
@@ -125,16 +247,34 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
flow_rule_match_ipv6_addrs(entry1->rule, &match1);
flow_rule_match_ipv6_addrs(entry2->rule, &match2);
+
+ memcpy(buf, match1.key, sizeof(*match1.key));
+ match1.key = get_mangled_key(entry1->rule, buf,
+ offsetof(struct ipv6hdr, saddr),
+ sizeof(*match1.key),
+ FLOW_ACT_MANGLE_HDR_TYPE_IP6);
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
}
if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+ enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
struct flow_match_ports match1, match2;
flow_rule_match_ports(entry1->rule, &match1);
flow_rule_match_ports(entry2->rule, &match2);
+
+ if (ip_proto == IPPROTO_UDP)
+ htype = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
+ else if (ip_proto == IPPROTO_TCP)
+ htype = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
+
+ memcpy(buf, match1.key, sizeof(*match1.key));
+ match1.key = get_mangled_key(entry1->rule, buf, 0,
+ sizeof(*match1.key), htype);
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
@@ -145,6 +285,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
flow_rule_match_eth_addrs(entry1->rule, &match1);
flow_rule_match_eth_addrs(entry2->rule, &match2);
+
+ memcpy(buf, match1.key, sizeof(*match1.key));
+ match1.key = get_mangled_key(entry1->rule, buf, 0,
+ sizeof(*match1.key),
+ FLOW_ACT_MANGLE_HDR_TYPE_ETH);
+
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
@@ -185,6 +331,8 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
flow_rule_match_ip(entry1->rule, &match1);
flow_rule_match_ip(entry2->rule, &match2);
+
+ match1.key = get_mangled_tos_ttl(entry1->rule, buf, is_v6);
COMPARE_UNMASKED_FIELDS(match1, match2, &out);
if (out)
goto check_failed;
@@ -256,98 +404,16 @@ check_failed:
return -EINVAL;
}
-static int nfp_ct_check_mangle_merge(struct flow_action_entry *a_in,
- struct flow_rule *rule)
-{
- enum flow_action_mangle_base htype = a_in->mangle.htype;
- u32 offset = a_in->mangle.offset;
-
- switch (htype) {
- case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
- return -EOPNOTSUPP;
- break;
- case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_match_ip match;
-
- flow_rule_match_ip(rule, &match);
- if (offset == offsetof(struct iphdr, ttl) &&
- match.mask->ttl)
- return -EOPNOTSUPP;
- if (offset == round_down(offsetof(struct iphdr, tos), 4) &&
- match.mask->tos)
- return -EOPNOTSUPP;
- }
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- struct flow_match_ipv4_addrs match;
-
- flow_rule_match_ipv4_addrs(rule, &match);
- if (offset == offsetof(struct iphdr, saddr) &&
- match.mask->src)
- return -EOPNOTSUPP;
- if (offset == offsetof(struct iphdr, daddr) &&
- match.mask->dst)
- return -EOPNOTSUPP;
- }
- break;
- case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_match_ip match;
-
- flow_rule_match_ip(rule, &match);
- if (offset == round_down(offsetof(struct ipv6hdr, hop_limit), 4) &&
- match.mask->ttl)
- return -EOPNOTSUPP;
- /* for ipv6, tos and flow_lbl are in the same word */
- if (offset == round_down(offsetof(struct ipv6hdr, flow_lbl), 4) &&
- match.mask->tos)
- return -EOPNOTSUPP;
- }
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- struct flow_match_ipv6_addrs match;
-
- flow_rule_match_ipv6_addrs(rule, &match);
- if (offset >= offsetof(struct ipv6hdr, saddr) &&
- offset < offsetof(struct ipv6hdr, daddr) &&
- memchr_inv(&match.mask->src, 0, sizeof(match.mask->src)))
- return -EOPNOTSUPP;
- if (offset >= offsetof(struct ipv6hdr, daddr) &&
- offset < sizeof(struct ipv6hdr) &&
- memchr_inv(&match.mask->dst, 0, sizeof(match.mask->dst)))
- return -EOPNOTSUPP;
- }
- break;
- case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
- case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
- /* currently only can modify ports */
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
- return -EOPNOTSUPP;
- break;
- default:
- break;
- }
- return 0;
-}
-
static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
struct nfp_fl_ct_flow_entry *post_ct_entry,
struct nfp_fl_ct_flow_entry *nft_entry)
{
struct flow_action_entry *act;
- int err, i;
+ int i;
/* Check for pre_ct->action conflicts */
flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
switch (act->id) {
- case FLOW_ACTION_MANGLE:
- err = nfp_ct_check_mangle_merge(act, nft_entry->rule);
- if (err)
- return err;
- err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
- if (err)
- return err;
- break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
case FLOW_ACTION_VLAN_MANGLE:
@@ -363,11 +429,6 @@ static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
/* Check for nft->action conflicts */
flow_action_for_each(i, act, &nft_entry->rule->action) {
switch (act->id) {
- case FLOW_ACTION_MANGLE:
- err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
- if (err)
- return err;
- break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
case FLOW_ACTION_VLAN_MANGLE:
@@ -924,7 +985,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
if (err)
return err;
- err = nfp_ct_merge_check(post_ct_entry, nft_entry);
+ err = nfp_ct_merge_check(nft_entry, post_ct_entry);
if (err)
return err;
err = nfp_ct_check_meta(post_ct_entry, nft_entry);
@@ -1009,7 +1070,7 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
return -EINVAL;
- err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry);
+ err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
if (err)
return err;
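
The merge check now pre-applies any mangle actions carried by the earlier flow to its match key before comparing it against the later flow, rather than rejecting such combinations via the removed nfp_ct_check_mangle_merge(). A minimal standalone sketch of that masking arithmetic follows; struct mangle_act, apply_mangle() and the constants are illustrative stand-ins rather than driver code, assuming the same u32-aligned offset/mask/value semantics as FLOW_ACTION_MANGLE.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for one FLOW_ACTION_MANGLE entry. */
struct mangle_act {
	uint32_t offset;	/* offset into the L2/L3/L4 header */
	uint32_t mask;		/* bits kept from the original word */
	uint32_t val;		/* bits ORed in */
};

/* Apply a mangle action to a key buffer the way get_mangled_key() does:
 * the rebased offset must be u32 aligned and fall inside the key.
 */
static void apply_mangle(uint32_t *key, size_t key_sz,
			 const struct mangle_act *act, uint32_t base)
{
	uint32_t off = act->offset - base;

	if (off % 4 || off >= key_sz)
		return;			/* unsupported offset, leave key untouched */

	key[off / 4] &= act->mask;
	key[off / 4] |= act->val;
}

int main(void)
{
	uint32_t ipv4_addrs[2] = { 0xc0a80001, 0xc0a80002 };	/* saddr, daddr */
	struct mangle_act act = { .offset = 16, .mask = 0, .val = 0x0a000001 };

	/* With base 12 (offsetof(struct iphdr, saddr)), offset 16 lands on daddr,
	 * mirroring how the driver rebases mangle offsets per header.
	 */
	apply_mangle(ipv4_addrs, sizeof(ipv4_addrs), &act, 12);
	printf("daddr after mangle: 0x%08x\n", (unsigned int)ipv4_addrs[1]);
	return 0;
}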
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 63907aeb3884..ede90e086b28 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -576,7 +576,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
group->dirty = true;
group->slave_cnt = slave_count;
- /* Group may have been on queue for removal but is now offfloable. */
+ /* Group may have been on queue for removal but is now offloadable. */
group->to_remove = false;
mutex_unlock(&lag->lock);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9d86eea4dc16..193a167a6762 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -98,16 +98,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
+ u8 tmp;
int i;
flow_rule_match_eth_addrs(rule, &match);
/* Populate mac frame. */
for (i = 0; i < ETH_ALEN; i++) {
- ext->mac_dst[i] |= match.key->dst[i] &
- match.mask->dst[i];
+ tmp = match.key->dst[i] & match.mask->dst[i];
+ ext->mac_dst[i] |= tmp & (~msk->mac_dst[i]);
msk->mac_dst[i] |= match.mask->dst[i];
- ext->mac_src[i] |= match.key->src[i] &
- match.mask->src[i];
+
+ tmp = match.key->src[i] & match.mask->src[i];
+ ext->mac_src[i] |= tmp & (~msk->mac_src[i]);
msk->mac_src[i] |= match.mask->src[i];
}
}
@@ -189,11 +191,16 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
+ __be16 tmp;
flow_rule_match_ports(rule, &match);
- ext->port_src |= match.key->src & match.mask->src;
- ext->port_dst |= match.key->dst & match.mask->dst;
+
+ tmp = match.key->src & match.mask->src;
+ ext->port_src |= tmp & (~msk->port_src);
msk->port_src |= match.mask->src;
+
+ tmp = match.key->dst & match.mask->dst;
+ ext->port_dst |= tmp & (~msk->port_dst);
msk->port_dst |= match.mask->dst;
}
}
@@ -212,11 +219,16 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
+ u8 tmp;
flow_rule_match_ip(rule, &match);
- ext->tos |= match.key->tos & match.mask->tos;
- ext->ttl |= match.key->ttl & match.mask->ttl;
+
+ tmp = match.key->tos & match.mask->tos;
+ ext->tos |= tmp & (~msk->tos);
msk->tos |= match.mask->tos;
+
+ tmp = match.key->ttl & match.mask->ttl;
+ ext->ttl |= tmp & (~msk->ttl);
msk->ttl |= match.mask->ttl;
}
@@ -325,11 +337,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
+ __be32 tmp;
flow_rule_match_ipv4_addrs(rule, &match);
- ext->ipv4_src |= match.key->src & match.mask->src;
- ext->ipv4_dst |= match.key->dst & match.mask->dst;
+
+ tmp = match.key->src & match.mask->src;
+ ext->ipv4_src |= tmp & (~msk->ipv4_src);
msk->ipv4_src |= match.mask->src;
+
+ tmp = match.key->dst & match.mask->dst;
+ ext->ipv4_dst |= tmp & (~msk->ipv4_dst);
msk->ipv4_dst |= match.mask->dst;
}
@@ -342,15 +359,21 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
+ u8 tmp;
int i;
flow_rule_match_ipv6_addrs(rule, &match);
for (i = 0; i < sizeof(ext->ipv6_src); i++) {
- ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] &
- match.mask->src.s6_addr[i];
- ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
- match.mask->dst.s6_addr[i];
+ tmp = match.key->src.s6_addr[i] &
+ match.mask->src.s6_addr[i];
+ ext->ipv6_src.s6_addr[i] |= tmp &
+ (~msk->ipv6_src.s6_addr[i]);
msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];
+
+ tmp = match.key->dst.s6_addr[i] &
+ match.mask->dst.s6_addr[i];
+ ext->ipv6_dst.s6_addr[i] |= tmp &
+ (~msk->ipv6_dst.s6_addr[i]);
msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
}
}
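
Each compile helper above now ORs key bits into ext only where no earlier match layer has already claimed them in msk, so stacked matches cannot overwrite each other. A standalone sketch of that accumulation rule, using made-up field values and a hypothetical merge_field() helper:

#include <stdint.h>
#include <stdio.h>

/* Accumulate one match layer into (ext, msk) the way the
 * nfp_flower_compile_*() helpers now do: only bits not already owned
 * by a previous layer may be written into ext, while msk collects all
 * of the masks.
 */
static void merge_field(uint8_t *ext, uint8_t *msk, uint8_t key, uint8_t mask)
{
	uint8_t tmp = key & mask;	/* masked key bits from this layer */

	*ext |= tmp & ~(*msk);		/* don't disturb bits an earlier layer set */
	*msk |= mask;
}

int main(void)
{
	uint8_t ext = 0, msk = 0;

	merge_field(&ext, &msk, 0xAA, 0xF0);	/* first layer owns the high nibble */
	merge_field(&ext, &msk, 0x55, 0xFF);	/* second layer may only add the low nibble */
	printf("ext=0x%02x msk=0x%02x\n", ext, msk);	/* prints ext=0xa5 msk=0xff */
	return 0;
}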
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
index 9d5a0c9e1ca0..f6cd1b3efdfd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -1282,7 +1282,7 @@ void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
* @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
* results.
*
- * Return: Rrror if the parsing fails, ok otherwise.
+ * Return: Error if the parsing fails, ok otherwise.
*/
enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c5003fa1a25e..c91898be7c03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -823,7 +823,6 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
if (!cdev->hwfns[i].b_int_requested)
break;
- synchronize_irq(cdev->int_params.msix_table[i].vector);
free_irq(cdev->int_params.msix_table[i].vector,
&cdev->hwfns[i].sp_dpc);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 306b5f4bc632..2bd51a41ce8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -225,7 +225,7 @@ struct pfvf_start_queue_resp_tlv {
};
/* Extended queue information - additional index for reference inside qzone.
- * If commmunicated between VF/PF, each TLV relating to queues should be
+ * If communicated between VF/PF, each TLV relating to queues should be
* extended by one such [or have a future base TLV that already contains info].
*/
struct vfpf_qid_tlv {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index b4e5a15e308b..f56b679adb4b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1916,7 +1916,6 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
for (i = 0; i < edev->int_info.used_cnt; i++) {
if (edev->int_info.msix_cnt) {
- synchronize_irq(edev->int_info.msix[i].vector);
free_irq(edev->int_info.msix[i].vector,
&edev->fp_array[i]);
} else {
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b30589a135c2..06f4d9a9e938 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3614,7 +3614,8 @@ static void ql_reset_work(struct work_struct *work)
qdev->mem_map_registers;
unsigned long hw_flags;
- if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+ test_bit(QL_RESET_START, &qdev->flags)) {
clear_bit(QL_LINK_MASTER, &qdev->flags);
/*
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 08062d73df10..b980bce763d3 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1027,8 +1027,11 @@ struct ravb_hw_info {
unsigned tx_counters:1; /* E-MAC has TX counters */
unsigned carrier_counters:1; /* E-MAC has carrier counters */
unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */
+ unsigned irq_en_dis:1; /* Has separate irq enable and disable regs */
+ unsigned err_mgmt_irqs:1; /* Line1 (Err) and Line2 (Mgmt) irqs are separate */
unsigned gptp:1; /* AVB-DMAC has gPTP support */
unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned gptp_ref_clk:1; /* gPTP has separate reference clock */
unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */
unsigned magic_pkt:1; /* E-MAC supports magic packet detection */
unsigned half_duplex:1; /* E-MAC supports half duplex mode */
@@ -1040,6 +1043,7 @@ struct ravb_private {
void __iomem *addr;
struct clk *clk;
struct clk *refclk;
+ struct clk *gptp_clk;
struct mdiobb_ctrl mdiobb;
u32 num_rx_ring[NUM_RX_QUEUE];
u32 num_tx_ring[NUM_TX_QUEUE];
@@ -1077,6 +1081,8 @@ struct ravb_private {
int msg_enable;
int speed;
int emac_irq;
+ int erra_irq;
+ int mgmta_irq;
int rx_irqs[NUM_RX_QUEUE];
int tx_irqs[NUM_TX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 525d66f71f02..b357ac4c56c5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1124,7 +1124,7 @@ static bool ravb_queue_interrupt(struct net_device *ndev, int q)
if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
if (napi_schedule_prep(&priv->napi[q])) {
/* Mask RX and TX interrupts */
- if (!info->multi_irqs) {
+ if (!info->irq_en_dis) {
ravb_write(ndev, ric0 & ~BIT(q), RIC0);
ravb_write(ndev, tic & ~BIT(q), TIC);
} else {
@@ -1306,7 +1306,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Re-enable RX/TX interrupts */
spin_lock_irqsave(&priv->lock, flags);
- if (!info->multi_irqs) {
+ if (!info->irq_en_dis) {
ravb_modify(ndev, RIC0, mask, mask);
ravb_modify(ndev, TIC, mask, mask);
} else {
@@ -1798,12 +1798,23 @@ static int ravb_open(struct net_device *ndev)
ndev, dev, "ch19:tx_nc");
if (error)
goto out_free_irq_nc_rx;
+
+ if (info->err_mgmt_irqs) {
+ error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
+ ndev, dev, "err_a");
+ if (error)
+ goto out_free_irq_nc_tx;
+ error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
+ ndev, dev, "mgmt_a");
+ if (error)
+ goto out_free_irq_erra;
+ }
}
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq_nc_tx;
+ goto out_free_irq_mgmta;
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
@@ -1823,9 +1834,15 @@ out_ptp_stop:
/* Stop PTP Clock driver */
if (info->gptp)
ravb_ptp_stop(ndev);
-out_free_irq_nc_tx:
+out_free_irq_mgmta:
if (!info->multi_irqs)
goto out_free_irq;
+ if (info->err_mgmt_irqs)
+ free_irq(priv->mgmta_irq, ndev);
+out_free_irq_erra:
+ if (info->err_mgmt_irqs)
+ free_irq(priv->erra_irq, ndev);
+out_free_irq_nc_tx:
free_irq(priv->tx_irqs[RAVB_NC], ndev);
out_free_irq_nc_rx:
free_irq(priv->rx_irqs[RAVB_NC], ndev);
@@ -2166,6 +2183,10 @@ static int ravb_close(struct net_device *ndev)
free_irq(priv->tx_irqs[RAVB_BE], ndev);
free_irq(priv->rx_irqs[RAVB_BE], ndev);
free_irq(priv->emac_irq, ndev);
+ if (info->err_mgmt_irqs) {
+ free_irq(priv->erra_irq, ndev);
+ free_irq(priv->mgmta_irq, ndev);
+ }
}
free_irq(ndev->irq, ndev);
@@ -2410,6 +2431,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
+ .irq_en_dis = 1,
.ccc_gac = 1,
.nc_queues = 1,
.magic_pkt = 1,
@@ -2438,6 +2460,31 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.magic_pkt = 1,
};
+static const struct ravb_hw_info ravb_rzv2m_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
+ .multi_irqs = 1,
+ .err_mgmt_irqs = 1,
+ .gptp = 1,
+ .gptp_ref_clk = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
static const struct ravb_hw_info gbeth_hw_info = {
.rx_ring_free = ravb_rx_ring_free_gbeth,
.rx_ring_format = ravb_rx_ring_format_gbeth,
@@ -2465,6 +2512,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
};
@@ -2473,11 +2521,15 @@ MODULE_DEVICE_TABLE(of, ravb_match_table);
static int ravb_set_gti(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct device *dev = ndev->dev.parent;
unsigned long rate;
uint64_t inc;
- rate = clk_get_rate(priv->clk);
+ if (info->gptp_ref_clk)
+ rate = clk_get_rate(priv->gptp_clk);
+ else
+ rate = clk_get_rate(priv->clk);
if (!rate)
return -EINVAL;
@@ -2594,10 +2646,14 @@ static int ravb_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- if (info->multi_irqs)
- irq = platform_get_irq_byname(pdev, "ch22");
- else
+ if (info->multi_irqs) {
+ if (info->err_mgmt_irqs)
+ irq = platform_get_irq_byname(pdev, "dia");
+ else
+ irq = platform_get_irq_byname(pdev, "ch22");
+ } else {
irq = platform_get_irq(pdev, 0);
+ }
if (irq < 0) {
error = irq;
goto out_release;
@@ -2639,7 +2695,10 @@ static int ravb_probe(struct platform_device *pdev)
of_property_read_bool(np, "renesas,ether-link-active-low");
if (info->multi_irqs) {
- irq = platform_get_irq_byname(pdev, "ch24");
+ if (info->err_mgmt_irqs)
+ irq = platform_get_irq_byname(pdev, "line3");
+ else
+ irq = platform_get_irq_byname(pdev, "ch24");
if (irq < 0) {
error = irq;
goto out_release;
@@ -2661,6 +2720,22 @@ static int ravb_probe(struct platform_device *pdev)
}
priv->tx_irqs[i] = irq;
}
+
+ if (info->err_mgmt_irqs) {
+ irq = platform_get_irq_byname(pdev, "err_a");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->erra_irq = irq;
+
+ irq = platform_get_irq_byname(pdev, "mgmt_a");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->mgmta_irq = irq;
+ }
}
priv->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2676,6 +2751,15 @@ static int ravb_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->refclk);
+ if (info->gptp_ref_clk) {
+ priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
+ if (IS_ERR(priv->gptp_clk)) {
+ error = PTR_ERR(priv->gptp_clk);
+ goto out_disable_refclk;
+ }
+ clk_prepare_enable(priv->gptp_clk);
+ }
+
ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
@@ -2697,7 +2781,7 @@ static int ravb_probe(struct platform_device *pdev)
/* Set GTI value */
error = ravb_set_gti(ndev);
if (error)
- goto out_disable_refclk;
+ goto out_disable_gptp_clk;
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
@@ -2717,7 +2801,7 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_disable_refclk;
+ goto out_disable_gptp_clk;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
@@ -2780,6 +2864,8 @@ out_dma_free:
/* Stop PTP Clock driver */
if (info->ccc_gac)
ravb_ptp_stop(ndev);
+out_disable_gptp_clk:
+ clk_disable_unprepare(priv->gptp_clk);
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
out_release:
@@ -2801,6 +2887,7 @@ static int ravb_remove(struct platform_device *pdev)
if (info->ccc_gac)
ravb_ptp_stop(ndev);
+ clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index c099656dd75b..87c4306d66ec 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -198,7 +198,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
priv->ptp.extts[req->index] = on;
spin_lock_irqsave(&priv->lock, flags);
- if (!info->multi_irqs)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
else if (on)
ravb_write(ndev, GIE_PTCS, GIE);
@@ -254,7 +254,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
error = ravb_ptp_update_compare(priv, (u32)start_ns);
if (!error) {
/* Unmask interrupt */
- if (!info->multi_irqs)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
else
ravb_write(ndev, GIE_PTMS0, GIE);
@@ -266,7 +266,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
perout->period = 0;
/* Mask interrupt */
- if (!info->multi_irqs)
+ if (!info->irq_en_dis)
ravb_modify(ndev, GIC, GIC_PTME, 0);
else
ravb_write(ndev, GID_PTMD0, GID);
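
The new irq_en_dis flag separates two interrupt-control layouts: hardware with a single read-modify-write control register (the GIC path above) versus hardware with dedicated enable and disable registers (the GIE/GID path), which is why these call sites now key off irq_en_dis instead of multi_irqs. A small sketch of the two styles follows; the helpers and register variables are placeholders, not the real RAVB registers.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical MMIO helpers standing in for ravb_write()/ravb_modify(). */
static void mmio_write(volatile uint32_t *reg, uint32_t val) { *reg = val; }
static void mmio_modify(volatile uint32_t *reg, uint32_t clear, uint32_t set)
{
	*reg = (*reg & ~clear) | set;
}

/* Enable or disable one interrupt bit depending on the register layout:
 * - shared control register: read-modify-write it
 * - separate enable/disable registers: write the bit to one of the two
 */
static void irq_bit_set(volatile uint32_t *ctrl, volatile uint32_t *enable_reg,
			volatile uint32_t *disable_reg, uint32_t bit,
			bool on, bool has_en_dis_regs)
{
	if (!has_en_dis_regs)
		mmio_modify(ctrl, bit, on ? bit : 0);
	else if (on)
		mmio_write(enable_reg, bit);
	else
		mmio_write(disable_reg, bit);
}

int main(void)
{
	uint32_t gic = 0, gie = 0, gid = 0;

	irq_bit_set(&gic, &gie, &gid, 1u << 3, true, false);	/* shared register style */
	irq_bit_set(&gic, &gie, &gid, 1u << 3, false, true);	/* separate disable register */
	return 0;
}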
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index a69d756e09b9..b2536d2c218a 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -1008,7 +1008,8 @@ static int ef100_process_design_param(struct efx_nic *efx,
}
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
- nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
+ nic_data->tso_max_payload_len = min_t(u64, reader->value,
+ GSO_LEGACY_MAX_SIZE);
netif_set_tso_max_size(efx->net_dev,
nic_data->tso_max_payload_len);
return 0;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 79df636d6df8..f4919e7ee77b 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -46,11 +46,6 @@ module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
static const struct efx_channel_type efx_default_channel_type;
/*************
@@ -1320,8 +1315,7 @@ void efx_init_napi_channel(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll,
- napi_weight);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
}
void efx_init_napi(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index f619ffb26787..a63f40b09856 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -112,11 +112,6 @@ module_param(ef4_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(ef4_separate_tx_channels,
"Use separate channels for TX and RX");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
/* This is the time (in jiffies) between invocations of the hardware
* monitor.
* On Falcon-based NICs, this will:
@@ -2017,8 +2012,7 @@ static void ef4_init_napi_channel(struct ef4_channel *channel)
struct ef4_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add_weight(channel->napi_dev, &channel->napi_str, ef4_poll,
- napi_weight);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll, 64);
}
static void ef4_init_napi(struct ef4_nic *efx)
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index f7306e93a8b8..b9369483758c 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -98,7 +98,8 @@ unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EF4_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));
+ DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
+ EF4_PAGE_SIZE));
return max_descs;
}
diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig
index cb3c5cb42a53..c6ea09769873 100644
--- a/drivers/net/ethernet/sfc/siena/Kconfig
+++ b/drivers/net/ethernet/sfc/siena/Kconfig
@@ -2,6 +2,7 @@
config SFC_SIENA
tristate "Solarflare SFC9000 support"
depends on PCI
+ depends on PTP_1588_CLOCK
select MDIO
select CRC32
help
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 28391875de69..2465cf4d505c 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -46,11 +46,6 @@ module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
-/* This is the weight assigned to each of the (per-channel) virtual
- * NAPI devices.
- */
-static int napi_weight = 64;
-
static const struct efx_channel_type efx_default_channel_type;
/*************
@@ -1324,8 +1319,7 @@ static void efx_init_napi_channel(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll,
- napi_weight);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
}
void efx_siena_init_napi(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.h b/drivers/net/ethernet/sfc/siena/efx_channels.h
index 10d78049b885..c4b95a2d770f 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.h
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.h
@@ -8,8 +8,8 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
-#ifndef EFX_CHANNELS_H
-#define EFX_CHANNELS_H
+#ifndef EFX_SIENA_CHANNELS_H
+#define EFX_SIENA_CHANNELS_H
extern unsigned int efx_siena_interrupt_mode;
extern unsigned int efx_siena_rss_cpus;
diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
index b84b9e348c13..e166dcb9b99c 100644
--- a/drivers/net/ethernet/sfc/siena/tx.c
+++ b/drivers/net/ethernet/sfc/siena/tx.c
@@ -181,14 +181,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
efx_tx_send_pending(tx_queue->channel);
- if (segments) {
- tx_queue->tso_bursts++;
- tx_queue->tso_packets += segments;
- tx_queue->tx_packets += segments;
- } else {
- tx_queue->tx_packets++;
- }
-
+ tx_queue->tx_packets++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 9bc8281b7f5b..658ea2d34070 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -416,7 +416,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EFX_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+ DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
+ EFX_PAGE_SIZE));
return max_descs;
}
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index fc9cef9dcefc..24d66af797d4 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -140,7 +140,7 @@ static void PRINT_PKT(u_char *buf, int length)
pr_cont("\n");
}
#else
-#define PRINT_PKT(x...) do { } while (0)
+static inline void PRINT_PKT(u_char *buf, int length) { }
#endif
@@ -430,7 +430,7 @@ static inline void smc911x_rcv(struct net_device *dev)
SMC_PULL_DATA(lp, data, pkt_len+2+3);
DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
- PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
+ PRINT_PKT(data, min(pkt_len - 4, 64U));
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
@@ -480,7 +480,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
SMC_SET_TX_FIFO(lp, cmdB);
DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
- PRINT_PKT(buf, len <= 64 ? len : 64);
+ PRINT_PKT(buf, min(len, 64U));
/* Send pkt via PIO or DMA */
#ifdef SMC_USE_DMA
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index d3b4765c1a5b..8cc80b1db4cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -462,11 +462,6 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}
-static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
-{
- *addr = le32_to_cpu(p->des0);
-}
-
static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
p->des0 = cpu_to_le32(lower_32_bits(addr));
@@ -575,7 +570,6 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.init_tx_desc = dwmac4_rd_init_tx_desc,
.display_ring = dwmac4_display_ring,
.set_mss = dwmac4_set_mss_ctxt,
- .get_addr = dwmac4_get_addr,
.set_addr = dwmac4_set_addr,
.clear = dwmac4_clear,
.set_sarc = dwmac4_set_sarc,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index ccfb0102dde4..b1f0c3984a09 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -239,11 +239,6 @@ static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss)
p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV);
}
-static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr)
-{
- *addr = le32_to_cpu(p->des0);
-}
-
static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
{
p->des0 = cpu_to_le32(lower_32_bits(addr));
@@ -366,7 +361,6 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.init_rx_desc = dwxgmac2_init_rx_desc,
.init_tx_desc = dwxgmac2_init_tx_desc,
.set_mss = dwxgmac2_set_mss,
- .get_addr = dwxgmac2_get_addr,
.set_addr = dwxgmac2_set_addr,
.clear = dwxgmac2_clear,
.get_rx_hash = dwxgmac2_get_rx_hash,
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 6650edfab5bc..1bcbbd724fb5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -440,11 +440,6 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx,
pr_info("\n");
}
-static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
-{
- *addr = le32_to_cpu(p->des2);
-}
-
static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
p->des2 = cpu_to_le32(addr);
@@ -475,7 +470,6 @@ const struct stmmac_desc_ops enh_desc_ops = {
.get_timestamp = enh_desc_get_timestamp,
.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
.display_ring = enh_desc_display_ring,
- .get_addr = enh_desc_get_addr,
.set_addr = enh_desc_set_addr,
.clear = enh_desc_clear,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index f7dc447f05a0..592b4067f9b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -82,8 +82,6 @@ struct stmmac_desc_ops {
dma_addr_t dma_rx_phy, unsigned int desc_size);
/* set MSS via context descriptor */
void (*set_mss)(struct dma_desc *p, unsigned int mss);
- /* get descriptor skbuff address */
- void (*get_addr)(struct dma_desc *p, unsigned int *addr);
/* set descriptor skbuff address */
void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
/* clear descriptor */
@@ -142,8 +140,6 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, display_ring, __args)
#define stmmac_set_mss(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_mss, __args)
-#define stmmac_get_desc_addr(__priv, __args...) \
- stmmac_do_void_callback(__priv, desc, get_addr, __args)
#define stmmac_set_desc_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_addr, __args)
#define stmmac_clear_desc(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 98ef43f35802..e3da4da242ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -292,11 +292,6 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx,
pr_info("\n");
}
-static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
-{
- *addr = le32_to_cpu(p->des2);
-}
-
static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
p->des2 = cpu_to_le32(addr);
@@ -326,7 +321,6 @@ const struct stmmac_desc_ops ndesc_ops = {
.get_timestamp = ndesc_get_timestamp,
.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
.display_ring = ndesc_display_ring,
- .get_addr = ndesc_get_addr,
.set_addr = ndesc_set_addr,
.clear = ndesc_clear,
};
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b04a6a7bf566..435dc00d04e5 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1313,7 +1313,7 @@ static void cas_init_rx_dma(struct cas *cp)
writel(val, cp->regs + REG_RX_PAGE_SIZE);
/* enable the header parser if desired */
- if (CAS_HP_FIRMWARE == cas_prog_null)
+ if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
return;
val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
@@ -3780,7 +3780,7 @@ static void cas_reset(struct cas *cp, int blkflag)
/* program header parser */
if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
- (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
+ (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
cas_load_firmware(cp, CAS_HP_FIRMWARE);
} else {
cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
diff --git a/drivers/net/ethernet/sunplus/Kconfig b/drivers/net/ethernet/sunplus/Kconfig
index d0144a2ab918..be50a6b723eb 100644
--- a/drivers/net/ethernet/sunplus/Kconfig
+++ b/drivers/net/ethernet/sunplus/Kconfig
@@ -23,9 +23,6 @@ config SP7021_EMAC
tristate "Sunplus Dual 10M/100M Ethernet devices"
depends on SOC_SP7021 || COMPILE_TEST
select PHYLIB
- select COMMON_CLK_SP7021
- select RESET_SUNPLUS
- select NVMEM_SUNPLUS_OCOTP
help
If you have Sunplus dual 10M/100M Ethernet devices, say Y.
The network device creates two net-device interfaces.
diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.c b/drivers/net/ethernet/sunplus/spl2sw_int.c
index 69b1e2e0271e..a37c9a4c281f 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_int.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_int.c
@@ -29,6 +29,7 @@ int spl2sw_rx_poll(struct napi_struct *napi, int budget)
u32 mask;
int port;
u32 cmd;
+ u32 len;
/* Process high-priority queue and then low-priority queue. */
for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) {
@@ -63,10 +64,11 @@ int spl2sw_rx_poll(struct napi_struct *napi, int budget)
skb_put(skb, pkg_len - 4); /* Minus FCS */
skb->ip_summed = CHECKSUM_NONE;
skb->protocol = eth_type_trans(skb, comm->ndev[port]);
+ len = skb->len;
netif_receive_skb(skb);
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += len;
/* Allocate a new skb for receiving. */
new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
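
The packet length is now cached before netif_receive_skb(), since the skb belongs to the network stack once handed over and may already be freed by the time the statistics update runs. A tiny standalone sketch of that cache-before-consume pattern; fake_skb and consume_skb_like() are stand-ins, not kernel APIs.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct fake_skb {
	uint32_t len;
};

/* Stand-in for netif_receive_skb(): ownership passes to the callee,
 * which may free the buffer immediately.
 */
static void consume_skb_like(struct fake_skb *skb)
{
	free(skb);
}

int main(void)
{
	struct fake_skb *skb = malloc(sizeof(*skb));
	uint64_t rx_bytes = 0;
	uint32_t len;

	if (!skb)
		return 1;
	skb->len = 1514;

	len = skb->len;			/* cache before giving the skb away */
	consume_skb_like(skb);		/* the skb must not be touched after this */
	rx_bytes += len;		/* safe: uses the cached value */

	printf("rx_bytes=%llu\n", (unsigned long long)rx_bytes);
	return 0;
}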
diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.c b/drivers/net/ethernet/sunplus/spl2sw_mdio.c
index 139ac8f2685e..733ae1704269 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_mdio.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.c
@@ -97,8 +97,10 @@ u32 spl2sw_mdio_init(struct spl2sw_common *comm)
/* Allocate and register mdio bus. */
mii_bus = devm_mdiobus_alloc(&comm->pdev->dev);
- if (!mii_bus)
- return -ENOMEM;
+ if (!mii_bus) {
+ ret = -ENOMEM;
+ goto out;
+ }
mii_bus->name = "sunplus_mii_bus";
mii_bus->parent = &comm->pdev->dev;
@@ -110,10 +112,13 @@ u32 spl2sw_mdio_init(struct spl2sw_common *comm)
ret = of_mdiobus_register(mii_bus, mdio_np);
if (ret) {
dev_err(&comm->pdev->dev, "Failed to register mdiobus!\n");
- return ret;
+ goto out;
}
comm->mii_bus = mii_bus;
+
+out:
+ of_node_put(mdio_np);
return ret;
}
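
The reworked error handling funnels every exit through one label so that the reference held on the mdio OF node is dropped on success and failure alike. A generic sketch of that goto-unwind shape; node_get()/node_put() are placeholders for the of_get_*/of_node_put pair and the error value is illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Placeholders for of_get_child_by_name() / of_node_put(). */
static void *node_get(void) { return (void *)0x1; }
static void node_put(void *node) { (void)node; }

static int mdio_init_like(bool alloc_fails)
{
	void *np = node_get();
	int ret = 0;

	if (!np)
		return -1;		/* nothing acquired, nothing to put */

	/* ... allocate and register the MDIO bus ... */
	if (alloc_fails) {
		ret = -12;		/* stands in for -ENOMEM */
		goto out;
	}

out:
	node_put(np);			/* reference dropped on every path */
	return ret;
}

int main(void)
{
	printf("ok=%d fail=%d\n", mdio_init_like(false), mdio_init_like(true));
	return 0;
}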
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
index 98e3a271e017..a848e10f3ea4 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -38,7 +38,8 @@
#define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3)
/* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
+#define XLGMAC_TX_MAX_SPLIT \
+ ((GSO_LEGACY_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for a SKB */
#define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index aa32dd905e2b..e162771893af 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -166,8 +166,7 @@ static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- if (port->qos.est_oper)
- devm_kfree(&ndev->dev, port->qos.est_oper);
+ devm_kfree(&ndev->dev, port->qos.est_oper);
port->qos.est_oper = port->qos.est_admin;
port->qos.est_admin = NULL;
@@ -434,11 +433,8 @@ static void am65_cpsw_purge_est(struct net_device *ndev)
am65_cpsw_stop_est(ndev);
- if (port->qos.est_admin)
- devm_kfree(&ndev->dev, port->qos.est_admin);
-
- if (port->qos.est_oper)
- devm_kfree(&ndev->dev, port->qos.est_oper);
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+ devm_kfree(&ndev->dev, port->qos.est_oper);
port->qos.est_oper = NULL;
port->qos.est_admin = NULL;
@@ -524,8 +520,7 @@ static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
ret = am65_cpsw_configure_taprio(ndev, est_new);
if (!ret) {
if (taprio->enable) {
- if (port->qos.est_admin)
- devm_kfree(&ndev->dev, port->qos.est_admin);
+ devm_kfree(&ndev->dev, port->qos.est_admin);
port->qos.est_admin = est_new;
} else {
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index d5c1e5c4a508..4225efbeda3d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -385,7 +385,6 @@ struct axidma_bd {
* @phy_node: Pointer to device node structure
* @phylink: Pointer to phylink instance
* @phylink_config: phylink configuration settings
- * @napi: NAPI control structure
* @pcs_phy: Reference to PCS/PMA PHY if used
* @pcs: phylink pcs structure for PCS PHY
* @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core
@@ -396,7 +395,22 @@ struct axidma_bd {
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
+ * @napi_rx: NAPI RX control structure
* @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_bd_v: Virtual address of the RX buffer descriptor ring
+ * @rx_bd_p: Physical address (start address) of the RX buffer descr. ring
+ * @rx_bd_num: Size of RX buffer descriptor ring
+ * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
+ * accessed currently.
+ * @napi_tx: NAPI TX control structure
+ * @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_bd_v: Virtual address of the TX buffer descriptor ring
+ * @tx_bd_p: Physical address (start address) of the TX buffer descr. ring
+ * @tx_bd_num: Size of TX buffer descriptor ring
+ * @tx_bd_ci: Stores the next Tx buffer descriptor in the ring that may be
+ * complete. Only updated at runtime by TX NAPI poll.
+ * @tx_bd_tail: Stores the index of the next Tx buffer descriptor in the ring
+ * to be populated.
* @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
@@ -404,19 +418,6 @@ struct axidma_bd {
* @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
* @features: Stores the extended features supported by the axienet hw
- * @tx_bd_v: Virtual address of the TX buffer descriptor ring
- * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
- * @tx_bd_num: Size of TX buffer descriptor ring
- * @rx_bd_v: Virtual address of the RX buffer descriptor ring
- * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
- * @rx_bd_num: Size of RX buffer descriptor ring
- * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while alloc. BDs before a TX starts
- * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while processing BDs after the TX
- * completed.
- * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
- * accessed currently.
* @max_frm_size: Stores the maximum size of the frame that can be that
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
@@ -436,8 +437,6 @@ struct axienet_local {
struct phylink *phylink;
struct phylink_config phylink_config;
- struct napi_struct napi;
-
struct mdio_device *pcs_phy;
struct phylink_pcs pcs;
@@ -453,7 +452,20 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
+ struct napi_struct napi_rx;
u32 rx_dma_cr;
+ struct axidma_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
+ u32 rx_bd_ci;
+
+ struct napi_struct napi_tx;
+ u32 tx_dma_cr;
+ struct axidma_bd *tx_bd_v;
+ dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
+ u32 tx_bd_ci;
+ u32 tx_bd_tail;
struct work_struct dma_err_task;
@@ -465,16 +477,6 @@ struct axienet_local {
u32 options;
u32 features;
- struct axidma_bd *tx_bd_v;
- dma_addr_t tx_bd_p;
- u32 tx_bd_num;
- struct axidma_bd *rx_bd_v;
- dma_addr_t rx_bd_p;
- u32 rx_bd_num;
- u32 tx_bd_ci;
- u32 tx_bd_tail;
- u32 rx_bd_ci;
-
u32 max_frm_size;
u32 rxmem;
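
The reshuffled kernel-doc makes the ring ownership explicit: tx_bd_ci is advanced only by the TX NAPI poll and tx_bd_tail only by the xmit path, with the main.c changes below adding READ_ONCE()/WRITE_ONCE() for the cross-context accesses to tx_bd_tail. A standalone sketch of such a single-producer/single-consumer index pair follows; the names and the space check are illustrative only, since the driver itself inspects the descriptor's cntrl word rather than doing index arithmetic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64u

/* Illustrative SPSC descriptor ring: the xmit side only writes tail,
 * the completion poll only writes ci (clean index). In the driver the
 * cross-thread accesses to tail additionally use READ_ONCE/WRITE_ONCE.
 */
struct ring {
	uint32_t ci;	/* next descriptor that may be completed (poll side) */
	uint32_t tail;	/* next descriptor to be populated (xmit side) */
};

/* Number of descriptors currently in flight, handling wrap-around. */
static uint32_t ring_in_flight(const struct ring *r)
{
	return (r->tail + RING_SIZE - r->ci) % RING_SIZE;
}

/* True if num more descriptors can be queued without catching up to ci. */
static bool ring_has_space(const struct ring *r, uint32_t num)
{
	return ring_in_flight(r) + num < RING_SIZE;
}

int main(void)
{
	struct ring r = { .ci = 60, .tail = 2 };	/* tail has already wrapped */

	printf("in flight: %u, room for 8: %s\n",
	       ring_in_flight(&r), ring_has_space(&r, 8) ? "yes" : "no");
	return 0;
}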
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d6fc3f7acdf0..93c9f305bba4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -254,8 +254,6 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
*/
static void axienet_dma_start(struct axienet_local *lp)
{
- u32 tx_cr;
-
/* Start updating the Rx channel control register */
lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
@@ -269,16 +267,16 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
/* Start updating the Tx channel control register */
- tx_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
/* Only set interrupt delay timer if not generating an interrupt on
* the first TX packet. Otherwise leave at 0 to disable delay interrupt.
*/
if (lp->coalesce_count_tx > 1)
- tx_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);
+ lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
@@ -294,8 +292,8 @@ static void axienet_dma_start(struct axienet_local *lp)
* tail pointer register that the Tx channel will start transmitting.
*/
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- tx_cr |= XAXIDMA_CR_RUNSTOP_MASK;
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);
+ lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}
/**
@@ -666,37 +664,34 @@ static int axienet_device_reset(struct net_device *ndev)
/**
* axienet_free_tx_chain - Clean up a series of linked TX descriptors.
- * @ndev: Pointer to the net_device structure
+ * @lp: Pointer to the axienet_local structure
* @first_bd: Index of first descriptor to clean up
- * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
+ * @nr_bds: Max number of descriptors to clean up
+ * @force: Whether to clean descriptors even if not complete
* @sizep: Pointer to a u32 filled with the total sum of all bytes
* in all cleaned-up descriptors. Ignored if NULL.
+ * @budget: NAPI budget (use 0 when not called from NAPI poll)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
* Returns the number of descriptors handled.
*/
-static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
- int nr_bds, u32 *sizep)
+static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ int nr_bds, bool force, u32 *sizep, int budget)
{
- struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- int max_bds = nr_bds;
unsigned int status;
dma_addr_t phys;
int i;
- if (max_bds == -1)
- max_bds = lp->tx_bd_num;
-
- for (i = 0; i < max_bds; i++) {
+ for (i = 0; i < nr_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
status = cur_p->status;
- /* If no number is given, clean up *all* descriptors that have
- * been completed by the MAC.
+ /* If force is not specified, clean up only descriptors
+ * that have been completed by the MAC.
*/
- if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;
/* Ensure we see complete descriptor update */
@@ -707,7 +702,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
DMA_TO_DEVICE);
if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
- dev_consume_skb_irq(cur_p->skb);
+ napi_consume_skb(cur_p->skb, budget);
cur_p->app0 = 0;
cur_p->app1 = 0;
@@ -737,52 +732,68 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
* This function is invoked before BDs are allocated and transmission starts.
* This function returns 0 if a BD or group of BDs can be allocated for
* transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
+ * returns a busy status.
*/
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
int num_frag)
{
struct axidma_bd *cur_p;
- /* Ensure we see all descriptor updates from device or TX IRQ path */
+ /* Ensure we see all descriptor updates from device or TX polling */
rmb();
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+ cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
+ lp->tx_bd_num];
if (cur_p->cntrl)
return NETDEV_TX_BUSY;
return 0;
}
/**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
+ * axienet_tx_poll - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
- * @ndev: Pointer to the net_device structure
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of TX packets to process.
*
- * This function is invoked from the Axi DMA Tx isr to notify the completion
+ * Return: Number of TX packets processed.
+ *
+ * This function is invoked from the NAPI processing to notify the completion
* of transmit operation. It clears fields in the corresponding Tx BDs and
* unmaps the corresponding buffer so that CPU can regain ownership of the
* buffer. It finally invokes "netif_wake_queue" to restart transmission if
* required.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
- struct axienet_local *lp = netdev_priv(ndev);
- u32 packets = 0;
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
+ struct net_device *ndev = lp->ndev;
u32 size = 0;
+ int packets;
+
+ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
- packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
+ if (packets) {
+ lp->tx_bd_ci += packets;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci %= lp->tx_bd_num;
- lp->tx_bd_ci += packets;
- if (lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci -= lp->tx_bd_num;
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += size;
- ndev->stats.tx_packets += packets;
- ndev->stats.tx_bytes += size;
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
- /* Matches barrier in axienet_start_xmit */
- smp_mb();
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
- if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
- netif_wake_queue(ndev);
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable TX completion interrupts. This should
+ * cause an immediate interrupt if any TX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ }
+ return packets;
}
/**
@@ -807,12 +818,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_index_off;
skb_frag_t *frag;
dma_addr_t tail_p, phys;
+ u32 orig_tail_ptr, new_tail_ptr;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- u32 orig_tail_ptr = lp->tx_bd_tail;
+
+ orig_tail_ptr = lp->tx_bd_tail;
+ new_tail_ptr = orig_tail_ptr;
num_frag = skb_shinfo(skb)->nr_frags;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ cur_p = &lp->tx_bd_v[orig_tail_ptr];
if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
/* Should not happen as last start_xmit call should have
@@ -852,9 +866,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ if (++new_tail_ptr >= lp->tx_bd_num)
+ new_tail_ptr = 0;
+ cur_p = &lp->tx_bd_v[new_tail_ptr];
frag = &skb_shinfo(skb)->frags[ii];
phys = dma_map_single(lp->dev,
skb_frag_address(frag),
@@ -864,10 +878,8 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
- axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
- NULL);
- lp->tx_bd_tail = orig_tail_ptr;
-
+ axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
+ true, NULL, 0);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -877,17 +889,19 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
cur_p->skb = skb;
- tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
+ if (++new_tail_ptr >= lp->tx_bd_num)
+ new_tail_ptr = 0;
+ WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
/* Stop queue if next transmit may not have space */
if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
netif_stop_queue(ndev);
- /* Matches barrier in axienet_start_xmit_done */
+ /* Matches barrier in axienet_tx_poll */
smp_mb();
/* Space might have just been freed - check again */
@@ -899,13 +913,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/**
- * axienet_poll - Triggered by RX ISR to complete the received BD processing.
+ * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
* @napi: Pointer to NAPI structure.
- * @budget: Max number of packets to process.
+ * @budget: Max number of RX packets to process.
*
* Return: Number of RX packets processed.
*/
-static int axienet_poll(struct napi_struct *napi, int budget)
+static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
u32 length;
u32 csumstatus;
@@ -914,7 +928,7 @@ static int axienet_poll(struct napi_struct *napi, int budget)
dma_addr_t tail_p = 0;
struct axidma_bd *cur_p;
struct sk_buff *skb, *new_skb;
- struct axienet_local *lp = container_of(napi, struct axienet_local, napi);
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
@@ -1017,8 +1031,8 @@ static int axienet_poll(struct napi_struct *napi, int budget)
*
* Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
- * to complete the BD processing.
+ * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
+ * TX BD processing.
*/
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
@@ -1040,7 +1054,15 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
(lp->tx_bd_v[lp->tx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
} else {
- axienet_start_xmit_done(lp->ndev);
+ /* Disable further TX completion interrupts and schedule
+ * NAPI to handle the completions.
+ */
+ u32 cr = lp->tx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ napi_schedule(&lp->napi_tx);
}
return IRQ_HANDLED;
@@ -1084,7 +1106,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- napi_schedule(&lp->napi);
+ napi_schedule(&lp->napi_rx);
}
return IRQ_HANDLED;
@@ -1160,7 +1182,8 @@ static int axienet_open(struct net_device *ndev)
/* Enable worker thread for Axi DMA error handling */
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
- napi_enable(&lp->napi);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
@@ -1187,7 +1210,8 @@ err_eth_irq:
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- napi_disable(&lp->napi);
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
cancel_work_sync(&lp->dma_err_task);
@@ -1211,7 +1235,8 @@ static int axienet_stop(struct net_device *ndev)
dev_dbg(&ndev->dev, "axienet_close()\n");
- napi_disable(&lp->napi);
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1732,7 +1757,8 @@ static void axienet_dma_err_handler(struct work_struct *work)
dma_err_task);
struct net_device *ndev = lp->ndev;
- napi_disable(&lp->napi);
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
@@ -1798,7 +1824,8 @@ static void axienet_dma_err_handler(struct work_struct *work)
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
- napi_enable(&lp->napi);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
}
/**
@@ -1847,7 +1874,8 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
- netif_napi_add(ndev, &lp->napi, axienet_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll, NAPI_POLL_WEIGHT);
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
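The axienet hunks above move TX completion out of the ISR and into a dedicated NAPI instance: the ISR masks the TX completion interrupts and schedules napi_tx, and axienet_tx_poll() reclaims descriptors within the NAPI budget before re-arming the interrupt. A minimal sketch of that general pattern follows; struct my_priv, my_clean_tx() and my_enable_tx_irq() are illustrative stand-ins, not the driver's actual symbols.

        /* Sketch only: a generic TX-completion NAPI poll handler */
        static int my_tx_poll(struct napi_struct *napi, int budget)
        {
                struct my_priv *priv = container_of(napi, struct my_priv, napi_tx);
                int done;

                /* Reclaim up to @budget completed TX descriptors */
                done = my_clean_tx(priv, budget);

                if (done < budget && napi_complete_done(napi, done)) {
                        /* All pending work handled: re-arm the TX completion
                         * interrupt so later completions schedule NAPI again.
                         */
                        my_enable_tx_irq(priv);
                }

                return done;
        }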
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 72c31f0013ad..dd15af4e98c2 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -747,7 +747,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
#endif
#ifdef SBA
- DB_SBAN(2,"SBA: RAF frame received\n",0,0) ;
+ DB_SBAN(2, "SBA: RAF frame received") ;
sba_raf_received_pack(smc,sm,fs) ;
#endif
break ;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 866af2cc27a3..6da36cb8af80 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1349,7 +1349,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- unsigned int gso_max_size = GSO_MAX_SIZE;
+ unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
/* Find HW offload capabilities */
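The rndis_filter hunk above switches the default from GSO_MAX_SIZE to GSO_LEGACY_MAX_SIZE; the intent appears to be preserving the historical 64 KB ceiling for hardware that cannot segment the larger "BIG TCP" GSO sizes. Purely as an illustration (not this driver's code), a device with such a limit could also cap its advertised TSO size directly:

        /* Illustrative only: cap TSO for hardware limited to 64 KB frames */
        netif_set_tso_max_size(netdev, GSO_LEGACY_MAX_SIZE);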
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index db4cb2de218c..9cfe84319ee4 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1179,15 +1179,15 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
* Similarly, we could get an error back when updating flow control
* on a channel because it's not in the proper state.
*
- * In either case, we silently ignore a CHANNEL_NOT_RUNNING error
- * if we receive it.
+ * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
+ * error if we receive it.
*/
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
switch (result) {
case GENERIC_EE_SUCCESS:
- case GENERIC_EE_CHANNEL_NOT_RUNNING:
+ case GENERIC_EE_INCORRECT_CHANNEL_STATE:
gsi->result = 0;
break;
@@ -1367,9 +1367,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
struct gsi_event *event_done;
struct gsi_event *event;
struct gsi_trans *trans;
+ u32 trans_count = 0;
u32 byte_count = 0;
- u32 old_index;
u32 event_avail;
+ u32 old_index;
trans_info = &channel->trans_info;
@@ -1390,6 +1391,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
do {
trans->len = __le16_to_cpu(event->len);
byte_count += trans->len;
+ trans_count++;
/* Move on to the next event and transaction */
if (--event_avail)
@@ -1401,7 +1403,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
/* We record RX bytes when they are received */
channel->byte_count += byte_count;
- channel->trans_count++;
+ channel->trans_count += trans_count;
}
/* Initialize a ring, including allocating DMA memory for its entries */
@@ -1490,12 +1492,8 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
if (index == ring->index % ring->count)
return NULL;
- /* Get the transaction for the latest completed event. Take a
- * reference to keep it from completing before we give the events
- * for this and previous transactions back to the hardware.
- */
+ /* Get the transaction for the latest completed event. */
trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
- refcount_inc(&trans->refcount);
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
@@ -1510,9 +1508,7 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
gsi_trans_move_complete(trans);
/* Tell the hardware we've handled these events */
- gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
-
- gsi_trans_free(trans);
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
return gsi_channel_trans_complete(channel);
}
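In the gsi.c hunks above, gsi_evt_ring_rx_update() now tallies completed transactions alongside completed bytes and folds both into the channel counters once, and gsi_channel_update() no longer takes a transaction reference just to ring the event-ring doorbell. The tally-then-update pattern, reduced to a standalone sketch with illustrative types:

        #include <stdint.h>

        struct example_event   { uint16_t len; };
        struct example_channel { uint64_t byte_count; uint64_t trans_count; };

        /* Accumulate per-event statistics locally, then update the channel
         * counters a single time, as the hunk above now does.
         */
        static void update_stats(struct example_channel *ch,
                                 const struct example_event *ev,
                                 unsigned int count)
        {
                uint32_t bytes = 0, trans = 0;
                unsigned int i;

                for (i = 0; i < count; i++) {
                        bytes += ev[i].len;
                        trans++;
                }

                ch->byte_count += bytes;
                ch->trans_count += trans;
        }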
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 9cc657658811..5d66116b46b0 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -84,7 +84,6 @@ struct gsi_trans_info {
struct gsi_trans_pool pool; /* transaction pool */
struct gsi_trans_pool sg_pool; /* scatterlist pool */
struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
- struct gsi_trans_pool info_pool;/* command information pool */
struct gsi_trans **map; /* TRE -> transaction map */
spinlock_t spinlock; /* protects updates to the lists */
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 8906f4381032..5bd8b31656d3 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -515,7 +515,7 @@ enum gsi_err_type {
/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
enum gsi_generic_ee_result {
GENERIC_EE_SUCCESS = 0x1,
- GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2,
+ GENERIC_EE_INCORRECT_CHANNEL_STATE = 0x2,
GENERIC_EE_INCORRECT_DIRECTION = 0x3,
GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4,
GENERIC_EE_INCORRECT_CHANNEL = 0x5,
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 87e1d43c118c..55f8fe7d2668 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -410,10 +410,8 @@ void gsi_trans_free(struct gsi_trans *trans)
/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
- dma_addr_t addr, enum dma_data_direction direction,
- enum ipa_cmd_opcode opcode)
+ dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
- struct ipa_cmd_info *info;
u32 which = trans->used++;
struct scatterlist *sg;
@@ -438,9 +436,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
- info = &trans->info[which];
- info->opcode = opcode;
- info->direction = direction;
+ trans->cmd_opcode[which] = opcode;
}
/* Add a page transfer to a transaction. It will fill the only TRE. */
@@ -556,10 +552,10 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
struct gsi_ring *ring = &channel->tre_ring;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
bool bei = channel->toward_ipa;
- struct ipa_cmd_info *info;
struct gsi_tre *dest_tre;
struct scatterlist *sg;
u32 byte_count = 0;
+ u8 *cmd_opcode;
u32 avail;
u32 i;
@@ -570,7 +566,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
* If there is no info array we're doing a simple data
* transfer request, whose opcode is IPA_CMD_NONE.
*/
- info = trans->info ? &trans->info[0] : NULL;
+ cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
avail = ring->count - ring->index % ring->count;
dest_tre = gsi_ring_virt(ring, ring->index);
for_each_sg(trans->sgl, sg, trans->used, i) {
@@ -581,8 +577,8 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
byte_count += len;
if (!avail--)
dest_tre = gsi_ring_virt(ring, 0);
- if (info)
- opcode = info++->opcode;
+ if (cmd_opcode)
+ opcode = *cmd_opcode++;
gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
dest_tre++;
@@ -637,28 +633,6 @@ out_trans_free:
gsi_trans_free(trans);
}
-/* Commit a GSI transaction and wait for it to complete, with timeout */
-int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
- unsigned long timeout)
-{
- unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
- unsigned long remaining = 1; /* In case of empty transaction */
-
- if (!trans->used)
- goto out_trans_free;
-
- refcount_inc(&trans->refcount);
-
- __gsi_trans_commit(trans, true);
-
- remaining = wait_for_completion_timeout(&trans->completion,
- timeout_jiffies);
-out_trans_free:
- gsi_trans_free(trans);
-
- return remaining ? 0 : -ETIMEDOUT;
-}
-
/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index af379b49299e..020c3b32de1d 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -22,6 +22,9 @@ struct gsi;
struct gsi_trans;
struct gsi_trans_pool;
+/* Maximum number of TREs in an IPA immediate command transaction */
+#define IPA_COMMAND_TRANS_TRE_MAX 8
+
/**
* struct gsi_trans - a GSI transaction
*
@@ -34,8 +37,8 @@ struct gsi_trans_pool;
* @used: Number of TREs *used* (could be less than tre_count)
* @len: Total # of transfer bytes represented in sgl[] (set by core)
* @data: Preserved but not touched by the core transaction code
+ * @cmd_opcode: Array of command opcodes (command channel only)
* @sgl: An array of scatter/gather entries managed by core code
- * @info: Array of command information structures (command channel)
* @direction: DMA transfer direction (DMA_NONE for commands)
* @refcount: Reference count used for destruction
* @completion: Completed when the transaction completes
@@ -57,9 +60,11 @@ struct gsi_trans {
u8 used; /* # entries used in sgl[] */
u32 len; /* total # bytes across sgl[] */
- void *data;
+ union {
+ void *data;
+ u8 cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX];
+ };
struct scatterlist *sgl;
- struct ipa_cmd_info *info; /* array of entries, or null */
enum dma_data_direction direction;
refcount_t refcount;
@@ -165,12 +170,10 @@ void gsi_trans_free(struct gsi_trans *trans);
* @buf: Buffer pointer for command payload
* @size: Number of bytes in buffer
* @addr: DMA address for payload
- * @direction: Direction of DMA transfer (or DMA_NONE if none required)
* @opcode: IPA immediate command opcode
*/
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
- dma_addr_t addr, enum dma_data_direction direction,
- enum ipa_cmd_opcode opcode);
+ dma_addr_t addr, enum ipa_cmd_opcode opcode);
/**
* gsi_trans_page_add() - Add a page transfer to a transaction
@@ -206,15 +209,6 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db);
void gsi_trans_commit_wait(struct gsi_trans *trans);
/**
- * gsi_trans_commit_wait_timeout() - Commit a GSI transaction and wait for
- * it to complete, with timeout
- * @trans: Transaction to commit
- * @timeout: Timeout period (in milliseconds)
- */
-int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
- unsigned long timeout);
-
-/**
* gsi_trans_read_byte() - Issue a single byte read TRE on a channel
* @gsi: GSI pointer
* @channel_id: Channel on which to read a byte
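The gsi_trans.h hunks above drop the separately pooled ipa_cmd_info array and instead overlay a small per-TRE opcode array on the transaction's @data pointer with an anonymous union; the two uses never coexist, so no extra storage is needed. A standalone sketch of that layout (field names are illustrative):

        #include <stdint.h>

        #define CMD_TRE_MAX 8   /* mirrors IPA_COMMAND_TRANS_TRE_MAX above */

        struct example_trans {
                uint8_t used;                   /* entries filled so far */
                union {
                        void *data;             /* data transfers only */
                        uint8_t cmd_opcode[CMD_TRE_MAX]; /* command transfers only */
                };
        };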
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 9fc880eb7e3a..4fc3c72359f5 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -62,6 +62,7 @@ struct ipa_interrupt;
* @initialized: Bit mask indicating endpoints initialized
* @set_up: Bit mask indicating endpoints set up
* @enabled: Bit mask indicating endpoints enabled
+ * @modem_tx_count: Number of defined modem TX endpoints
* @endpoint: Array of endpoint information
* @channel_map: Mapping of GSI channel to IPA endpoint
* @name_map: Mapping of IPA endpoint name to IPA endpoint
@@ -114,6 +115,7 @@ struct ipa {
u32 set_up;
u32 enabled;
+ u32 modem_tx_count;
struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX];
struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT];
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index d57472ea077f..e58cd4478fd3 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -26,14 +26,13 @@
* other than data transfer to another endpoint.
*
* Immediate commands are represented by GSI transactions just like other
- * transfer requests, represented by a single GSI TRE. Each immediate
- * command has a well-defined format, having a payload of a known length.
- * This allows the transfer element's length field to be used to hold an
- * immediate command's opcode. The payload for a command resides in DRAM
- * and is described by a single scatterlist entry in its transaction.
- * Commands do not require a transaction completion callback. To commit
- * an immediate command transaction, either gsi_trans_commit_wait() or
- * gsi_trans_commit_wait_timeout() is used.
+ * transfer requests, and use a single GSI TRE. Each immediate command
+ * has a well-defined format, having a payload of a known length. This
+ * allows the transfer element's length field to be used to hold an
+ * immediate command's opcode. The payload for a command resides in AP
+ * memory and is described by a single scatterlist entry in its transaction.
+ * Commands do not require a transaction completion callback, and are
+ * (currently) always issued using gsi_trans_commit_wait().
*/
/* Some commands can wait until indicated pipeline stages are clear */
@@ -350,7 +349,6 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
- int ret;
/* This is as good a place as any to validate build constants */
ipa_cmd_validate_build();
@@ -359,20 +357,9 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
* a single transaction can require up to tlv_count of them,
* so we treat them as if that many can be allocated at once.
*/
- ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
- sizeof(union ipa_cmd_payload),
- tre_max, channel->tlv_count);
- if (ret)
- return ret;
-
- /* Each TRE needs a command info structure */
- ret = gsi_trans_pool_init(&trans_info->info_pool,
- sizeof(struct ipa_cmd_info),
- tre_max, channel->tlv_count);
- if (ret)
- gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
-
- return ret;
+ return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
+ sizeof(union ipa_cmd_payload),
+ tre_max, channel->tlv_count);
}
void ipa_cmd_pool_exit(struct gsi_channel *channel)
@@ -380,7 +367,6 @@ void ipa_cmd_pool_exit(struct gsi_channel *channel)
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
- gsi_trans_pool_exit(&trans_info->info_pool);
gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}
@@ -403,7 +389,6 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans,
dma_addr_t hash_addr)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_hw_ip_fltrt_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -434,7 +419,7 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans,
payload->nhash_rules_addr = cpu_to_le64(addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Initialize header space in IPA-local memory */
@@ -443,7 +428,6 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_hw_hdr_init_local *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -465,7 +449,7 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
payload->flags = cpu_to_le32(flags);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
@@ -522,7 +506,7 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
payload->clear_options = cpu_to_le32(options);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- DMA_NONE, opcode);
+ opcode);
}
/* Skip IP packet processing on the next data transfer on a TX channel */
@@ -530,7 +514,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_ip_packet_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -542,7 +525,7 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Use a DMA command to read or write a block of IPA-resident memory */
@@ -553,7 +536,6 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
struct ipa_cmd_hw_dma_mem_mem *payload;
union ipa_cmd_payload *cmd_payload;
- enum dma_data_direction direction;
dma_addr_t payload_addr;
u16 flags;
@@ -584,17 +566,14 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
payload->flags = cpu_to_le16(flags);
payload->system_addr = cpu_to_le64(addr);
- direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_ip_packet_tag_status *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -605,14 +584,13 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- enum dma_data_direction direction = DMA_TO_DEVICE;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
@@ -621,7 +599,7 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans)
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Add immediate commands to a transaction to clear the hardware pipeline */
@@ -661,28 +639,16 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
wait_for_completion(&ipa->completion);
}
-static struct ipa_cmd_info *
-ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
-{
- struct gsi_channel *channel;
-
- channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
-
- return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
-}
-
/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
struct ipa_endpoint *endpoint;
- struct gsi_trans *trans;
- endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
+ return NULL;
- trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
- tre_count, DMA_NONE);
- if (trans)
- trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
- return trans;
+ return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
+ tre_count, DMA_NONE);
}
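With the info pool removed, ipa_cmd_trans_alloc() above rejects any request for more than IPA_COMMAND_TRANS_TRE_MAX TREs, which lets gsi_trans_cmd_add() record each opcode into the fixed-size per-transaction array without further checks. A standalone sketch of that bounded-array discipline (names are illustrative, not the driver's API):

        #include <stdbool.h>
        #include <stdint.h>

        #define CMD_TRE_MAX 8

        struct example_cmd_trans {
                uint8_t used;
                uint8_t opcode[CMD_TRE_MAX];
        };

        /* Refuse oversized command transactions up front... */
        static bool cmd_trans_fits(uint32_t tre_count)
        {
                return tre_count <= CMD_TRE_MAX;
        }

        /* ...so each added command can store its opcode without rechecking. */
        static void cmd_add(struct example_cmd_trans *t, uint8_t opcode)
        {
                t->opcode[t->used++] = opcode;
        }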
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 05ed7e42e184..9215ddad1010 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -47,17 +47,6 @@ enum ipa_cmd_opcode {
};
/**
- * struct ipa_cmd_info - information needed for an IPA immediate command
- *
- * @opcode: The command opcode.
- * @direction: Direction of data transfer for DMA commands
- */
-struct ipa_cmd_info {
- enum ipa_cmd_opcode opcode;
- enum dma_data_direction direction;
-};
-
-/**
* ipa_cmd_table_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c
index 8ff351aefd23..00f4e506e6e5 100644
--- a/drivers/net/ipa/ipa_data-v3.1.c
+++ b/drivers/net/ipa/ipa_data-v3.1.c
@@ -103,6 +103,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -150,6 +151,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c
index d1c466abddb2..b7e32e87733e 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/ipa_data-v3.5.1.c
@@ -94,6 +94,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -142,6 +143,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index b1991cc6f0ca..1be823e5c5c2 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -88,6 +88,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -135,6 +136,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 32768,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c
index 1190a43e8743..683f1f91042f 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/ipa_data-v4.2.c
@@ -84,6 +84,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -132,6 +133,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index 944f72b0f285..79398f286a9c 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -97,6 +97,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -144,6 +145,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index 16786bff7ef8..4b96efd05cf2 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -89,6 +89,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -136,6 +137,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index dbbeecf6df29..e15eb3cd3e33 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -96,71 +96,9 @@ struct gsi_channel_data {
};
/**
- * struct ipa_endpoint_tx_data - configuration data for TX endpoints
- * @seq_type: primary packet processing sequencer type
- * @seq_rep_type: sequencer type for replication processing
- * @status_endpoint: endpoint to which status elements are sent
- *
- * The @status_endpoint is only valid if the endpoint's @status_enable
- * flag is set.
- */
-struct ipa_endpoint_tx_data {
- enum ipa_seq_type seq_type;
- enum ipa_seq_rep_type seq_rep_type;
- enum ipa_endpoint_name status_endpoint;
-};
-
-/**
- * struct ipa_endpoint_rx_data - configuration data for RX endpoints
- * @buffer_size: requested receive buffer size (bytes)
- * @pad_align: power-of-2 boundary to which packet payload is aligned
- * @aggr_close_eof: whether aggregation closes on end-of-frame
- *
- * With each packet it transfers, the IPA hardware can perform certain
- * transformations of its packet data. One of these is adding pad bytes
- * to the end of the packet data so the result ends on a power-of-2 boundary.
- *
- * It is also able to aggregate multiple packets into a single receive buffer.
- * Aggregation is "open" while a buffer is being filled, and "closes" when
- * certain criteria are met. One of those criteria is the sender indicating
- * a "frame" consisting of several transfers has ended.
- */
-struct ipa_endpoint_rx_data {
- u32 buffer_size;
- u32 pad_align;
- bool aggr_close_eof;
-};
-
-/**
- * struct ipa_endpoint_config_data - IPA endpoint hardware configuration
- * @resource_group: resource group to assign endpoint to
- * @checksum: whether checksum offload is enabled
- * @qmap: whether endpoint uses QMAP protocol
- * @aggregation: whether endpoint supports aggregation
- * @status_enable: whether endpoint uses status elements
- * @dma_mode: whether endpoint operates in DMA mode
- * @dma_endpoint: peer endpoint, if operating in DMA mode
- * @tx: TX-specific endpoint information (see above)
- * @rx: RX-specific endpoint information (see above)
- */
-struct ipa_endpoint_config_data {
- u32 resource_group;
- bool checksum;
- bool qmap;
- bool aggregation;
- bool status_enable;
- bool dma_mode;
- enum ipa_endpoint_name dma_endpoint;
- union {
- struct ipa_endpoint_tx_data tx;
- struct ipa_endpoint_rx_data rx;
- };
-};
-
-/**
* struct ipa_endpoint_data - IPA endpoint configuration data
* @filter_support: whether endpoint supports filtering
- * @config: hardware configuration (see above)
+ * @config: hardware configuration
*
* Not all endpoints support the IPA filtering capability. A filter table
* defines the filters to apply for those endpoints that support it. The
@@ -168,12 +106,12 @@ struct ipa_endpoint_config_data {
* for non-AP endpoints. For this reason we define *all* endpoints used
* in the system, and indicate whether they support filtering.
*
- * The remaining endpoint configuration data applies only to AP endpoints.
+ * The remaining endpoint configuration data specifies default hardware
+ * configuration values that apply only to AP endpoints.
*/
struct ipa_endpoint_data {
bool filter_support;
- /* Everything else is specified only for AP endpoints */
- struct ipa_endpoint_config_data config;
+ struct ipa_endpoint_config config;
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index e133eb2bebcf..385aa63ab4bb 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -35,7 +35,6 @@
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
@@ -81,6 +80,24 @@ static u32 aggr_byte_limit_max(enum ipa_version version)
return field_max(aggr_byte_limit_fmask(false));
}
+/* Compute the aggregation size value to use for a given buffer size */
+static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
+{
+ /* A hard aggregation limit will not be crossed; aggregation closes
+ * if saving incoming data would cross the hard byte limit boundary.
+ *
+ * With a soft limit, aggregation closes *after* the size boundary
+ * has been crossed. In that case the limit must leave enough space
+ * after that limit to receive a full MTU of data plus overhead.
+ */
+ if (!aggr_hard_limit)
+ rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+
+ /* The byte limit is encoded as a number of kilobytes */
+
+ return rx_buffer_size / SZ_1K;
+}
+
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -93,7 +110,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
if (!data->toward_ipa) {
+ const struct ipa_endpoint_rx *rx_config;
u32 buffer_size;
+ u32 aggr_size;
u32 limit;
if (data->endpoint.filter_support) {
@@ -107,8 +126,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (data->ee_id != GSI_EE_AP)
return true;
- buffer_size = data->endpoint.config.rx.buffer_size;
+ rx_config = &data->endpoint.config.rx;
+
/* The buffer size must hold an MTU plus overhead */
+ buffer_size = rx_config->buffer_size;
limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
if (buffer_size < limit) {
dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
@@ -116,27 +137,46 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return false;
}
- /* For an endpoint supporting receive aggregation, the
- * aggregation byte limit defines the point at which an
- * aggregation window will close. It is programmed into the
- * IPA hardware as a number of KB. We don't use "hard byte
- * limit" aggregation, so we need to supply enough space in
- * a receive buffer to hold a complete MTU plus normal skb
- * overhead *after* that aggregation byte limit has been
- * crossed.
- *
- * This check just ensures the receive buffer size doesn't
- * exceed what's representable in the aggregation limit field.
- */
- if (data->endpoint.config.aggregation) {
- limit += SZ_1K * aggr_byte_limit_max(ipa->version);
- if (buffer_size - NET_SKB_PAD > limit) {
- dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
- data->endpoint_id,
- buffer_size - NET_SKB_PAD, limit);
+ if (!data->endpoint.config.aggregation) {
+ bool result = true;
- return false;
+ /* No aggregation; check for bogus aggregation data */
+ if (rx_config->aggr_time_limit) {
+ dev_err(dev,
+ "time limit with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ if (rx_config->aggr_hard_limit) {
+ dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
}
+
+ if (rx_config->aggr_close_eof) {
+ dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ return result; /* Nothing more to check */
+ }
+
+ /* For an endpoint supporting receive aggregation, the byte
+ * limit defines the point at which aggregation closes. This
+ * check ensures the receive buffer size doesn't result in a
+ * limit that exceeds what's representable in the aggregation
+ * byte limit field.
+ */
+ aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
+ rx_config->aggr_hard_limit);
+ limit = aggr_byte_limit_max(ipa->version);
+ if (aggr_size > limit) {
+ dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
+ data->endpoint_id, aggr_size, limit);
+
+ return false;
}
return true; /* Nothing more to check for RX */
@@ -333,7 +373,7 @@ static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
- if (!endpoint->data->aggregation)
+ if (!endpoint->config.aggregation)
return;
/* Nothing to do if the endpoint doesn't have aggregation open */
@@ -402,12 +442,10 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
struct gsi_trans *trans;
u32 count;
- /* We need one command per modem TX endpoint. We can get an upper
- * bound on that by assuming all initialized endpoints are modem->IPA.
- * That won't happen, and we could be more precise, but this is fine
- * for now. End the transaction with commands to clear the pipeline.
+ /* We need one command per modem TX endpoint, plus the commands
+ * that clear the pipeline.
*/
- count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
+ count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -438,7 +476,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
ipa_cmd_pipeline_clear_add(trans);
- /* XXX This should have a 1 second timeout */
gsi_trans_commit_wait(trans);
ipa_cmd_pipeline_clear_wait(ipa);
@@ -453,7 +490,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
u32 val = 0;
/* FRAG_OFFLOAD_EN is 0 */
- if (endpoint->data->checksum) {
+ if (endpoint->config.checksum) {
enum ipa_version version = endpoint->ipa->version;
if (endpoint->toward_ipa) {
@@ -502,7 +539,7 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
u32 header_size = sizeof(struct rmnet_map_header);
/* Without checksum offload, we just have the MAP header */
- if (!endpoint->data->checksum)
+ if (!endpoint->config.checksum)
return header_size;
if (version < IPA_VERSION_4_5) {
@@ -544,7 +581,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
- if (endpoint->data->qmap) {
+ if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
@@ -583,23 +620,27 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
- u32 pad_align = endpoint->data->rx.pad_align;
+ u32 pad_align = endpoint->config.rx.pad_align;
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
- val |= HDR_ENDIANNESS_FMASK; /* big endian */
+ if (endpoint->config.qmap) {
+ /* We have a header, so we must specify its endianness */
+ val |= HDR_ENDIANNESS_FMASK; /* big endian */
- /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet
- * driver assumes this field is meaningful in packets it receives,
- * and assumes the header's payload length includes that padding.
- * The RMNet driver does *not* pad packets it sends, however, so
- * the pad field (although 0) should be ignored.
- */
- if (endpoint->data->qmap && !endpoint->toward_ipa) {
- val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
- /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
- /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ /* A QMAP header contains a 6 bit pad field at offset 0.
+ * The RMNet driver assumes this field is meaningful in
+ * packets it receives, and assumes the header's payload
+ * length includes that padding. The RMNet driver does
+ * *not* pad packets it sends, however, so the pad field
+ * (although 0) should be ignored.
+ */
+ if (!endpoint->toward_ipa) {
+ val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
+ /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+ val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ }
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
@@ -611,7 +652,7 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
*/
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
- if (endpoint->data->qmap && !endpoint->toward_ipa) {
+ if (endpoint->config.qmap && !endpoint->toward_ipa) {
u32 offset;
offset = offsetof(struct rmnet_map_header, pkt_len);
@@ -636,7 +677,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
- if (endpoint->data->qmap)
+ if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -650,8 +691,8 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
- if (endpoint->data->dma_mode) {
- enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
+ if (endpoint->config.dma_mode) {
+ enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id;
dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
@@ -666,18 +707,6 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
-/* Compute the aggregation size value to use for a given buffer size */
-static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
-{
- /* We don't use "hard byte limit" aggregation, so we define the
- * aggregation limit such that our buffer has enough space *after*
- * that limit to receive a full MTU of data, plus overhead.
- */
- rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
-
- return rx_buffer_size / SZ_1K;
-}
-
/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
@@ -696,9 +725,13 @@ static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
if (version < IPA_VERSION_4_5) {
/* We set aggregation granularity in ipa_hardware_config() */
- limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+ fmask = aggr_time_limit_fmask(true);
+ val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+ WARN(val > field_max(fmask),
+ "aggr_time_limit too large (%u > %u usec)\n",
+ limit, field_max(fmask) * IPA_AGGR_GRANULARITY);
- return u32_encode_bits(limit, aggr_time_limit_fmask(true));
+ return u32_encode_bits(val, fmask);
}
/* IPA v4.5 expresses the time limit using Qtime. The AP has
@@ -713,6 +746,9 @@ static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
/* Have to use pulse generator 1 (millisecond granularity) */
gran_sel = AGGR_GRAN_SEL_FMASK;
val = DIV_ROUND_CLOSEST(limit, 1000);
+ WARN(val > field_max(fmask),
+ "aggr_time_limit too large (%u > %u usec)\n",
+ limit, field_max(fmask) * 1000);
} else {
/* We can use pulse generator 0 (100 usec granularity) */
gran_sel = 0;
@@ -737,30 +773,29 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
enum ipa_version version = endpoint->ipa->version;
u32 val = 0;
- if (endpoint->data->aggregation) {
+ if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
- const struct ipa_endpoint_rx_data *rx_data;
+ const struct ipa_endpoint_rx *rx_config;
u32 buffer_size;
bool close_eof;
u32 limit;
- rx_data = &endpoint->data->rx;
+ rx_config = &endpoint->config.rx;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- buffer_size = rx_data->buffer_size;
- limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
+ buffer_size = rx_config->buffer_size;
+ limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
+ rx_config->aggr_hard_limit);
val |= aggr_byte_limit_encoded(version, limit);
- limit = IPA_AGGR_TIME_LIMIT;
+ limit = rx_config->aggr_time_limit;
val |= aggr_time_limit_encoded(version, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- close_eof = rx_data->aggr_close_eof;
+ close_eof = rx_config->aggr_close_eof;
val |= aggr_sw_eof_active_encoded(version, close_eof);
-
- /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
} else {
val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
AGGR_EN_FMASK);
@@ -945,7 +980,7 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
struct ipa *ipa = endpoint->ipa;
u32 val;
- val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
+ val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -958,10 +993,10 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
return; /* Register not valid for RX endpoints */
/* Low-order byte configures primary packet processing */
- val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);
+ val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
/* Second byte configures replicated packet processing */
- val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
+ val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
SEQ_REP_TYPE_FMASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -1019,13 +1054,13 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
- if (endpoint->data->status_enable) {
+ if (endpoint->config.status_enable) {
val |= STATUS_EN_FMASK;
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
- name = endpoint->data->tx.status_endpoint;
+ name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
val |= u32_encode_bits(status_endpoint_id,
@@ -1049,7 +1084,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
u32 len;
int ret;
- buffer_size = endpoint->data->rx.buffer_size;
+ buffer_size = endpoint->config.rx.buffer_size;
page = dev_alloc_pages(get_order(buffer_size));
if (!page)
return -ENOMEM;
@@ -1153,13 +1188,12 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
return;
skb = __dev_alloc_skb(len, GFP_ATOMIC);
- if (!skb)
- return;
-
- /* Copy the data into the socket buffer and receive it */
- skb_put(skb, len);
- memcpy(skb->data, data, len);
- skb->truesize += extra;
+ if (skb) {
+ /* Copy the data into the socket buffer and receive it */
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
+ }
ipa_modem_skb_rx(endpoint->netdev, skb);
}
@@ -1167,7 +1201,7 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
struct page *page, u32 len)
{
- u32 buffer_size = endpoint->data->rx.buffer_size;
+ u32 buffer_size = endpoint->config.rx.buffer_size;
struct sk_buff *skb;
/* Nothing to do if there's no netdev */
@@ -1274,7 +1308,7 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
struct page *page, u32 total_len)
{
- u32 buffer_size = endpoint->data->rx.buffer_size;
+ u32 buffer_size = endpoint->config.rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
u32 resid = total_len;
@@ -1304,10 +1338,10 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
* And if checksum offload is enabled a trailer containing
* computed checksum information will be appended.
*/
- align = endpoint->data->rx.pad_align ? : 1;
+ align = endpoint->config.rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
len = sizeof(*status) + ALIGN(len, align);
- if (endpoint->data->checksum)
+ if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
if (!ipa_endpoint_status_drop(endpoint, status)) {
@@ -1351,7 +1385,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
/* Parse or build a socket buffer using the actual received length */
page = trans->data;
- if (endpoint->data->status_enable)
+ if (endpoint->config.status_enable)
ipa_endpoint_status_parse(endpoint, page, trans->len);
else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
@@ -1385,7 +1419,7 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct page *page = trans->data;
if (page) {
- u32 buffer_size = endpoint->data->rx.buffer_size;
+ u32 buffer_size = endpoint->config.rx.buffer_size;
__free_pages(page, get_order(buffer_size));
}
@@ -1519,7 +1553,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
* All other cases just need to reset the underlying GSI channel.
*/
special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
- endpoint->data->aggregation;
+ endpoint->config.aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
@@ -1553,8 +1587,12 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
- if (!endpoint->toward_ipa)
- ipa_endpoint_init_hol_block_disable(endpoint);
+ if (!endpoint->toward_ipa) {
+ if (endpoint->config.rx.holb_drop)
+ ipa_endpoint_init_hol_block_enable(endpoint, 0);
+ else
+ ipa_endpoint_init_hol_block_disable(endpoint);
+ }
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
@@ -1834,7 +1872,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->channel_id = data->channel_id;
endpoint->endpoint_id = data->endpoint_id;
endpoint->toward_ipa = data->toward_ipa;
- endpoint->data = &data->endpoint.config;
+ endpoint->config = data->endpoint.config;
ipa->initialized |= BIT(endpoint->endpoint_id);
}
@@ -1884,6 +1922,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
if (data->endpoint.filter_support)
filter_map |= BIT(data->endpoint_id);
+ if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
+ ipa->modem_tx_count++;
}
if (!ipa_filter_map_valid(ipa, filter_map))
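aggr_time_limit_encoded() above now warns when a requested aggregation time limit cannot be represented in the register field after conversion to hardware granules. A standalone sketch of that encode-and-range-check step; the granularity and field width here are assumed example values, not the hardware's:

        #include <stdint.h>
        #include <stdio.h>

        #define EXAMPLE_GRAN_USEC 500u    /* assumed aggregation granularity */
        #define EXAMPLE_FIELD_MAX 0x1fu   /* assumed maximum field value */

        static uint32_t encode_time_limit(uint32_t limit_usec)
        {
                /* Round to the nearest whole granule */
                uint32_t val = (limit_usec + EXAMPLE_GRAN_USEC / 2) / EXAMPLE_GRAN_USEC;

                if (val > EXAMPLE_FIELD_MAX)
                        fprintf(stderr, "time limit too large (%u > %u usec)\n",
                                limit_usec, EXAMPLE_FIELD_MAX * EXAMPLE_GRAN_USEC);

                return val;
        }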
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 12fd5b16c18e..01790c60bee8 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -41,6 +41,87 @@ enum ipa_endpoint_name {
#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
/**
+ * struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
+ * @seq_type: primary packet processing sequencer type
+ * @seq_rep_type: sequencer type for replication processing
+ * @status_endpoint: endpoint to which status elements are sent
+ *
+ * The @status_endpoint is only valid if the endpoint's @status_enable
+ * flag is set.
+ */
+struct ipa_endpoint_tx {
+ enum ipa_seq_type seq_type;
+ enum ipa_seq_rep_type seq_rep_type;
+ enum ipa_endpoint_name status_endpoint;
+};
+
+/**
+ * struct ipa_endpoint_rx - Endpoint configuration for RX endpoints
+ * @buffer_size: requested receive buffer size (bytes)
+ * @pad_align: power-of-2 boundary to which packet payload is aligned
+ * @aggr_time_limit: time before aggregation closes (microseconds)
+ * @aggr_hard_limit: whether aggregation closes before or after the boundary
+ * @aggr_close_eof: whether aggregation closes on end-of-frame
+ * @holb_drop: whether to drop packets to avoid head-of-line blocking
+ *
+ * The actual size of the receive buffer is rounded up if necessary
+ * to be a power-of-2 number of pages.
+ *
+ * With each packet it transfers, the IPA hardware can perform certain
+ * transformations of its packet data. One of these is adding pad bytes
+ * to the end of the packet data so the result ends on a power-of-2 boundary.
+ *
+ * It is also able to aggregate multiple packets into a single receive buffer.
+ * Aggregation is "open" while a buffer is being filled, and "closes" when
+ * certain criteria are met.
+ *
+ * A time limit can be specified to close aggregation. Aggregation will be
+ * closed if this period passes after data is first written into a receive
+ * buffer. If not specified, no time limit is imposed.
+ *
+ * Insufficient space available in the receive buffer can close aggregation.
+ * The aggregation byte limit defines the point (in units of 1024 bytes) in
+ * the buffer where aggregation closes. With a "soft" aggregation limit,
+ * aggregation closes when a packet written to the buffer *crosses* that
+ * aggregation limit. With a "hard" aggregation limit, aggregation will
+ * close *before* writing a packet that would cross that boundary.
+ */
+struct ipa_endpoint_rx {
+ u32 buffer_size;
+ u32 pad_align;
+ u32 aggr_time_limit;
+ bool aggr_hard_limit;
+ bool aggr_close_eof;
+ bool holb_drop;
+};
+
+/**
+ * struct ipa_endpoint_config - IPA endpoint hardware configuration
+ * @resource_group: resource group to assign endpoint to
+ * @checksum: whether checksum offload is enabled
+ * @qmap: whether endpoint uses QMAP protocol
+ * @aggregation: whether endpoint supports aggregation
+ * @status_enable: whether endpoint uses status elements
+ * @dma_mode: whether endpoint operates in DMA mode
+ * @dma_endpoint: peer endpoint, if operating in DMA mode
+ * @tx: TX-specific endpoint information (see above)
+ * @rx: RX-specific endpoint information (see above)
+ */
+struct ipa_endpoint_config {
+ u32 resource_group;
+ bool checksum;
+ bool qmap;
+ bool aggregation;
+ bool status_enable;
+ bool dma_mode;
+ enum ipa_endpoint_name dma_endpoint;
+ union {
+ struct ipa_endpoint_tx tx;
+ struct ipa_endpoint_rx rx;
+ };
+};
+
+/**
* enum ipa_replenish_flag: RX buffer replenish flags
*
* @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled
@@ -60,7 +141,7 @@ enum ipa_replenish_flag {
* @channel_id: GSI channel used by the endpoint
* @endpoint_id: IPA endpoint number
* @toward_ipa: Endpoint direction (true = TX, false = RX)
- * @data: Endpoint configuration data
+ * @config: Default endpoint configuration
* @trans_tre_max: Maximum number of TRE descriptors per transaction
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
@@ -74,7 +155,7 @@ struct ipa_endpoint {
u32 channel_id;
u32 endpoint_id;
bool toward_ipa;
- const struct ipa_endpoint_config_data *data;
+ struct ipa_endpoint_config config;
u32 trans_tre_max;
u32 evt_ring_id;
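The new struct ipa_endpoint_rx documentation above distinguishes "soft" and "hard" aggregation byte limits: a soft limit may be crossed by one final packet, so the buffer must keep room for a full MTU plus overhead, while a hard limit is never crossed. A standalone sketch of the resulting size calculation; the MTU and overhead constants are assumed example values, not IPA_MTU or IPA_RX_BUFFER_OVERHEAD:

        #include <stdbool.h>
        #include <stdint.h>

        #define EXAMPLE_MTU          1500u   /* assumed */
        #define EXAMPLE_RX_OVERHEAD   320u   /* assumed */

        /* Aggregation byte limit, in kilobytes, for a usable buffer size */
        static uint32_t aggr_size_kb(uint32_t rx_buffer_size, bool hard_limit)
        {
                /* A soft limit closes aggregation only after being crossed,
                 * so leave room for one more full MTU plus overhead.
                 */
                if (!hard_limit)
                        rx_buffer_size -= EXAMPLE_MTU + EXAMPLE_RX_OVERHEAD;

                return rx_buffer_size / 1024;
        }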
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index b35170a93b0f..307bed2ee707 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -191,7 +191,8 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
struct ipa *ipa = interrupt->ipa;
u32 offset;
- WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+ if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
+ return;
interrupt->handler[ipa_irq] = handler;
@@ -208,7 +209,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
struct ipa *ipa = interrupt->ipa;
u32 offset;
- WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+ if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
+ return;
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
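The ipa_interrupt hunks above turn bare WARN_ON() calls into guards that also bail out; WARN_ON() evaluates to the condition it tested, so an out-of-range index can be reported and rejected in one step. A short sketch of that idiom (struct example and EXAMPLE_ID_COUNT are illustrative):

        static void example_add_handler(struct example *ex, unsigned int id,
                                        void (*handler)(struct example *))
        {
                if (WARN_ON(id >= EXAMPLE_ID_COUNT))
                        return;         /* complain once, then refuse */

                ex->handler[id] = handler;
        }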
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 27d87097433f..c8b1c4d9c507 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -9,6 +9,8 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_rmnet.h>
+#include <linux/etherdevice.h>
+#include <net/pkt_sched.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc/qcom_rproc.h>
@@ -127,7 +129,7 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
goto err_drop_skb;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
- if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
+ if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
/* The hardware must be powered for us to transmit */
@@ -203,15 +205,20 @@ static const struct net_device_ops ipa_modem_ops = {
static void ipa_modem_netdev_setup(struct net_device *netdev)
{
netdev->netdev_ops = &ipa_modem_ops;
- ether_setup(netdev);
- /* No header ops (override value set by ether_setup()) */
+
netdev->header_ops = NULL;
netdev->type = ARPHRD_RAWIP;
netdev->hard_header_len = 0;
+ netdev->min_header_len = ETH_HLEN;
+ netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IPA_MTU;
netdev->mtu = netdev->max_mtu;
netdev->addr_len = 0;
+ netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ netdev->priv_flags |= IFF_TX_SKB_SHARING;
+ eth_broadcast_addr(netdev->broadcast);
+
/* The endpoint is configured for QMAP */
netdev->needed_headroom = sizeof(struct rmnet_map_header);
netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 90f3aec55b36..ec010cf2e816 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -125,7 +125,7 @@ static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
*/
static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
{
- struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ struct ipa *ipa;
int ret;
/* We aren't ready until the modem and microcontroller are */
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 720394c0639b..14e8d04cb434 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -191,6 +191,8 @@ static void gen_lo_setup(struct net_device *dev,
dev->netdev_ops = dev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = dev_destructor;
+
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}
/* The loopback device is special. There is only one instance
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index b80ed2ffd45e..386336a38f34 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -171,7 +171,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
return ret;
}
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa.rx = true;
if (xs->props.family == AF_INET6)
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 5ce6da62cc8e..ee374a85544a 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -99,6 +99,15 @@
#define ADIN1300_GE_SOFT_RESET_REG 0xff0c
#define ADIN1300_GE_SOFT_RESET BIT(0)
+#define ADIN1300_GE_CLK_CFG_REG 0xff1f
+#define ADIN1300_GE_CLK_CFG_MASK GENMASK(5, 0)
+#define ADIN1300_GE_CLK_CFG_RCVR_125 BIT(5)
+#define ADIN1300_GE_CLK_CFG_FREE_125 BIT(4)
+#define ADIN1300_GE_CLK_CFG_REF_EN BIT(3)
+#define ADIN1300_GE_CLK_CFG_HRT_RCVR BIT(2)
+#define ADIN1300_GE_CLK_CFG_HRT_FREE BIT(1)
+#define ADIN1300_GE_CLK_CFG_25 BIT(0)
+
#define ADIN1300_GE_RGMII_CFG_REG 0xff23
#define ADIN1300_GE_RGMII_RX_MSK GENMASK(8, 6)
#define ADIN1300_GE_RGMII_RX_SEL(x) \
@@ -433,6 +442,33 @@ static int adin_set_tunable(struct phy_device *phydev,
}
}
+static int adin_config_clk_out(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const char *val = NULL;
+ u8 sel = 0;
+
+ device_property_read_string(dev, "adi,phy-output-clock", &val);
+ if (!val) {
+ /* property not present, do not enable GP_CLK pin */
+ } else if (strcmp(val, "25mhz-reference") == 0) {
+ sel |= ADIN1300_GE_CLK_CFG_25;
+ } else if (strcmp(val, "125mhz-free-running") == 0) {
+ sel |= ADIN1300_GE_CLK_CFG_FREE_125;
+ } else if (strcmp(val, "adaptive-free-running") == 0) {
+ sel |= ADIN1300_GE_CLK_CFG_HRT_FREE;
+ } else {
+ phydev_err(phydev, "invalid adi,phy-output-clock\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_bool(dev, "adi,phy-output-reference-clock"))
+ sel |= ADIN1300_GE_CLK_CFG_REF_EN;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_CLK_CFG_REG,
+ ADIN1300_GE_CLK_CFG_MASK, sel);
+}
+
static int adin_config_init(struct phy_device *phydev)
{
int rc;
@@ -455,6 +491,10 @@ static int adin_config_init(struct phy_device *phydev)
if (rc < 0)
return rc;
+ rc = adin_config_clk_out(phydev);
+ if (rc < 0)
+ return rc;
+
phydev_dbg(phydev, "PHY is using mode '%s'\n",
phy_modes(phydev->interface));
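For illustration only (not part of the patch): the adi,phy-output-clock string handling above maps a property value onto a clock-config bit; the same selection can be sketched as a table lookup in plain, standalone C with made-up names.

#include <stdio.h>
#include <string.h>

#define CLK_CFG_FREE_125  (1u << 4)
#define CLK_CFG_HRT_FREE  (1u << 1)
#define CLK_CFG_25        (1u << 0)

static const struct { const char *name; unsigned int bit; } clk_modes[] = {
	{ "25mhz-reference",       CLK_CFG_25 },
	{ "125mhz-free-running",   CLK_CFG_FREE_125 },
	{ "adaptive-free-running", CLK_CFG_HRT_FREE },
};

/* Returns the selection bit, 0 if the property is absent, -1 if unknown. */
static int clk_mode_to_sel(const char *val)
{
	size_t i;

	if (!val)
		return 0;
	for (i = 0; i < sizeof(clk_modes) / sizeof(clk_modes[0]); i++)
		if (strcmp(val, clk_modes[i].name) == 0)
			return (int)clk_modes[i].bit;
	return -1;
}

int main(void)
{
	printf("%d\n", clk_mode_to_sel("125mhz-free-running")); /* prints 16 */
	return 0;
}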
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index ce17b2af3218..e6ad3a494d32 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -94,7 +94,8 @@
#define DP83822_WOL_INDICATION_SEL BIT(8)
#define DP83822_WOL_CLR_INDICATION BIT(11)
-/* RSCR bits */
+/* RCSR bits */
+#define DP83822_RGMII_MODE_EN BIT(9)
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
@@ -408,6 +409,12 @@ static int dp83822_config_init(struct phy_device *phydev)
if (err)
return err;
}
+
+ phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ } else {
+ phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
}
if (dp83822->fx_enabled) {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 47e83c1e9051..d777c8851ed6 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1191,7 +1191,44 @@ static int m88e1318_config_init(struct phy_device *phydev)
static int m88e1510_config_init(struct phy_device *phydev)
{
+ static const struct {
+ u16 reg17, reg16;
+ } errata_vals[] = {
+ { 0x214b, 0x2144 },
+ { 0x0c28, 0x2146 },
+ { 0xb233, 0x214d },
+ { 0xcc0c, 0x2159 },
+ };
int err;
+ int i;
+
+ /* As per Marvell Release Notes - Alaska 88E1510/88E1518/88E1512/
+ * 88E1514 Rev A0, Errata Section 5.1:
+ * If EEE is intended to be used, the following register writes
+ * must be done once after every hardware reset.
+ */
+ err = marvell_set_page(phydev, 0x00FF);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(errata_vals); ++i) {
+ err = phy_write(phydev, 17, errata_vals[i].reg17);
+ if (err)
+ return err;
+ err = phy_write(phydev, 16, errata_vals[i].reg16);
+ if (err)
+ return err;
+ }
+
+ err = marvell_set_page(phydev, 0x00FB);
+ if (err < 0)
+ return err;
+ err = phy_write(phydev, 07, 0xC00D);
+ if (err < 0)
+ return err;
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ if (err < 0)
+ return err;
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
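As a hedged aside, the m88e1510 errata sequence above drives repeated register-pair writes from a small table; a minimal standalone sketch of that table-driven pattern, with a hypothetical register writer standing in for phy_write():

#include <stdio.h>

struct reg_pair { unsigned short reg17, reg16; };

static const struct reg_pair errata_vals[] = {
	{ 0x214b, 0x2144 },
	{ 0x0c28, 0x2146 },
	{ 0xb233, 0x214d },
	{ 0xcc0c, 0x2159 },
};

/* Stand-in for a real PHY register write; always succeeds here. */
static int fake_phy_write(int reg, unsigned short val)
{
	printf("write reg %d = 0x%04x\n", reg, (unsigned int)val);
	return 0;
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(errata_vals) / sizeof(errata_vals[0]); i++) {
		if (fake_phy_write(17, errata_vals[i].reg17))
			return 1;
		if (fake_phy_write(16, errata_vals[i].reg16))
			return 1;
	}
	return 0;
}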
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c34a93403d1e..22139901f01c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -520,7 +520,7 @@ static int kszphy_config_reset(struct phy_device *phydev)
}
}
- if (priv->led_mode >= 0)
+ if (priv->type && priv->led_mode >= 0)
kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
return 0;
@@ -536,10 +536,10 @@ static int kszphy_config_init(struct phy_device *phydev)
type = priv->type;
- if (type->has_broadcast_disable)
+ if (type && type->has_broadcast_disable)
kszphy_broadcast_disable(phydev);
- if (type->has_nand_tree_disable)
+ if (type && type->has_nand_tree_disable)
kszphy_nand_tree_disable(phydev);
return kszphy_config_reset(phydev);
@@ -1730,7 +1730,7 @@ static int kszphy_probe(struct phy_device *phydev)
priv->type = type;
- if (type->led_mode_reg) {
+ if (type && type->led_mode_reg) {
ret = of_property_read_u32(np, "micrel,led-mode",
&priv->led_mode);
if (ret)
@@ -1751,7 +1751,8 @@ static int kszphy_probe(struct phy_device *phydev)
unsigned long rate = clk_get_rate(clk);
bool rmii_ref_clk_sel_25_mhz;
- priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
+ if (type)
+ priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
rmii_ref_clk_sel_25_mhz = of_property_read_bool(np,
"micrel,rmii-reference-clock-select-25-mhz");
@@ -3018,11 +3019,12 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
/* PHY_BASIC_FEATURES */
+ .probe = kszphy_probe,
.config_init = ksz8061_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
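The micrel hunks above consistently test priv->type for NULL before dereferencing it; a tiny standalone sketch of that optional-descriptor guard, with invented names and plain C in place of the driver structures:

#include <stdio.h>
#include <stddef.h>

struct chip_type { int led_mode_reg; int has_broadcast_disable; };
struct chip_priv { const struct chip_type *type; int led_mode; };

static void configure(const struct chip_priv *priv)
{
	/* Only touch type-specific knobs when a type descriptor exists. */
	if (priv->type && priv->type->has_broadcast_disable)
		printf("disable broadcast\n");
	if (priv->type && priv->led_mode >= 0)
		printf("led mode reg %d -> %d\n",
		       priv->type->led_mode_reg, priv->led_mode);
}

int main(void)
{
	struct chip_type t = { 30, 1 };
	struct chip_priv no_type = { NULL, 1 };
	struct chip_priv with_type = { &t, 1 };

	configure(&no_type);   /* prints nothing */
	configure(&with_type); /* prints both lines */
	return 0;
}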
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1b41cd9732d7..ce2cbb5903d7 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
path->encap.proto = htons(ETH_P_PPP_SES);
path->encap.id = be16_to_cpu(po->num);
memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
+ memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
path->dev = ctx->dev;
ctx->dev = dev;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c2da3438387c..7389d6ef8569 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -771,7 +771,9 @@ enum rtl8152_flags {
};
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
+#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
+#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062
struct tally_counter {
__le64 tx_packets;
@@ -9562,6 +9564,29 @@ u8 rtl8152_get_version(struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(rtl8152_get_version);
+static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
+{
+ int parent_vendor_id = le16_to_cpu(udev->parent->descriptor.idVendor);
+ int product_id = le16_to_cpu(udev->descriptor.idProduct);
+ int vendor_id = le16_to_cpu(udev->descriptor.idVendor);
+
+ if (vendor_id == VENDOR_ID_LENOVO) {
+ switch (product_id) {
+ case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+ case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+ case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
+ case DEVICE_ID_THINKPAD_USB_C_DONGLE:
+ return 1;
+ }
+ } else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) {
+ switch (product_id) {
+ case 0x8153:
+ return 1;
+ }
+ }
+ return 0;
+}
+
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -9642,13 +9667,7 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
- if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
- switch (le16_to_cpu(udev->descriptor.idProduct)) {
- case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
- case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
- tp->lenovo_macpassthru = 1;
- }
- }
+ tp->lenovo_macpassthru = rtl8152_supports_lenovo_macpassthru(udev);
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
(!strcmp(udev->serial, "000001000000") ||
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f474e79a7745..466da01ba2e3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1647,6 +1647,7 @@ static void veth_setup(struct net_device *dev)
dev->hw_features = VETH_FEATURES;
dev->hw_enc_features = VETH_FEATURES;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}
/*
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d9d90baac72a..93e8d119d45f 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
+ rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
+ rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
@@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
+ /* ring has already been cleaned up */
+ if (!rq->rx_ring[0].base)
+ return;
+
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
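The vmxnet3 hunks clear rbi->skb / rbi->page right after freeing them on the DMA-mapping error path, so the later ring cleanup cannot release the same buffer twice; a minimal userspace sketch of the clear-after-free idea (illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct rx_buf { void *data; };

static void cleanup(struct rx_buf *b)
{
	/* Safe even if the error path already released the buffer. */
	free(b->data);
	b->data = NULL;
}

int main(void)
{
	struct rx_buf b = { malloc(64) };

	/* Error path: release and clear the pointer immediately. */
	free(b.data);
	b.data = NULL;

	cleanup(&b); /* no double free: free(NULL) is a no-op */
	return 0;
}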
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 293082c32a78..265d4a0245e7 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1135,12 +1135,11 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
struct net *net = dev_net(vxlan->dev);
int err;
- if (tb[NDA_NH_ID] && (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] ||
- tb[NDA_PORT])) {
- NL_SET_ERR_MSG(extack,
- "DST, VNI, ifindex and port are mutually exclusive with NH_ID");
- return -EINVAL;
- }
+ if (tb[NDA_NH_ID] &&
+ (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] || tb[NDA_PORT])) {
+ NL_SET_ERR_MSG(extack, "DST, VNI, ifindex and port are mutually exclusive with NH_ID");
+ return -EINVAL;
+ }
if (tb[NDA_DST]) {
err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
@@ -1297,7 +1296,7 @@ out:
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
union vxlan_addr ip;
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index abf3e5c87ca7..a61cf6c90343 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_WLAN_VENDOR_PURELIFI) += purelifi/
+obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/
obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/
obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/
@@ -21,7 +22,6 @@ obj-$(CONFIG_WLAN_VENDOR_SILABS) += silabs/
obj-$(CONFIG_WLAN_VENDOR_ST) += st/
obj-$(CONFIG_WLAN_VENDOR_TI) += ti/
obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/
-obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 2092bfd02cd1..688177453b07 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1233,6 +1233,7 @@ success:
static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
{
const struct firmware *fw;
+ char boardname[100];
if (bd_ie_type == ATH10K_BD_IE_BOARD) {
if (!ar->hw_params.fw.board) {
@@ -1240,9 +1241,19 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
return -EINVAL;
}
+ scnprintf(boardname, sizeof(boardname), "board-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
+ boardname);
+ if (IS_ERR(ar->normal_mode_fw.board)) {
+ fw = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ ar->normal_mode_fw.board = fw;
+ }
+
if (IS_ERR(ar->normal_mode_fw.board))
return PTR_ERR(ar->normal_mode_fw.board);
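The ath10k change above first requests a bus- and device-specific board file name and only falls back to the generic name when that lookup fails; a small standalone sketch of the try-specific-then-fallback pattern, with a pretend file loader and invented names:

#include <stdio.h>
#include <string.h>

/* Pretend loader: only the generic "board.bin" exists in this example. */
static int request_file(const char *name)
{
	return strcmp(name, "board.bin") == 0 ? 0 : -1;
}

int main(void)
{
	char specific[100];

	snprintf(specific, sizeof(specific), "board-%s-%s.bin",
		 "pci", "0000:01:00.0");

	if (request_file(specific) == 0)
		printf("loaded %s\n", specific);
	else if (request_file("board.bin") == 0)
		printf("loaded fallback board.bin\n");
	else
		printf("no board file found\n");
	return 0;
}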
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 06a51a48c1d9..3570a5895ea8 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2692,8 +2692,10 @@ static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
struct ieee80211_sta *sta)
{
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+
if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
@@ -6926,6 +6928,9 @@ static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
struct ieee80211_sta *sta,
u32 rate_ctrl_flag, u8 nss)
{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+
if (nss > sta->deflink.rx_nss) {
ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n",
nss, sta->deflink.rx_nss);
@@ -6933,19 +6938,19 @@ static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
}
if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) {
- if (!sta->deflink.vht_cap.vht_supported) {
+ if (!vht_cap->vht_supported) {
ath10k_warn(ar, "Invalid VHT rate for sta %pM\n",
sta->addr);
return -EINVAL;
}
} else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) {
- if (!sta->deflink.ht_cap.ht_supported || sta->deflink.vht_cap.vht_supported) {
+ if (!ht_cap->ht_supported || vht_cap->vht_supported) {
ath10k_warn(ar, "Invalid HT rate for sta %pM\n",
sta->addr);
return -EINVAL;
}
} else {
- if (sta->deflink.ht_cap.ht_supported || sta->deflink.vht_cap.vht_supported)
+ if (ht_cap->ht_supported || vht_cap->vht_supported)
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 050bda828966..fa11807f48a9 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -9,6 +9,8 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"
@@ -757,6 +759,172 @@ static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
return 0;
}
+static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *dev = ab->dev;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node)
+ return -ENOENT;
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(dev, "failed to resolve msa fixed region\n");
+ return ret;
+ }
+
+ ab_ahb->fw.msa_paddr = r.start;
+ ab_ahb->fw.msa_size = resource_size(&r);
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 1);
+ if (!node)
+ return -ENOENT;
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(dev, "failed to resolve ce fixed region\n");
+ return ret;
+ }
+
+ ab_ahb->fw.ce_paddr = r.start;
+ ab_ahb->fw.ce_size = resource_size(&r);
+
+ return 0;
+}
+
+static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *host_dev = ab->dev;
+ struct platform_device_info info = {0};
+ struct iommu_domain *iommu_dom;
+ struct platform_device *pdev;
+ struct device_node *node;
+ int ret;
+
+ /* Chipsets not requiring MSA need not initialize

+ * MSA resources; return success in such cases.
+ */
+ if (!ab->hw_params.fixed_fw_mem)
+ return 0;
+
+ ret = ath11k_ahb_setup_msa_resources(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to setup msa resources\n");
+ return ret;
+ }
+
+ node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
+ if (!node) {
+ ab_ahb->fw.use_tz = true;
+ return 0;
+ }
+
+ info.fwnode = &node->fwnode;
+ info.parent = host_dev;
+ info.name = node->name;
+ info.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&info);
+ if (IS_ERR(pdev)) {
+ of_node_put(node);
+ return PTR_ERR(pdev);
+ }
+
+ ret = of_dma_configure(&pdev->dev, node, true);
+ if (ret) {
+ ath11k_err(ab, "dma configure fail: %d\n", ret);
+ goto err_unregister;
+ }
+
+ ab_ahb->fw.dev = &pdev->dev;
+
+ iommu_dom = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu_dom) {
+ ath11k_err(ab, "failed to allocate iommu domain\n");
+ ret = -ENOMEM;
+ goto err_unregister;
+ }
+
+ ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
+ if (ret) {
+ ath11k_err(ab, "could not attach device: %d\n", ret);
+ goto err_iommu_free;
+ }
+
+ ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
+ ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ ath11k_err(ab, "failed to map firmware region: %d\n", ret);
+ goto err_iommu_detach;
+ }
+
+ ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
+ ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret) {
+ ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
+ goto err_iommu_unmap;
+ }
+
+ ab_ahb->fw.use_tz = false;
+ ab_ahb->fw.iommu_domain = iommu_dom;
+ of_node_put(node);
+
+ return 0;
+
+err_iommu_unmap:
+ iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
+
+err_iommu_detach:
+ iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
+
+err_iommu_free:
+ iommu_domain_free(iommu_dom);
+
+err_unregister:
+ platform_device_unregister(pdev);
+ of_node_put(node);
+
+ return ret;
+}
+
+static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct iommu_domain *iommu;
+ size_t unmapped_size;
+
+ if (ab_ahb->fw.use_tz)
+ return 0;
+
+ iommu = ab_ahb->fw.iommu_domain;
+
+ unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
+ if (unmapped_size != ab_ahb->fw.msa_size)
+ ath11k_err(ab, "failed to unmap firmware: %zu\n",
+ unmapped_size);
+
+ unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
+ if (unmapped_size != ab_ahb->fw.ce_size)
+ ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
+ unmapped_size);
+
+ iommu_detach_device(iommu, ab_ahb->fw.dev);
+ iommu_domain_free(iommu);
+
+ platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
+
+ return 0;
+}
+
static int ath11k_ahb_probe(struct platform_device *pdev)
{
struct ath11k_base *ab;
@@ -816,10 +984,14 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
if (ret)
goto err_core_free;
- ret = ath11k_hal_srng_init(ab);
+ ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
goto err_core_free;
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_fw_deinit;
+
ret = ath11k_ce_alloc_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
@@ -856,6 +1028,9 @@ err_ce_free:
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
+err_fw_deinit:
+ ath11k_ahb_fw_resource_deinit(ab);
+
err_core_free:
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
@@ -891,6 +1066,7 @@ static int ath11k_ahb_remove(struct platform_device *pdev)
qmi_fail:
ath11k_ahb_free_irq(ab);
ath11k_hal_srng_deinit(ab);
+ ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
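ath11k_ahb_fw_resources_init() above unwinds partially acquired resources through goto labels in reverse acquisition order; a generic standalone sketch of that error-unwind style, with invented resources in place of the IOMMU domain and platform device:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_free_a;
	c = malloc(16);
	if (!c)
		goto err_free_b;

	printf("all resources acquired\n");
	/* Demo only: a real init keeps these and releases them in a
	 * matching deinit, as the driver does in fw_resource_deinit().
	 */
	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}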
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
index 51e6e4a5f686..58a945411c5b 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.h
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -12,6 +12,15 @@ struct ath11k_base;
struct ath11k_ahb {
struct rproc *tgt_rproc;
+ struct {
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ dma_addr_t msa_paddr;
+ u32 msa_size;
+ dma_addr_t ce_paddr;
+ u32 ce_size;
+ bool use_tz;
+ } fw;
};
static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 01e1d494b527..1e98ff9ff288 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -110,6 +110,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.dp_window_idx = 0,
.ce_window_idx = 0,
.fixed_fw_mem = false,
+ .support_off_channel_tx = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -185,6 +186,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.dp_window_idx = 0,
.ce_window_idx = 0,
.fixed_fw_mem = false,
+ .support_off_channel_tx = false,
},
{
.name = "qca6390 hw2.0",
@@ -259,6 +261,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.dp_window_idx = 0,
.ce_window_idx = 0,
.fixed_fw_mem = false,
+ .support_off_channel_tx = true,
},
{
.name = "qcn9074 hw1.0",
@@ -333,6 +336,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.dp_window_idx = 3,
.ce_window_idx = 2,
.fixed_fw_mem = false,
+ .support_off_channel_tx = false,
},
{
.name = "wcn6855 hw2.0",
@@ -407,6 +411,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.dp_window_idx = 0,
.ce_window_idx = 0,
.fixed_fw_mem = false,
+ .support_off_channel_tx = true,
},
{
.name = "wcn6855 hw2.1",
@@ -480,6 +485,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.dp_window_idx = 0,
.ce_window_idx = 0,
.fixed_fw_mem = false,
+ .support_off_channel_tx = true,
},
{
.name = "wcn6750 hw1.0",
@@ -553,6 +559,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.dp_window_idx = 1,
.ce_window_idx = 2,
.fixed_fw_mem = true,
+ .support_off_channel_tx = false,
},
};
@@ -1620,9 +1627,11 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
ieee80211_stop_queues(ar->hw);
ath11k_mac_drain_tx(ar);
+ ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
@@ -1768,7 +1777,6 @@ static void ath11k_core_reset(struct work_struct *work)
ATH11K_RECOVER_START_TIMEOUT_HZ);
ath11k_hif_power_down(ab);
- ath11k_qmi_free_resource(ab);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 00a45819907e..c17a2620aad7 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -520,6 +520,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct hal_tx_status *ts)
{
struct ieee80211_tx_status status = { 0 };
+ struct ieee80211_rate_status status_rate = { 0 };
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
@@ -603,7 +604,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
status.skb = msdu;
status.info = info;
rate = arsta->last_txrate;
- status.rate = &rate;
+
+ status_rate.rate_idx = rate;
+ status_rate.try_count = 1;
+
+ status.rates = &status_rate;
+ status.n_rates = 1;
spin_unlock_bh(&ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 1aadb1566df8..110c337ddf33 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -121,7 +121,7 @@ struct ath11k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
-#define HAL_REO1_MISC_CTL 0x00000630
+#define HAL_REO1_MISC_CTL(ab) ab->hw_params.regs->hal_reo1_misc_ctl
#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 09ce357f0f0d..96db85c55585 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -771,10 +771,10 @@ static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
- val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL);
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL(ab));
val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL, val);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL(ab), val);
ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
@@ -1983,6 +1983,9 @@ const struct ath11k_hw_regs ipq8074_regs = {
/* Shadow register area */
.hal_shadow_base_addr = 0x0,
+
+ /* REO misc control register, not used in IPQ8074 */
+ .hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs qca6390_regs = {
@@ -2065,6 +2068,9 @@ const struct ath11k_hw_regs qca6390_regs = {
/* Shadow register area */
.hal_shadow_base_addr = 0x000008fc,
+
+ /* REO misc control register, not used in QCA6390 */
+ .hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs qcn9074_regs = {
@@ -2147,6 +2153,9 @@ const struct ath11k_hw_regs qcn9074_regs = {
/* Shadow register area */
.hal_shadow_base_addr = 0x0,
+
+ /* REO misc control register, not used in QCN9074 */
+ .hal_reo1_misc_ctl = 0x0,
};
const struct ath11k_hw_regs wcn6855_regs = {
@@ -2229,6 +2238,11 @@ const struct ath11k_hw_regs wcn6855_regs = {
/* Shadow register area */
.hal_shadow_base_addr = 0x000008fc,
+
+ /* REO misc control register, used for fragment
+ * destination ring config in WCN6855.
+ */
+ .hal_reo1_misc_ctl = 0x00000630,
};
const struct ath11k_hw_regs wcn6750_regs = {
@@ -2311,6 +2325,11 @@ const struct ath11k_hw_regs wcn6750_regs = {
/* Shadow register area */
.hal_shadow_base_addr = 0x00000504,
+
+ /* REO misc control register, used for fragment
+ * destination ring config in WCN6750.
+ */
+ .hal_reo1_misc_ctl = 0x000005d8,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 6d588cd80093..77dc5c851c9b 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -204,6 +204,7 @@ struct ath11k_hw_params {
u8 dp_window_idx;
u8 ce_window_idx;
bool fixed_fw_mem;
+ bool support_off_channel_tx;
};
struct ath11k_hw_ops {
@@ -379,6 +380,7 @@ struct ath11k_hw_regs {
u32 pcie_pcs_osc_dtct_config_base;
u32 hal_shadow_base_addr;
+ u32 hal_reo1_misc_ctl;
};
extern const struct ath11k_hw_regs ipq8074_regs;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 1957e1713548..ee1590b16eff 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1951,7 +1951,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
- for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ for (i = 0, max_nss = 0; i < NL80211_VHT_NSS_MAX; i++) {
vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
(2 * i) & 3;
@@ -2272,7 +2272,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
/* Calculate peer NSS capability from HE capabilities if STA
* supports HE.
*/
- for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) {
+ for (i = 0, max_nss = 0; i < NL80211_HE_NSS_MAX; i++) {
he_mcs = he_tx_mcs >> (2 * i) & 3;
/* In case of fixed rates, MCS Range in he_tx_mcs might have
@@ -5551,8 +5551,8 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
arvif = ath11k_vif_to_arvif(skb_cb->vif);
- if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
- arvif->is_started) {
+ mutex_lock(&ar->conf_mutex);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -5570,6 +5570,7 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
arvif->is_started);
ath11k_mgmt_over_wmi_tx_drop(ar, skb);
}
+ mutex_unlock(&ar->conf_mutex);
}
}
@@ -6155,6 +6156,11 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar)
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d vdev id %d\n",
ar->vdev_id_11d_scan);
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) {
vdev_id = ar->vdev_id_11d_scan;
@@ -7741,6 +7747,7 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b
bool he_fixed_rate = false, vht_fixed_rate = false;
struct ath11k_peer *peer, *tmp;
const u16 *vht_mcs_mask, *he_mcs_mask;
+ struct ieee80211_link_sta *deflink;
u8 vht_nss, he_nss;
bool ret = true;
@@ -7763,13 +7770,16 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b
spin_lock_bh(&ar->ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
if (peer->sta) {
- if (vht_fixed_rate && (!peer->sta->deflink.vht_cap.vht_supported ||
- peer->sta->deflink.rx_nss < vht_nss)) {
+ deflink = &peer->sta->deflink;
+
+ if (vht_fixed_rate && (!deflink->vht_cap.vht_supported ||
+ deflink->rx_nss < vht_nss)) {
ret = false;
goto out;
}
- if (he_fixed_rate && (!peer->sta->deflink.he_cap.has_he ||
- peer->sta->deflink.rx_nss < he_nss)) {
+
+ if (he_fixed_rate && (!deflink->he_cap.has_he ||
+ deflink->rx_nss < he_nss)) {
ret = false;
goto out;
}
@@ -8345,6 +8355,118 @@ exit:
return ret;
}
+static int ath11k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct scan_req_params arg;
+ int ret;
+ u32 scan_time_msec;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
+
+ memset(&arg, 0, sizeof(arg));
+ ath11k_wmi_start_scan_init(ar, &arg);
+ arg.num_chan = 1;
+ arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+ GFP_KERNEL);
+ if (!arg.chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH11K_SCAN_ID;
+ arg.chan_list[0] = chan->center_freq;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg.burst_duration = duration;
+
+ ret = ath11k_start_scan(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto free_chan_list;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+ ret = -ETIMEDOUT;
+ goto free_chan_list;
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+
+free_chan_list:
+ kfree(arg.chan_list);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.start = ath11k_mac_op_start,
@@ -8397,6 +8519,8 @@ static const struct ieee80211_ops ath11k_ops = {
#endif
.set_sar_specs = ath11k_mac_op_set_bios_sar_specs,
+ .remain_on_channel = ath11k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath11k_mac_op_cancel_remain_on_channel,
};
static void ath11k_mac_update_ch_list(struct ath11k *ar,
@@ -8986,6 +9110,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
init_completion(&ar->bss_survey_done);
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
init_completion(&ar->thermal.wmi_sync);
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
@@ -9026,3 +9151,34 @@ void ath11k_mac_destroy(struct ath11k_base *ab)
pdev->ar = NULL;
}
}
+
+int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath11k *ar = arvif->ar;
+ struct wmi_sta_keepalive_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = method;
+ arg.interval = interval;
+
+ ret = ath11k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 7f93e3a9ca23..57ebfc592b00 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -8,6 +8,7 @@
#include <net/mac80211.h>
#include <net/cfg80211.h>
+#include "wmi.h"
struct ath11k;
struct ath11k_base;
@@ -173,4 +174,7 @@ void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
int ath11k_mac_wait_tx_complete(struct ath11k *ar);
+int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index d1e945074bc1..61ead37a944a 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1970,6 +1970,21 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
+
+ /* Firmware reloads in coldboot/firmware recovery.
+ * In such cases, no need to allocate memory for FW again.
+ */
+ if (chunk->vaddr) {
+ if (chunk->prev_type == chunk->type ||
+ chunk->prev_size == chunk->size)
+ continue;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->size,
+ chunk->vaddr, chunk->paddr);
+ chunk->vaddr = NULL;
+ }
+
chunk->vaddr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
@@ -1990,6 +2005,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->type);
return -EINVAL;
}
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
}
return 0;
@@ -2466,9 +2483,6 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
char path[100];
int ret;
- if (m3_mem->vaddr || m3_mem->size)
- return 0;
-
fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE);
if (IS_ERR(fw)) {
ret = PTR_ERR(fw);
@@ -2478,6 +2492,9 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
return ret;
}
+ if (m3_mem->vaddr || m3_mem->size)
+ goto skip_m3_alloc;
+
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
fw->size, &m3_mem->paddr,
GFP_KERNEL);
@@ -2488,6 +2505,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
return -ENOMEM;
}
+skip_m3_alloc:
memcpy(m3_mem->vaddr, fw->data, fw->size);
m3_mem->size = fw->size;
release_firmware(fw);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index c24e6995cca3..c83cf822be81 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -97,6 +97,8 @@ struct ath11k_qmi_event_msg {
struct target_mem_chunk {
u32 size;
u32 type;
+ u32 prev_size;
+ u32 prev_type;
dma_addr_t paddr;
u32 *vaddr;
void __iomem *iaddr;
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 79ac2142317a..7ee3ff69dfc8 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -139,6 +139,9 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
"reg hw scan wait left time %d\n", left);
}
+ if (ar->state == ATH11K_STATE_RESTARTING)
+ return 0;
+
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 1410114d1d5c..84d1c7054013 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -625,10 +625,25 @@ struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
return skb;
}
+static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params.support_off_channel_tx &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_mgmt_send_cmd *cmd;
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
@@ -649,7 +664,7 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->desc_id = buf_id;
- cmd->chanfreq = 0;
+ cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
cmd->frame_len = frame->len;
@@ -5264,6 +5279,8 @@ static void ath11k_wmi_event_scan_started(struct ath11k *ar)
break;
case ATH11K_SCAN_STARTING:
ar->scan.state = ATH11K_SCAN_RUNNING;
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
complete(&ar->scan.started);
break;
}
@@ -5346,6 +5363,8 @@ static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
break;
}
}
@@ -8959,3 +8978,44 @@ int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
}
+
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->enabled = arg->enabled;
+ cmd->interval = arg->interval;
+ cmd->method = arg->method;
+
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALVE_ARP_RESPONSE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 7600e9a52da8..b1fad4707dc6 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -5907,6 +5907,50 @@ struct wmi_pdev_set_geo_table_cmd {
u32 rsvd_len;
} __packed;
+struct wmi_sta_keepalive_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enabled;
+
+ /* WMI_STA_KEEPALIVE_METHOD_ */
+ u32 method;
+
+ /* in seconds */
+ u32 interval;
+
+ /* following this structure is the TLV for struct
+ * wmi_sta_keepalive_arp_resp
+ */
+} __packed;
+
+struct wmi_sta_keepalive_arp_resp {
+ u32 tlv_header;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+ WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+ WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+ WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
@@ -6087,5 +6131,7 @@ int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
struct ath11k_vif *arvif);
int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val);
int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index 9d088cebef03..b3e65cd13d83 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -640,6 +640,24 @@ static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
return 0;
}
+static int ath11k_wow_set_keepalive(struct ath11k *ar,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
@@ -691,6 +709,14 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
goto cleanup;
}
+ ret = ath11k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+ goto cleanup;
+ }
+
ret = ath11k_wow_enable(ar->ab);
if (ret) {
ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
@@ -786,6 +812,14 @@ int ath11k_wow_op_resume(struct ieee80211_hw *hw)
goto exit;
}
+ ret = ath11k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
+ goto exit;
+ }
+
exit:
if (ret) {
switch (ar->state) {
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index dc2b3b46781e..01cc0d50fee6 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -36,6 +36,11 @@ ath6kl_core-y += wmi.o
ath6kl_core-y += core.o
ath6kl_core-y += recovery.o
+# FIXME: temporarily silence -Wdangling-pointer on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_htc_mbox.o += -Wno-dangling-pointer
+endif
+
ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index eff94bcd1f0a..9bdfcee2f448 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -45,6 +45,11 @@ ath9k_hw-y:= \
ar9003_eeprom.o \
ar9003_paprd.o
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_mac.o += -Wno-array-bounds
+endif
+
ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
diff --git a/drivers/net/wireless/ath/carl9170/Makefile b/drivers/net/wireless/ath/carl9170/Makefile
index 1a81868ce26d..7463baa62fa8 100644
--- a/drivers/net/wireless/ath/carl9170/Makefile
+++ b/drivers/net/wireless/ath/carl9170/Makefile
@@ -3,3 +3,8 @@ carl9170-objs := main.o usb.o cmd.o mac.o phy.o led.o fw.o tx.o rx.o
carl9170-$(CONFIG_CARL9170_DEBUGFS) += debug.o
obj-$(CONFIG_CARL9170) += carl9170.o
+
+# FIXME: temporarily silence -Warray-bounds on non W=1+ builds
+ifndef KBUILD_EXTRA_WARN
+CFLAGS_cmd.o += -Wno-array-bounds
+endif
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 4c944e595978..64d6c98174c8 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1391,19 +1391,6 @@ static int temp_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(temp);
-/*---------freq------------*/
-static int freq_show(struct seq_file *s, void *data)
-{
- struct wil6210_priv *wil = s->private;
- struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
- u32 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0;
-
- seq_printf(s, "Freq = %d\n", freq);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(freq);
-
/*---------link------------*/
static int link_show(struct seq_file *s, void *data)
{
@@ -2380,7 +2367,6 @@ static const struct {
{"pmcdata", 0444, &fops_pmcdata},
{"pmcring", 0444, &fops_pmcring},
{"temp", 0444, &temp_fops},
- {"freq", 0444, &freq_fops},
{"link", 0444, &link_fops},
{"info", 0444, &info_fops},
{"recovery", 0644, &fops_recovery},
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 390648066382..87a88f26233e 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -458,16 +458,14 @@ int wil_if_add(struct wil6210_priv *wil)
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx_edma,
NAPI_POLL_WEIGHT);
- netif_tx_napi_add(&wil->napi_ndev,
- &wil->napi_tx, wil6210_netdev_poll_tx_edma,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx_edma);
} else {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx,
NAPI_POLL_WEIGHT);
- netif_tx_napi_add(&wil->napi_ndev,
- &wil->napi_tx, wil6210_netdev_poll_tx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&wil->napi_ndev,
+ &wil->napi_tx, wil6210_netdev_poll_tx);
}
wil_update_net_queues_bh(wil, vif, NULL, true);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 360b103fe898..605206abe424 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2167,7 +2167,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
offsetof(struct brcmf_assoc_params_le, chanspec_list);
if (cfg->channel)
join_params_size += sizeof(u16);
- ext_join_params = kzalloc(join_params_size, GFP_KERNEL);
+ ext_join_params = kzalloc(sizeof(*ext_join_params), GFP_KERNEL);
if (ext_join_params == NULL) {
err = -ENOMEM;
goto done;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index e3758bd86acf..fe01da9e620d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -202,13 +202,24 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
char *ptr;
s32 err;
- /* retreive mac address */
- err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
- sizeof(ifp->mac_addr));
- if (err < 0) {
- bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
- goto done;
+ if (is_valid_ether_addr(ifp->mac_addr)) {
+ /* set mac address */
+ err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
+ ETH_ALEN);
+ if (err < 0) {
+ bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err);
+ goto done;
+ }
+ } else {
+ /* retrieve mac address */
+ err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
+ sizeof(ifp->mac_addr));
+ if (err < 0) {
+ bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
+ goto done;
+ }
}
+
memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 8b5f49997c8b..15accc88d5c0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -50,6 +50,7 @@ struct brcmf_mp_device {
bool ignore_probe_fail;
struct brcmfmac_pd_cc *country_codes;
const char *board_type;
+ unsigned char mac[ETH_ALEN];
union {
struct brcmfmac_sdio_pd sdio;
} bus;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 26fab4bee22c..87aef211b35f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -7,6 +7,7 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/inetdevice.h>
+#include <linux/property.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <net/addrconf.h>
@@ -1197,7 +1198,8 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
brcmf_dbg(TRACE, "\n");
/* add primary networking interface */
- ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", NULL);
+ ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d",
+ is_valid_ether_addr(drvr->settings->mac) ? drvr->settings->mac : NULL);
if (IS_ERR(ifp))
return PTR_ERR(ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 8623bde5eb70..083ac58f466d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -5,6 +5,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <defs.h>
#include "debug.h"
@@ -99,6 +100,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
if (err)
brcmf_err("failed to get OF country code map (err=%d)\n", err);
+ of_get_mac_address(np, settings->mac);
+
if (bus_type != BRCMF_BUSTYPE_SDIO)
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 33aae639ad37..e6d64152c81a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -937,6 +937,9 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
{
int i, j;
+ if (!fwrt->geo_enabled)
+ return -ENODATA;
+
if (!iwl_sar_geo_support(fwrt))
return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index a22788a68168..157d1f31c487 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -389,6 +389,8 @@ enum {
#define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c
#define WFPM_ARC1_PD_NOTIFICATION 0xa03044
#define HPM_SECONDARY_DEVICE_STATE 0xa03404
+#define WFPM_MAC_OTP_CFG7_ADDR 0xa03338
+#define WFPM_MAC_OTP_CFG7_DATA 0xa0333c
/* For UMAG_GEN_HW_STATUS reg check */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index b4f45234cfc8..357f14626cf4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -493,6 +493,7 @@ void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
if (cb_tx) {
struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));
+ memset(cb_hdr, 0, sizeof(*cb_hdr));
cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
@@ -1019,6 +1020,8 @@ static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
/* We need enough room for the WiFi header + SNAP + IV */
skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
+ if (!skb)
+ continue;
skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
ethhdr = skb_push(skb, sizeof(*ethhdr));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index bcc4ed20fe5b..61f9136a333d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1956,18 +1956,18 @@ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
\
if (len < sizeof(*data)) { \
IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
- return ERR_PTR(-EIO); \
+ return NULL; \
} \
\
data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
if (len != sizeof(*data) + data_size) { \
IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
- return ERR_PTR(-EIO); \
+ return NULL; \
} \
\
status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \
if (!status) \
- return ERR_PTR(-ENOMEM); \
+ return NULL; \
\
/* copy all the common fields */ \
status->replay_ctr = le64_to_cpu(data->replay_ctr); \
@@ -2097,7 +2097,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len);
- if (IS_ERR(status))
+ if (!status)
goto out_free_resp;
BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
@@ -2128,7 +2128,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;
status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len);
- if (IS_ERR(status))
+ if (!status)
goto out_free_resp;
iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc);
@@ -2141,7 +2141,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
* difference is only in a few not used (reserved) fields.
*/
status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len);
- if (IS_ERR(status))
+ if (!status)
goto out_free_resp;
iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc);
@@ -2153,7 +2153,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data;
status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len);
- if (IS_ERR(status))
+ if (!status)
goto out_free_resp;
iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc);
@@ -2165,7 +2165,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
IWL_ERR(mvm,
"Firmware advertises unknown WoWLAN status response %d!\n",
notif_ver);
- status = ERR_PTR(-EIO);
+ status = NULL;
}
out_free_resp:
@@ -2203,7 +2203,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_ap_sta;
status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
- if (IS_ERR(status))
+ if (!status)
goto out_unlock;
IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
@@ -2370,7 +2370,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
int i, n_matches, ret;
status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
- if (!IS_ERR(status)) {
+ if (status) {
reasons = status->wakeup_reasons;
kfree(status);
}
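
The d3.c hunks above drop the ERR_PTR()/IS_ERR() convention in favour of plain NULL returns, since the callers only ever need a valid-or-not answer here. For reference, this is the convention being removed: a small negative errno is encoded directly in the pointer value. A minimal userspace sketch of that encoding (parse() and the MAX_ERRNO handling are simplified for illustration, not iwlwifi code):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* the top 4095 addresses are never valid, so they can carry an errno */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int object;      /* stand-in for a real parsed status object */

static void *parse(int ok)
{
        if (!ok)
                return ERR_PTR(-EIO);   /* old style; the patch above now returns NULL instead */
        return &object;
}

int main(void)
{
        void *p = parse(0);

        if (IS_ERR(p))
                printf("error %ld\n", PTR_ERR(p));      /* prints "error -5" */
        return 0;
}
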
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e842816134f1..f041e77af059 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -287,6 +287,9 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
{
+#define IWL_FW_PRINT_REG_INFO(reg_name) \
+ IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
+
struct iwl_trans *trans = mvm->trans;
enum iwl_device_family device_family = trans->trans_cfg->device_family;
@@ -294,15 +297,15 @@ static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
return;
if (device_family <= IWL_DEVICE_FAMILY_9000)
- IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n",
- iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION));
+ IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION);
else
- IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n",
- iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION));
+ IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);
- IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n",
- iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE));
+ IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);
+ /* print OPT info */
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
}
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
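
The new IWL_FW_PRINT_REG_INFO() macro above leans on the preprocessor's # stringification operator, so each register name is written only once and reused both as the log label and as the read argument. A stand-alone sketch of the same trick; read_reg() and the REG_* constants are invented for the example:

#include <stdio.h>

/* pretend register file, purely illustrative */
enum { REG_STATUS = 0, REG_CONFIG = 1 };

static unsigned int read_reg(int reg)
{
        static const unsigned int regs[] = { 0xdeadbeef, 0x00c0ffee };
        return regs[reg];
}

/* #reg turns the macro argument into the string "REG_STATUS" etc., and
 * string-literal concatenation glues it onto the format string */
#define PRINT_REG(reg) printf(#reg ": 0x%x\n", read_reg(reg))

int main(void)
{
        PRINT_REG(REG_STATUS);  /* prints "REG_STATUS: 0xdeadbeef" */
        PRINT_REG(REG_CONFIG);  /* prints "REG_CONFIG: 0xc0ffee" */
        return 0;
}
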
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 5aa4520b70ac..56fa20596f16 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -552,6 +552,12 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
/* Fill the common data for all mac context types */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);
+ /*
+ * We always want to hear MCAST frames, if we're not authorized yet,
+ * we'll drop them.
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+
if (vif->p2p) {
struct ieee80211_p2p_noa_attr *noa =
&vif->bss_conf.p2p_noa_attr;
@@ -567,7 +573,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
!force_assoc_off) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u8 ap_sta_id = mvmvif->ap_sta_id;
u32 dtim_offs;
/*
@@ -609,29 +614,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO))
ctxt_sta->data_policy |=
cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE);
-
- /*
- * allow multicast data frames only as long as the station is
- * authorized, i.e., GTK keys are already installed (if needed)
- */
- if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
- struct ieee80211_sta *sta;
-
- rcu_read_lock();
-
- sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
- if (!IS_ERR_OR_NULL(sta)) {
- struct iwl_mvm_sta *mvmsta =
- iwl_mvm_sta_from_mac80211(sta);
-
- if (mvmsta->sta_state ==
- IEEE80211_STA_AUTHORIZED)
- cmd.filter_flags |=
- cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
- }
-
- rcu_read_unlock();
- }
} else {
ctxt_sta->is_assoc = cpu_to_le32(0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 4fda6c3ba9f3..bb9bd2165355 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -976,7 +976,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
- mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
mvm->fwrt.dump.conf = FW_DBG_INVALID;
mvm->monitor_on = false;
@@ -1380,10 +1379,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
- /* Counting number of interfaces is needed for legacy PM */
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count++;
-
/*
* The AP binding flow can be done only after the beacon
* template is configured (which happens only in the mac80211
@@ -1400,7 +1395,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
if (ret) {
IWL_ERR(mvm, "Failed to allocate bcast sta\n");
- goto out_release;
+ goto out_unlock;
}
/*
@@ -1411,7 +1406,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
0, vif->type,
IWL_STA_MULTICAST);
if (ret)
- goto out_release;
+ goto out_unlock;
iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
@@ -1421,7 +1416,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
- goto out_release;
+ goto out_unlock;
ret = iwl_mvm_power_update_mac(mvm);
if (ret)
@@ -1498,9 +1493,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
out_remove_mac:
mvmvif->phy_ctxt = NULL;
iwl_mvm_mac_ctxt_remove(mvm, vif);
- out_release:
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count--;
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -1582,9 +1574,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvmvif->phy_ctxt = NULL;
}
- if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count--;
-
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index c6bc85d4600a..bf35e130c876 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -934,7 +934,6 @@ struct iwl_mvm {
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
u8 fw_key_deleted[STA_KEY_MAX_NUM];
- u8 vif_count;
struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
/* -1 for always, 0 for never, >0 for that many times */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index b2ea2fca5376..b9bd81242b21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -563,6 +563,9 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
struct iwl_power_vifs *power_iterator = _data;
bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
+ if (!mvmvif->uploaded)
+ return;
+
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 78198da7e55b..49ca1e168fc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -327,17 +327,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
/*
- * drop the packet if it has failed being decrypted by HW
- */
- if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
- &crypt_len)) {
- IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
- rx_pkt_status);
- kfree_skb(skb);
- return;
- }
-
- /*
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
*/
@@ -388,6 +377,37 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mvmsta->vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /*
+ * Don't even try to decrypt a MCAST frame that was received
+ * before the managed vif is authorized, we'd fail anyway.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !mvmvif->authorized &&
+ is_multicast_ether_addr(hdr->addr1)) {
+ IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n");
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return;
+ }
+ }
+
+ /*
+ * drop the packet if it has failed being decrypted by HW
+ */
+ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
+ &crypt_len)) {
+ IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
+ rx_pkt_status);
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
rcu_dereference(mvm->csa_tx_blocked_vif);
struct iwl_fw_dbg_trigger_tlv *trig;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 406f0a50a5bf..bbb1522e7280 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1058,7 +1058,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
unsigned long *unshare_queues,
unsigned long *changetid_queues)
{
- int tid;
+ unsigned int tid;
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 8be3c3c8c68b..6fc69c42f36e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1085,34 +1085,44 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
}
struct iwl_causes_list {
- u32 cause_num;
- u32 mask_reg;
+ u16 mask_reg;
+ u8 bit;
u8 addr;
};
+#define CAUSE(reg, mask) \
+ { \
+ .mask_reg = reg, \
+ .bit = ilog2(mask), \
+ .addr = ilog2(mask) + \
+ ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \
+ (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 : \
+ 0xffff), /* causes overflow warning */ \
+ }
+
static const struct iwl_causes_list causes_list_common[] = {
- {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
- {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
- {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
- {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
- {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
- {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
- {MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12},
- {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
- {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
- {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
- {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
- {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
- {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
- {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+ CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
+ CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
+ CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
+ CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};
static const struct iwl_causes_list causes_list_pre_bz[] = {
- {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};
static const struct iwl_causes_list causes_list_bz[] = {
- {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x15},
+ CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};
static void iwl_pcie_map_list(struct iwl_trans *trans,
@@ -1124,7 +1134,7 @@ static void iwl_pcie_map_list(struct iwl_trans *trans,
for (i = 0; i < arr_size; i++) {
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
iwl_clear_bit(trans, causes[i].mask_reg,
- causes[i].cause_num);
+ BIT(causes[i].bit));
}
}
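
The CAUSE() macro above replaces the hand-maintained (cause, mask register, IVAR address) triples: for a single-bit cause mask, ilog2(mask) yields the bit number, and the IVAR address follows from a fixed per-register offset (-16 for the FH mask register, +16 for the HW mask register). A userspace sketch of that derivation, using __builtin_ctz() in place of the kernel's ilog2() and made-up mask values:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* illustrative stand-ins, not the real CSR/MSIX definitions */
#define FH_MASK_REG     0
#define HW_MASK_REG     1
#define CAUSE_D2S_CH0   BIT(16)         /* FH cause */
#define CAUSE_ALIVE     BIT(0)          /* HW cause */

struct cause {
        int reg;
        unsigned int bit;       /* bit inside the mask register */
        unsigned int addr;      /* MSIX IVAR index */
};

/* for a power-of-two mask, __builtin_ctz() equals ilog2() */
#define CAUSE_ENTRY(_reg, _mask) {                                      \
        .reg = (_reg),                                                  \
        .bit = __builtin_ctz(_mask),                                    \
        .addr = __builtin_ctz(_mask) +                                  \
                ((_reg) == FH_MASK_REG ? -16 : 16),                     \
}

int main(void)
{
        const struct cause list[] = {
                CAUSE_ENTRY(FH_MASK_REG, CAUSE_D2S_CH0),
                CAUSE_ENTRY(HW_MASK_REG, CAUSE_ALIVE),
        };
        unsigned int i;

        for (i = 0; i < sizeof(list) / sizeof(list[0]); i++)
                printf("reg=%d bit=%u addr=%u\n",
                       list[i].reg, list[i].bit, list[i].addr);
        /* prints: reg=0 bit=16 addr=0
         *         reg=1 bit=0 addr=16 */
        return 0;
}
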
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 72622220051b..10cbd9e560e7 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -162,15 +162,15 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
if (!sta)
return;
- if (!status->aggr && !(status->flag & RX_FLAG_8023)) {
- mt76_rx_aggr_check_ctl(skb, frames);
+ if (!status->aggr) {
+ if (!(status->flag & RX_FLAG_8023))
+ mt76_rx_aggr_check_ctl(skb, frames);
return;
}
/* not part of a BA session */
ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
- if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
- ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
return;
tid = rcu_dereference(wcid->aggr[tidno]);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 02daeefb0761..30de8be4aac1 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -7,6 +7,37 @@
#include "mt76.h"
#include "dma.h"
+#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
+
+#define Q_READ(_dev, _q, _field) ({ \
+ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
+ u32 _val; \
+ if ((_q)->flags & MT_QFLAG_WED) \
+ _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
+ ((_q)->wed_regs + \
+ _offset)); \
+ else \
+ _val = readl(&(_q)->regs->_field); \
+ _val; \
+})
+
+#define Q_WRITE(_dev, _q, _field, _val) do { \
+ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
+ if ((_q)->flags & MT_QFLAG_WED) \
+ mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
+ ((_q)->wed_regs + _offset), \
+ _val); \
+ else \
+ writel(_val, &(_q)->regs->_field); \
+} while (0)
+
+#else
+
+#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
+#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)
+
+#endif
+
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
@@ -16,11 +47,11 @@ mt76_alloc_txwi(struct mt76_dev *dev)
int size;
size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
- txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ txwi = kzalloc(size, GFP_ATOMIC);
if (!txwi)
return NULL;
- addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+ addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
DMA_TO_DEVICE);
t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
t->dma_addr = addr;
@@ -73,18 +104,20 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
struct mt76_txwi_cache *t;
local_bh_disable();
- while ((t = __mt76_get_txwi(dev)) != NULL)
- dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ while ((t = __mt76_get_txwi(dev)) != NULL) {
+ dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
+ kfree(mt76_get_txwi_ptr(dev, t));
+ }
local_bh_enable();
}
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
- writel(q->desc_dma, &q->regs->desc_base);
- writel(q->ndesc, &q->regs->ring_size);
- q->head = readl(&q->regs->dma_idx);
+ Q_WRITE(dev, q, desc_base, q->desc_dma);
+ Q_WRITE(dev, q, ring_size, q->ndesc);
+ q->head = Q_READ(dev, q, dma_idx);
q->tail = q->head;
}
@@ -100,42 +133,12 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
- writel(0, &q->regs->cpu_idx);
- writel(0, &q->regs->dma_idx);
+ Q_WRITE(dev, q, cpu_idx, 0);
+ Q_WRITE(dev, q, dma_idx, 0);
mt76_dma_sync_idx(dev, q);
}
static int
-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
- int idx, int n_desc, int bufsize,
- u32 ring_base)
-{
- int size;
-
- spin_lock_init(&q->lock);
- spin_lock_init(&q->cleanup_lock);
-
- q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->buf_size = bufsize;
- q->hw_idx = idx;
-
- size = q->ndesc * sizeof(struct mt76_desc);
- q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
- if (!q->desc)
- return -ENOMEM;
-
- size = q->ndesc * sizeof(*q->entry);
- q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
- if (!q->entry)
- return -ENOMEM;
-
- mt76_dma_queue_reset(dev, q);
-
- return 0;
-}
-
-static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi)
@@ -203,11 +206,11 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
struct mt76_queue_entry *e = &q->entry[idx];
if (!e->skip_buf0)
- dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
+ dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
DMA_TO_DEVICE);
if (!e->skip_buf1)
- dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
+ dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
DMA_TO_DEVICE);
if (e->txwi == DMA_DUMMY_DATA)
@@ -224,7 +227,7 @@ static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
wmb();
- writel(q->head, &q->regs->cpu_idx);
+ Q_WRITE(dev, q, cpu_idx, q->head);
}
static void
@@ -240,7 +243,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
if (flush)
last = -1;
else
- last = readl(&q->regs->dma_idx);
+ last = Q_READ(dev, q, dma_idx);
while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
@@ -252,8 +255,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
}
if (!flush && q->tail == last)
- last = readl(&q->regs->dma_idx);
-
+ last = Q_READ(dev, q, dma_idx);
}
spin_unlock_bh(&q->cleanup_lock);
@@ -288,7 +290,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (info)
*info = le32_to_cpu(desc->info);
- dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
e->buf = NULL;
return buf;
@@ -325,9 +327,9 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
if (q->queued + 1 >= q->ndesc - 1)
goto error;
- addr = dma_map_single(dev->dev, skb->data, skb->len,
+ addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev, addr)))
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto error;
buf.addr = addr;
@@ -374,8 +376,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
mt76_insert_hdr_pad(skb);
len = skb_headlen(skb);
- addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev, addr)))
+ addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto free;
tx_info.buf[n].addr = t->dma_addr;
@@ -387,9 +389,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
- addr = dma_map_single(dev->dev, iter->data, iter->len,
+ addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev, addr)))
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
goto unmap;
tx_info.buf[n].addr = addr;
@@ -402,10 +404,10 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
goto unmap;
}
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
if (ret < 0)
goto unmap;
@@ -415,7 +417,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
unmap:
for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+ dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
tx_info.buf[n].len, DMA_TO_DEVICE);
free:
@@ -460,8 +462,8 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
if (!buf)
break;
- addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev, addr))) {
+ addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
skb_free_frag(buf);
break;
}
@@ -481,6 +483,85 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
return frames;
}
+static int
+mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mmio.wed;
+ int ret, type, ring;
+ u8 flags = q->flags;
+
+ if (!mtk_wed_device_active(wed))
+ q->flags &= ~MT_QFLAG_WED;
+
+ if (!(q->flags & MT_QFLAG_WED))
+ return 0;
+
+ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
+ ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
+
+ switch (type) {
+ case MT76_WED_Q_TX:
+ ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
+ if (!ret)
+ q->wed_regs = wed->tx_ring[ring].reg_base;
+ break;
+ case MT76_WED_Q_TXFREE:
+ /* WED txfree queue needs ring to be initialized before setup */
+ q->flags = 0;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q);
+ q->flags = flags;
+
+ ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
+ if (!ret)
+ q->wed_regs = wed->txfree_ring.reg_base;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
+{
+ int ret, size;
+
+ spin_lock_init(&q->lock);
+ spin_lock_init(&q->cleanup_lock);
+
+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
+
+ size = q->ndesc * sizeof(struct mt76_desc);
+ q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ size = q->ndesc * sizeof(*q->entry);
+ q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ ret = mt76_dma_wed_setup(dev, q);
+ if (ret)
+ return ret;
+
+ if (q->flags != MT_WED_Q_TXFREE)
+ mt76_dma_queue_reset(dev, q);
+
+ return 0;
+}
+
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
@@ -562,14 +643,29 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
- int len, data_len, done = 0;
+ int len, data_len, done = 0, dma_idx;
struct sk_buff *skb;
unsigned char *data;
+ bool check_ddone = false;
bool more;
+ if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+ q->flags == MT_WED_Q_TXFREE) {
+ dma_idx = Q_READ(dev, q, dma_idx);
+ check_ddone = true;
+ }
+
while (done < budget) {
u32 info;
+ if (check_ddone) {
+ if (q->tail == dma_idx)
+ dma_idx = Q_READ(dev, q, dma_idx);
+
+ if (q->tail == dma_idx)
+ break;
+ }
+
data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
if (!data)
break;
@@ -710,5 +806,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
}
mt76_free_pending_txwi(dev);
+
+ if (mtk_wed_device_active(&dev->mmio.wed))
+ mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
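
The Q_READ()/Q_WRITE() helpers introduced at the top of dma.c use GCC statement expressions, the ({ ... }) form, so a multi-statement body can still yield a value and be dropped in wherever a plain readl() used to be. A compact userspace sketch of the construct; fake_queue and its fields are invented for the example and do not mirror struct mt76_queue:

#include <stdio.h>
#include <stdint.h>

struct fake_queue {
        int wed;                /* stands in for (q)->flags & MT_QFLAG_WED */
        uint32_t direct_val;    /* stands in for readl(&(q)->regs->field)  */
        uint32_t wed_val;       /* stands in for the WED-mirrored register */
};

/* statement expression: several statements, and the last one (_val;)
 * becomes the value of the whole macro, just like Q_READ() above */
#define Q_READ_SKETCH(_q) ({                    \
        uint32_t _val;                          \
        if ((_q)->wed)                          \
                _val = (_q)->wed_val;           \
        else                                    \
                _val = (_q)->direct_val;        \
        _val;                                   \
})

int main(void)
{
        struct fake_queue q = { .wed = 1, .direct_val = 0x10, .wed_val = 0x20 };

        printf("0x%x\n", Q_READ_SKETCH(&q));    /* prints 0x20 */
        q.wed = 0;
        printf("0x%x\n", Q_READ_SKETCH(&q));    /* prints 0x10 */
        return 0;
}
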
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 5b53d008eb66..18b5de55334c 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -248,6 +248,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+ vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
+ IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
for (i = 0; i < 8; i++) {
if (i < nstream)
@@ -323,8 +325,6 @@ mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SHORT_GI_80 |
- IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
- IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
return 0;
@@ -545,6 +545,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
dev->hw = hw;
dev->dev = pdev;
dev->drv = drv_ops;
+ dev->dma_dev = pdev;
phy = &dev->phy;
phy->dev = dev;
@@ -579,6 +580,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
INIT_LIST_HEAD(&dev->wcid_list);
INIT_LIST_HEAD(&dev->txwi_cache);
+ dev->token_size = dev->drv->token_size;
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
@@ -1303,7 +1305,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
continue;
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
- mtxq->wcid = wcid;
+ mtxq->wcid = wcid->idx;
}
ewma_signal_init(&wcid->rssi);
@@ -1381,7 +1383,9 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
mutex_lock(&dev->mutex);
+ spin_lock_bh(&dev->status_lock);
rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
+ spin_unlock_bh(&dev->status_lock);
mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
@@ -1578,7 +1582,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base)
+ int ring_base, u32 flags)
{
struct mt76_queue *hwq;
int err;
@@ -1587,6 +1591,8 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
if (!hwq)
return ERR_PTR(-ENOMEM);
+ hwq->flags = flags;
+
err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
if (err < 0)
return ERR_PTR(err);
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index 3f94c37251df..914ee278e6e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -6,14 +6,14 @@
#include "mt76.h"
struct sk_buff *
-mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
- int data_len)
+__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+ int data_len, gfp_t gfp)
{
const struct mt76_mcu_ops *ops = dev->mcu_ops;
int length = ops->headroom + data_len + ops->tailroom;
struct sk_buff *skb;
- skb = alloc_skb(length, GFP_KERNEL);
+ skb = alloc_skb(length, gfp);
if (!skb)
return NULL;
@@ -25,7 +25,7 @@ mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
return skb;
}
-EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
+EXPORT_SYMBOL_GPL(__mt76_mcu_msg_alloc);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires)
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 26353b6bce97..86e3d2ac4d0d 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -73,8 +73,13 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
spin_lock_irqsave(&dev->mmio.irq_lock, flags);
dev->mmio.irqmask &= ~clear;
dev->mmio.irqmask |= set;
- if (addr)
- mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ if (addr) {
+ if (mtk_wed_device_active(&dev->mmio.wed))
+ mtk_wed_device_irq_set_mask(&dev->mmio.wed,
+ dev->mmio.irqmask);
+ else
+ mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ }
spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 882fb5d2517f..4e8997c45c1b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -13,6 +13,7 @@
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
+#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"
@@ -26,6 +27,16 @@
#define MT76_TOKEN_FREE_THR 64
+#define MT_QFLAG_WED_RING GENMASK(1, 0)
+#define MT_QFLAG_WED_TYPE GENMASK(3, 2)
+#define MT_QFLAG_WED BIT(4)
+
+#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
+ FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
+ FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
+#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
+
struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
@@ -42,6 +53,11 @@ enum mt76_bus_type {
MT76_BUS_SDIO,
};
+enum mt76_wed_type {
+ MT76_WED_Q_TX,
+ MT76_WED_Q_TXFREE,
+};
+
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@@ -170,6 +186,9 @@ struct mt76_queue {
u8 buf_offset;
u8 hw_idx;
u8 qid;
+ u8 flags;
+
+ u32 wed_regs;
dma_addr_t desc_dma;
struct sk_buff *rx_head;
@@ -275,7 +294,7 @@ struct mt76_wcid {
};
struct mt76_txq {
- struct mt76_wcid *wcid;
+ u16 wcid;
u16 agg_ssn;
bool send_bar;
@@ -537,6 +556,8 @@ struct mt76_mmio {
void __iomem *regs;
spinlock_t irq_lock;
u32 irqmask;
+
+ struct mtk_wed_device wed;
};
struct mt76_rx_status {
@@ -698,6 +719,7 @@ struct mt76_dev {
const struct mt76_driver_ops *drv;
const struct mt76_mcu_ops *mcu_ops;
struct device *dev;
+ struct device *dma_dev;
struct mt76_mcu mcu;
@@ -718,7 +740,9 @@ struct mt76_dev {
spinlock_t token_lock;
struct idr token;
- int token_count;
+ u16 wed_token_count;
+ u16 token_count;
+ u16 token_size;
wait_queue_head_t tx_wait;
/* spinclock used to protect wcid pktid linked list */
@@ -727,7 +751,7 @@ struct mt76_dev {
u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
- u32 vif_mask;
+ u64 vif_mask;
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
@@ -942,14 +966,14 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base);
+ int ring_base, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
- int n_desc, int ring_base)
+ int n_desc, int ring_base, u32 flags)
{
struct mt76_queue *q;
- q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
+ q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
if (IS_ERR(q))
return PTR_ERR(q);
@@ -964,7 +988,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
{
struct mt76_queue *q;
- q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
+ q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
if (IS_ERR(q))
return PTR_ERR(q);
@@ -1321,8 +1345,15 @@ int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *data, int len);
struct sk_buff *
+__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+ int data_len, gfp_t gfp);
+static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
- int data_len);
+ int data_len)
+{
+ return __mt76_mcu_msg_alloc(dev, data, data_len, GFP_KERNEL);
+}
+
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
@@ -1380,8 +1411,7 @@ mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
int token;
spin_lock_bh(&dev->token_lock);
- token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
- GFP_ATOMIC);
+ token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
spin_unlock_bh(&dev->token_lock);
return token;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index 5d4522f440b7..b5e8308e0cc7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -82,12 +82,12 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
__skb_queue_head_init(&data.q);
q = dev->mphy.q_tx[MT_TXQ_BEACON];
- spin_lock_bh(&q->lock);
+ spin_lock(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7603_update_beacon_iter, dev);
mt76_queue_kick(dev, q);
- spin_unlock_bh(&q->lock);
+ spin_unlock(&q->lock);
/* Flush all previous CAB queue packets */
mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
@@ -117,7 +117,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
mt76_skb_set_moredata(data.tail[i], false);
}
- spin_lock_bh(&q->lock);
+ spin_lock(&q->lock);
while ((skb = __skb_dequeue(&data.q)) != NULL) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
@@ -126,7 +126,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL);
}
mt76_queue_kick(dev, q);
- spin_unlock_bh(&q->lock);
+ spin_unlock(&q->lock);
for (i = 0; i < ARRAY_SIZE(data.count); i++)
mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i),
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 37b092e3ea51..f9e5857850e7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -173,13 +173,13 @@ int mt7603_dma_init(struct mt7603_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
- MT7603_TX_RING_SIZE, MT_TX_RING_BASE);
+ MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
- MT7603_PSD_RING_SIZE, MT_TX_RING_BASE);
+ MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -189,12 +189,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
- MT_MCU_RING_SIZE, MT_TX_RING_BASE);
+ MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
- MT_MCU_RING_SIZE, MT_TX_RING_BASE);
+ MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -223,8 +223,8 @@ int mt7603_dma_init(struct mt7603_dev *dev)
if (ret)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt7603_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7603_poll_tx);
napi_enable(&dev->mt76.tx_napi);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 83c5eec5b163..91425b454cae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -44,7 +44,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&dev->mt76.mutex);
- mvif->idx = ffs(~dev->mt76.vif_mask) - 1;
+ mvif->idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->idx >= MT7603_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@@ -65,7 +65,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
- dev->mt76.vif_mask |= BIT(mvif->idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->idx);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@@ -75,7 +75,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
+ mtxq->wcid = idx;
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -106,7 +106,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
spin_unlock_bh(&dev->sta_poll_lock);
mutex_lock(&dev->mt76.mutex);
- dev->mt76.vif_mask &= ~BIT(mvif->idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);
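
The BIT_ULL()/__ffs64() conversions above pair with widening mt76_dev::vif_mask to u64 in the mt76.h hunk earlier: once an interface index can reach 32 or more, BIT(idx) and ffs() operate on a 32-bit value and break. A small userspace sketch of claiming slots in a 64-bit mask; the helper names are local to the example:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* index of the first zero bit, i.e. the first free interface slot */
static unsigned int first_free_slot(uint64_t mask)
{
        return (unsigned int)__builtin_ctzll(~mask);
}

int main(void)
{
        uint64_t vif_mask = 0;
        unsigned int i;

        /* claim 40 slots; with a 32-bit mask and plain BIT() this would
         * overflow once the index reaches 32 */
        for (i = 0; i < 40; i++)
                vif_mask |= BIT_ULL(first_free_slot(vif_mask));

        printf("next free slot: %u\n", first_free_slot(vif_mask));     /* prints 40 */
        return 0;
}
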
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 00aefea1bf61..ce19f57de475 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2,
- MT_TX_RING_BASE);
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
MT7615_TX_MGMT_RING_SIZE,
- MT_TX_RING_BASE);
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
return mt7622_init_tx_queues_multi(dev);
ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
- MT_TX_RING_BASE);
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -284,8 +284,8 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt7615_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7615_poll_tx);
napi_enable(&dev->mt76.tx_napi);
mt76_poll(dev, MT_WPDMA_GLO_CFG,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index d79cbdbd5a05..a9c9b97d173e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -194,7 +194,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
- mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
+ mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->mt76.idx >= MT7615_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@@ -212,7 +212,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
if (ext_phy)
mvif->mt76.wmm_idx += 2;
- dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
@@ -234,7 +234,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
+ mtxq->wcid = idx;
}
ret = mt7615_mcu_add_dev_info(phy, vif, true);
@@ -268,7 +268,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
dev->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index ce45c3bfc443..a208035e197a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -145,7 +145,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t)
return;
dev->reset_state = mcu_int;
- ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 51a9b5d60c7a..faa279bbbcb2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -2185,11 +2185,8 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
return -ENOMEM;
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
- for (i = 0; i < len; i++) {
- u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
-
- memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
- }
+ for (i = 0; i < len; i++)
+ skb_put_data(skb, &info->arp_addr_list[i], sizeof(__be32));
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index c3c93338d56a..561fb0368708 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -974,7 +974,6 @@ enum {
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
- MCU_EXT_CMD_SCS_CTRL = 0x82,
MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
MCU_EXT_CMD_OFFCH_SCAN_CTRL = 0x9a,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 8bcd8afa0d3a..96ec96df6a3c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -191,13 +191,13 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
MT76x02_TX_RING_SIZE,
- MT_TX_RING_BASE);
+ MT_TX_RING_BASE, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
- MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE);
+ MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
if (ret)
return ret;
@@ -230,8 +230,8 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt76x02_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt76x02_poll_tx);
napi_enable(&dev->mt76.tx_napi);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index dd30f537676d..5bd0a0bae688 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -292,7 +292,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mt76_packet_id_init(&mvif->group_wcid);
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->group_wcid;
+ rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid);
+ mtxq->wcid = MT_VIF_WCID(idx);
}
int
@@ -327,11 +328,11 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
idx += 8;
/* vif is already set or idx is 8 for AP/Mesh/... */
- if (dev->mt76.vif_mask & BIT(idx) ||
+ if (dev->mt76.vif_mask & BIT_ULL(idx) ||
(vif->type != NL80211_IFTYPE_STATION && idx > 7))
return -EBUSY;
- dev->mt76.vif_mask |= BIT(idx);
+ dev->mt76.vif_mask |= BIT_ULL(idx);
mt76x02_vif_init(dev, vif, idx);
return 0;
@@ -344,7 +345,8 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- dev->mt76.vif_mask &= ~BIT(mvif->idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx);
+ rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL);
mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index e9cab1165f38..cab6e02e1f8c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -44,35 +44,113 @@ mt7915_implicit_txbf_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7915_implicit_txbf_get,
mt7915_implicit_txbf_set, "%lld\n");
-/* test knob of system layer 1/2 error recovery */
-static int mt7915_ser_trigger_set(void *data, u64 val)
+/* test knob of system error recovery */
+static ssize_t
+mt7915_fw_ser_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- enum {
- SER_SET_RECOVER_L1 = 1,
- SER_SET_RECOVER_L2,
- SER_ENABLE = 2,
- SER_RECOVER
- };
- struct mt7915_dev *dev = data;
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ char buf[16];
int ret = 0;
+ u16 val;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ if (kstrtou16(buf, 0, &val))
+ return -EINVAL;
switch (val) {
+ case SER_QUERY:
+ /* grab firmware SER stats */
+ ret = mt7915_mcu_set_ser(dev, 0, 0, ext_phy);
+ break;
case SER_SET_RECOVER_L1:
case SER_SET_RECOVER_L2:
- ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0);
+ case SER_SET_RECOVER_L3_RX_ABORT:
+ case SER_SET_RECOVER_L3_TX_ABORT:
+ case SER_SET_RECOVER_L3_TX_DISABLE:
+ case SER_SET_RECOVER_L3_BF:
+ ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), ext_phy);
if (ret)
return ret;
- return mt7915_mcu_set_ser(dev, SER_RECOVER, val, 0);
+ ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, ext_phy);
+ break;
default:
break;
}
+ return ret ? ret : count;
+}
+
+static ssize_t
+mt7915_fw_ser_get(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ char *buff;
+ int desc = 0;
+ ssize_t ret;
+ static const size_t bufsz = 400;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_STATUS = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_SER_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR_1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR_AMSDU = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE_AMSDU_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PSE_ERR = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PSE_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PSE_ERR_1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PSE1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR6_B0 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN0_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR6_B1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR7_B0 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN0_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR7_B1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN1_STATS));
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
return ret;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_ser_trigger, NULL,
- mt7915_ser_trigger_set, "%lld\n");
+static const struct file_operations mt7915_fw_ser_ops = {
+ .write = mt7915_fw_ser_set,
+ .read = mt7915_fw_ser_get,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
static int
mt7915_radar_trigger(void *data, u64 val)
@@ -95,7 +173,7 @@ mt7915_muru_debug_set(void *data, u64 val)
struct mt7915_dev *dev = data;
dev->muru_debug = val;
- mt7915_mcu_muru_debug_set(dev, data);
+ mt7915_mcu_muru_debug_set(dev, dev->muru_debug);
return 0;
}
@@ -369,20 +447,20 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
bool tx, rx, en;
int ret;
- dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
+ dev->fw.debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
- if (dev->fw_debug_bin)
+ if (dev->fw.debug_bin)
val = 16;
else
- val = dev->fw_debug_wm;
+ val = dev->fw.debug_wm;
- tx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(1));
- rx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(2));
- en = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(0));
+ tx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(1));
+ rx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(2));
+ en = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(0));
ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
if (ret)
- return ret;
+ goto out;
for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
if (debug == DEBUG_RPT_RX)
@@ -392,16 +470,20 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val);
if (ret)
- return ret;
+ goto out;
}
/* WM CPU info record control */
mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0));
- mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw_debug_wm);
+ mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw.debug_wm);
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5));
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5));
- return 0;
+out:
+ if (ret)
+ dev->fw.debug_wm = 0;
+
+ return ret;
}
static int
@@ -409,7 +491,7 @@ mt7915_fw_debug_wm_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
- *val = dev->fw_debug_wm;
+ *val = dev->fw.debug_wm;
return 0;
}
@@ -423,14 +505,19 @@ mt7915_fw_debug_wa_set(void *data, u64 val)
struct mt7915_dev *dev = data;
int ret;
- dev->fw_debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
+ dev->fw.debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
- ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw_debug_wa);
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw.debug_wa);
if (ret)
- return ret;
+ goto out;
- return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_PDMA_RX,
- !!dev->fw_debug_wa, 0);
+ ret = mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_PDMA_RX, !!dev->fw.debug_wa, 0);
+out:
+ if (ret)
+ dev->fw.debug_wa = 0;
+
+ return ret;
}
static int
@@ -438,7 +525,7 @@ mt7915_fw_debug_wa_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
- *val = dev->fw_debug_wa;
+ *val = dev->fw.debug_wa;
return 0;
}
@@ -485,11 +572,11 @@ mt7915_fw_debug_bin_set(void *data, u64 val)
if (!dev->relay_fwlog)
return -ENOMEM;
- dev->fw_debug_bin = val;
+ dev->fw.debug_bin = val;
relay_reset(dev->relay_fwlog);
- return mt7915_fw_debug_wm_set(dev, dev->fw_debug_wm);
+ return mt7915_fw_debug_wm_set(dev, dev->fw.debug_wm);
}
static int
@@ -497,7 +584,7 @@ mt7915_fw_debug_bin_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
- *val = dev->fw_debug_bin;
+ *val = dev->fw.debug_bin;
return 0;
}
@@ -510,7 +597,13 @@ mt7915_fw_util_wm_show(struct seq_file *file, void *data)
{
struct mt7915_dev *dev = file->private;
- if (dev->fw_debug_wm) {
+ seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WM_MCU_PC));
+ seq_printf(file, "Exception state: 0x%x\n",
+ is_mt7915(&dev->mt76) ?
+ (u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(15, 8)) :
+ (u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(7, 0)));
+
+ if (dev->fw.debug_wm) {
seq_printf(file, "Busy: %u%% Peak busy: %u%%\n",
mt76_rr(dev, MT_CPU_UTIL_BUSY_PCT),
mt76_rr(dev, MT_CPU_UTIL_PEAK_BUSY_PCT));
@@ -529,7 +622,9 @@ mt7915_fw_util_wa_show(struct seq_file *file, void *data)
{
struct mt7915_dev *dev = file->private;
- if (dev->fw_debug_wa)
+ seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WA_MCU_PC));
+
+ if (dev->fw.debug_wa)
return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY),
MCU_WA_PARAM_CPU_UTIL, 0, 0);
@@ -867,6 +962,36 @@ mt7915_twt_stats(struct seq_file *s, void *data)
return 0;
}
+/* The index of RF registers use the generic regidx, combined with two parts:
+ * WF selection [31:28] and offset [27:0].
+ */
+static int
+mt7915_rf_regval_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+ u32 regval;
+ int ret;
+
+ ret = mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &regval, false);
+ if (ret)
+ return ret;
+
+ *val = le32_to_cpu(regval);
+
+ return 0;
+}
+
+static int
+mt7915_rf_regval_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ return mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, (u32 *)&val, true);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7915_rf_regval_get,
+ mt7915_rf_regval_set, "0x%08llx\n");
+
int mt7915_init_debugfs(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@@ -884,6 +1009,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("xmit-queues", 0400, dir, phy,
&mt7915_xmit_queues_fops);
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
+ debugfs_create_file("fw_ser", 0600, dir, phy, &mt7915_fw_ser_ops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
@@ -897,7 +1023,8 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
&mt7915_rate_txpower_fops);
debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
mt7915_twt_stats);
- debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
+ debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
+
if (!dev->dbdc_support || phy->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
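
mt7915_fw_ser_get() above builds its report by repeatedly scnprintf()-ing into one heap buffer while tracking the running offset, then hands the result to simple_read_from_buffer(). The same accumulation pattern in a self-contained userspace form; snprintf() is wrapped to mimic scnprintf()'s bounded return value, and the register values are hard-coded stand-ins for mt76_rr():

#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>

/* userspace stand-in for the kernel's scnprintf(): returns the number of
 * characters actually stored, never more than size - 1 */
static int scnprintf_user(char *buf, size_t size, const char *fmt, ...)
{
        va_list ap;
        int n;

        if (!size)
                return 0;
        va_start(ap, fmt);
        n = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (n < 0)
                return 0;
        return n < (int)size ? n : (int)(size - 1);
}

int main(void)
{
        const size_t bufsz = 400;
        char *buff = malloc(bufsz);
        int desc = 0;

        if (!buff)
                return 1;

        /* hard-coded stand-ins for the mt76_rr() register reads */
        desc += scnprintf_user(buff + desc, bufsz - desc,
                               "SER_STATUS = 0x%08x\n", 0x00000001u);
        desc += scnprintf_user(buff + desc, bufsz - desc,
                               "SER_PLE_ERR = 0x%08x\n", 0x00000000u);

        fwrite(buff, 1, desc, stdout);  /* simple_read_from_buffer() analog */
        free(buff);
        return 0;
}
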
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 49b4d8ade16b..f3d608d2d3b2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -5,11 +5,19 @@
#include "../dma.h"
#include "mac.h"
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
+static int
+mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
+ struct mt7915_dev *dev = phy->dev;
int i, err;
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ ring_base = MT_WED_TX_RING_BASE;
+ idx -= MT_TXQ_ID(0);
+ }
+
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base,
+ MT_WED_Q_TX(idx));
if (err < 0)
return err;
@@ -318,14 +326,23 @@ static int mt7915_dma_enable(struct mt7915_dev *dev)
if (dev->dbdc_support || dev->phy.band_idx)
irq_mask |= MT_INT_BAND1_RX_DONE;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ u32 wed_irq_mask = irq_mask;
+
+ wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+ mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+ }
+
mt7915_irq_enable(dev, irq_mask);
return 0;
}
-int mt7915_dma_init(struct mt7915_dev *dev)
+int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
struct mt76_dev *mdev = &dev->mt76;
+ u32 wa_rx_base, wa_rx_idx;
u32 hif1_ofs = 0;
int ret;
@@ -338,6 +355,17 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt7915_dma_disable(dev, true);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
+
+ mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1));
+ } else {
+ mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
+ }
+
/* init tx queue */
ret = mt7915_init_tx_queues(&dev->phy,
MT_TXQ_ID(dev->phy.band_idx),
@@ -346,6 +374,15 @@ int mt7915_dma_init(struct mt7915_dev *dev)
if (ret)
return ret;
+ if (phy2) {
+ ret = mt7915_init_tx_queues(phy2,
+ MT_TXQ_ID(phy2->band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(1));
+ if (ret)
+ return ret;
+ }
+
/* command to WM */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
MT_MCUQ_ID(MT_MCUQ_WM),
@@ -380,11 +417,17 @@ int mt7915_dma_init(struct mt7915_dev *dev)
return ret;
/* event from WA */
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ wa_rx_base = MT_WED_RX_RING_BASE;
+ wa_rx_idx = MT7915_RXQ_MCU_WA;
+ dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+ } else {
+ wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
+ wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
+ }
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
- MT_RXQ_ID(MT_RXQ_MCU_WA),
- MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
+ wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE, wa_rx_base);
if (ret)
return ret;
@@ -434,8 +477,8 @@ int mt7915_dma_init(struct mt7915_dev *dev)
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt7915_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7915_poll_tx);
napi_enable(&dev->mt76.tx_napi);
mt7915_dma_enable(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 5b133bcdab17..4b1a9811646f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -152,6 +152,8 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
phy->mt76->cap.has_2ghz = true;
return;
}
+ } else if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support) {
+ val = phy->band_idx ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
}
switch (val) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 6d29366c5139..01169853355e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -351,6 +351,8 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY);
if (!mdev->dev->of_node ||
!of_property_read_bool(mdev->dev->of_node,
@@ -450,6 +452,9 @@ static void mt7915_mac_init(struct mt7915_dev *dev)
mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len);
+ if (!is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
+
/* enable hardware de-agg */
mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
@@ -484,21 +489,18 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
}
-static int mt7915_register_ext_phy(struct mt7915_dev *dev)
+static struct mt7915_phy *
+mt7915_alloc_ext_phy(struct mt7915_dev *dev)
{
- struct mt7915_phy *phy = mt7915_ext_phy(dev);
+ struct mt7915_phy *phy;
struct mt76_phy *mphy;
- int ret;
if (!dev->dbdc_support)
- return 0;
-
- if (phy)
- return 0;
+ return NULL;
mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops);
if (!mphy)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
phy = mphy->priv;
phy->dev = dev;
@@ -507,6 +509,15 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
/* Bind main phy to band0 and ext_phy to band1 for dbdc case */
phy->band_idx = 1;
+ return phy;
+}
+
+static int
+mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ int ret;
+
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
mt7915_eeprom_parse_hw_cap(dev, phy);
@@ -526,29 +537,22 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
/* init wiphy according to mphy and phy */
mt7915_init_wiphy(mphy->hw);
- ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(phy->band_idx),
- MT7915_TX_RING_SIZE,
- MT_TXQ_RING_BASE(1));
- if (ret)
- goto error;
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
- goto error;
+ return ret;
ret = mt7915_thermal_init(phy);
if (ret)
- goto error;
+ goto unreg;
- ret = mt7915_init_debugfs(phy);
- if (ret)
- goto error;
+ mt7915_init_debugfs(phy);
return 0;
-error:
- ieee80211_free_hw(mphy->hw);
+unreg:
+ mt76_unregister_phy(mphy);
return ret;
}
@@ -565,7 +569,7 @@ static void mt7915_init_work(struct work_struct *work)
mt7915_txbf_init(dev);
}
-static void mt7915_wfsys_reset(struct mt7915_dev *dev)
+void mt7915_wfsys_reset(struct mt7915_dev *dev)
{
#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
@@ -645,36 +649,25 @@ static bool mt7915_band_config(struct mt7915_dev *dev)
return ret;
}
-static int mt7915_init_hardware(struct mt7915_dev *dev)
+static int
+mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
int ret, idx;
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
- dev->dbdc_support = mt7915_band_config(dev);
-
- /* If MCU was already running, it is likely in a bad state */
- if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
- FW_STATE_FW_DOWNLOAD)
- mt7915_wfsys_reset(dev);
-
- ret = mt7915_dma_init(dev);
+ ret = mt7915_dma_init(dev, phy2);
if (ret)
return ret;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
ret = mt7915_mcu_init(dev);
- if (ret) {
- /* Reset and try again */
- mt7915_wfsys_reset(dev);
-
- ret = mt7915_mcu_init(dev);
- if (ret)
- return ret;
- }
+ if (ret)
+ return ret;
ret = mt7915_eeprom_init(dev);
if (ret < 0)
@@ -814,7 +807,7 @@ static void
mt7915_gen_ppe_thresh(u8 *he_ppet, int nss)
{
u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
- u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
+ static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
@@ -1048,9 +1041,22 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
ieee80211_free_hw(mphy->hw);
}
+static void mt7915_stop_hardware(struct mt7915_dev *dev)
+{
+ mt7915_mcu_exit(dev);
+ mt7915_tx_token_put(dev);
+ mt7915_dma_cleanup(dev);
+ tasklet_disable(&dev->irq_tasklet);
+
+ if (is_mt7986(&dev->mt76))
+ mt7986_wmac_disable(dev);
+}
+
+
int mt7915_register_device(struct mt7915_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7915_phy *phy2;
int ret;
dev->phy.dev = dev;
@@ -1066,9 +1072,15 @@ int mt7915_register_device(struct mt7915_dev *dev)
init_waitqueue_head(&dev->reset_wait);
INIT_WORK(&dev->reset_work, mt7915_mac_reset_work);
- ret = mt7915_init_hardware(dev);
+ dev->dbdc_support = mt7915_band_config(dev);
+
+ phy2 = mt7915_alloc_ext_phy(dev);
+ if (IS_ERR(phy2))
+ return PTR_ERR(phy2);
+
+ ret = mt7915_init_hardware(dev, phy2);
if (ret)
- return ret;
+ goto free_phy2;
mt7915_init_wiphy(hw);
@@ -1085,19 +1097,34 @@ int mt7915_register_device(struct mt7915_dev *dev)
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
- return ret;
+ goto stop_hw;
ret = mt7915_thermal_init(&dev->phy);
if (ret)
- return ret;
+ goto unreg_dev;
ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
- ret = mt7915_register_ext_phy(dev);
- if (ret)
- return ret;
+ if (phy2) {
+ ret = mt7915_register_ext_phy(dev, phy2);
+ if (ret)
+ goto unreg_thermal;
+ }
+
+ mt7915_init_debugfs(&dev->phy);
+
+ return 0;
- return mt7915_init_debugfs(&dev->phy);
+unreg_thermal:
+ mt7915_unregister_thermal(&dev->phy);
+unreg_dev:
+ mt76_unregister_device(&dev->mt76);
+stop_hw:
+ mt7915_stop_hardware(dev);
+free_phy2:
+ if (phy2)
+ ieee80211_free_hw(phy2->mt76->hw);
+ return ret;
}
void mt7915_unregister_device(struct mt7915_dev *dev)
@@ -1105,13 +1132,7 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
mt7915_unregister_ext_phy(dev);
mt7915_unregister_thermal(&dev->phy);
mt76_unregister_device(&dev->mt76);
- mt7915_mcu_exit(dev);
- mt7915_tx_token_put(dev);
- mt7915_dma_cleanup(dev);
- tasklet_disable(&dev->irq_tasklet);
-
- if (is_mt7986(&dev->mt76))
- mt7986_wmac_disable(dev);
+ mt7915_stop_hardware(dev);
mt76_free_device(&dev->mt76);
}
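
The reworked mt7915_register_device() above replaces the old early returns with a goto ladder that tears down in reverse order of setup. A minimal stand-alone sketch of that idiom follows, with made-up step names rather than the driver's functions.

/* Minimal stand-alone sketch of the unwind ladder used by the reworked
 * mt7915_register_device() above: each failure jumps to a label that
 * tears down exactly what was set up before it, in reverse order.
 * The step_* functions are hypothetical placeholders.
 */
#include <stdio.h>

static int step_init_hw(void)      { return 0; }
static int step_register_dev(void) { return 0; }
static int step_thermal(void)      { return 0; }
static int step_ext_phy(void)      { return -1; } /* pretend this fails */

static void undo_thermal(void)      { puts("undo thermal"); }
static void undo_register_dev(void) { puts("undo register"); }
static void undo_init_hw(void)      { puts("undo hw"); }

static int register_device(void)
{
	int ret;

	ret = step_init_hw();
	if (ret)
		return ret;

	ret = step_register_dev();
	if (ret)
		goto stop_hw;

	ret = step_thermal();
	if (ret)
		goto unreg_dev;

	ret = step_ext_phy();
	if (ret)
		goto unreg_thermal;

	return 0;

unreg_thermal:
	undo_thermal();
unreg_dev:
	undo_register_dev();
stop_hw:
	undo_init_hw();
	return ret;
}

int main(void)
{
	printf("register_device() = %d\n", register_device());
	return 0;
}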
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index bab70cf981bb..086244d9be76 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -309,7 +309,7 @@ mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
}
static void
-mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
+mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
static const struct ieee80211_radiotap_he known = {
@@ -474,10 +474,10 @@ static int
mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
struct mt76_rx_status *status,
struct ieee80211_supported_band *sband,
- __le32 *rxv)
+ __le32 *rxv, u8 *mode)
{
u32 v0, v2;
- u8 stbc, gi, bw, dcm, mode, nss;
+ u8 stbc, gi, bw, dcm, nss;
int i, idx;
bool cck = false;
@@ -490,18 +490,18 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
if (!is_mt7915(&dev->mt76)) {
stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v0);
- mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
+ *mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
dcm = FIELD_GET(MT_PRXV_DCM, v0);
bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
} else {
stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
- mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+ *mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
}
- switch (mode) {
+ switch (*mode) {
case MT_PHY_TYPE_CCK:
cck = true;
fallthrough;
@@ -521,7 +521,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
status->encoding = RX_ENC_VHT;
if (gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (i > 9)
+ if (i > 11)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
@@ -546,7 +546,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
case IEEE80211_STA_RX_BW_20:
break;
case IEEE80211_STA_RX_BW_40:
- if (mode & MT_PHY_TYPE_HE_EXT_SU &&
+ if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
(idx & MT_PRXV_TX_ER_SU_106T)) {
status->bw = RATE_INFO_BW_HE_RU;
status->he_ru =
@@ -566,7 +566,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
}
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (mode < MT_PHY_TYPE_HE_SU && gi)
+ if (*mode < MT_PHY_TYPE_HE_SU && gi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
return 0;
@@ -581,7 +581,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
struct ieee80211_supported_band *sband;
__le32 *rxd = (__le32 *)skb->data;
__le32 *rxv = NULL;
- u32 mode = 0;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
@@ -590,10 +589,10 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
bool unicast, insert_ccmp_hdr = false;
u8 remove_pad, amsdu_info;
+ u8 mode = 0, qos_ctl = 0;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
- u8 qos_ctl = 0;
__le16 fc = 0;
int idx;
@@ -766,7 +765,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
}
if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
- ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv);
+ ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv,
+ &mode);
if (ret < 0)
return ret;
}
@@ -837,10 +837,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
- /* drop no data frame */
- if (fc & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))
- return -EINVAL;
-
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(fc);
status->qos_ctl = qos_ctl;
@@ -864,8 +860,11 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
int i;
band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
- if (band_idx && !phy->band_idx)
+ if (band_idx && !phy->band_idx) {
phy = mt7915_ext_phy(dev);
+ if (!phy)
+ goto out;
+ }
rcpi = le32_to_cpu(rxv[6]);
ib_rssi = le32_to_cpu(rxv[7]);
@@ -890,8 +889,8 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
phy->test.last_freq_offset = foe;
phy->test.last_snr = snr;
+out:
#endif
-
dev_kfree_skb(skb);
}
@@ -1017,6 +1016,7 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
u8 fc_type, fc_stype;
+ u16 ethertype;
bool wmm = false;
u32 val;
@@ -1030,7 +1030,8 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
FIELD_PREP(MT_TXD1_TID, tid);
- if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
+ ethertype = get_unaligned_be16(&skb->data[12]);
+ if (ethertype >= ETH_P_802_3_MIN)
val |= MT_TXD1_ETH_802_3;
txwi[1] |= cpu_to_le32(val);
@@ -1176,7 +1177,7 @@ out:
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
- struct ieee80211_key_conf *key, bool beacon)
+ struct ieee80211_key_conf *key, u32 changed)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
@@ -1187,6 +1188,10 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
bool mcast = false;
u16 tx_count = 15;
u32 val;
+ bool beacon = !!(changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED));
+ bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ BSS_CHANGED_FILS_DISCOVERY));
if (vif) {
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
@@ -1199,7 +1204,10 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
if (ext_phy && dev->mt76.phy2)
mphy = dev->mt76.phy2;
- if (beacon) {
+ if (inband_disc) {
+ p_fmt = MT_TX_TYPE_FW;
+ q_idx = MT_LMAC_ALTX0;
+ } else if (beacon) {
p_fmt = MT_TX_TYPE_FW;
q_idx = MT_LMAC_BCN0;
} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
@@ -1307,8 +1315,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return id;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
- false);
+ mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, 0);
txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
@@ -1347,6 +1354,29 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
+u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+{
+ struct mt7915_txp *txp = ptr + MT_TXD_SIZE;
+ __le32 *txwi = ptr;
+ u32 val;
+
+ memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
+ txwi[1] = cpu_to_le32(val);
+
+ txp->token = cpu_to_le16(token_id);
+ txp->nbuf = 1;
+ txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
+
+ return MT_TXD_SIZE + sizeof(*txp);
+}
+
static void
mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
@@ -1380,7 +1410,7 @@ mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
txp = mt7915_txwi_to_txp(dev, t);
for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ dma_unmap_single(dev->dma_dev, le32_to_cpu(txp->buf[i]),
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}
@@ -1389,6 +1419,7 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, struct list_head *free_list)
{
struct mt76_dev *mdev = &dev->mt76;
+ struct mt7915_sta *msta;
struct mt76_wcid *wcid;
__le32 *txwi;
u16 wcid_idx;
@@ -1401,13 +1432,24 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
if (sta) {
wcid = (struct mt76_wcid *)sta->drv_priv;
wcid_idx = wcid->idx;
-
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7915_tx_check_aggr(sta, txwi);
} else {
wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);
+
+ if (wcid && wcid->sta) {
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
}
+ if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7915_tx_check_aggr(sta, txwi);
+
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
out:
@@ -1416,28 +1458,54 @@ out:
}
static void
+mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy_ext = mdev->phy2;
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+ if (mphy_ext) {
+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
+ }
+}
+
+static void
+mt7915_mac_tx_free_done(struct mt7915_dev *dev,
+ struct list_head *free_list, bool wake)
+{
+ struct sk_buff *skb, *tmp;
+
+ mt7915_mac_sta_poll(dev);
+
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+
+ list_for_each_entry_safe(skb, tmp, free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+}
+
+static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
- struct mt76_phy *mphy_ext = mdev->phy2;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
LIST_HEAD(free_list);
- struct sk_buff *skb, *tmp;
void *end = data + len;
bool v3, wake = false;
u16 total, count = 0;
u32 txd = le32_to_cpu(free->txd);
__le32 *cur_info;
- /* clean DMA queues and unmap buffers first */
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
- if (mphy_ext) {
- mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
- }
+ mt7915_mac_tx_free_prepare(dev);
total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
@@ -1491,17 +1559,38 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
}
}
- mt7915_mac_sta_poll(dev);
+ mt7915_mac_tx_free_done(dev, &free_list, wake);
+}
- if (wake)
- mt76_set_tx_blocked(&dev->mt76, false);
+static void
+mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
+{
+ struct mt7915_tx_free *free = (struct mt7915_tx_free *)data;
+ struct mt76_dev *mdev = &dev->mt76;
+ __le16 *info = (__le16 *)free->info;
+ void *end = data + len;
+ LIST_HEAD(free_list);
+ bool wake = false;
+ u8 i, count;
- mt76_worker_schedule(&dev->mt76.tx_worker);
+ mt7915_mac_tx_free_prepare(dev);
- list_for_each_entry_safe(skb, tmp, &free_list, list) {
- skb_list_del_init(skb);
- napi_consume_skb(skb, 1);
+ count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
+ if (WARN_ON_ONCE((void *)&info[count] > end))
+ return;
+
+ for (i = 0; i < count; i++) {
+ struct mt76_txwi_cache *txwi;
+ u16 msdu = le16_to_cpu(info[i]);
+
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7915_txwi_free(dev, txwi, NULL, &free_list);
}
+
+ mt7915_mac_tx_free_done(dev, &free_list, wake);
}
static bool
@@ -1681,6 +1770,9 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
case PKT_TYPE_TXRX_NOTIFY:
mt7915_mac_tx_free(dev, data, len);
return false;
+ case PKT_TYPE_TXRX_NOTIFY_V0:
+ mt7915_mac_tx_free_v0(dev, data, len);
+ return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7915_mac_add_txs(dev, rxd);
@@ -1708,6 +1800,10 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7915_mac_tx_free(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
+ case PKT_TYPE_TXRX_NOTIFY_V0:
+ mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
+ break;
case PKT_TYPE_RX_EVENT:
mt7915_mcu_rx_event(dev, skb);
break;
@@ -1918,7 +2014,8 @@ mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
- mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
+ mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
+ BSS_CHANGED_BEACON_ENABLED);
break;
default:
break;
@@ -2304,6 +2401,32 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
}
}
+static void mt7915_mac_severe_check(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 trb;
+
+ if (!phy->omac_mask)
+ return;
+
+ /* In rare cases, TRB pointers might be out of sync and lead to RMAC
+ * stopping Rx, so check the status periodically to see if the TRB
+ * hardware requires minimal recovery.
+ */
+ trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));
+
+ if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
+ FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
+ (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
+ FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
+ trb == phy->trb_ts)
+ mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
+ ext_phy);
+
+ phy->trb_ts = trb;
+}
+
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
@@ -2356,6 +2479,7 @@ void mt7915_mac_work(struct work_struct *work)
mphy->mac_work_count = 0;
mt7915_mac_update_stats(phy);
+ mt7915_mac_severe_check(phy);
}
mutex_unlock(&mphy->dev->mutex);
@@ -2600,6 +2724,34 @@ static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
+static bool
+mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
+ struct ieee80211_twt_params *twt_agrt)
+{
+ u16 type = le16_to_cpu(twt_agrt->req_type);
+ u8 exp;
+ int i;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
+ for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
+ struct mt7915_twt_flow *f;
+
+ if (!(msta->twt.flowid_mask & BIT(i)))
+ continue;
+
+ f = &msta->twt.flow[i];
+ if (f->duration == twt_agrt->min_twt_dur &&
+ f->mantissa == twt_agrt->mantissa &&
+ f->exp == exp &&
+ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
+ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
+ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
+ return true;
+ }
+
+ return false;
+}
+
void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@@ -2625,6 +2777,12 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
goto unlock;
+ if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
+ setup_cmd = TWT_SETUP_CMD_DICTATE;
+ twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
+ goto unlock;
+ }
+
flowid = ffs(~msta->twt.flowid_mask) - 1;
le16p_replace_bits(&twt_agrt->req_type, flowid,
IEEE80211_TWT_REQTYPE_FLOWID);
@@ -2633,6 +2791,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
+ if (mt7915_mac_twt_param_equal(msta, twt_agrt))
+ goto unlock;
+
flow = &msta->twt.flow[flowid];
memset(flow, 0, sizeof(*flow));
INIT_LIST_HEAD(&flow->list);
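
mt7915_wed_init_buf() above lays each WED-owned TX buffer out as a TX descriptor followed by a TXP pointer block, with the payload starting right after both, and returns that payload offset. The sketch below models the layout with simplified, hypothetical structures and sizes; the real helper additionally fills the first two TXD words.

/* Sketch of the buffer layout set up by mt7915_wed_init_buf() above:
 * each WED TX buffer starts with a TX descriptor (TXD), followed by a
 * TXP pointer block, followed by the actual frame payload.  Sizes and
 * struct fields below are hypothetical placeholders, not the real ones.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_TXD_SIZE 32	/* placeholder for MT_TXD_SIZE */

struct fake_txp {
	uint16_t token;
	uint8_t  nbuf;
	uint8_t  rsv;
	uint32_t buf[1];	/* DMA address of the payload */
};

/* returns the payload offset inside the buffer, as the driver helper does */
static uint32_t wed_init_buf(uint8_t *ptr, uint32_t phys, int token_id)
{
	struct fake_txp *txp = (struct fake_txp *)(ptr + FAKE_TXD_SIZE);
	uint32_t payload_off = FAKE_TXD_SIZE + sizeof(*txp);

	memset(ptr, 0, payload_off);		/* zero TXD + TXP */
	txp->token = (uint16_t)token_id;	/* hand the token to the WED block */
	txp->nbuf = 1;
	txp->buf[0] = phys + payload_off;	/* payload follows the headers */

	return payload_off;
}

int main(void)
{
	uint8_t buf[256];
	uint32_t off = wed_init_buf(buf, 0x10000000u, 42);

	printf("payload offset = %u bytes\n", off);
	return 0;
}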
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index 5add1dd36dbe..c5fd1a618ae7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -24,6 +24,7 @@ enum rx_pkt_type {
PKT_TYPE_TXRX_NOTIFY,
PKT_TYPE_RX_EVENT,
PKT_TYPE_RX_FW_MONITOR = 0x0c,
+ PKT_TYPE_TXRX_NOTIFY_V0 = 0x18,
};
/* RXD DW1 */
@@ -311,6 +312,7 @@ struct mt7915_tx_free {
#define MT_TX_FREE_VER GENMASK(18, 16)
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
+#define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
/* 0: success, others: dropped */
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index c3f44d801e7f..710ca757fb52 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -42,10 +42,6 @@ static int mt7915_start(struct ieee80211_hw *hw)
if (ret)
goto out;
- ret = mt7915_mcu_set_scs(dev, 0, true);
- if (ret)
- goto out;
-
mt7915_mac_enable_nf(dev, 0);
}
@@ -58,10 +54,6 @@ static int mt7915_start(struct ieee80211_hw *hw)
if (ret)
goto out;
- ret = mt7915_mcu_set_scs(dev, 1, true);
- if (ret)
- goto out;
-
mt7915_mac_enable_nf(dev, 1);
}
@@ -174,14 +166,14 @@ static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif)
for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
- mvif->bitrate_mask.control[i].he_gi = GENMASK(7, 0);
- mvif->bitrate_mask.control[i].he_ltf = GENMASK(7, 0);
+ mvif->bitrate_mask.control[i].he_gi = 0xff;
+ mvif->bitrate_mask.control[i].he_ltf = 0xff;
mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
- memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0),
+ memset(mvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].ht_mcs));
- memset(mvif->bitrate_mask.control[i].vht_mcs, GENMASK(15, 0),
+ memset(mvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].vht_mcs));
- memset(mvif->bitrate_mask.control[i].he_mcs, GENMASK(15, 0),
+ memset(mvif->bitrate_mask.control[i].he_mcs, 0xff,
sizeof(mvif->bitrate_mask.control[i].he_mcs));
}
}
@@ -204,8 +196,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
is_zero_ether_addr(vif->addr))
phy->monitor_vif = vif;
- mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
- if (mvif->mt76.idx >= MT7915_MAX_INTERFACES) {
+ mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+ if (mvif->mt76.idx >= (MT7915_MAX_INTERFACES << dev->dbdc_support)) {
ret = -ENOSPC;
goto out;
}
@@ -227,7 +219,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
@@ -246,7 +238,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
+ mtxq->wcid = idx;
}
if (vif->type != NL80211_IFTYPE_AP &&
@@ -290,7 +282,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
mutex_lock(&dev->mt76.mutex);
- dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mutex_unlock(&dev->mt76.mutex);
@@ -630,8 +622,10 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
mt7915_update_bss_color(hw, vif, &info->he_bss_color);
if (changed & (BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED))
- mt7915_mcu_add_beacon(hw, vif, info->enable_beacon);
+ BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ BSS_CHANGED_FILS_DISCOVERY))
+ mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
mutex_unlock(&dev->mt76.mutex);
}
@@ -644,7 +638,7 @@ mt7915_channel_switch_beacon(struct ieee80211_hw *hw,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7915_mcu_add_beacon(hw, vif, true);
+ mt7915_mcu_add_beacon(hw, vif, true, BSS_CHANGED_BEACON);
mutex_unlock(&dev->mt76.mutex);
}
@@ -1381,6 +1375,39 @@ out:
return ret;
}
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int
+mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ if (!mtk_wed_device_active(wed))
+ return -ENODEV;
+
+ if (msta->wcid.idx > 0xff)
+ return -EIO;
+
+ path->type = DEV_PATH_MTK_WDMA;
+ path->dev = ctx->dev;
+ path->mtk_wdma.wdma_idx = wed->wdma_idx;
+ path->mtk_wdma.bss = mvif->mt76.idx;
+ path->mtk_wdma.wcid = msta->wcid.idx;
+ path->mtk_wdma.queue = phy != &dev->phy;
+
+ ctx->dev = NULL;
+
+ return 0;
+}
+#endif
+
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
@@ -1428,4 +1455,7 @@ const struct ieee80211_ops mt7915_ops = {
.sta_add_debugfs = mt7915_sta_add_debugfs,
#endif
.set_radar_background = mt7915_set_radar_background,
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ .net_fill_forward_path = mt7915_net_fill_forward_path,
+#endif
};
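
The main.c hunks widen the interface bitmap from 32 to 64 bits (BIT_ULL()/__ffs64()), allowing MT7915_MAX_INTERFACES interfaces per band in DBDC mode. Below is a stand-alone model of that lowest-free-index allocation; the limit and helper are illustrative, not the driver's.

/* Stand-alone sketch of the 64-bit interface index allocation that the
 * main.c hunks above switch to (BIT_ULL()/__ffs64() instead of the old
 * 32-bit BIT()/ffs()).  Limits below are illustrative, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_INTERFACES 19	/* placeholder for MT7915_MAX_INTERFACES */

static int ffs64(uint64_t v)	/* index of lowest set bit, like __ffs64() */
{
	return __builtin_ctzll(v);
}

static int alloc_vif_idx(uint64_t *vif_mask, int dbdc_support)
{
	int idx = ffs64(~*vif_mask);

	if (idx >= (MAX_INTERFACES << dbdc_support))
		return -1;	/* no free slot (-ENOSPC in the driver) */

	*vif_mask |= (1ULL << idx);
	return idx;
}

static void free_vif_idx(uint64_t *vif_mask, int idx)
{
	*vif_mask &= ~(1ULL << idx);
}

int main(void)
{
	uint64_t mask = 0;
	int a = alloc_vif_idx(&mask, 1);
	int b = alloc_vif_idx(&mask, 1);

	printf("allocated %d and %d, mask=%#llx\n", a, b,
	       (unsigned long long)mask);
	free_vif_idx(&mask, a);
	printf("after free: mask=%#llx\n", (unsigned long long)mask);
	return 0;
}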
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index df31084e860f..b7e2b365356c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -1854,7 +1854,8 @@ mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
continue;
for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) {
- const u8 *data;
+ const struct ieee80211_bssid_index *idx;
+ const u8 *idx_ie;
if (sub_elem->id || sub_elem->datalen < 4)
continue; /* not a valid BSS profile */
@@ -1862,14 +1863,19 @@ mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
/* Find WLAN_EID_MULTI_BSSID_IDX
* in the merged nontransmitted profile
*/
- data = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
- sub_elem->data,
- sub_elem->datalen);
- if (!data || data[1] < 1 || !data[2])
+ idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ sub_elem->data,
+ sub_elem->datalen);
+ if (!idx_ie || idx_ie[1] < sizeof(*idx))
continue;
- mbss->offset[data[2]] = cpu_to_le16(data - skb->data);
- mbss->bitmap |= cpu_to_le32(BIT(data[2]));
+ idx = (void *)(idx_ie + 2);
+ if (!idx->bssid_index || idx->bssid_index > 31)
+ continue;
+
+ mbss->offset[idx->bssid_index] =
+ cpu_to_le16(idx_ie - skb->data);
+ mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index));
}
}
}
@@ -1886,6 +1892,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
u8 *buf;
int len = sizeof(*cont) + MT_TXD_SIZE + skb->len;
+ len = (len & 0x3) ? ((len | 0x3) + 1) : len;
tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT,
len, &bcn->sub_ntlv, &bcn->len);
@@ -1904,7 +1911,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
buf = (u8 *)tlv + sizeof(*cont);
mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
- true);
+ BSS_CHANGED_BEACON);
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
}
@@ -1986,8 +1993,71 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
}
}
-int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int en)
+static void
+mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct sk_buff *rskb, struct bss_info_bcn *bcn,
+ u32 changed)
+{
+#define OFFLOAD_TX_MODE_SU BIT(0)
+#define OFFLOAD_TX_MODE_MU BIT(1)
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct bss_info_inband_discovery *discov;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb = NULL;
+ struct tlv *tlv;
+ bool ext_phy = phy != &dev->phy;
+ u8 *buf, interval;
+ int len;
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY &&
+ vif->bss_conf.fils_discovery.max_interval) {
+ interval = vif->bss_conf.fils_discovery.max_interval;
+ skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
+ } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
+ vif->bss_conf.unsol_bcast_probe_resp_interval) {
+ interval = vif->bss_conf.unsol_bcast_probe_resp_interval;
+ skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ }
+
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->band = band;
+
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
+ len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
+ len = (len & 0x3) ? ((len | 0x3) + 1) : len;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
+ len, &bcn->sub_ntlv, &bcn->len);
+ discov = (struct bss_info_inband_discovery *)tlv;
+ discov->tx_mode = OFFLOAD_TX_MODE_SU;
+ /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
+ discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
+ discov->tx_interval = interval;
+ discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ discov->enable = true;
+
+ buf = (u8 *)tlv + sizeof(*discov);
+
+ mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
+ changed);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
+}
+
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int en, u32 changed)
{
#define MAX_BEACON_SIZE 512
struct mt7915_dev *dev = mt7915_hw_dev(hw);
@@ -2038,6 +2108,11 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
dev_kfree_skb(skb);
+ if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP ||
+ changed & BSS_CHANGED_FILS_DISCOVERY)
+ mt7915_mcu_beacon_inband_discov(dev, vif, rskb,
+ bcn, changed);
+
out:
return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
@@ -2465,10 +2540,7 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
/* force firmware operation mode into normal state,
* which should be set before firmware download stage.
*/
- if (is_mt7915(&dev->mt76))
- mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
- else
- mt76_wr(dev, MT_SWDEF_MODE_MT7916, MT_SWDEF_NORMAL_MODE);
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
ret = mt7915_driver_own(dev, 0);
if (ret)
@@ -2493,6 +2565,9 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
if (ret)
return ret;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
+
ret = mt7915_mcu_set_mwds(dev, 1);
if (ret)
return ret;
@@ -2583,22 +2658,6 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
&req_mac, sizeof(req_mac), true);
}
-int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable)
-{
- struct {
- __le32 cmd;
- u8 band;
- u8 enable;
- } __packed req = {
- .cmd = cpu_to_le32(SCS_ENABLE),
- .band = band,
- .enable = enable + 1,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SCS_CTRL), &req,
- sizeof(req), false);
-}
-
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
{
struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
@@ -3671,3 +3730,32 @@ int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE),
&req, sizeof(req), true);
}
+
+int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set)
+{
+ struct {
+ __le32 idx;
+ __le32 ofs;
+ __le32 data;
+ } __packed req = {
+ .idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 28))),
+ .ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(27, 0))),
+ .data = set ? cpu_to_le32(*val) : 0,
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ if (set)
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS),
+ &req, sizeof(req), false);
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ *val = le32_to_cpu(*(__le32 *)(skb->data + 8));
+ dev_kfree_skb(skb);
+
+ return 0;
+}
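
Two small pieces of the mcu.c changes are easy to model in isolation: mt7915_mcu_rf_regval() splits its regidx into a 4-bit die index and a 28-bit offset, and the beacon/in-band-discovery code rounds TLV lengths up to 4 bytes. The following stand-alone sketch mirrors both; it is illustrative, not driver code.

/* Sketch of two small helpers from the mcu.c hunks above: the RF register
 * index/offset split used by mt7915_mcu_rf_regval() (top 4 bits select the
 * RF die, the low 28 bits are the register offset), and the 4-byte TLV
 * length round-up used for the beacon/in-band discovery templates.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rf_idx(uint32_t regidx)	/* GENMASK(31, 28) */
{
	return regidx >> 28;
}

static uint32_t rf_ofs(uint32_t regidx)	/* GENMASK(27, 0) */
{
	return regidx & 0x0fffffff;
}

static int round_up4(int len)
{
	/* same result as the driver's (len & 0x3) ? ((len | 0x3) + 1) : len */
	return (len + 3) & ~3;
}

int main(void)
{
	uint32_t regidx = 0x30001234;

	printf("regidx %#x -> idx %u, ofs %#x\n",
	       regidx, rf_idx(regidx), rf_ofs(regidx));
	printf("round_up4(%d) = %d, round_up4(%d) = %d\n",
	       10, round_up4(10), 12, round_up4(12));
	return 0;
}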
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 960072a44222..5abde482a97f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -304,16 +304,6 @@ enum mcu_mmps_mode {
MCU_MMPS_DISABLE,
};
-enum {
- SCS_SEND_DATA,
- SCS_SET_MANUAL_PD_TH,
- SCS_CONFIG,
- SCS_ENABLE,
- SCS_SHOW_INFO,
- SCS_GET_GLO_ADDR,
- SCS_GET_GLO_ADDR_EVENT,
-};
-
struct bss_info_bmc_rate {
__le16 tag;
__le16 len;
@@ -414,11 +404,23 @@ struct bss_info_bcn_cont {
__le16 pkt_len;
} __packed __aligned(4);
+struct bss_info_inband_discovery {
+ __le16 tag;
+ __le16 len;
+ u8 tx_type;
+ u8 tx_mode;
+ u8 tx_interval;
+ u8 enable;
+ __le16 rsv;
+ __le16 prob_rsp_len;
+} __packed __aligned(4);
+
enum {
BSS_INFO_BCN_CSA,
BSS_INFO_BCN_BCC,
BSS_INFO_BCN_MBSSID,
BSS_INFO_BCN_CONTENT,
+ BSS_INFO_BCN_DISCOV,
BSS_INFO_BCN_MAX
};
@@ -473,6 +475,20 @@ enum {
MURU_GET_TXC_TX_STATS = 151,
};
+enum {
+ SER_QUERY,
+ /* recovery */
+ SER_SET_RECOVER_L1,
+ SER_SET_RECOVER_L2,
+ SER_SET_RECOVER_L3_RX_ABORT,
+ SER_SET_RECOVER_L3_TX_ABORT,
+ SER_SET_RECOVER_L3_TX_DISABLE,
+ SER_SET_RECOVER_L3_BF,
+ /* action */
+ SER_ENABLE = 2,
+ SER_RECOVER
+};
+
#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_omac) + \
sizeof(struct bss_info_basic) +\
@@ -486,6 +502,7 @@ enum {
#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct bss_info_bcn_cntdwn) + \
sizeof(struct bss_info_bcn_mbss) + \
- sizeof(struct bss_info_bcn_cont))
+ sizeof(struct bss_info_bcn_cont) + \
+ sizeof(struct bss_info_inband_discovery))
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 5062e0d8cae4..46ee8a7db7bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -22,6 +22,8 @@ static const u32 mt7915_reg[] = {
[WFDMA_EXT_CSR_ADDR] = 0xd7000,
[CBTOP1_PHY_END] = 0x77ffffff,
[INFRA_MCU_ADDR_END] = 0x7c3fffff,
+ [FW_EXCEPTION_ADDR] = 0x219848,
+ [SWDEF_BASE_ADDR] = 0x41f200,
};
static const u32 mt7916_reg[] = {
@@ -36,6 +38,8 @@ static const u32 mt7916_reg[] = {
[WFDMA_EXT_CSR_ADDR] = 0xd7000,
[CBTOP1_PHY_END] = 0x7fffffff,
[INFRA_MCU_ADDR_END] = 0x7c085fff,
+ [FW_EXCEPTION_ADDR] = 0x022050bc,
+ [SWDEF_BASE_ADDR] = 0x411400,
};
static const u32 mt7986_reg[] = {
@@ -50,6 +54,8 @@ static const u32 mt7986_reg[] = {
[WFDMA_EXT_CSR_ADDR] = 0x27000,
[CBTOP1_PHY_END] = 0x7fffffff,
[INFRA_MCU_ADDR_END] = 0x7c085fff,
+ [FW_EXCEPTION_ADDR] = 0x02204ffc,
+ [SWDEF_BASE_ADDR] = 0x411400,
};
static const u32 mt7915_offs[] = {
@@ -547,15 +553,21 @@ static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
static void mt7915_irq_tasklet(struct tasklet_struct *t)
{
struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
u32 intr, intr1, mask;
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ if (mtk_wed_device_active(wed)) {
+ mtk_wed_device_irq_set_mask(wed, 0);
+ intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- intr &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+ }
if (dev->hif2) {
intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
@@ -601,7 +613,7 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
mt76_wr(dev, MT_MCU_CMD, val);
if (val & MT_MCU_CMD_ERROR_MASK) {
dev->reset_state = val;
- ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}
}
@@ -610,10 +622,15 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t)
irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
{
struct mt7915_dev *dev = dev_instance;
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ if (mtk_wed_device_active(wed)) {
+ mtk_wed_device_irq_set_mask(wed, 0);
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ }
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
@@ -665,8 +682,6 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
-
return dev;
error:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 6efa0a2e2345..4dcae6991669 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -66,6 +66,7 @@
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
+#define MT7915_MIN_TWT_DUR 64
#define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
@@ -247,6 +248,8 @@ struct mt7915_phy {
u8 rdd_state;
+ u32 trb_ts;
+
u32 rx_ampdu_ts;
u32 ampdu_ref;
@@ -309,9 +312,6 @@ struct mt7915_dev {
bool flash_mode;
bool muru_debug;
bool ibf;
- u8 fw_debug_wm;
- u8 fw_debug_wa;
- u8 fw_debug_bin;
struct dentry *debugfs_dir;
struct rchan *relay_fwlog;
@@ -319,7 +319,13 @@ struct mt7915_dev {
void *cal;
struct {
- u8 table_mask;
+ u8 debug_wm;
+ u8 debug_wa;
+ u8 debug_bin;
+ } fw;
+
+ struct {
+ u16 table_mask;
u8 n_agrt;
} twt;
@@ -429,8 +435,11 @@ static inline void mt7986_wmac_disable(struct mt7915_dev *dev)
#endif
struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id);
+void mt7915_wfsys_reset(struct mt7915_dev *dev);
irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
+u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
+
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
int mt7915_eeprom_init(struct mt7915_dev *dev);
@@ -440,7 +449,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
-int mt7915_dma_init(struct mt7915_dev *dev);
+int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
int mt7915_mcu_init(struct mt7915_dev *dev);
@@ -463,7 +472,7 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int enable);
+ int enable, u32 changed);
int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -485,7 +494,6 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
bool hdr_trans);
int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
u8 en);
-int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
@@ -506,6 +514,7 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
struct cfg80211_chan_def *chandef);
+int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set);
int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
@@ -550,7 +559,7 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
- struct ieee80211_key_conf *key, bool beacon);
+ struct ieee80211_key_conf *key, u32 changed);
void mt7915_mac_set_timing(struct mt7915_phy *phy);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -572,7 +581,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 6f819c41a4c4..d74f609775d3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -12,6 +12,9 @@
#include "mac.h"
#include "../trace.h"
+static bool wed_enable = false;
+module_param(wed_enable, bool, 0644);
+
static LIST_HEAD(hif_list);
static DEFINE_SPINLOCK(hif_lock);
static u32 hif_idx;
@@ -92,12 +95,79 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
return 0;
}
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+ int ret;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+
+ spin_lock_bh(&dev->mt76.token_lock);
+ dev->mt76.token_size = wed->wlan.token_start;
+ spin_unlock_bh(&dev->mt76.token_lock);
+
+ ret = wait_event_timeout(dev->mt76.tx_wait,
+ !dev->mt76.wed_token_count, HZ);
+ if (!ret)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+
+ spin_lock_bh(&dev->mt76.token_lock);
+ dev->mt76.token_size = MT7915_TOKEN_SIZE;
+ spin_unlock_bh(&dev->mt76.token_lock);
+}
+#endif
+
+static int
+mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ int ret;
+
+ if (!wed_enable)
+ return 0;
+
+ wed->wlan.pci_dev = pdev;
+ wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
+ MT_WFDMA_EXT_CSR_BASE;
+ wed->wlan.nbuf = 4096;
+ wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
+ wed->wlan.init_buf = mt7915_wed_init_buf;
+ wed->wlan.offload_enable = mt7915_wed_offload_enable;
+ wed->wlan.offload_disable = mt7915_wed_offload_disable;
+
+ if (mtk_wed_device_attach(wed) != 0)
+ return 0;
+
+ *irq = wed->irq;
+ dev->mt76.dma_dev = wed->dev;
+
+ ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return 1;
+#else
+ return 0;
+#endif
+}
+
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct mt7915_hif *hif2 = NULL;
struct mt7915_dev *dev;
struct mt76_dev *mdev;
- struct mt7915_hif *hif2;
int irq;
int ret;
@@ -126,19 +196,27 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
return PTR_ERR(dev);
mdev = &dev->mt76;
+ mt7915_wfsys_reset(dev);
hif2 = mt7915_pci_init_hif2(pdev);
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ ret = mt7915_pci_wed_init(dev, pdev, &irq);
if (ret < 0)
- goto free_device;
+ goto free_wed_or_irq_vector;
+
+ if (!ret) {
+ hif2 = mt7915_pci_init_hif2(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_device;
+
+ irq = pdev->irq;
+ }
- irq = pdev->irq;
ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
- goto free_irq_vector;
-
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ goto free_wed_or_irq_vector;
 /* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
@@ -173,8 +251,11 @@ free_hif2:
if (dev->hif2)
put_device(dev->hif2->dev);
devm_free_irq(mdev->dev, irq, dev);
-free_irq_vector:
- pci_free_irq_vectors(pdev);
+free_wed_or_irq_vector:
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mtk_wed_device_detach(&mdev->mmio.wed);
+ else
+ pci_free_irq_vectors(pdev);
free_device:
mt76_free_device(&dev->mt76);
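
The new WED attach path in pci.c reserves the top wed->wlan.nbuf token IDs for WED-owned buffers and, when offload is enabled, shrinks the host token pool down to token_start. Below is a stand-alone model of that partitioning with illustrative sizes.

/* Stand-alone sketch of the token pool split done by the pci.c hunks above:
 * the top 'nbuf' token IDs are reserved for WED-owned buffers, and enabling
 * offload shrinks the host-allocated pool to everything below token_start.
 * Numbers are illustrative; the driver uses MT7915_TOKEN_SIZE and
 * wed->wlan.nbuf.
 */
#include <stdio.h>

#define TOKEN_SIZE 8192		/* placeholder for MT7915_TOKEN_SIZE */
#define WED_NBUF   4096		/* placeholder for wed->wlan.nbuf */

struct token_pool {
	int size;		/* host allocates token IDs in [0, size) */
	int wed_start;		/* WED owns token IDs in [wed_start, TOKEN_SIZE) */
};

static void wed_attach(struct token_pool *pool)
{
	pool->wed_start = TOKEN_SIZE - WED_NBUF;
	pool->size = TOKEN_SIZE;	/* offload not enabled yet */
}

static void wed_offload_enable(struct token_pool *pool)
{
	/* host may no longer hand out IDs in the WED range; the driver
	 * additionally waits for in-flight WED tokens here.
	 */
	pool->size = pool->wed_start;
}

static void wed_offload_disable(struct token_pool *pool)
{
	pool->size = TOKEN_SIZE;
}

int main(void)
{
	struct token_pool pool;

	wed_attach(&pool);
	wed_offload_enable(&pool);
	printf("host tokens: [0, %d), WED tokens: [%d, %d)\n",
	       pool.size, pool.wed_start, TOKEN_SIZE);
	wed_offload_disable(&pool);
	printf("offload off, host tokens: [0, %d)\n", pool.size);
	return 0;
}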
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index e5f93c40591c..4953be208c5e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -30,6 +30,8 @@ enum reg_rev {
WFDMA_EXT_CSR_ADDR,
CBTOP1_PHY_END,
INFRA_MCU_ADDR_END,
+ FW_EXCEPTION_ADDR,
+ SWDEF_BASE_ADDR,
__MT_REG_MAX,
};
@@ -158,6 +160,9 @@ enum offs_rev {
#define MT_MDP_DCR1 MT_MDP(0x004)
#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
+#define MT_MDP_DCR2 MT_MDP(0x0e8)
+#define MT_MDP_DCR2_RX_TRANS_SHORT BIT(2)
+
#define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \
((_band) << 8))
#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
@@ -172,6 +177,14 @@ enum offs_rev {
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
+/* TRB: band 0(0x820e1000), band 1(0x820f1000) */
+#define MT_WF_TRB_BASE(_band) ((_band) ? 0x820f1000 : 0x820e1000)
+#define MT_WF_TRB(_band, ofs) (MT_WF_TRB_BASE(_band) + (ofs))
+
+#define MT_TRB_RXPSR0(_band) MT_WF_TRB(_band, 0x03c)
+#define MT_TRB_RXPSR0_RX_WTBL_PTR GENMASK(25, 16)
+#define MT_TRB_RXPSR0_RX_RMAC_PTR GENMASK(9, 0)
+
/* TMAC: band 0(0x820e4000), band 1(0x820f4000) */
#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
@@ -565,18 +578,31 @@ enum offs_rev {
/* WFDMA CSR */
#define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR)
+#define MT_WFDMA_EXT_CSR_PHYS_BASE 0x18027000
#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
+#define MT_WFDMA_EXT_CSR_PHYS(ofs) (MT_WFDMA_EXT_CSR_PHYS_BASE + (ofs))
-#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
+#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR_PHYS(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+#define MT_WFDMA_HOST_CONFIG_WED BIT(1)
-#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
+#define MT_WFDMA_WED_RING_CONTROL MT_WFDMA_EXT_CSR_PHYS(0x34)
+#define MT_WFDMA_WED_RING_CONTROL_TX0 GENMASK(4, 0)
+#define MT_WFDMA_WED_RING_CONTROL_TX1 GENMASK(12, 8)
+#define MT_WFDMA_WED_RING_CONTROL_RX1 GENMASK(20, 16)
+
+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR_PHYS(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
+#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
+
+#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
+#define MT_WED_RX_RING_BASE MT_WFDMA_EXT_CSR(0x400)
+
/* WFDMA0 PCIE1 */
#define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR)
#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
@@ -794,6 +820,7 @@ enum offs_rev {
/* ADIE */
#define MT_ADIE_CHIP_ID 0x02c
+#define MT_ADIE_VERSION_MASK GENMASK(15, 0)
#define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16)
#define MT_ADIE_IDX0 GENMASK(15, 0)
#define MT_ADIE_IDX1 GENMASK(31, 16)
@@ -913,12 +940,27 @@ enum offs_rev {
#define MT_ADIE_TYPE_MASK BIT(1)
/* FW MODE SYNC */
-#define MT_SWDEF_MODE 0x41f23c
-#define MT_SWDEF_MODE_MT7916 0x41143c
+#define MT_FW_EXCEPTION __REG(FW_EXCEPTION_ADDR)
+
+#define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR)
+
+#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
+#define MT_SWDEF_MODE MT_SWDEF(0x3c)
#define MT_SWDEF_NORMAL_MODE 0
#define MT_SWDEF_ICAP_MODE 1
#define MT_SWDEF_SPECTRUM_MODE 2
+#define MT_SWDEF_SER_STATS MT_SWDEF(0x040)
+#define MT_SWDEF_PLE_STATS MT_SWDEF(0x044)
+#define MT_SWDEF_PLE1_STATS MT_SWDEF(0x048)
+#define MT_SWDEF_PLE_AMSDU_STATS MT_SWDEF(0x04C)
+#define MT_SWDEF_PSE_STATS MT_SWDEF(0x050)
+#define MT_SWDEF_PSE1_STATS MT_SWDEF(0x054)
+#define MT_SWDEF_LAMC_WISR6_BN0_STATS MT_SWDEF(0x058)
+#define MT_SWDEF_LAMC_WISR6_BN1_STATS MT_SWDEF(0x05C)
+#define MT_SWDEF_LAMC_WISR7_BN0_STATS MT_SWDEF(0x060)
+#define MT_SWDEF_LAMC_WISR7_BN1_STATS MT_SWDEF(0x064)
+
#define MT_DIC_CMD_REG_BASE 0x41f000
#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
#define MT_DIC_CMD_REG_CMD MT_DIC_CMD_REG(0x10)
@@ -965,10 +1007,6 @@ enum offs_rev {
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
-#define MT_HW_BOUND 0x70010020
-#define MT_HW_REV 0x70010204
-#define MT_WF_SUBSYS_RST 0x70002600
-
#define MT_TOP_WFSYS_WAKEUP MT_TOP(0x1a4)
#define MT_TOP_WFSYS_WAKEUP_MASK BIT(0)
@@ -1030,6 +1068,10 @@ enum offs_rev {
#define MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK BIT(3)
#define MT_MCU_BUS_DBG_TIMEOUT_EN_MASK BIT(2)
+#define MT_HW_BOUND 0x70010020
+#define MT_HW_REV 0x70010204
+#define MT_WF_SUBSYS_RST 0x70002600
+
/* PCIE MAC */
#define MT_PCIE_MAC_BASE 0x74030000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
@@ -1038,6 +1080,9 @@ enum offs_rev {
#define MT_PCIE1_MAC_INT_ENABLE 0x74020188
#define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188
+#define MT_WM_MCU_PC 0x7c060204
+#define MT_WA_MCU_PC 0x7c06020c
+
/* PP TOP */
#define MT_WF_PP_TOP_BASE 0x820cc000
#define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs))
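
regs.h increasingly resolves addresses through per-chip tables: the new FW_EXCEPTION_ADDR and SWDEF_BASE_ADDR entries let MT_SWDEF_MODE and friends map to different physical addresses on mt7915, mt7916 and mt7986. The stand-alone model below reproduces that lookup using the values from the tables in the mmio.c hunks; the helper is illustrative and only approximates what the driver's __REG() macro does.

/* Stand-alone model of the per-chip register remapping that regs.h relies
 * on: fixed logical IDs index into a table chosen per chip, so the same
 * define resolves to different physical addresses on mt7915 vs. mt7916.
 */
#include <stdint.h>
#include <stdio.h>

enum reg_id {
	FW_EXCEPTION_ADDR,
	SWDEF_BASE_ADDR,
	NUM_REG_IDS,
};

static const uint32_t mt7915_regs[NUM_REG_IDS] = {
	[FW_EXCEPTION_ADDR] = 0x219848,
	[SWDEF_BASE_ADDR]   = 0x41f200,
};

static const uint32_t mt7916_regs[NUM_REG_IDS] = {
	[FW_EXCEPTION_ADDR] = 0x022050bc,
	[SWDEF_BASE_ADDR]   = 0x411400,
};

struct chip {
	const char *name;
	const uint32_t *regs;
};

static uint32_t reg_addr(const struct chip *chip, enum reg_id id)
{
	return chip->regs[id];
}

int main(void)
{
	const struct chip chips[] = {
		{ "mt7915", mt7915_regs },
		{ "mt7916", mt7916_regs },
	};

	/* MT_SWDEF_MODE is SWDEF base + 0x3c on every chip */
	for (unsigned int i = 0; i < 2; i++)
		printf("%s: MT_SWDEF_MODE = %#x\n", chips[i].name,
		       reg_addr(&chips[i], SWDEF_BASE_ADDR) + 0x3c);
	return 0;
}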
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index 3028c02cb840..c74afa746251 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/reset.h>
#include <linux/of_net.h>
+#include <linux/clk.h>
#include "mt7915.h"
@@ -210,6 +211,8 @@ static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev)
if (IS_ERR_OR_NULL(state))
return -EINVAL;
break;
+ default:
+ return -EINVAL;
}
ret = pinctrl_select_state(pinctrl, state);
@@ -468,17 +471,32 @@ static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie)
static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
{
+ u32 id, version, rg_xo_01, rg_xo_03;
int ret;
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_CHIP_ID, &id);
+ if (ret)
+ return ret;
+
+ version = FIELD_GET(MT_ADIE_VERSION_MASK, id);
+
ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_TOP_THADC, 0x4a563b00);
if (ret)
return ret;
- ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, 0x1d59080f);
+ if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) {
+ rg_xo_01 = 0x1d59080f;
+ rg_xo_03 = 0x34c00fe0;
+ } else {
+ rg_xo_01 = 0x1959f80f;
+ rg_xo_03 = 0x34d00fe0;
+ }
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, rg_xo_01);
if (ret)
return ret;
- return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, 0x34c00fe0);
+ return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, rg_xo_03);
}
static int
@@ -1115,6 +1133,19 @@ static int mt7986_wmac_init(struct mt7915_dev *dev)
{
struct device *pdev = dev->mt76.dev;
struct platform_device *pfdev = to_platform_device(pdev);
+ struct clk *mcu_clk, *ap_conn_clk;
+
+ mcu_clk = devm_clk_get(pdev, "mcu");
+ if (IS_ERR(mcu_clk))
+ dev_err(pdev, "mcu clock not found\n");
+ else if (clk_prepare_enable(mcu_clk))
+ dev_err(pdev, "mcu clock configuration failed\n");
+
+ ap_conn_clk = devm_clk_get(pdev, "ap2conn");
+ if (IS_ERR(ap_conn_clk))
+ dev_err(pdev, "ap2conn clock not found\n");
+ else if (clk_prepare_enable(ap_conn_clk))
+ dev_err(pdev, "ap2conn clock configuration failed\n");
dev->dcm = devm_platform_ioremap_resource(pfdev, 1);
if (IS_ERR(dev->dcm))
@@ -1128,7 +1159,7 @@ static int mt7986_wmac_init(struct mt7915_dev *dev)
if (IS_ERR(dev->rstc))
return PTR_ERR(dev->rstc);
- return mt7986_wmac_enable(dev);
+ return 0;
}
static int mt7986_wmac_probe(struct platform_device *pdev)
@@ -1161,12 +1192,12 @@ static int mt7986_wmac_probe(struct platform_device *pdev)
if (ret)
goto free_device;
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
-
ret = mt7986_wmac_init(dev);
if (ret)
goto free_irq;
+ mt7915_wfsys_reset(dev);
+
ret = mt7915_register_device(dev);
if (ret)
goto free_irq;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index ca7e20fb5fc0..3a6b158b779e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -9,7 +9,7 @@ static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
{
int i, err;
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE, 0);
if (err < 0)
return err;
@@ -296,8 +296,8 @@ int mt7921_dma_init(struct mt7921_dev *dev)
if (ret < 0)
return ret;
- netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
- mt7921_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7921_poll_tx);
napi_enable(&dev->mt76.tx_napi);
return mt7921_dma_enable(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 91fc41922d95..4a8675634f80 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -11,6 +11,10 @@ static const struct ieee80211_iface_limit if_limits[] = {
{
.max = MT7921_MAX_INTERFACES,
.types = BIT(NL80211_IFTYPE_STATION)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP)
}
};
@@ -64,7 +68,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->iface_combinations = if_comb;
wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP |
WIPHY_FLAG_4ADDR_STATION);
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP);
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
wiphy->max_scan_ssids = 4;
@@ -80,6 +85,10 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
@@ -255,6 +264,10 @@ int mt7921_register_device(struct mt7921_dev *dev)
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work);
INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work);
+#if IS_ENABLED(CONFIG_IPV6)
+ INIT_WORK(&dev->ipv6_ns_work, mt7921_set_ipv6_ns_work);
+ skb_queue_head_init(&dev->ipv6_ns_list);
+#endif
skb_queue_head_init(&dev->phy.scan_event_list);
skb_queue_head_init(&dev->coredump.msg_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index b67615487910..a630ddbf19e5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -696,7 +696,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->nss =
FIELD_GET(MT_PRXV_NSTS, v0) + 1;
status->encoding = RX_ENC_VHT;
- if (i > 9)
+ if (i > 11)
return -EINVAL;
break;
case MT_PHY_TYPE_HE_MU:
@@ -814,6 +814,7 @@ mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
{
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
u8 fc_type, fc_stype;
+ u16 ethertype;
bool wmm = false;
u32 val;
@@ -827,7 +828,8 @@ mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
FIELD_PREP(MT_TXD1_TID, tid);
- if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
+ ethertype = get_unaligned_be16(&skb->data[12]);
+ if (ethertype >= ETH_P_802_3_MIN)
val |= MT_TXD1_ETH_802_3;
txwi[1] |= cpu_to_le32(val);
@@ -1361,12 +1363,21 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mvif->phy->dev;
+ struct ieee80211_hw *hw = mt76_hw(dev);
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_disconnect(vif, true);
mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
mt7921_mcu_set_tx(dev, vif);
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
+ true);
+ mt7921_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+ mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
+ }
}
/* system error recovery */
@@ -1715,3 +1726,29 @@ bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
return false;
}
EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data);
+
+#if IS_ENABLED(CONFIG_IPV6)
+void mt7921_set_ipv6_ns_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+ ipv6_ns_work);
+ struct sk_buff *skb;
+ int ret = 0;
+
+ do {
+ skb = skb_dequeue(&dev->ipv6_ns_list);
+
+ if (!skb)
+ break;
+
+ mt7921_mutex_acquire(dev);
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD(OFFLOAD), true);
+ mt7921_mutex_release(dev);
+
+ } while (!ret);
+
+ if (ret)
+ skb_queue_purge(&dev->ipv6_ns_list);
+}
+#endif
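
(Editor's aside; illustrative only and not part of the patch above.) The IPv6 neighbour-solicitation offload added here follows a common deferred-work pattern: pre-built command skbs are queued from atomic context and drained later from a work item, where the device mutex and sleeping MCU I/O are allowed. A minimal, self-contained sketch of that pattern follows; all example_* names are hypothetical stand-ins, not mt76 symbols.

/*
 * Sketch of the "queue now, send later" offload pattern, assuming a
 * blocking firmware call that must not run in atomic context.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct ns_work;
	struct sk_buff_head ns_list;
};

/* stand-in for a blocking firmware call; here it just consumes the skb */
static int example_send_to_fw(struct example_dev *dev, struct sk_buff *skb)
{
	consume_skb(skb);
	return 0;
}

static void example_ns_work(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, ns_work);
	struct sk_buff *skb;
	int ret = 0;

	/* drain the queue; stop on the first firmware error */
	while (!ret && (skb = skb_dequeue(&dev->ns_list)) != NULL)
		ret = example_send_to_fw(dev, skb);

	if (ret)
		skb_queue_purge(&dev->ns_list);	/* drop whatever is left */
}

static void example_dev_init(struct example_dev *dev)
{
	INIT_WORK(&dev->ns_work, example_ns_work);
	skb_queue_head_init(&dev->ns_list);
}

static void example_queue_ns(struct example_dev *dev, struct sk_buff *cmd)
{
	/* safe from atomic context, e.g. an address-change notifier */
	skb_queue_tail(&dev->ns_list, cmd);
	schedule_work(&dev->ns_work);
}

The purge-on-error behaviour matches mt7921_set_ipv6_ns_work() above; only the firmware call and the owning structure differ.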
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index fdaf2451bc1d..80279f342109 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -5,6 +5,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/module.h>
+#include <net/ipv6.h>
#include "mt7921.h"
#include "mcu.h"
@@ -12,7 +13,7 @@ static void
mt7921_gen_ppe_thresh(u8 *he_ppet, int nss)
{
u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */
- u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
+ static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71};
he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) |
FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK,
@@ -53,6 +54,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
switch (i) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
break;
default:
continue;
@@ -86,6 +88,23 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[2] |=
+ IEEE80211_HE_MAC_CAP2_BSR;
+ he_cap_elem->mac_cap_info[4] |=
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[5] |=
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[1] |=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
@@ -294,7 +313,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mt7921_mutex_acquire(dev);
- mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
+ mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
if (mvif->mt76.idx >= MT7921_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
@@ -310,7 +329,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
idx = MT7921_WTBL_RESERVED - mvif->mt76.idx;
@@ -330,7 +349,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
+ mtxq->wcid = idx;
}
out:
@@ -354,7 +373,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mt7921_mutex_release(dev);
@@ -489,8 +508,8 @@ mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
mt7921_mcu_set_sniffer(dev, vif, monitor);
- pm->enable = !monitor;
- pm->ds_enable = !monitor;
+ pm->enable = pm->enable_user && !monitor;
+ pm->ds_enable = pm->ds_enable_user && !monitor;
mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
@@ -566,7 +585,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- struct mt7921_phy *phy = mt7921_hw_phy(hw);
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@@ -576,23 +594,23 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
- phy->rxfilter &= ~(_hw); \
- phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ dev->mt76.rxfilter &= ~(_hw); \
+ dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
mt7921_mutex_acquire(dev);
- phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
+ dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
@@ -606,7 +624,7 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_NDPA);
*total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter);
if (*total_flags & FIF_CONTROL)
mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags);
@@ -635,6 +653,20 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+
+ mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
+ true);
+ mt7921_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7921_mcu_uni_add_beacon_offload(dev, hw, vif,
+ info->enable_beacon);
+
/* ensure that enable txcmd_mode after bss_info */
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
mt7921_mcu_set_tx(dev, vif);
@@ -1301,7 +1333,7 @@ static int mt7921_suspend(struct ieee80211_hw *hw,
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt76_connac_mcu_set_suspend_iter,
+ mt7921_mcu_set_suspend_iter,
&dev->mphy);
mt7921_mutex_release(dev);
@@ -1376,6 +1408,67 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
MCU_UNI_CMD(STA_REC_UPDATE));
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_dev *dev = mvif->phy->dev;
+ struct inet6_ifaddr *ifa;
+ struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ struct sk_buff *skb;
+ u8 i, idx = 0;
+
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_arpns_tlv arpns;
+ } req_hdr = {
+ .hdr = {
+ .bss_idx = mvif->mt76.idx,
+ },
+ .arpns = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
+ .mode = 2, /* update */
+ .option = 1, /* update only */
+ },
+ };
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ if (ifa->flags & IFA_F_TENTATIVE)
+ continue;
+ ns_addrs[idx] = ifa->addr;
+ if (++idx >= IEEE80211_BSS_ARP_ADDR_LIST_LEN)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ if (!idx)
+ return;
+
+ skb = __mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) +
+ idx * sizeof(struct in6_addr), GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ req_hdr.arpns.ips_num = idx;
+ req_hdr.arpns.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)
+ + idx * sizeof(struct in6_addr));
+ skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+
+ for (i = 0; i < idx; i++)
+ skb_put_data(skb, &ns_addrs[i].in6_u, sizeof(struct in6_addr));
+
+ skb_queue_tail(&dev->ipv6_ns_list, skb);
+
+ ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work);
+}
+#endif
+
static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
{
@@ -1395,6 +1488,18 @@ out:
return err;
}
+static void
+mt7921_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt7921_mutex_acquire(dev);
+ mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true);
+ mt7921_mutex_release(dev);
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@@ -1409,10 +1514,14 @@ const struct ieee80211_ops mt7921_ops = {
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7921_set_key,
.sta_set_decap_offload = mt7921_sta_set_decap_offload,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = mt7921_ipv6_addr_change,
+#endif /* CONFIG_IPV6 */
.ampdu_action = mt7921_ampdu_action,
.set_rts_threshold = mt7921_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
.release_buffered_frames = mt76_release_buffered_frames,
+ .channel_switch_beacon = mt7921_channel_switch_beacon,
.get_txpower = mt76_get_txpower,
.get_stats = mt7921_get_stats,
.get_et_sset_count = mt7921_get_et_sset_count,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index da2be050ed7c..12bab18c4171 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -224,6 +224,49 @@ exit:
}
EXPORT_SYMBOL_GPL(mt7921_mcu_fill_message);
+#ifdef CONFIG_PM
+
+static int
+mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
+ struct ieee80211_vif *vif, bool suspend)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_arpns_tlv arpns;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->mt76.idx,
+ },
+ .arpns = {
+ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
+ .mode = suspend,
+ },
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req),
+ true);
+}
+
+void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct mt76_phy *phy = priv;
+
+ mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif,
+ !test_bit(MT76_STATE_RUNNING,
+ &phy->state));
+ }
+
+ mt76_connac_mcu_set_suspend_iter(priv, mac, vif);
+}
+
+#endif /* CONFIG_PM */
+
static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
@@ -248,7 +291,8 @@ mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
if (mvif->idx != event->bss_idx)
return;
- if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) ||
+ vif->type != NL80211_IFTYPE_STATION)
return;
ieee80211_connection_loss(vif);
@@ -1166,3 +1210,79 @@ int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
true);
}
+
+int
+mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct ieee80211_mutable_offsets offs;
+ struct {
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bcn_content_tlv {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ie_pos;
+ __le16 csa_ie_pos;
+ __le16 bcc_ie_pos;
+ /* 0: disable beacon offload
+ * 1: enable beacon offload
+ * 2: update probe response offload
+ */
+ u8 enable;
+ /* 0: legacy format (TXD + payload)
+ * 1: only cap field IE
+ */
+ u8 type;
+ __le16 pkt_len;
+ u8 pkt[512];
+ } __packed beacon_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->mt76.idx,
+ },
+ .beacon_tlv = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
+ .len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
+ .enable = enable,
+ },
+ };
+ struct sk_buff *skb;
+
+ if (!enable)
+ goto out;
+
+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "beacon size limit exceed\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ mt7921_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
+ wcid, NULL, 0, true);
+ memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+
+ if (offs.cntdwn_counter_offs[0]) {
+ u16 csa_offs;
+
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
+ req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
+ }
+ dev_kfree_skb(skb);
+
+out:
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
+ &req, sizeof(req), true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 7690364bc079..5ca584bb2fc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -155,7 +155,6 @@ struct mt7921_phy {
struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
- u32 rxfilter;
u64 omac_mask;
u16 noise;
@@ -212,6 +211,10 @@ struct mt7921_dev {
struct mt76_connac_pm pm;
struct mt76_connac_coredump coredump;
const struct mt7921_hif_ops *hif_ops;
+
+ struct work_struct ipv6_ns_work;
+ /* IPv6 addresses for WoWLAN */
+ struct sk_buff_head ipv6_ns_list;
};
enum {
@@ -450,6 +453,10 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
void mt7921_set_runtime_pm(struct mt7921_dev *dev);
+void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif);
+void mt7921_set_ipv6_ns_work(struct work_struct *work);
+
int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
@@ -467,7 +474,11 @@ bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
int mt7921u_mcu_power_on(struct mt7921_dev *dev);
int mt7921u_wfsys_reset(struct mt7921_dev *dev);
-int mt7921u_dma_init(struct mt7921_dev *dev);
+int mt7921u_dma_init(struct mt7921_dev *dev, bool resume);
int mt7921u_init_reset(struct mt7921_dev *dev);
int mt7921u_mac_reset(struct mt7921_dev *dev);
+int mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ bool enable);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 1a01d025bbe5..b5fb22b8e086 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -119,7 +119,6 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
mt7921_mcu_exit(dev);
tasklet_disable(&dev->irq_tasklet);
- mt76_free_device(&dev->mt76);
}
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
@@ -302,8 +301,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
- if (!bus_ops)
- return -ENOMEM;
+ if (!bus_ops) {
+ ret = -ENOMEM;
+ goto err_free_dev;
+ }
bus_ops->rr = mt7921_rr;
bus_ops->wr = mt7921_wr;
@@ -312,7 +313,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
ret = __mt7921e_mcu_drv_pmctrl(dev);
if (ret)
- return ret;
+ goto err_free_dev;
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
@@ -354,6 +355,7 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
mt7921e_unregister_device(dev);
devm_free_irq(&pdev->dev, pdev->irq, dev);
+ mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index 6712ff60c722..ea643260ceb6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -516,4 +516,9 @@
#define MT_TOP_MISC2_FW_PWR_ON BIT(0)
#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0)
+#define MT_WF_SW_DEF_CR(ofs) (0x401a00 + (ofs))
+#define MT_WF_SW_DEF_CR_USB_MCU_EVENT MT_WF_SW_DEF_CR(0x028)
+#define MT_WF_SW_SER_TRIGGER_SUSPEND BIT(6)
+#define MT_WF_SW_SER_DONE_SUSPEND BIT(7)
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index b7771e9f1fcd..dc38baef273a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -246,7 +246,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
if (ret)
goto error;
- ret = mt7921u_dma_init(dev);
+ ret = mt7921u_dma_init(dev, false);
if (ret)
return ret;
@@ -288,6 +288,61 @@ static void mt7921u_disconnect(struct usb_interface *usb_intf)
mt76_free_device(&dev->mt76);
}
+#ifdef CONFIG_PM
+static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state)
+{
+ struct mt7921_dev *dev = usb_get_intfdata(intf);
+ int err;
+
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+ if (err)
+ return err;
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ set_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
+
+ return 0;
+}
+
+static int mt7921u_resume(struct usb_interface *intf)
+{
+ struct mt7921_dev *dev = usb_get_intfdata(intf);
+ bool reinit = true;
+ int err, i;
+
+ for (i = 0; i < 10; i++) {
+ u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT);
+
+ if (!(val & MT_WF_SW_SER_TRIGGER_SUSPEND)) {
+ reinit = false;
+ break;
+ }
+ if (val & MT_WF_SW_SER_DONE_SUSPEND) {
+ mt76_wr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT, 0);
+ break;
+ }
+
+ msleep(20);
+ }
+
+ if (reinit || mt7921_dma_need_reinit(dev)) {
+ err = mt7921u_dma_init(dev, true);
+ if (err)
+ return err;
+ }
+
+ clear_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
+
+ err = mt76u_resume_rx(&dev->mt76);
+ if (err < 0)
+ return err;
+
+ return mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+}
+#endif /* CONFIG_PM */
+
MODULE_DEVICE_TABLE(usb, mt7921u_device_table);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
@@ -297,6 +352,11 @@ static struct usb_driver mt7921u_driver = {
.id_table = mt7921u_device_table,
.probe = mt7921u_probe,
.disconnect = mt7921u_disconnect,
+#ifdef CONFIG_PM
+ .suspend = mt7921u_suspend,
+ .resume = mt7921u_resume,
+ .reset_resume = mt7921u_resume,
+#endif /* CONFIG_PM */
.soft_unbind = 1,
.disable_hub_initiated_lpm = 1,
};
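
(Editor's aside; illustrative only and not part of the patch above.) The new mt7921u_resume() polls a firmware scratch register a bounded number of times to decide whether DMA must be fully re-initialised. A minimal sketch of that bounded-poll decision follows; the struct, field and bit names are hypothetical stand-ins for MT_WF_SW_DEF_CR_USB_MCU_EVENT and its flags.

/*
 * Sketch of a bounded poll that distinguishes "no recovery pending",
 * "recovery finished, state lost" and "timed out waiting".
 */
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/types.h>

#define EXAMPLE_SER_TRIGGERED	BIT(6)	/* firmware recovery was triggered */
#define EXAMPLE_SER_DONE	BIT(7)	/* firmware recovery has completed */

struct example_hw {
	u32 mcu_event;	/* stands in for the scratch register */
};

/* Returns true when the caller should re-initialise DMA before resuming. */
static bool example_needs_reinit(struct example_hw *hw)
{
	int i;

	for (i = 0; i < 10; i++) {
		u32 val = hw->mcu_event;

		if (!(val & EXAMPLE_SER_TRIGGERED))
			return false;		/* nothing pending, fast path */
		if (val & EXAMPLE_SER_DONE) {
			hw->mcu_event = 0;	/* ack the completed recovery */
			return true;		/* state was lost, re-init */
		}
		msleep(20);			/* give the firmware more time */
	}

	return true;	/* timed out waiting: re-init to be safe */
}

The 10 x 20 ms bound mirrors the loop in mt7921u_resume() above; only the register plumbing differs.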
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
index 99bcbd858b65..cd2f09743d2f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
@@ -121,7 +121,7 @@ static void mt7921u_epctl_rst_opt(struct mt7921_dev *dev, bool reset)
mt7921u_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
}
-int mt7921u_dma_init(struct mt7921_dev *dev)
+int mt7921u_dma_init(struct mt7921_dev *dev, bool resume)
{
int err;
@@ -136,6 +136,9 @@ int mt7921u_dma_init(struct mt7921_dev *dev)
MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT);
mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT);
+ if (resume)
+ return 0;
+
err = mt7921u_dma_rx_evt_ep4(dev);
if (err)
return err;
@@ -221,7 +224,7 @@ int mt7921u_mac_reset(struct mt7921_dev *dev)
if (err)
goto out;
- err = mt7921u_dma_init(dev);
+ err = mt7921u_dma_init(dev, false);
if (err)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 6b8c9dc80542..1d08d99e298c 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -66,9 +66,8 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
wcid = rcu_dereference(dev->wcid[cb->wcid]);
if (wcid) {
status.sta = wcid_to_sta(wcid);
-
- if (status.sta)
- status.rate = &wcid->rate;
+ status.rates = NULL;
+ status.n_rates = 0;
}
hw = mt76_tx_status_get_hw(dev, skb);
@@ -120,7 +119,7 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
memset(cb, 0, sizeof(*cb));
- if (!wcid)
+ if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
return MT_PACKET_ID_NO_ACK;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
@@ -436,12 +435,11 @@ mt76_txq_stopped(struct mt76_queue *q)
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
- struct mt76_txq *mtxq)
+ struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
- struct mt76_wcid *wcid = mtxq->wcid;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
int n_frames = 1;
@@ -463,7 +461,9 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
+ spin_lock(&q->lock);
idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
+ spin_unlock(&q->lock);
if (idx < 0)
return idx;
@@ -483,14 +483,18 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
+ spin_lock(&q->lock);
idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
+ spin_unlock(&q->lock);
if (idx < 0)
break;
n_frames++;
} while (1);
+ spin_lock(&q->lock);
dev->queue_ops->kick(dev, q);
+ spin_unlock(&q->lock);
return n_frames;
}
@@ -521,12 +525,10 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
break;
mtxq = (struct mt76_txq *)txq->drv_priv;
- wcid = mtxq->wcid;
- if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+ wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
+ if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
continue;
- spin_lock_bh(&q->lock);
-
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
@@ -535,15 +537,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
u8 tid = txq->tid;
mtxq->send_bar = false;
- spin_unlock_bh(&q->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
- spin_lock_bh(&q->lock);
}
if (!mt76_txq_stopped(q))
- n_frames = mt76_txq_send_burst(phy, q, mtxq);
-
- spin_unlock_bh(&q->lock);
+ n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);
ieee80211_return_txq(phy->hw, txq, false);
@@ -563,6 +561,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
if (qid >= 4)
return;
+ local_bh_disable();
rcu_read_lock();
do {
@@ -572,6 +571,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
} while (len > 0);
rcu_read_unlock();
+ local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
@@ -721,12 +721,17 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
spin_lock_bh(&dev->token_lock);
- token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
- GFP_ATOMIC);
+ token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
if (token >= 0)
dev->token_count++;
- if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(&dev->mmio.wed) &&
+ token >= dev->mmio.wed.wlan.token_start)
+ dev->wed_token_count++;
+#endif
+
+ if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
__mt76_set_tx_blocked(dev, true);
spin_unlock_bh(&dev->token_lock);
@@ -743,10 +748,18 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
spin_lock_bh(&dev->token_lock);
txwi = idr_remove(&dev->token, token);
- if (txwi)
+ if (txwi) {
dev->token_count--;
- if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(&dev->mmio.wed) &&
+ token >= dev->mmio.wed.wlan.token_start &&
+ --dev->wed_token_count == 0)
+ wake_up(&dev->tx_wait);
+#endif
+ }
+
+ if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
dev->phy.q_tx[0]->blocked)
*wake = true;
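
(Editor's aside; illustrative only and not part of the patch above.) The token changes here keep an IDR-backed token table and a counter under a spinlock, blocking TX when the table is nearly full and requesting a wake-up once enough tokens are released. A minimal sketch of that accounting follows; the structure, names and thresholds are hypothetical, not mt76 symbols.

/*
 * Sketch of spinlock-protected token accounting with an IDR, assuming
 * allocation can happen in softirq context (hence GFP_ATOMIC and the
 * _bh lock variants).
 */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

#define EXAMPLE_TOKEN_SIZE	1024
#define EXAMPLE_FREE_THR	8

struct example_tokens {
	struct idr idr;
	spinlock_t lock;
	int count;
	bool blocked;
};

static int example_token_get(struct example_tokens *t, void *entry)
{
	int token;

	spin_lock_bh(&t->lock);
	/* GFP_ATOMIC: we cannot sleep while holding the spinlock */
	token = idr_alloc(&t->idr, entry, 0, EXAMPLE_TOKEN_SIZE, GFP_ATOMIC);
	if (token >= 0)
		t->count++;
	if (t->count >= EXAMPLE_TOKEN_SIZE - EXAMPLE_FREE_THR)
		t->blocked = true;	/* stop queueing new frames */
	spin_unlock_bh(&t->lock);

	return token;
}

static void *example_token_put(struct example_tokens *t, int token, bool *wake)
{
	void *entry;

	spin_lock_bh(&t->lock);
	entry = idr_remove(&t->idr, token);
	if (entry)
		t->count--;
	if (t->count < EXAMPLE_TOKEN_SIZE - EXAMPLE_FREE_THR && t->blocked) {
		t->blocked = false;
		*wake = true;	/* caller restarts the stopped TX queues */
	}
	spin_unlock_bh(&t->lock);

	return entry;
}

In the driver the wake flag is consumed by the caller, which restarts the blocked TX queues outside the lock, as in mt76_token_release() above.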
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index cccd54ed0518..77616fc77575 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -123,7 +123,7 @@ struct wilc_remain_ch {
u32 duration;
void (*expired)(void *priv, u64 cookie);
void *arg;
- u32 cookie;
+ u64 cookie;
};
struct wilc;
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index 6bd63934c2d8..b5a1b65c087c 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -233,7 +233,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
wl->monitor_dev->needs_free_netdev = true;
- if (cfg80211_register_netdevice(wl->monitor_dev)) {
+ if (register_netdevice(wl->monitor_dev)) {
netdev_err(real_dev, "register_netdevice failed\n");
free_netdev(wl->monitor_dev);
return NULL;
@@ -251,7 +251,7 @@ void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked)
return;
if (rtnl_locked)
- cfg80211_unregister_netdevice(wl->monitor_dev);
+ unregister_netdevice(wl->monitor_dev);
else
unregister_netdev(wl->monitor_dev);
wl->monitor_dev = NULL;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 643bddaae32a..3c292e3464c2 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -14,6 +14,7 @@
#include "wlan_cfg.h"
#define WILC_MULTICAST_TABLE_SIZE 8
+#define WILC_MAX_FW_VERSION_STR_SIZE 50
/* latest API version supported */
#define WILC1000_API_VER 1
@@ -522,7 +523,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) {
int size;
- char firmware_ver[20];
+ char firmware_ver[WILC_MAX_FW_VERSION_STR_SIZE];
size = wilc_wlan_cfg_get_val(wl, WID_FIRMWARE_VERSION,
firmware_ver,
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index ec595dbd8959..7962c11cfe84 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -598,7 +598,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
cmd.read_write = 1;
cmd.function = 0;
cmd.raw = 1;
- cmd.address = SDIO_FBR_BASE(func->num);
+ cmd.address = SDIO_FBR_BASE(1);
cmd.data = SDIO_FBR_ENABLE_CSA;
ret = wilc_sdio_cmd52(wilc, &cmd);
if (ret) {
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index fb5633a05fd5..48441f0389ca 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -875,14 +875,15 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
char *bssid;
u8 mgmt_ptk = 0;
+ if (vmm_table[i] == 0 || vmm_entries_ac[i] >= NQUEUES)
+ break;
+
tqe = wilc_wlan_txq_remove_from_head(wilc, vmm_entries_ac[i]);
- ac_pkt_num_to_chip[vmm_entries_ac[i]]++;
if (!tqe)
break;
+ ac_pkt_num_to_chip[vmm_entries_ac[i]]++;
vif = tqe->vif;
- if (vmm_table[i] == 0)
- break;
le32_to_cpus(&vmm_table[i]);
vmm_sz = FIELD_GET(WILC_VMM_BUFFER_SIZE, vmm_table[i]);
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index d0e98b2f1365..8519cf0adfff 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -824,7 +824,7 @@ static int suspend(struct usb_interface *interface,
struct plfxlc_usb *pl = get_plfxlc_usb(interface);
struct plfxlc_mac *mac = plfxlc_usb_to_mac(pl);
- if (!pl || !plfxlc_usb_dev(pl))
+ if (!pl)
return -ENODEV;
if (pl->initialized == 0)
return 0;
@@ -837,7 +837,7 @@ static int resume(struct usb_interface *interface)
{
struct plfxlc_usb *pl = get_plfxlc_usb(interface);
- if (!pl || !plfxlc_usb_dev(pl))
+ if (!pl)
return -ENODEV;
if (pl->was_running)
plfxlc_usb_resume(pl);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 840728ed57b2..8c23a77d1671 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -1146,8 +1146,8 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
}
tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn);
- netif_napi_add(&bus->mux_dev, &bus->mux_napi,
- qtnf_pcie_pearl_rx_poll, 10);
+ netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
+ qtnf_pcie_pearl_rx_poll, 10);
ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
ipc_int.arg = ps;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 9534e1b33780..d83362578374 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -1159,8 +1159,8 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
}
tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
- netif_napi_add(&bus->mux_dev, &bus->mux_napi,
- qtnf_topaz_rx_poll, 10);
+ netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
+ qtnf_topaz_rx_poll, 10);
ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
ipc_int.arg = ts;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 51fe51bb0504..15e6a6aded31 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -2386,10 +2386,7 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
"Just Read IQK Matrix reg for channel:%d....\n",
channel);
- if ((rtlphy->iqk_matrix[indexforchannel].
- value[0] != NULL)
- /*&&(regea4 != 0) */)
- _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
+ _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
rtlphy->iqk_matrix[
indexforchannel].value, 0,
(rtlphy->iqk_matrix[
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 86a236873254..a8eebafb9a7e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1014,7 +1014,7 @@ int rtl_usb_probe(struct usb_interface *intf,
hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
sizeof(struct rtl_usb_priv), &rtl_ops);
if (!hw) {
- WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n");
+ pr_warn("rtl_usb: ieee80211 alloc failed\n");
return -ENOMEM;
}
rtlpriv = hw->priv;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index e344e058f943..090610e48d08 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1786,7 +1786,7 @@ void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
- SET_ADAPTIVITY_OPTION(h2c_pkt, 2);
+ SET_ADAPTIVITY_OPTION(h2c_pkt, 1);
SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index dd02b6a6790e..93e09400aac4 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -19,8 +19,11 @@ config RTW89_PCI
config RTW89_8852A
tristate
+config RTW89_8852C
+ tristate
+
config RTW89_8852AE
- tristate "Realtek 8852AE PCI wireless network adapter"
+ tristate "Realtek 8852AE PCI wireless network (Wi-Fi 6) adapter"
depends on PCI
select RTW89_CORE
select RTW89_PCI
@@ -28,7 +31,18 @@ config RTW89_8852AE
help
Select this option will enable support for 8852AE chipset
- 802.11ax PCIe wireless network adapter
+ 802.11ax PCIe wireless network (Wi-Fi 6) adapter
+
+config RTW89_8852CE
+ tristate "Realtek 8852CE PCI wireless network (Wi-Fi 6E) adapter"
+ depends on PCI
+ select RTW89_CORE
+ select RTW89_PCI
+ select RTW89_8852C
+ help
+ Select this option will enable support for 8852CE chipset
+
+ 802.11ax PCIe wireless network (Wi-Fi 6E) adapter
config RTW89_DEBUG
bool
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 012ae60c0b81..3006482d25c7 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -23,6 +23,15 @@ rtw89_8852a-objs := rtw8852a.o \
obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
rtw89_8852ae-objs := rtw8852ae.o
+obj-$(CONFIG_RTW89_8852C) += rtw89_8852c.o
+rtw89_8852c-objs := rtw8852c.o \
+ rtw8852c_table.o \
+ rtw8852c_rfk.o \
+ rtw8852c_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852CE) += rtw89_8852ce.o
+rtw89_8852ce-objs := rtw8852ce.o
+
rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index e3317deafa1d..a6a90572e74b 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -1608,10 +1608,13 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
if (rtwdev->scanning &&
RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
- rx_status->freq =
- ieee80211_channel_to_frequency(hal->current_channel,
- hal->current_band_type);
- rx_status->band = rtwdev->hal.current_band_type;
+ u8 chan = hal->current_channel;
+ u8 band = hal->current_band_type;
+ enum nl80211_band nl_band;
+
+ nl_band = rtw89_hw_to_nl80211_band(band);
+ rx_status->freq = ieee80211_channel_to_frequency(chan, nl_band);
+ rx_status->band = nl_band;
}
if (desc_info->icv_err || desc_info->crc32_err)
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 2921814842ff..e8a77225a90f 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -3481,6 +3481,20 @@ static inline u8 rtw89_hw_to_rate_info_bw(enum rtw89_bandwidth hw_bw)
}
static inline
+enum nl80211_band rtw89_hw_to_nl80211_band(enum rtw89_band hw_band)
+{
+ switch (hw_band) {
+ default:
+ case RTW89_BAND_2G:
+ return NL80211_BAND_2GHZ;
+ case RTW89_BAND_5G:
+ return NL80211_BAND_5GHZ;
+ case RTW89_BAND_6G:
+ return NL80211_BAND_6GHZ;
+ }
+}
+
+static inline
enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width)
{
switch (width) {
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index f93f3fee1505..7820bc3ab3b4 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -635,6 +635,11 @@ static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v)
start = 0x000;
end = 0x014;
break;
+ case RTW89_DBG_SEL_MAC_30:
+ seq_puts(m, "Debug selected MAC page 0x30\n");
+ start = 0x030;
+ end = 0x033;
+ break;
case RTW89_DBG_SEL_MAC_40:
seq_puts(m, "Debug selected MAC page 0x40\n");
start = 0x040;
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index 1745815f5e00..de72155ad1fe 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -28,6 +28,7 @@ enum rtw89_debug_mask {
enum rtw89_debug_mac_reg_sel {
RTW89_DBG_SEL_MAC_00,
+ RTW89_DBG_SEL_MAC_30,
RTW89_DBG_SEL_MAC_40,
RTW89_DBG_SEL_MAC_80,
RTW89_DBG_SEL_MAC_C0,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index e4be785709d1..4718aced1428 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -2068,7 +2068,7 @@ static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
struct rtw89_pktofld_info *info, *tmp;
u8 idx;
- for (idx = RTW89_BAND_2G; idx < NUM_NL80211_BANDS; idx++) {
+ for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
if (!(rtwdev->chip->support_bands & BIT(idx)))
continue;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 05b94842fe66..3cf892912c1d 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -29,6 +29,7 @@ const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = {
[RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
[RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
[RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR,
+ [RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR,
};
static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset,
@@ -1050,6 +1051,7 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
{
enum rtw89_rpwm_req_pwr_state state;
+ unsigned long delay = enter ? 10 : 150;
int ret;
if (enter)
@@ -1059,7 +1061,7 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
rtw89_mac_send_rpwm(rtwdev, state, false);
ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret,
- 1000, 15000, false, rtwdev, state);
+ delay, 15000, false, rtwdev, state);
if (ret)
rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
enter ? "entering" : "leaving");
@@ -1889,11 +1891,12 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA |
B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 |
B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 |
- B_AX_CTN_CHK_CCA_P20 | B_AX_SIFS_CHK_EDCCA);
+ B_AX_CTN_CHK_CCA_P20);
val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 |
B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 |
B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 |
- B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV);
+ B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV |
+ B_AX_SIFS_CHK_EDCCA);
rtw89_write32(rtwdev, reg, val);
@@ -2004,6 +2007,7 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
#define TRXCFG_RMAC_DATA_TO 15
#define RX_MAX_LEN_UNIT 512
#define PLD_RLS_MAX_PG 127
+#define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT)
int ret;
u32 reg, rx_max_len, rx_qta;
u16 val;
@@ -2034,11 +2038,10 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rx_qta = rtwdev->mac.dle_info.c0_rx_qta;
else
rx_qta = rtwdev->mac.dle_info.c1_rx_qta;
- rx_qta = rx_qta > PLD_RLS_MAX_PG ? PLD_RLS_MAX_PG : rx_qta;
- rx_max_len = (rx_qta - 1) * rtwdev->mac.dle_info.ple_pg_size /
- RX_MAX_LEN_UNIT;
- rx_max_len = rx_max_len > B_AX_RX_MPDU_MAX_LEN_SIZE ?
- B_AX_RX_MPDU_MAX_LEN_SIZE : rx_max_len;
+ rx_qta = min_t(u32, rx_qta, PLD_RLS_MAX_PG);
+ rx_max_len = rx_qta * rtwdev->mac.dle_info.ple_pg_size;
+ rx_max_len = min_t(u32, rx_max_len, RX_SPEC_MAX_LEN);
+ rx_max_len /= RX_MAX_LEN_UNIT;
rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len);
if (rtwdev->chip->chip_id == RTL8852A &&
@@ -4239,6 +4242,10 @@ static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx)
u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) |
u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK));
+ reg = rtw89_mac_reg_by_idx(R_AX_CSIRPT_OPTION, mac_idx);
+ rtw89_write32_set(rtwdev, reg,
+ B_AX_CSIPRT_VHTSU_AID_EN | B_AX_CSIPRT_HESU_AID_EN);
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 9eb4afe348b3..9f511c8d8a37 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -268,6 +268,7 @@ enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_TXDATA_FIFO_0,
RTW89_MAC_MEM_TXDATA_FIFO_1,
RTW89_MAC_MEM_CPU_LOCAL,
+ RTW89_MAC_MEM_BSSID_CAM,
/* keep last */
RTW89_MAC_MEM_NUM,
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 8da3e117ad38..f24e4a208376 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -630,7 +630,7 @@ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta
rtwsta->use_cfg_mask = true;
rtwsta->mask = *br_data->mask;
- rtw89_phy_ra_updata_sta(br_data->rtwdev, sta);
+ rtw89_phy_ra_updata_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}
static void rtw89_ra_mask_info_update(struct rtw89_dev *rtwdev,
@@ -759,6 +759,15 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_phy_ra_updata_sta(rtwdev, sta, changed);
+}
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -788,5 +797,6 @@ const struct ieee80211_ops rtw89_ops = {
.hw_scan = rtw89_ops_hw_scan,
.cancel_hw_scan = rtw89_ops_cancel_hw_scan,
.set_sar_specs = rtw89_ops_set_sar_specs,
+ .sta_rc_update = rtw89_ops_sta_rc_update,
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 2bdce7024f25..0ef7821b2e0f 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -682,9 +682,6 @@ EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
- rtw89_write32(rtwdev, R_AX_HIMR0, 0);
- rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, 0);
- rtw89_write32(rtwdev, R_AX_HIMR1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 33494e8451cf..762cdba9d3cf 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -357,13 +357,19 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->csi_mode = csi_mode;
}
-void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
+void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+ u32 changed)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct rtw89_ra_info *ra = &rtwsta->ra;
rtw89_phy_ra_sta_update(rtwdev, sta, false);
- ra->upd_mask = 1;
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
+ ra->upd_mask = 1;
+ if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
+ ra->upd_bw_nss_mask = 1;
+
rtw89_debug(rtwdev, RTW89_DBG_RA,
"ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d",
ra->macid,
@@ -423,27 +429,28 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
RTW89_HW_RATE_MCS16,
RTW89_HW_RATE_MCS24};
u8 band = rtwdev->hal.current_band_type;
+ enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
u8 tx_nss = rtwdev->hal.tx_nss;
u8 i;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_he[i],
RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
- mask->control[band].he_mcs[i],
+ mask->control[nl_band].he_mcs[i],
0, true))
goto out;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i],
RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
- mask->control[band].vht_mcs[i],
+ mask->control[nl_band].vht_mcs[i],
0, true))
goto out;
for (i = 0; i < tx_nss; i++)
if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i],
RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
- mask->control[band].ht_mcs[i],
+ mask->control[nl_band].ht_mcs[i],
0, true))
goto out;
@@ -451,18 +458,18 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
* require at least one basic rate for ieee80211_set_bitrate_mask,
* so the decision just depends on if all bitrates are set or not.
*/
- sband = rtwdev->hw->wiphy->bands[band];
+ sband = rtwdev->hw->wiphy->bands[nl_band];
if (band == RTW89_BAND_2G) {
if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
- mask->control[band].legacy,
+ mask->control[nl_band].legacy,
BIT(sband->n_bitrates) - 1, false))
goto out;
} else {
if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
- mask->control[band].legacy,
+ mask->control[nl_band].legacy,
BIT(sband->n_bitrates) - 1, false))
goto out;
}
@@ -487,7 +494,7 @@ static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
- rtw89_phy_ra_updata_sta(rtwdev, sta);
+ rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
@@ -2456,6 +2463,11 @@ void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
u8 macid = phy_ppdu->mac_id;
+ if (macid >= CFO_TRACK_MAX_USER) {
+ rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
+ return;
+ }
+
cfo->cfo_tail[macid] += cfo_val;
cfo->cfo_cnt[macid]++;
cfo->packet_count++;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 3ca5efa4c097..291660154d58 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -471,7 +471,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch);
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
-void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
+void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
+ u32 changed);
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 6f5d1012c90c..ebf28719d935 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -2605,7 +2605,6 @@
B_AX_TMAC_HWSIGB_GEN | \
B_AX_TMAC_RXTB | \
B_AX_TMAC_MIMO_CTRL | \
- B_AX_RMAC_CSI | \
B_AX_RMAC_FTM)
#define R_AX_WMAC_TX_TF_INFO_0 0xCCD0
@@ -2842,6 +2841,11 @@
#define R_AX_RX_SR_CTRL_C1 0xEE4A
#define B_AX_SR_EN BIT(0)
+#define R_AX_CSIRPT_OPTION 0xCE64
+#define R_AX_CSIRPT_OPTION_C1 0xEE64
+#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
+#define B_AX_CSIPRT_VHTSU_AID_EN BIT(24)
+
#define R_AX_RX_STATE_MONITOR 0xCEF0
#define R_AX_RX_STATE_MONITOR_C1 0xEEF0
#define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0)
@@ -3662,7 +3666,7 @@
#define R_DCFO 0x4264
#define B_DCFO GENMASK(1, 0)
#define R_SEG0CSI 0x42AC
-#define B_SEG0CSI_IDX GENMASK(10, 0)
+#define B_SEG0CSI_IDX GENMASK(11, 0)
#define R_SEG0CSI_EN 0x42C4
#define B_SEG0CSI_EN BIT(23)
#define R_BSS_CLR_MAP 0x43ac
@@ -3818,6 +3822,8 @@
#define B_CHBW_MOD_SBW GENMASK(13, 12)
#define B_CHBW_MOD_PRICH GENMASK(11, 8)
#define B_ANT_RX_SEG0 GENMASK(3, 0)
+#define R_PD_BOOST_EN 0x49E8
+#define B_PD_BOOST_EN BIT(7)
#define R_P1_BACKOFF_IBADC_V1 0x49F0
#define B_P1_BACKOFF_IBADC_V1 GENMASK(31, 26)
#define R_BK_FC0_INV_V1 0x4A1C
@@ -3836,6 +3842,12 @@
#define B_PATH1_BT_BACKOFF_V1 GENMASK(23, 0)
#define R_PATH0_FRC_FIR_TYPE_V1 0x4C00
#define B_PATH0_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0)
+#define R_PATH0_NOTCH 0x4C14
+#define B_PATH0_NOTCH_EN BIT(12)
+#define B_PATH0_NOTCH_VAL GENMASK(11, 0)
+#define R_PATH0_NOTCH2 0x4C20
+#define B_PATH0_NOTCH2_EN BIT(12)
+#define B_PATH0_NOTCH2_VAL GENMASK(11, 0)
#define R_PATH0_5MDET 0x4C4C
#define B_PATH0_5MDET_EN BIT(12)
#define B_PATH0_5MDET_SB2 BIT(8)
@@ -3843,6 +3855,12 @@
#define B_PATH0_5MDET_TH GENMASK(5, 0)
#define R_PATH1_FRC_FIR_TYPE_V1 0x4CC4
#define B_PATH1_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0)
+#define R_PATH1_NOTCH 0x4CD8
+#define B_PATH1_NOTCH_EN BIT(12)
+#define B_PATH1_NOTCH_VAL GENMASK(11, 0)
+#define R_PATH1_NOTCH2 0x4CE4
+#define B_PATH1_NOTCH2_EN BIT(12)
+#define B_PATH1_NOTCH2_VAL GENMASK(11, 0)
#define R_PATH1_5MDET 0x4D10
#define B_PATH1_5MDET_EN BIT(12)
#define B_PATH1_5MDET_SB2 BIT(8)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 4fb3de71d032..64840c8d9efe 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1381,19 +1381,72 @@ static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
}
}
+static void rtw8852c_spur_notch(struct rtw89_dev *rtwdev, u32 val,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 notch;
+ u32 notch2;
+
+ if (phy_idx == RTW89_PHY_0) {
+ notch = R_PATH0_NOTCH;
+ notch2 = R_PATH0_NOTCH2;
+ } else {
+ notch = R_PATH1_NOTCH;
+ notch2 = R_PATH1_NOTCH2;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, notch,
+ B_PATH0_NOTCH_VAL | B_PATH0_NOTCH_EN, val);
+ rtw89_phy_write32_set(rtwdev, notch, B_PATH0_NOTCH_EN);
+ rtw89_phy_write32_mask(rtwdev, notch2,
+ B_PATH0_NOTCH2_VAL | B_PATH0_NOTCH2_EN, val);
+ rtw89_phy_write32_set(rtwdev, notch2, B_PATH0_NOTCH2_EN);
+}
+
static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev,
struct rtw89_channel_params *param,
+ u8 pri_ch_idx,
enum rtw89_phy_idx phy_idx)
{
rtw8852c_set_csi_tone_idx(rtwdev, param, phy_idx);
if (phy_idx == RTW89_PHY_0) {
- rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_A);
- if (!rtwdev->dbcc_en)
- rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B);
+ if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ (pri_ch_idx == RTW89_SC_20_LOWER ||
+ pri_ch_idx == RTW89_SC_20_UP3X)) {
+ rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_0);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1);
+ } else if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ (pri_ch_idx == RTW89_SC_20_UPPER ||
+ pri_ch_idx == RTW89_SC_20_LOW3X)) {
+ rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_0);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1);
+ } else {
+ rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_A);
+ if (!rtwdev->dbcc_en)
+ rtw8852c_set_nbi_tone_idx(rtwdev, param,
+ RF_PATH_B);
+ }
} else {
- rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B);
+ if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ (pri_ch_idx == RTW89_SC_20_LOWER ||
+ pri_ch_idx == RTW89_SC_20_UP3X)) {
+ rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1);
+ } else if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ (pri_ch_idx == RTW89_SC_20_UPPER ||
+ pri_ch_idx == RTW89_SC_20_LOW3X)) {
+ rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1);
+ } else {
+ rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B);
+ }
}
+
+ if (pri_ch_idx == RTW89_SC_20_UP3X || pri_ch_idx == RTW89_SC_20_LOW3X)
+ rtw89_phy_write32_idx(rtwdev, R_PD_BOOST_EN, B_PD_BOOST_EN, 0, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PD_BOOST_EN, B_PD_BOOST_EN, 1, phy_idx);
}
static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev,
@@ -1664,7 +1717,7 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
B_PD_ARBITER_OFF, 0x1, phy_idx);
}
- rtw8852c_spur_elimination(rtwdev, param, phy_idx);
+ rtw8852c_spur_elimination(rtwdev, param, pri_ch_idx, phy_idx);
rtw8852c_ctrl_btg(rtwdev, param->band_type == RTW89_BAND_2G);
rtw8852c_5m_mask(rtwdev, param, phy_idx);
@@ -1786,6 +1839,7 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev)
{
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
rtw8852c_rx_dck(rtwdev, phy_idx, false);
rtw8852c_iqk(rtwdev, phy_idx);
rtw8852c_tssi(rtwdev, phy_idx);
@@ -2306,19 +2360,19 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path,
rtw89_write32(rtwdev, reg, 0);
}
- if (tx_path == RF_PATH_A) {
+ if (tx_path == RF_A) {
path_com[0].data = AX_PATH_COM0_PATHA;
path_com[1].data = AX_PATH_COM1_PATHA;
path_com[2].data = AX_PATH_COM2_PATHA;
path_com[7].data = AX_PATH_COM7_PATHA;
path_com[8].data = AX_PATH_COM8_PATHA;
- } else if (tx_path == RF_PATH_B) {
+ } else if (tx_path == RF_B) {
path_com[0].data = AX_PATH_COM0_PATHB;
path_com[1].data = AX_PATH_COM1_PATHB;
path_com[2].data = AX_PATH_COM2_PATHB;
path_com[7].data = AX_PATH_COM7_PATHB;
path_com[8].data = AX_PATH_COM8_PATHB;
- } else if (tx_path == RF_PATH_AB) {
+ } else if (tx_path == RF_AB) {
path_com[0].data = AX_PATH_COM0_PATHAB;
path_com[1].data = AX_PATH_COM1_PATHAB;
path_com[2].data = AX_PATH_COM2_PATHAB;
@@ -2337,9 +2391,73 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path,
}
}
+static void rtw8852c_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en)
+{
+ if (bt_en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_FRC_FIR_TYPE_V1,
+ B_PATH0_FRC_FIR_TYPE_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_FRC_FIR_TYPE_V1,
+ B_PATH1_FRC_FIR_TYPE_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_RXBB_V1,
+ B_PATH0_RXBB_MSK_V1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_RXBB_V1,
+ B_PATH1_RXBB_MSK_V1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_LNA6_OP1DB_V1,
+ B_PATH0_G_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH0_G_TIA0_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA1_LNA6_OP1DB_V1,
+ B_PATH0_G_TIA1_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA1_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA1_LNA6_OP1DB_V1, 0x80);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_BACKOFF_V1,
+ B_PATH0_BT_BACKOFF_V1, 0x780D1E);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_BACKOFF_V1,
+ B_PATH1_BT_BACKOFF_V1, 0x780D1E);
+ rtw89_phy_write32_mask(rtwdev, R_P0_BACKOFF_IBADC_V1,
+ B_P0_BACKOFF_IBADC_V1, 0x34);
+ rtw89_phy_write32_mask(rtwdev, R_P1_BACKOFF_IBADC_V1,
+ B_P1_BACKOFF_IBADC_V1, 0x34);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_FRC_FIR_TYPE_V1,
+ B_PATH0_FRC_FIR_TYPE_MSK_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_FRC_FIR_TYPE_V1,
+ B_PATH1_FRC_FIR_TYPE_MSK_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_RXBB_V1,
+ B_PATH0_RXBB_MSK_V1, 0x60);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_RXBB_V1,
+ B_PATH1_RXBB_MSK_V1, 0x60);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_LNA6_OP1DB_V1,
+ B_PATH0_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH0_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA1_LNA6_OP1DB_V1,
+ B_PATH0_G_TIA1_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA1_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA1_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_BACKOFF_V1,
+ B_PATH0_BT_BACKOFF_V1, 0x79E99E);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_BACKOFF_V1,
+ B_PATH1_BT_BACKOFF_V1, 0x79E99E);
+ rtw89_phy_write32_mask(rtwdev, R_P0_BACKOFF_IBADC_V1,
+ B_P0_BACKOFF_IBADC_V1, 0x26);
+ rtw89_phy_write32_mask(rtwdev, R_P1_BACKOFF_IBADC_V1,
+ B_P1_BACKOFF_IBADC_V1, 0x26);
+ }
+}
+
static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB;
rtw8852c_bb_cfg_rx_path(rtwdev, RF_PATH_AB);
@@ -2355,7 +2473,7 @@ static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
}
- rtw8852c_ctrl_tx_path_tmac(rtwdev, RF_PATH_AB, RTW89_MAC_0);
+ rtw8852c_ctrl_tx_path_tmac(rtwdev, ntx_path, RTW89_MAC_0);
}
static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
@@ -2552,16 +2670,14 @@ rtw8852c_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
#define __write_ctrl(_reg, _msk, _val, _en, _cond) \
do { \
- const typeof(_msk) __msk = _msk; \
- const typeof(_en) __en = _en; \
- u32 _wrt = FIELD_PREP(__msk, _val); \
- BUILD_BUG_ON((__msk & __en) != 0); \
+ u32 _wrt = FIELD_PREP(_msk, _val); \
+ BUILD_BUG_ON((_msk & _en) != 0); \
if (_cond) \
- _wrt |= __en; \
+ _wrt |= _en; \
else \
- _wrt &= ~__en; \
+ _wrt &= ~_en; \
rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, _reg, \
- __msk | __en, _wrt); \
+ _msk | _en, _wrt); \
} while (0)
switch (arg.ctrl_all_time) {
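The reworked __write_ctrl macro above follows a common pattern: pack a value into its register field, conditionally set a separate enable bit, and write both under a combined mask. A minimal standalone sketch of that pattern follows; field_prep_u32() and build_ctrl_word() are illustrative helper names, not the driver's API, and the real code performs the register write through rtw89_mac_txpwr_write32_mask().

/* Illustrative sketch only; not the driver's implementation. */
static unsigned int field_prep_u32(unsigned int msk, unsigned int val)
{
	/* shift val into the field selected by msk (spirit of FIELD_PREP) */
	return (val << __builtin_ctz(msk)) & msk;
}

static unsigned int build_ctrl_word(unsigned int msk, unsigned int val,
				    unsigned int en, int enable)
{
	unsigned int wrt = field_prep_u32(msk, val);

	if (enable)
		wrt |= en;	/* turn the enable bit on */
	else
		wrt &= ~en;	/* keep the enable bit cleared */

	return wrt;		/* the caller writes this under mask (msk | en) */
}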
@@ -2598,6 +2714,48 @@ s8 rtw8852c_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
return clamp_t(s8, val, -100, 0) + 100;
}
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852c_rf_ul[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {6, 1, 0, 7},
+ {13, 1, 0, 7},
+ {13, 1, 0, 7}
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8852c_rf_dl[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* the IDs below are for non-shared-antenna free-run */
+ {255, 1, 0, 7},
+ {255, 1, 0, 7},
+ {255, 1, 0, 7}
+};
+
+static const u8 rtw89_btc_8852c_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {60, 50, 40, 30};
+static const u8 rtw89_btc_8852c_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {40, 36, 31, 28};
+
+static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852c_mon_reg[] = {
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda00),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda04),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda38),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda44),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda48),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd200),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd220),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980),
+};
+
static
void rtw8852c_btc_bt_aci_imp(struct rtw89_dev *rtwdev)
{
@@ -2731,10 +2889,13 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.disable_bb_rf = rtw8852c_mac_disable_bb_rf,
.bb_reset = rtw8852c_bb_reset,
.bb_sethw = rtw8852c_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v1,
+ .write_rf = rtw89_phy_write_rf_v1,
.set_channel = rtw8852c_set_channel,
.set_channel_help = rtw8852c_set_channel_help,
.read_efuse = rtw8852c_read_efuse,
.read_phycap = rtw8852c_read_phycap,
+ .fem_setup = NULL,
.rfk_init = rtw8852c_rfk_init,
.rfk_channel = rtw8852c_rfk_channel,
.rfk_band_changed = rtw8852c_rfk_band_changed,
@@ -2745,11 +2906,11 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl,
.init_txpwr_unit = rtw8852c_init_txpwr_unit,
.get_thermal = rtw8852c_get_thermal,
+ .ctrl_btg = rtw8852c_ctrl_btg,
.query_ppdu = rtw8852c_query_ppdu,
- .read_rf = rtw89_phy_read_rf_v1,
- .write_rf = rtw89_phy_write_rf_v1,
- .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .bb_ctrl_btc_preagc = rtw8852c_bb_ctrl_btc_preagc,
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
.pwr_on_func = rtw8852c_pwr_on_func,
.pwr_off_func = rtw8852c_pwr_off_func,
.fill_txdesc = rtw89_core_fill_txdesc_v1,
@@ -2774,6 +2935,10 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.chip_id = RTL8852C,
.ops = &rtw8852c_chip_ops,
.fw_name = "rtw89/rtw8852c_fw.bin",
+ .fifo_size = 458752,
+ .max_amsdu_limit = 8000,
+ .dis_2g_40m_ul_ofdma = false,
+ .rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = rtw8852c_hfc_param_ini_pcie,
.dle_mem = rtw8852c_dle_mem_pcie,
.rf_base_addr = {0xe000, 0xf000},
@@ -2795,7 +2960,17 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.txpwr_factor_mac = 1,
.dig_table = NULL,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ) |
+ BIT(NL80211_BAND_6GHZ),
+ .support_bw160 = true,
.hw_sec_hdr = true,
+ .rf_path_num = 2,
+ .tx_nss = 2,
+ .rx_nss = 2,
+ .acam_num = 128,
+ .bcam_num = 20,
+ .scam_num = 128,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
@@ -2804,6 +2979,22 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dav_log_efuse_size = 16,
.phycap_addr = 0x590,
.phycap_size = 0x60,
+ .para_ver = 0x05050764,
+ .wlcx_desired = 0x05050000,
+ .btcx_desired = 0x5,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+ .afh_guard_ch = 6,
+ .wl_rssi_thres = rtw89_btc_8852c_wl_rssi_thres,
+ .bt_rssi_thres = rtw89_btc_8852c_bt_rssi_thres,
+ .rssi_tol = 2,
+ .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852c_mon_reg),
+ .mon_reg = rtw89_btc_8852c_mon_reg,
+ .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852c_rf_ul),
+ .rf_para_ulink = rtw89_btc_8852c_rf_ul,
+ .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852c_rf_dl),
+ .rf_para_dlink = rtw89_btc_8852c_rf_dl,
+ .ps_mode_supported = 0,
.low_power_hci_modes = BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
.h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_V1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index ffc71ad24927..dfb9caba9bc4 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -3809,6 +3809,24 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
param->bandwidth);
}
+void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
+ u8 idx = mcc_info->table_idx;
+ int i;
+
+ for (i = 0; i < RTW89_IQK_CHS_NR; i++) {
+ if (mcc_info->ch[idx] == 0)
+ break;
+ if (++idx >= RTW89_IQK_CHS_NR)
+ idx = 0;
+ }
+
+ mcc_info->table_idx = idx;
+ mcc_info->ch[idx] = rtwdev->hal.current_channel;
+ mcc_info->band[idx] = rtwdev->hal.current_band_type;
+}
+
void rtw8852c_rck(struct rtw89_dev *rtwdev)
{
u8 path;
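The rtw8852c_mcc_get_ch_info() addition above records the current channel and band into the MCC/IQK channel table: it scans circularly from the current index for an unused slot (channel 0) and, when the table is full, falls back to overwriting the slot it started from. A hedged standalone sketch of that slot-selection logic follows; TABLE_SIZE and struct mcc_table are illustrative stand-ins for RTW89_IQK_CHS_NR and the driver's MCC info structure, not the real definitions.

#define TABLE_SIZE 4	/* illustrative size; stand-in for RTW89_IQK_CHS_NR */

struct mcc_table {
	unsigned char table_idx;
	unsigned char ch[TABLE_SIZE];
	unsigned char band[TABLE_SIZE];
};

static void mcc_record_channel(struct mcc_table *t,
			       unsigned char channel, unsigned char band)
{
	unsigned char idx = t->table_idx;
	int i;

	for (i = 0; i < TABLE_SIZE; i++) {
		if (t->ch[idx] == 0)		/* free slot found */
			break;
		if (++idx >= TABLE_SIZE)	/* wrap around */
			idx = 0;
	}

	t->table_idx = idx;			/* remember where the entry went */
	t->ch[idx] = channel;			/* record current channel */
	t->band[idx] = band;			/* record current band */
}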
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
index e42fb1a4965e..c32756f0c01a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -7,6 +7,7 @@
#include "core.h"
+void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
void rtw8852c_rck(struct rtw89_dev *rtwdev);
void rtw8852c_dack(struct rtw89_dev *rtwdev);
void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
index 477c46041c94..feaa83b16171 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -13678,27 +13678,27 @@ static const struct rtw89_txpwr_byrate_cfg rtw89_8852c_txpwr_byrate[] = {
{ 0, 1, 3, 0, 4, 0x50505050, },
{ 0, 0, 4, 1, 4, 0x00000000, },
{ 0, 0, 4, 0, 1, 0x00000000, },
- { 1, 0, 1, 0, 4, 0x5054585c, },
- { 1, 0, 1, 4, 4, 0x4044484c, },
- { 1, 0, 2, 0, 4, 0x4c505458, },
+ { 1, 0, 1, 0, 4, 0x48484848, },
+ { 1, 0, 1, 4, 4, 0x40444848, },
+ { 1, 0, 2, 0, 4, 0x48484848, },
{ 1, 0, 2, 4, 4, 0x3c404448, },
{ 1, 0, 2, 8, 4, 0x2c303438, },
- { 1, 0, 3, 0, 4, 0x3c40484c, },
- { 1, 1, 2, 0, 4, 0x4c505458, },
+ { 1, 0, 3, 0, 4, 0x48484848, },
+ { 1, 1, 2, 0, 4, 0x48484848, },
{ 1, 1, 2, 4, 4, 0x3c404448, },
{ 1, 1, 2, 8, 4, 0x2c303438, },
- { 1, 1, 3, 0, 4, 0x3c40484c, },
+ { 1, 1, 3, 0, 4, 0x48484848, },
{ 1, 0, 4, 0, 4, 0x00000000, },
- { 2, 0, 1, 0, 4, 0x5054585c, },
- { 2, 0, 1, 4, 4, 0x4044484c, },
- { 2, 0, 2, 0, 4, 0x4c505458, },
- { 2, 0, 2, 4, 4, 0x3c404448, },
- { 2, 0, 2, 8, 4, 0x2c303438, },
- { 2, 0, 3, 0, 4, 0x3c40484c, },
- { 2, 1, 2, 0, 4, 0x4c505458, },
- { 2, 1, 2, 4, 4, 0x3c404448, },
- { 2, 1, 2, 8, 4, 0x2c303438, },
- { 2, 1, 3, 0, 4, 0x3c40484c, },
+ { 2, 0, 1, 0, 4, 0x40404040, },
+ { 2, 0, 1, 4, 4, 0x383c4040, },
+ { 2, 0, 2, 0, 4, 0x40404040, },
+ { 2, 0, 2, 4, 4, 0x34383c40, },
+ { 2, 0, 2, 8, 4, 0x24282c30, },
+ { 2, 0, 3, 0, 4, 0x40404040, },
+ { 2, 1, 2, 0, 4, 0x40404040, },
+ { 2, 1, 2, 4, 4, 0x34383c40, },
+ { 2, 1, 2, 8, 4, 0x24282c30, },
+ { 2, 1, 3, 0, 4, 0x40404040, },
{ 2, 0, 4, 0, 4, 0x00000000, },
};
@@ -13857,8 +13857,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][0][0][RTW89_WW][9] = 60,
[0][0][0][0][RTW89_WW][10] = 60,
[0][0][0][0][RTW89_WW][11] = 60,
- [0][0][0][0][RTW89_WW][12] = 58,
- [0][0][0][0][RTW89_WW][13] = 74,
+ [0][0][0][0][RTW89_WW][12] = 48,
+ [0][0][0][0][RTW89_WW][13] = 72,
[0][1][0][0][RTW89_WW][0] = 48,
[0][1][0][0][RTW89_WW][1] = 48,
[0][1][0][0][RTW89_WW][2] = 48,
@@ -13870,34 +13870,34 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][0][0][RTW89_WW][8] = 48,
[0][1][0][0][RTW89_WW][9] = 48,
[0][1][0][0][RTW89_WW][10] = 48,
- [0][1][0][0][RTW89_WW][11] = 48,
- [0][1][0][0][RTW89_WW][12] = 44,
- [0][1][0][0][RTW89_WW][13] = 62,
+ [0][1][0][0][RTW89_WW][11] = 46,
+ [0][1][0][0][RTW89_WW][12] = 34,
+ [0][1][0][0][RTW89_WW][13] = 60,
[1][0][0][0][RTW89_WW][0] = 0,
[1][0][0][0][RTW89_WW][1] = 0,
- [1][0][0][0][RTW89_WW][2] = 52,
- [1][0][0][0][RTW89_WW][3] = 52,
- [1][0][0][0][RTW89_WW][4] = 52,
- [1][0][0][0][RTW89_WW][5] = 60,
- [1][0][0][0][RTW89_WW][6] = 52,
- [1][0][0][0][RTW89_WW][7] = 52,
- [1][0][0][0][RTW89_WW][8] = 52,
- [1][0][0][0][RTW89_WW][9] = 44,
- [1][0][0][0][RTW89_WW][10] = 32,
+ [1][0][0][0][RTW89_WW][2] = 42,
+ [1][0][0][0][RTW89_WW][3] = 42,
+ [1][0][0][0][RTW89_WW][4] = 42,
+ [1][0][0][0][RTW89_WW][5] = 58,
+ [1][0][0][0][RTW89_WW][6] = 42,
+ [1][0][0][0][RTW89_WW][7] = 42,
+ [1][0][0][0][RTW89_WW][8] = 42,
+ [1][0][0][0][RTW89_WW][9] = 34,
+ [1][0][0][0][RTW89_WW][10] = 22,
[1][0][0][0][RTW89_WW][11] = 0,
[1][0][0][0][RTW89_WW][12] = 0,
[1][0][0][0][RTW89_WW][13] = 0,
[1][1][0][0][RTW89_WW][0] = 0,
[1][1][0][0][RTW89_WW][1] = 0,
- [1][1][0][0][RTW89_WW][2] = 48,
- [1][1][0][0][RTW89_WW][3] = 48,
- [1][1][0][0][RTW89_WW][4] = 48,
+ [1][1][0][0][RTW89_WW][2] = 38,
+ [1][1][0][0][RTW89_WW][3] = 38,
+ [1][1][0][0][RTW89_WW][4] = 38,
[1][1][0][0][RTW89_WW][5] = 48,
- [1][1][0][0][RTW89_WW][6] = 36,
- [1][1][0][0][RTW89_WW][7] = 36,
- [1][1][0][0][RTW89_WW][8] = 36,
- [1][1][0][0][RTW89_WW][9] = 32,
- [1][1][0][0][RTW89_WW][10] = 32,
+ [1][1][0][0][RTW89_WW][6] = 26,
+ [1][1][0][0][RTW89_WW][7] = 26,
+ [1][1][0][0][RTW89_WW][8] = 26,
+ [1][1][0][0][RTW89_WW][9] = 22,
+ [1][1][0][0][RTW89_WW][10] = 22,
[1][1][0][0][RTW89_WW][11] = 0,
[1][1][0][0][RTW89_WW][12] = 0,
[1][1][0][0][RTW89_WW][13] = 0,
@@ -13912,8 +13912,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][8] = 60,
[0][0][1][0][RTW89_WW][9] = 60,
[0][0][1][0][RTW89_WW][10] = 60,
- [0][0][1][0][RTW89_WW][11] = 56,
- [0][0][1][0][RTW89_WW][12] = 52,
+ [0][0][1][0][RTW89_WW][11] = 46,
+ [0][0][1][0][RTW89_WW][12] = 42,
[0][0][1][0][RTW89_WW][13] = 0,
[0][1][1][0][RTW89_WW][0] = 48,
[0][1][1][0][RTW89_WW][1] = 48,
@@ -13926,8 +13926,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_WW][8] = 48,
[0][1][1][0][RTW89_WW][9] = 48,
[0][1][1][0][RTW89_WW][10] = 48,
- [0][1][1][0][RTW89_WW][11] = 48,
- [0][1][1][0][RTW89_WW][12] = 44,
+ [0][1][1][0][RTW89_WW][11] = 38,
+ [0][1][1][0][RTW89_WW][12] = 34,
[0][1][1][0][RTW89_WW][13] = 0,
[0][0][2][0][RTW89_WW][0] = 60,
[0][0][2][0][RTW89_WW][1] = 60,
@@ -13940,8 +13940,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_WW][8] = 60,
[0][0][2][0][RTW89_WW][9] = 60,
[0][0][2][0][RTW89_WW][10] = 60,
- [0][0][2][0][RTW89_WW][11] = 56,
- [0][0][2][0][RTW89_WW][12] = 52,
+ [0][0][2][0][RTW89_WW][11] = 46,
+ [0][0][2][0][RTW89_WW][12] = 42,
[0][0][2][0][RTW89_WW][13] = 0,
[0][1][2][0][RTW89_WW][0] = 48,
[0][1][2][0][RTW89_WW][1] = 48,
@@ -13954,8 +13954,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_WW][8] = 48,
[0][1][2][0][RTW89_WW][9] = 48,
[0][1][2][0][RTW89_WW][10] = 48,
- [0][1][2][0][RTW89_WW][11] = 48,
- [0][1][2][0][RTW89_WW][12] = 44,
+ [0][1][2][0][RTW89_WW][11] = 38,
+ [0][1][2][0][RTW89_WW][12] = 34,
[0][1][2][0][RTW89_WW][13] = 0,
[0][1][2][1][RTW89_WW][0] = 36,
[0][1][2][1][RTW89_WW][1] = 36,
@@ -13969,7 +13969,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_WW][9] = 36,
[0][1][2][1][RTW89_WW][10] = 36,
[0][1][2][1][RTW89_WW][11] = 36,
- [0][1][2][1][RTW89_WW][12] = 36,
+ [0][1][2][1][RTW89_WW][12] = 34,
[0][1][2][1][RTW89_WW][13] = 0,
[1][0][2][0][RTW89_WW][0] = 0,
[1][0][2][0][RTW89_WW][1] = 0,
@@ -13981,21 +13981,21 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_WW][7] = 60,
[1][0][2][0][RTW89_WW][8] = 60,
[1][0][2][0][RTW89_WW][9] = 60,
- [1][0][2][0][RTW89_WW][10] = 60,
+ [1][0][2][0][RTW89_WW][10] = 58,
[1][0][2][0][RTW89_WW][11] = 0,
[1][0][2][0][RTW89_WW][12] = 0,
[1][0][2][0][RTW89_WW][13] = 0,
[1][1][2][0][RTW89_WW][0] = 0,
[1][1][2][0][RTW89_WW][1] = 0,
- [1][1][2][0][RTW89_WW][2] = 48,
- [1][1][2][0][RTW89_WW][3] = 48,
+ [1][1][2][0][RTW89_WW][2] = 46,
+ [1][1][2][0][RTW89_WW][3] = 46,
[1][1][2][0][RTW89_WW][4] = 48,
[1][1][2][0][RTW89_WW][5] = 48,
[1][1][2][0][RTW89_WW][6] = 48,
- [1][1][2][0][RTW89_WW][7] = 48,
- [1][1][2][0][RTW89_WW][8] = 48,
- [1][1][2][0][RTW89_WW][9] = 44,
- [1][1][2][0][RTW89_WW][10] = 40,
+ [1][1][2][0][RTW89_WW][7] = 46,
+ [1][1][2][0][RTW89_WW][8] = 46,
+ [1][1][2][0][RTW89_WW][9] = 34,
+ [1][1][2][0][RTW89_WW][10] = 30,
[1][1][2][0][RTW89_WW][11] = 0,
[1][1][2][0][RTW89_WW][12] = 0,
[1][1][2][0][RTW89_WW][13] = 0,
@@ -14008,149 +14008,149 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_WW][6] = 36,
[1][1][2][1][RTW89_WW][7] = 36,
[1][1][2][1][RTW89_WW][8] = 36,
- [1][1][2][1][RTW89_WW][9] = 36,
- [1][1][2][1][RTW89_WW][10] = 36,
+ [1][1][2][1][RTW89_WW][9] = 34,
+ [1][1][2][1][RTW89_WW][10] = 30,
[1][1][2][1][RTW89_WW][11] = 0,
[1][1][2][1][RTW89_WW][12] = 0,
[1][1][2][1][RTW89_WW][13] = 0,
- [0][0][0][0][RTW89_FCC][0] = 80,
+ [0][0][0][0][RTW89_FCC][0] = 70,
[0][0][0][0][RTW89_ETSI][0] = 60,
- [0][0][0][0][RTW89_MKK][0] = 72,
- [0][0][0][0][RTW89_IC][0] = 80,
+ [0][0][0][0][RTW89_MKK][0] = 68,
+ [0][0][0][0][RTW89_IC][0] = 74,
[0][0][0][0][RTW89_ACMA][0] = 60,
- [0][0][0][0][RTW89_FCC][1] = 80,
+ [0][0][0][0][RTW89_FCC][1] = 70,
[0][0][0][0][RTW89_ETSI][1] = 60,
- [0][0][0][0][RTW89_MKK][1] = 72,
- [0][0][0][0][RTW89_IC][1] = 80,
+ [0][0][0][0][RTW89_MKK][1] = 68,
+ [0][0][0][0][RTW89_IC][1] = 74,
[0][0][0][0][RTW89_ACMA][1] = 60,
- [0][0][0][0][RTW89_FCC][2] = 80,
+ [0][0][0][0][RTW89_FCC][2] = 70,
[0][0][0][0][RTW89_ETSI][2] = 60,
- [0][0][0][0][RTW89_MKK][2] = 72,
- [0][0][0][0][RTW89_IC][2] = 80,
+ [0][0][0][0][RTW89_MKK][2] = 68,
+ [0][0][0][0][RTW89_IC][2] = 74,
[0][0][0][0][RTW89_ACMA][2] = 60,
- [0][0][0][0][RTW89_FCC][3] = 80,
+ [0][0][0][0][RTW89_FCC][3] = 70,
[0][0][0][0][RTW89_ETSI][3] = 60,
- [0][0][0][0][RTW89_MKK][3] = 72,
- [0][0][0][0][RTW89_IC][3] = 80,
+ [0][0][0][0][RTW89_MKK][3] = 68,
+ [0][0][0][0][RTW89_IC][3] = 74,
[0][0][0][0][RTW89_ACMA][3] = 60,
- [0][0][0][0][RTW89_FCC][4] = 80,
+ [0][0][0][0][RTW89_FCC][4] = 70,
[0][0][0][0][RTW89_ETSI][4] = 60,
- [0][0][0][0][RTW89_MKK][4] = 72,
- [0][0][0][0][RTW89_IC][4] = 80,
+ [0][0][0][0][RTW89_MKK][4] = 68,
+ [0][0][0][0][RTW89_IC][4] = 74,
[0][0][0][0][RTW89_ACMA][4] = 60,
- [0][0][0][0][RTW89_FCC][5] = 80,
+ [0][0][0][0][RTW89_FCC][5] = 70,
[0][0][0][0][RTW89_ETSI][5] = 60,
- [0][0][0][0][RTW89_MKK][5] = 72,
- [0][0][0][0][RTW89_IC][5] = 80,
+ [0][0][0][0][RTW89_MKK][5] = 68,
+ [0][0][0][0][RTW89_IC][5] = 74,
[0][0][0][0][RTW89_ACMA][5] = 60,
- [0][0][0][0][RTW89_FCC][6] = 80,
+ [0][0][0][0][RTW89_FCC][6] = 70,
[0][0][0][0][RTW89_ETSI][6] = 60,
- [0][0][0][0][RTW89_MKK][6] = 72,
- [0][0][0][0][RTW89_IC][6] = 80,
+ [0][0][0][0][RTW89_MKK][6] = 68,
+ [0][0][0][0][RTW89_IC][6] = 74,
[0][0][0][0][RTW89_ACMA][6] = 60,
- [0][0][0][0][RTW89_FCC][7] = 80,
+ [0][0][0][0][RTW89_FCC][7] = 70,
[0][0][0][0][RTW89_ETSI][7] = 60,
- [0][0][0][0][RTW89_MKK][7] = 72,
- [0][0][0][0][RTW89_IC][7] = 80,
+ [0][0][0][0][RTW89_MKK][7] = 68,
+ [0][0][0][0][RTW89_IC][7] = 74,
[0][0][0][0][RTW89_ACMA][7] = 60,
- [0][0][0][0][RTW89_FCC][8] = 80,
+ [0][0][0][0][RTW89_FCC][8] = 70,
[0][0][0][0][RTW89_ETSI][8] = 60,
- [0][0][0][0][RTW89_MKK][8] = 72,
- [0][0][0][0][RTW89_IC][8] = 80,
+ [0][0][0][0][RTW89_MKK][8] = 68,
+ [0][0][0][0][RTW89_IC][8] = 74,
[0][0][0][0][RTW89_ACMA][8] = 60,
- [0][0][0][0][RTW89_FCC][9] = 80,
+ [0][0][0][0][RTW89_FCC][9] = 70,
[0][0][0][0][RTW89_ETSI][9] = 60,
- [0][0][0][0][RTW89_MKK][9] = 72,
- [0][0][0][0][RTW89_IC][9] = 80,
+ [0][0][0][0][RTW89_MKK][9] = 68,
+ [0][0][0][0][RTW89_IC][9] = 74,
[0][0][0][0][RTW89_ACMA][9] = 60,
- [0][0][0][0][RTW89_FCC][10] = 80,
+ [0][0][0][0][RTW89_FCC][10] = 70,
[0][0][0][0][RTW89_ETSI][10] = 60,
- [0][0][0][0][RTW89_MKK][10] = 72,
- [0][0][0][0][RTW89_IC][10] = 80,
+ [0][0][0][0][RTW89_MKK][10] = 68,
+ [0][0][0][0][RTW89_IC][10] = 74,
[0][0][0][0][RTW89_ACMA][10] = 60,
- [0][0][0][0][RTW89_FCC][11] = 72,
+ [0][0][0][0][RTW89_FCC][11] = 62,
[0][0][0][0][RTW89_ETSI][11] = 60,
- [0][0][0][0][RTW89_MKK][11] = 72,
+ [0][0][0][0][RTW89_MKK][11] = 68,
[0][0][0][0][RTW89_IC][11] = 72,
[0][0][0][0][RTW89_ACMA][11] = 60,
- [0][0][0][0][RTW89_FCC][12] = 58,
+ [0][0][0][0][RTW89_FCC][12] = 48,
[0][0][0][0][RTW89_ETSI][12] = 60,
- [0][0][0][0][RTW89_MKK][12] = 72,
+ [0][0][0][0][RTW89_MKK][12] = 68,
[0][0][0][0][RTW89_IC][12] = 58,
[0][0][0][0][RTW89_ACMA][12] = 60,
[0][0][0][0][RTW89_FCC][13] = 127,
[0][0][0][0][RTW89_ETSI][13] = 127,
- [0][0][0][0][RTW89_MKK][13] = 74,
+ [0][0][0][0][RTW89_MKK][13] = 72,
[0][0][0][0][RTW89_IC][13] = 127,
[0][0][0][0][RTW89_ACMA][13] = 127,
- [0][1][0][0][RTW89_FCC][0] = 76,
+ [0][1][0][0][RTW89_FCC][0] = 66,
[0][1][0][0][RTW89_ETSI][0] = 48,
- [0][1][0][0][RTW89_MKK][0] = 60,
- [0][1][0][0][RTW89_IC][0] = 76,
+ [0][1][0][0][RTW89_MKK][0] = 58,
+ [0][1][0][0][RTW89_IC][0] = 74,
[0][1][0][0][RTW89_ACMA][0] = 48,
- [0][1][0][0][RTW89_FCC][1] = 76,
+ [0][1][0][0][RTW89_FCC][1] = 66,
[0][1][0][0][RTW89_ETSI][1] = 48,
- [0][1][0][0][RTW89_MKK][1] = 60,
- [0][1][0][0][RTW89_IC][1] = 76,
+ [0][1][0][0][RTW89_MKK][1] = 58,
+ [0][1][0][0][RTW89_IC][1] = 74,
[0][1][0][0][RTW89_ACMA][1] = 48,
- [0][1][0][0][RTW89_FCC][2] = 76,
+ [0][1][0][0][RTW89_FCC][2] = 66,
[0][1][0][0][RTW89_ETSI][2] = 48,
- [0][1][0][0][RTW89_MKK][2] = 60,
- [0][1][0][0][RTW89_IC][2] = 76,
+ [0][1][0][0][RTW89_MKK][2] = 58,
+ [0][1][0][0][RTW89_IC][2] = 74,
[0][1][0][0][RTW89_ACMA][2] = 48,
- [0][1][0][0][RTW89_FCC][3] = 76,
+ [0][1][0][0][RTW89_FCC][3] = 66,
[0][1][0][0][RTW89_ETSI][3] = 48,
- [0][1][0][0][RTW89_MKK][3] = 60,
- [0][1][0][0][RTW89_IC][3] = 76,
+ [0][1][0][0][RTW89_MKK][3] = 58,
+ [0][1][0][0][RTW89_IC][3] = 74,
[0][1][0][0][RTW89_ACMA][3] = 48,
- [0][1][0][0][RTW89_FCC][4] = 76,
+ [0][1][0][0][RTW89_FCC][4] = 66,
[0][1][0][0][RTW89_ETSI][4] = 48,
- [0][1][0][0][RTW89_MKK][4] = 60,
- [0][1][0][0][RTW89_IC][4] = 76,
+ [0][1][0][0][RTW89_MKK][4] = 58,
+ [0][1][0][0][RTW89_IC][4] = 74,
[0][1][0][0][RTW89_ACMA][4] = 48,
- [0][1][0][0][RTW89_FCC][5] = 76,
+ [0][1][0][0][RTW89_FCC][5] = 66,
[0][1][0][0][RTW89_ETSI][5] = 48,
- [0][1][0][0][RTW89_MKK][5] = 60,
- [0][1][0][0][RTW89_IC][5] = 76,
+ [0][1][0][0][RTW89_MKK][5] = 58,
+ [0][1][0][0][RTW89_IC][5] = 74,
[0][1][0][0][RTW89_ACMA][5] = 48,
- [0][1][0][0][RTW89_FCC][6] = 76,
+ [0][1][0][0][RTW89_FCC][6] = 66,
[0][1][0][0][RTW89_ETSI][6] = 48,
- [0][1][0][0][RTW89_MKK][6] = 60,
- [0][1][0][0][RTW89_IC][6] = 76,
+ [0][1][0][0][RTW89_MKK][6] = 58,
+ [0][1][0][0][RTW89_IC][6] = 74,
[0][1][0][0][RTW89_ACMA][6] = 48,
- [0][1][0][0][RTW89_FCC][7] = 76,
+ [0][1][0][0][RTW89_FCC][7] = 66,
[0][1][0][0][RTW89_ETSI][7] = 48,
- [0][1][0][0][RTW89_MKK][7] = 60,
- [0][1][0][0][RTW89_IC][7] = 76,
+ [0][1][0][0][RTW89_MKK][7] = 58,
+ [0][1][0][0][RTW89_IC][7] = 74,
[0][1][0][0][RTW89_ACMA][7] = 48,
- [0][1][0][0][RTW89_FCC][8] = 76,
+ [0][1][0][0][RTW89_FCC][8] = 66,
[0][1][0][0][RTW89_ETSI][8] = 48,
- [0][1][0][0][RTW89_MKK][8] = 60,
- [0][1][0][0][RTW89_IC][8] = 76,
+ [0][1][0][0][RTW89_MKK][8] = 58,
+ [0][1][0][0][RTW89_IC][8] = 74,
[0][1][0][0][RTW89_ACMA][8] = 48,
- [0][1][0][0][RTW89_FCC][9] = 76,
+ [0][1][0][0][RTW89_FCC][9] = 66,
[0][1][0][0][RTW89_ETSI][9] = 48,
- [0][1][0][0][RTW89_MKK][9] = 60,
- [0][1][0][0][RTW89_IC][9] = 76,
+ [0][1][0][0][RTW89_MKK][9] = 58,
+ [0][1][0][0][RTW89_IC][9] = 74,
[0][1][0][0][RTW89_ACMA][9] = 48,
- [0][1][0][0][RTW89_FCC][10] = 76,
+ [0][1][0][0][RTW89_FCC][10] = 66,
[0][1][0][0][RTW89_ETSI][10] = 48,
- [0][1][0][0][RTW89_MKK][10] = 60,
- [0][1][0][0][RTW89_IC][10] = 76,
+ [0][1][0][0][RTW89_MKK][10] = 58,
+ [0][1][0][0][RTW89_IC][10] = 74,
[0][1][0][0][RTW89_ACMA][10] = 48,
- [0][1][0][0][RTW89_FCC][11] = 56,
+ [0][1][0][0][RTW89_FCC][11] = 46,
[0][1][0][0][RTW89_ETSI][11] = 48,
- [0][1][0][0][RTW89_MKK][11] = 60,
+ [0][1][0][0][RTW89_MKK][11] = 58,
[0][1][0][0][RTW89_IC][11] = 56,
[0][1][0][0][RTW89_ACMA][11] = 48,
- [0][1][0][0][RTW89_FCC][12] = 44,
+ [0][1][0][0][RTW89_FCC][12] = 34,
[0][1][0][0][RTW89_ETSI][12] = 48,
- [0][1][0][0][RTW89_MKK][12] = 60,
+ [0][1][0][0][RTW89_MKK][12] = 58,
[0][1][0][0][RTW89_IC][12] = 44,
[0][1][0][0][RTW89_ACMA][12] = 48,
[0][1][0][0][RTW89_FCC][13] = 127,
[0][1][0][0][RTW89_ETSI][13] = 127,
- [0][1][0][0][RTW89_MKK][13] = 62,
+ [0][1][0][0][RTW89_MKK][13] = 60,
[0][1][0][0][RTW89_IC][13] = 127,
[0][1][0][0][RTW89_ACMA][13] = 127,
[1][0][0][0][RTW89_FCC][0] = 127,
@@ -14163,49 +14163,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][0][0][RTW89_MKK][1] = 127,
[1][0][0][0][RTW89_IC][1] = 127,
[1][0][0][0][RTW89_ACMA][1] = 127,
- [1][0][0][0][RTW89_FCC][2] = 52,
+ [1][0][0][0][RTW89_FCC][2] = 42,
[1][0][0][0][RTW89_ETSI][2] = 60,
- [1][0][0][0][RTW89_MKK][2] = 72,
+ [1][0][0][0][RTW89_MKK][2] = 66,
[1][0][0][0][RTW89_IC][2] = 52,
[1][0][0][0][RTW89_ACMA][2] = 60,
- [1][0][0][0][RTW89_FCC][3] = 52,
+ [1][0][0][0][RTW89_FCC][3] = 42,
[1][0][0][0][RTW89_ETSI][3] = 60,
- [1][0][0][0][RTW89_MKK][3] = 72,
+ [1][0][0][0][RTW89_MKK][3] = 66,
[1][0][0][0][RTW89_IC][3] = 52,
[1][0][0][0][RTW89_ACMA][3] = 60,
- [1][0][0][0][RTW89_FCC][4] = 52,
+ [1][0][0][0][RTW89_FCC][4] = 42,
[1][0][0][0][RTW89_ETSI][4] = 60,
- [1][0][0][0][RTW89_MKK][4] = 72,
+ [1][0][0][0][RTW89_MKK][4] = 66,
[1][0][0][0][RTW89_IC][4] = 52,
[1][0][0][0][RTW89_ACMA][4] = 60,
- [1][0][0][0][RTW89_FCC][5] = 68,
+ [1][0][0][0][RTW89_FCC][5] = 58,
[1][0][0][0][RTW89_ETSI][5] = 60,
- [1][0][0][0][RTW89_MKK][5] = 72,
+ [1][0][0][0][RTW89_MKK][5] = 66,
[1][0][0][0][RTW89_IC][5] = 68,
[1][0][0][0][RTW89_ACMA][5] = 60,
- [1][0][0][0][RTW89_FCC][6] = 52,
+ [1][0][0][0][RTW89_FCC][6] = 42,
[1][0][0][0][RTW89_ETSI][6] = 60,
- [1][0][0][0][RTW89_MKK][6] = 72,
+ [1][0][0][0][RTW89_MKK][6] = 66,
[1][0][0][0][RTW89_IC][6] = 52,
[1][0][0][0][RTW89_ACMA][6] = 60,
- [1][0][0][0][RTW89_FCC][7] = 52,
+ [1][0][0][0][RTW89_FCC][7] = 42,
[1][0][0][0][RTW89_ETSI][7] = 60,
- [1][0][0][0][RTW89_MKK][7] = 72,
+ [1][0][0][0][RTW89_MKK][7] = 66,
[1][0][0][0][RTW89_IC][7] = 52,
[1][0][0][0][RTW89_ACMA][7] = 60,
- [1][0][0][0][RTW89_FCC][8] = 52,
+ [1][0][0][0][RTW89_FCC][8] = 42,
[1][0][0][0][RTW89_ETSI][8] = 60,
- [1][0][0][0][RTW89_MKK][8] = 72,
+ [1][0][0][0][RTW89_MKK][8] = 66,
[1][0][0][0][RTW89_IC][8] = 52,
[1][0][0][0][RTW89_ACMA][8] = 60,
- [1][0][0][0][RTW89_FCC][9] = 44,
+ [1][0][0][0][RTW89_FCC][9] = 34,
[1][0][0][0][RTW89_ETSI][9] = 60,
- [1][0][0][0][RTW89_MKK][9] = 72,
+ [1][0][0][0][RTW89_MKK][9] = 66,
[1][0][0][0][RTW89_IC][9] = 44,
[1][0][0][0][RTW89_ACMA][9] = 60,
- [1][0][0][0][RTW89_FCC][10] = 32,
+ [1][0][0][0][RTW89_FCC][10] = 22,
[1][0][0][0][RTW89_ETSI][10] = 60,
- [1][0][0][0][RTW89_MKK][10] = 70,
+ [1][0][0][0][RTW89_MKK][10] = 66,
[1][0][0][0][RTW89_IC][10] = 32,
[1][0][0][0][RTW89_ACMA][10] = 60,
[1][0][0][0][RTW89_FCC][11] = 127,
@@ -14233,49 +14233,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MKK][1] = 127,
[1][1][0][0][RTW89_IC][1] = 127,
[1][1][0][0][RTW89_ACMA][1] = 127,
- [1][1][0][0][RTW89_FCC][2] = 48,
+ [1][1][0][0][RTW89_FCC][2] = 38,
[1][1][0][0][RTW89_ETSI][2] = 48,
- [1][1][0][0][RTW89_MKK][2] = 60,
+ [1][1][0][0][RTW89_MKK][2] = 58,
[1][1][0][0][RTW89_IC][2] = 48,
[1][1][0][0][RTW89_ACMA][2] = 48,
- [1][1][0][0][RTW89_FCC][3] = 48,
+ [1][1][0][0][RTW89_FCC][3] = 38,
[1][1][0][0][RTW89_ETSI][3] = 48,
- [1][1][0][0][RTW89_MKK][3] = 60,
+ [1][1][0][0][RTW89_MKK][3] = 58,
[1][1][0][0][RTW89_IC][3] = 48,
[1][1][0][0][RTW89_ACMA][3] = 48,
- [1][1][0][0][RTW89_FCC][4] = 48,
+ [1][1][0][0][RTW89_FCC][4] = 38,
[1][1][0][0][RTW89_ETSI][4] = 48,
- [1][1][0][0][RTW89_MKK][4] = 60,
+ [1][1][0][0][RTW89_MKK][4] = 58,
[1][1][0][0][RTW89_IC][4] = 48,
[1][1][0][0][RTW89_ACMA][4] = 48,
- [1][1][0][0][RTW89_FCC][5] = 64,
+ [1][1][0][0][RTW89_FCC][5] = 54,
[1][1][0][0][RTW89_ETSI][5] = 48,
- [1][1][0][0][RTW89_MKK][5] = 60,
+ [1][1][0][0][RTW89_MKK][5] = 58,
[1][1][0][0][RTW89_IC][5] = 64,
[1][1][0][0][RTW89_ACMA][5] = 48,
- [1][1][0][0][RTW89_FCC][6] = 36,
+ [1][1][0][0][RTW89_FCC][6] = 26,
[1][1][0][0][RTW89_ETSI][6] = 48,
- [1][1][0][0][RTW89_MKK][6] = 60,
+ [1][1][0][0][RTW89_MKK][6] = 58,
[1][1][0][0][RTW89_IC][6] = 36,
[1][1][0][0][RTW89_ACMA][6] = 48,
- [1][1][0][0][RTW89_FCC][7] = 36,
+ [1][1][0][0][RTW89_FCC][7] = 26,
[1][1][0][0][RTW89_ETSI][7] = 48,
- [1][1][0][0][RTW89_MKK][7] = 60,
+ [1][1][0][0][RTW89_MKK][7] = 58,
[1][1][0][0][RTW89_IC][7] = 36,
[1][1][0][0][RTW89_ACMA][7] = 48,
- [1][1][0][0][RTW89_FCC][8] = 36,
+ [1][1][0][0][RTW89_FCC][8] = 26,
[1][1][0][0][RTW89_ETSI][8] = 48,
- [1][1][0][0][RTW89_MKK][8] = 60,
+ [1][1][0][0][RTW89_MKK][8] = 58,
[1][1][0][0][RTW89_IC][8] = 36,
[1][1][0][0][RTW89_ACMA][8] = 48,
- [1][1][0][0][RTW89_FCC][9] = 32,
+ [1][1][0][0][RTW89_FCC][9] = 22,
[1][1][0][0][RTW89_ETSI][9] = 48,
- [1][1][0][0][RTW89_MKK][9] = 60,
+ [1][1][0][0][RTW89_MKK][9] = 58,
[1][1][0][0][RTW89_IC][9] = 32,
[1][1][0][0][RTW89_ACMA][9] = 48,
- [1][1][0][0][RTW89_FCC][10] = 32,
+ [1][1][0][0][RTW89_FCC][10] = 22,
[1][1][0][0][RTW89_ETSI][10] = 48,
- [1][1][0][0][RTW89_MKK][10] = 58,
+ [1][1][0][0][RTW89_MKK][10] = 56,
[1][1][0][0][RTW89_IC][10] = 32,
[1][1][0][0][RTW89_ACMA][10] = 48,
[1][1][0][0][RTW89_FCC][11] = 127,
@@ -14293,69 +14293,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][0][0][RTW89_MKK][13] = 127,
[1][1][0][0][RTW89_IC][13] = 127,
[1][1][0][0][RTW89_ACMA][13] = 127,
- [0][0][1][0][RTW89_FCC][0] = 78,
+ [0][0][1][0][RTW89_FCC][0] = 68,
[0][0][1][0][RTW89_ETSI][0] = 60,
[0][0][1][0][RTW89_MKK][0] = 76,
[0][0][1][0][RTW89_IC][0] = 78,
[0][0][1][0][RTW89_ACMA][0] = 60,
- [0][0][1][0][RTW89_FCC][1] = 78,
+ [0][0][1][0][RTW89_FCC][1] = 68,
[0][0][1][0][RTW89_ETSI][1] = 60,
- [0][0][1][0][RTW89_MKK][1] = 76,
+ [0][0][1][0][RTW89_MKK][1] = 78,
[0][0][1][0][RTW89_IC][1] = 78,
[0][0][1][0][RTW89_ACMA][1] = 60,
- [0][0][1][0][RTW89_FCC][2] = 80,
+ [0][0][1][0][RTW89_FCC][2] = 70,
[0][0][1][0][RTW89_ETSI][2] = 60,
- [0][0][1][0][RTW89_MKK][2] = 76,
- [0][0][1][0][RTW89_IC][2] = 80,
+ [0][0][1][0][RTW89_MKK][2] = 78,
+ [0][0][1][0][RTW89_IC][2] = 78,
[0][0][1][0][RTW89_ACMA][2] = 60,
- [0][0][1][0][RTW89_FCC][3] = 80,
+ [0][0][1][0][RTW89_FCC][3] = 70,
[0][0][1][0][RTW89_ETSI][3] = 60,
- [0][0][1][0][RTW89_MKK][3] = 76,
- [0][0][1][0][RTW89_IC][3] = 80,
+ [0][0][1][0][RTW89_MKK][3] = 78,
+ [0][0][1][0][RTW89_IC][3] = 78,
[0][0][1][0][RTW89_ACMA][3] = 60,
- [0][0][1][0][RTW89_FCC][4] = 80,
+ [0][0][1][0][RTW89_FCC][4] = 70,
[0][0][1][0][RTW89_ETSI][4] = 60,
- [0][0][1][0][RTW89_MKK][4] = 76,
- [0][0][1][0][RTW89_IC][4] = 80,
+ [0][0][1][0][RTW89_MKK][4] = 78,
+ [0][0][1][0][RTW89_IC][4] = 78,
[0][0][1][0][RTW89_ACMA][4] = 60,
- [0][0][1][0][RTW89_FCC][5] = 80,
+ [0][0][1][0][RTW89_FCC][5] = 70,
[0][0][1][0][RTW89_ETSI][5] = 60,
- [0][0][1][0][RTW89_MKK][5] = 76,
- [0][0][1][0][RTW89_IC][5] = 80,
+ [0][0][1][0][RTW89_MKK][5] = 78,
+ [0][0][1][0][RTW89_IC][5] = 78,
[0][0][1][0][RTW89_ACMA][5] = 60,
- [0][0][1][0][RTW89_FCC][6] = 80,
+ [0][0][1][0][RTW89_FCC][6] = 70,
[0][0][1][0][RTW89_ETSI][6] = 60,
[0][0][1][0][RTW89_MKK][6] = 76,
- [0][0][1][0][RTW89_IC][6] = 80,
+ [0][0][1][0][RTW89_IC][6] = 78,
[0][0][1][0][RTW89_ACMA][6] = 60,
- [0][0][1][0][RTW89_FCC][7] = 80,
+ [0][0][1][0][RTW89_FCC][7] = 70,
[0][0][1][0][RTW89_ETSI][7] = 60,
- [0][0][1][0][RTW89_MKK][7] = 76,
- [0][0][1][0][RTW89_IC][7] = 80,
+ [0][0][1][0][RTW89_MKK][7] = 78,
+ [0][0][1][0][RTW89_IC][7] = 78,
[0][0][1][0][RTW89_ACMA][7] = 60,
- [0][0][1][0][RTW89_FCC][8] = 80,
+ [0][0][1][0][RTW89_FCC][8] = 70,
[0][0][1][0][RTW89_ETSI][8] = 60,
- [0][0][1][0][RTW89_MKK][8] = 76,
- [0][0][1][0][RTW89_IC][8] = 80,
+ [0][0][1][0][RTW89_MKK][8] = 78,
+ [0][0][1][0][RTW89_IC][8] = 78,
[0][0][1][0][RTW89_ACMA][8] = 60,
- [0][0][1][0][RTW89_FCC][9] = 76,
+ [0][0][1][0][RTW89_FCC][9] = 66,
[0][0][1][0][RTW89_ETSI][9] = 60,
- [0][0][1][0][RTW89_MKK][9] = 76,
+ [0][0][1][0][RTW89_MKK][9] = 78,
[0][0][1][0][RTW89_IC][9] = 76,
[0][0][1][0][RTW89_ACMA][9] = 60,
- [0][0][1][0][RTW89_FCC][10] = 76,
+ [0][0][1][0][RTW89_FCC][10] = 66,
[0][0][1][0][RTW89_ETSI][10] = 60,
- [0][0][1][0][RTW89_MKK][10] = 76,
+ [0][0][1][0][RTW89_MKK][10] = 78,
[0][0][1][0][RTW89_IC][10] = 76,
[0][0][1][0][RTW89_ACMA][10] = 60,
- [0][0][1][0][RTW89_FCC][11] = 56,
+ [0][0][1][0][RTW89_FCC][11] = 46,
[0][0][1][0][RTW89_ETSI][11] = 60,
- [0][0][1][0][RTW89_MKK][11] = 76,
+ [0][0][1][0][RTW89_MKK][11] = 78,
[0][0][1][0][RTW89_IC][11] = 56,
[0][0][1][0][RTW89_ACMA][11] = 60,
- [0][0][1][0][RTW89_FCC][12] = 52,
+ [0][0][1][0][RTW89_FCC][12] = 42,
[0][0][1][0][RTW89_ETSI][12] = 60,
- [0][0][1][0][RTW89_MKK][12] = 76,
+ [0][0][1][0][RTW89_MKK][12] = 78,
[0][0][1][0][RTW89_IC][12] = 52,
[0][0][1][0][RTW89_ACMA][12] = 60,
[0][0][1][0][RTW89_FCC][13] = 127,
@@ -14363,69 +14363,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MKK][13] = 127,
[0][0][1][0][RTW89_IC][13] = 127,
[0][0][1][0][RTW89_ACMA][13] = 127,
- [0][1][1][0][RTW89_FCC][0] = 64,
+ [0][1][1][0][RTW89_FCC][0] = 54,
[0][1][1][0][RTW89_ETSI][0] = 48,
- [0][1][1][0][RTW89_MKK][0] = 68,
+ [0][1][1][0][RTW89_MKK][0] = 66,
[0][1][1][0][RTW89_IC][0] = 64,
[0][1][1][0][RTW89_ACMA][0] = 48,
- [0][1][1][0][RTW89_FCC][1] = 64,
+ [0][1][1][0][RTW89_FCC][1] = 54,
[0][1][1][0][RTW89_ETSI][1] = 48,
- [0][1][1][0][RTW89_MKK][1] = 68,
+ [0][1][1][0][RTW89_MKK][1] = 66,
[0][1][1][0][RTW89_IC][1] = 64,
[0][1][1][0][RTW89_ACMA][1] = 48,
- [0][1][1][0][RTW89_FCC][2] = 68,
+ [0][1][1][0][RTW89_FCC][2] = 58,
[0][1][1][0][RTW89_ETSI][2] = 48,
- [0][1][1][0][RTW89_MKK][2] = 68,
+ [0][1][1][0][RTW89_MKK][2] = 66,
[0][1][1][0][RTW89_IC][2] = 68,
[0][1][1][0][RTW89_ACMA][2] = 48,
- [0][1][1][0][RTW89_FCC][3] = 72,
+ [0][1][1][0][RTW89_FCC][3] = 62,
[0][1][1][0][RTW89_ETSI][3] = 48,
- [0][1][1][0][RTW89_MKK][3] = 68,
+ [0][1][1][0][RTW89_MKK][3] = 66,
[0][1][1][0][RTW89_IC][3] = 72,
[0][1][1][0][RTW89_ACMA][3] = 48,
- [0][1][1][0][RTW89_FCC][4] = 80,
+ [0][1][1][0][RTW89_FCC][4] = 70,
[0][1][1][0][RTW89_ETSI][4] = 48,
- [0][1][1][0][RTW89_MKK][4] = 68,
- [0][1][1][0][RTW89_IC][4] = 80,
+ [0][1][1][0][RTW89_MKK][4] = 66,
+ [0][1][1][0][RTW89_IC][4] = 78,
[0][1][1][0][RTW89_ACMA][4] = 48,
- [0][1][1][0][RTW89_FCC][5] = 80,
+ [0][1][1][0][RTW89_FCC][5] = 70,
[0][1][1][0][RTW89_ETSI][5] = 48,
- [0][1][1][0][RTW89_MKK][5] = 68,
- [0][1][1][0][RTW89_IC][5] = 80,
+ [0][1][1][0][RTW89_MKK][5] = 66,
+ [0][1][1][0][RTW89_IC][5] = 78,
[0][1][1][0][RTW89_ACMA][5] = 48,
- [0][1][1][0][RTW89_FCC][6] = 80,
+ [0][1][1][0][RTW89_FCC][6] = 70,
[0][1][1][0][RTW89_ETSI][6] = 48,
- [0][1][1][0][RTW89_MKK][6] = 68,
- [0][1][1][0][RTW89_IC][6] = 80,
+ [0][1][1][0][RTW89_MKK][6] = 66,
+ [0][1][1][0][RTW89_IC][6] = 78,
[0][1][1][0][RTW89_ACMA][6] = 48,
- [0][1][1][0][RTW89_FCC][7] = 72,
+ [0][1][1][0][RTW89_FCC][7] = 62,
[0][1][1][0][RTW89_ETSI][7] = 48,
- [0][1][1][0][RTW89_MKK][7] = 68,
+ [0][1][1][0][RTW89_MKK][7] = 66,
[0][1][1][0][RTW89_IC][7] = 72,
[0][1][1][0][RTW89_ACMA][7] = 48,
- [0][1][1][0][RTW89_FCC][8] = 68,
+ [0][1][1][0][RTW89_FCC][8] = 58,
[0][1][1][0][RTW89_ETSI][8] = 48,
- [0][1][1][0][RTW89_MKK][8] = 68,
+ [0][1][1][0][RTW89_MKK][8] = 66,
[0][1][1][0][RTW89_IC][8] = 68,
[0][1][1][0][RTW89_ACMA][8] = 48,
- [0][1][1][0][RTW89_FCC][9] = 64,
+ [0][1][1][0][RTW89_FCC][9] = 54,
[0][1][1][0][RTW89_ETSI][9] = 48,
- [0][1][1][0][RTW89_MKK][9] = 68,
+ [0][1][1][0][RTW89_MKK][9] = 66,
[0][1][1][0][RTW89_IC][9] = 64,
[0][1][1][0][RTW89_ACMA][9] = 48,
- [0][1][1][0][RTW89_FCC][10] = 64,
+ [0][1][1][0][RTW89_FCC][10] = 54,
[0][1][1][0][RTW89_ETSI][10] = 48,
- [0][1][1][0][RTW89_MKK][10] = 68,
+ [0][1][1][0][RTW89_MKK][10] = 66,
[0][1][1][0][RTW89_IC][10] = 64,
[0][1][1][0][RTW89_ACMA][10] = 48,
- [0][1][1][0][RTW89_FCC][11] = 48,
+ [0][1][1][0][RTW89_FCC][11] = 38,
[0][1][1][0][RTW89_ETSI][11] = 48,
- [0][1][1][0][RTW89_MKK][11] = 68,
+ [0][1][1][0][RTW89_MKK][11] = 66,
[0][1][1][0][RTW89_IC][11] = 48,
[0][1][1][0][RTW89_ACMA][11] = 48,
- [0][1][1][0][RTW89_FCC][12] = 44,
+ [0][1][1][0][RTW89_FCC][12] = 34,
[0][1][1][0][RTW89_ETSI][12] = 48,
- [0][1][1][0][RTW89_MKK][12] = 68,
+ [0][1][1][0][RTW89_MKK][12] = 66,
[0][1][1][0][RTW89_IC][12] = 44,
[0][1][1][0][RTW89_ACMA][12] = 48,
[0][1][1][0][RTW89_FCC][13] = 127,
@@ -14433,69 +14433,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MKK][13] = 127,
[0][1][1][0][RTW89_IC][13] = 127,
[0][1][1][0][RTW89_ACMA][13] = 127,
- [0][0][2][0][RTW89_FCC][0] = 78,
+ [0][0][2][0][RTW89_FCC][0] = 68,
[0][0][2][0][RTW89_ETSI][0] = 60,
- [0][0][2][0][RTW89_MKK][0] = 76,
+ [0][0][2][0][RTW89_MKK][0] = 78,
[0][0][2][0][RTW89_IC][0] = 78,
[0][0][2][0][RTW89_ACMA][0] = 60,
- [0][0][2][0][RTW89_FCC][1] = 78,
+ [0][0][2][0][RTW89_FCC][1] = 68,
[0][0][2][0][RTW89_ETSI][1] = 60,
- [0][0][2][0][RTW89_MKK][1] = 76,
+ [0][0][2][0][RTW89_MKK][1] = 78,
[0][0][2][0][RTW89_IC][1] = 78,
[0][0][2][0][RTW89_ACMA][1] = 60,
- [0][0][2][0][RTW89_FCC][2] = 80,
+ [0][0][2][0][RTW89_FCC][2] = 70,
[0][0][2][0][RTW89_ETSI][2] = 60,
- [0][0][2][0][RTW89_MKK][2] = 76,
- [0][0][2][0][RTW89_IC][2] = 80,
+ [0][0][2][0][RTW89_MKK][2] = 78,
+ [0][0][2][0][RTW89_IC][2] = 78,
[0][0][2][0][RTW89_ACMA][2] = 60,
- [0][0][2][0][RTW89_FCC][3] = 80,
+ [0][0][2][0][RTW89_FCC][3] = 70,
[0][0][2][0][RTW89_ETSI][3] = 60,
- [0][0][2][0][RTW89_MKK][3] = 76,
- [0][0][2][0][RTW89_IC][3] = 80,
+ [0][0][2][0][RTW89_MKK][3] = 78,
+ [0][0][2][0][RTW89_IC][3] = 78,
[0][0][2][0][RTW89_ACMA][3] = 60,
- [0][0][2][0][RTW89_FCC][4] = 80,
+ [0][0][2][0][RTW89_FCC][4] = 70,
[0][0][2][0][RTW89_ETSI][4] = 60,
- [0][0][2][0][RTW89_MKK][4] = 76,
- [0][0][2][0][RTW89_IC][4] = 80,
+ [0][0][2][0][RTW89_MKK][4] = 78,
+ [0][0][2][0][RTW89_IC][4] = 78,
[0][0][2][0][RTW89_ACMA][4] = 60,
- [0][0][2][0][RTW89_FCC][5] = 80,
+ [0][0][2][0][RTW89_FCC][5] = 70,
[0][0][2][0][RTW89_ETSI][5] = 60,
- [0][0][2][0][RTW89_MKK][5] = 76,
- [0][0][2][0][RTW89_IC][5] = 80,
+ [0][0][2][0][RTW89_MKK][5] = 78,
+ [0][0][2][0][RTW89_IC][5] = 78,
[0][0][2][0][RTW89_ACMA][5] = 60,
- [0][0][2][0][RTW89_FCC][6] = 80,
+ [0][0][2][0][RTW89_FCC][6] = 70,
[0][0][2][0][RTW89_ETSI][6] = 60,
- [0][0][2][0][RTW89_MKK][6] = 76,
- [0][0][2][0][RTW89_IC][6] = 80,
+ [0][0][2][0][RTW89_MKK][6] = 78,
+ [0][0][2][0][RTW89_IC][6] = 78,
[0][0][2][0][RTW89_ACMA][6] = 60,
- [0][0][2][0][RTW89_FCC][7] = 80,
+ [0][0][2][0][RTW89_FCC][7] = 70,
[0][0][2][0][RTW89_ETSI][7] = 60,
- [0][0][2][0][RTW89_MKK][7] = 76,
- [0][0][2][0][RTW89_IC][7] = 80,
+ [0][0][2][0][RTW89_MKK][7] = 78,
+ [0][0][2][0][RTW89_IC][7] = 78,
[0][0][2][0][RTW89_ACMA][7] = 60,
- [0][0][2][0][RTW89_FCC][8] = 78,
+ [0][0][2][0][RTW89_FCC][8] = 68,
[0][0][2][0][RTW89_ETSI][8] = 60,
- [0][0][2][0][RTW89_MKK][8] = 76,
+ [0][0][2][0][RTW89_MKK][8] = 78,
[0][0][2][0][RTW89_IC][8] = 78,
[0][0][2][0][RTW89_ACMA][8] = 60,
- [0][0][2][0][RTW89_FCC][9] = 74,
+ [0][0][2][0][RTW89_FCC][9] = 64,
[0][0][2][0][RTW89_ETSI][9] = 60,
- [0][0][2][0][RTW89_MKK][9] = 76,
+ [0][0][2][0][RTW89_MKK][9] = 78,
[0][0][2][0][RTW89_IC][9] = 74,
[0][0][2][0][RTW89_ACMA][9] = 60,
- [0][0][2][0][RTW89_FCC][10] = 74,
+ [0][0][2][0][RTW89_FCC][10] = 64,
[0][0][2][0][RTW89_ETSI][10] = 60,
- [0][0][2][0][RTW89_MKK][10] = 76,
+ [0][0][2][0][RTW89_MKK][10] = 78,
[0][0][2][0][RTW89_IC][10] = 74,
[0][0][2][0][RTW89_ACMA][10] = 60,
- [0][0][2][0][RTW89_FCC][11] = 56,
+ [0][0][2][0][RTW89_FCC][11] = 46,
[0][0][2][0][RTW89_ETSI][11] = 60,
- [0][0][2][0][RTW89_MKK][11] = 76,
+ [0][0][2][0][RTW89_MKK][11] = 78,
[0][0][2][0][RTW89_IC][11] = 56,
[0][0][2][0][RTW89_ACMA][11] = 60,
- [0][0][2][0][RTW89_FCC][12] = 52,
+ [0][0][2][0][RTW89_FCC][12] = 42,
[0][0][2][0][RTW89_ETSI][12] = 60,
- [0][0][2][0][RTW89_MKK][12] = 76,
+ [0][0][2][0][RTW89_MKK][12] = 78,
[0][0][2][0][RTW89_IC][12] = 52,
[0][0][2][0][RTW89_ACMA][12] = 60,
[0][0][2][0][RTW89_FCC][13] = 127,
@@ -14503,69 +14503,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_MKK][13] = 127,
[0][0][2][0][RTW89_IC][13] = 127,
[0][0][2][0][RTW89_ACMA][13] = 127,
- [0][1][2][0][RTW89_FCC][0] = 60,
+ [0][1][2][0][RTW89_FCC][0] = 50,
[0][1][2][0][RTW89_ETSI][0] = 48,
- [0][1][2][0][RTW89_MKK][0] = 70,
+ [0][1][2][0][RTW89_MKK][0] = 68,
[0][1][2][0][RTW89_IC][0] = 60,
[0][1][2][0][RTW89_ACMA][0] = 48,
- [0][1][2][0][RTW89_FCC][1] = 60,
+ [0][1][2][0][RTW89_FCC][1] = 50,
[0][1][2][0][RTW89_ETSI][1] = 48,
- [0][1][2][0][RTW89_MKK][1] = 70,
+ [0][1][2][0][RTW89_MKK][1] = 68,
[0][1][2][0][RTW89_IC][1] = 60,
[0][1][2][0][RTW89_ACMA][1] = 48,
- [0][1][2][0][RTW89_FCC][2] = 64,
+ [0][1][2][0][RTW89_FCC][2] = 54,
[0][1][2][0][RTW89_ETSI][2] = 48,
- [0][1][2][0][RTW89_MKK][2] = 70,
+ [0][1][2][0][RTW89_MKK][2] = 68,
[0][1][2][0][RTW89_IC][2] = 64,
[0][1][2][0][RTW89_ACMA][2] = 48,
- [0][1][2][0][RTW89_FCC][3] = 68,
+ [0][1][2][0][RTW89_FCC][3] = 58,
[0][1][2][0][RTW89_ETSI][3] = 48,
- [0][1][2][0][RTW89_MKK][3] = 70,
+ [0][1][2][0][RTW89_MKK][3] = 68,
[0][1][2][0][RTW89_IC][3] = 68,
[0][1][2][0][RTW89_ACMA][3] = 48,
- [0][1][2][0][RTW89_FCC][4] = 74,
+ [0][1][2][0][RTW89_FCC][4] = 64,
[0][1][2][0][RTW89_ETSI][4] = 48,
- [0][1][2][0][RTW89_MKK][4] = 70,
+ [0][1][2][0][RTW89_MKK][4] = 68,
[0][1][2][0][RTW89_IC][4] = 74,
[0][1][2][0][RTW89_ACMA][4] = 48,
- [0][1][2][0][RTW89_FCC][5] = 80,
+ [0][1][2][0][RTW89_FCC][5] = 70,
[0][1][2][0][RTW89_ETSI][5] = 48,
- [0][1][2][0][RTW89_MKK][5] = 70,
- [0][1][2][0][RTW89_IC][5] = 80,
+ [0][1][2][0][RTW89_MKK][5] = 68,
+ [0][1][2][0][RTW89_IC][5] = 78,
[0][1][2][0][RTW89_ACMA][5] = 48,
- [0][1][2][0][RTW89_FCC][6] = 76,
+ [0][1][2][0][RTW89_FCC][6] = 66,
[0][1][2][0][RTW89_ETSI][6] = 48,
- [0][1][2][0][RTW89_MKK][6] = 70,
+ [0][1][2][0][RTW89_MKK][6] = 68,
[0][1][2][0][RTW89_IC][6] = 76,
[0][1][2][0][RTW89_ACMA][6] = 48,
- [0][1][2][0][RTW89_FCC][7] = 68,
+ [0][1][2][0][RTW89_FCC][7] = 58,
[0][1][2][0][RTW89_ETSI][7] = 48,
- [0][1][2][0][RTW89_MKK][7] = 70,
+ [0][1][2][0][RTW89_MKK][7] = 68,
[0][1][2][0][RTW89_IC][7] = 68,
[0][1][2][0][RTW89_ACMA][7] = 48,
- [0][1][2][0][RTW89_FCC][8] = 64,
+ [0][1][2][0][RTW89_FCC][8] = 54,
[0][1][2][0][RTW89_ETSI][8] = 48,
- [0][1][2][0][RTW89_MKK][8] = 70,
+ [0][1][2][0][RTW89_MKK][8] = 68,
[0][1][2][0][RTW89_IC][8] = 64,
[0][1][2][0][RTW89_ACMA][8] = 48,
- [0][1][2][0][RTW89_FCC][9] = 60,
+ [0][1][2][0][RTW89_FCC][9] = 50,
[0][1][2][0][RTW89_ETSI][9] = 48,
- [0][1][2][0][RTW89_MKK][9] = 70,
+ [0][1][2][0][RTW89_MKK][9] = 68,
[0][1][2][0][RTW89_IC][9] = 60,
[0][1][2][0][RTW89_ACMA][9] = 48,
- [0][1][2][0][RTW89_FCC][10] = 60,
+ [0][1][2][0][RTW89_FCC][10] = 50,
[0][1][2][0][RTW89_ETSI][10] = 48,
- [0][1][2][0][RTW89_MKK][10] = 70,
+ [0][1][2][0][RTW89_MKK][10] = 68,
[0][1][2][0][RTW89_IC][10] = 60,
[0][1][2][0][RTW89_ACMA][10] = 48,
- [0][1][2][0][RTW89_FCC][11] = 48,
+ [0][1][2][0][RTW89_FCC][11] = 38,
[0][1][2][0][RTW89_ETSI][11] = 48,
- [0][1][2][0][RTW89_MKK][11] = 70,
+ [0][1][2][0][RTW89_MKK][11] = 68,
[0][1][2][0][RTW89_IC][11] = 48,
[0][1][2][0][RTW89_ACMA][11] = 48,
- [0][1][2][0][RTW89_FCC][12] = 44,
+ [0][1][2][0][RTW89_FCC][12] = 34,
[0][1][2][0][RTW89_ETSI][12] = 48,
- [0][1][2][0][RTW89_MKK][12] = 70,
+ [0][1][2][0][RTW89_MKK][12] = 68,
[0][1][2][0][RTW89_IC][12] = 44,
[0][1][2][0][RTW89_ACMA][12] = 48,
[0][1][2][0][RTW89_FCC][13] = 127,
@@ -14573,69 +14573,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_MKK][13] = 127,
[0][1][2][0][RTW89_IC][13] = 127,
[0][1][2][0][RTW89_ACMA][13] = 127,
- [0][1][2][1][RTW89_FCC][0] = 60,
- [0][1][2][1][RTW89_ETSI][0] = 38,
- [0][1][2][1][RTW89_MKK][0] = 58,
+ [0][1][2][1][RTW89_FCC][0] = 50,
+ [0][1][2][1][RTW89_ETSI][0] = 36,
+ [0][1][2][1][RTW89_MKK][0] = 68,
[0][1][2][1][RTW89_IC][0] = 60,
[0][1][2][1][RTW89_ACMA][0] = 36,
- [0][1][2][1][RTW89_FCC][1] = 60,
- [0][1][2][1][RTW89_ETSI][1] = 38,
- [0][1][2][1][RTW89_MKK][1] = 58,
+ [0][1][2][1][RTW89_FCC][1] = 50,
+ [0][1][2][1][RTW89_ETSI][1] = 36,
+ [0][1][2][1][RTW89_MKK][1] = 68,
[0][1][2][1][RTW89_IC][1] = 60,
[0][1][2][1][RTW89_ACMA][1] = 36,
- [0][1][2][1][RTW89_FCC][2] = 64,
- [0][1][2][1][RTW89_ETSI][2] = 38,
- [0][1][2][1][RTW89_MKK][2] = 58,
+ [0][1][2][1][RTW89_FCC][2] = 54,
+ [0][1][2][1][RTW89_ETSI][2] = 36,
+ [0][1][2][1][RTW89_MKK][2] = 68,
[0][1][2][1][RTW89_IC][2] = 64,
[0][1][2][1][RTW89_ACMA][2] = 36,
- [0][1][2][1][RTW89_FCC][3] = 68,
- [0][1][2][1][RTW89_ETSI][3] = 38,
- [0][1][2][1][RTW89_MKK][3] = 58,
+ [0][1][2][1][RTW89_FCC][3] = 58,
+ [0][1][2][1][RTW89_ETSI][3] = 36,
+ [0][1][2][1][RTW89_MKK][3] = 68,
[0][1][2][1][RTW89_IC][3] = 68,
[0][1][2][1][RTW89_ACMA][3] = 36,
- [0][1][2][1][RTW89_FCC][4] = 74,
- [0][1][2][1][RTW89_ETSI][4] = 38,
- [0][1][2][1][RTW89_MKK][4] = 58,
+ [0][1][2][1][RTW89_FCC][4] = 64,
+ [0][1][2][1][RTW89_ETSI][4] = 36,
+ [0][1][2][1][RTW89_MKK][4] = 68,
[0][1][2][1][RTW89_IC][4] = 74,
[0][1][2][1][RTW89_ACMA][4] = 36,
- [0][1][2][1][RTW89_FCC][5] = 80,
- [0][1][2][1][RTW89_ETSI][5] = 38,
- [0][1][2][1][RTW89_MKK][5] = 58,
- [0][1][2][1][RTW89_IC][5] = 80,
+ [0][1][2][1][RTW89_FCC][5] = 70,
+ [0][1][2][1][RTW89_ETSI][5] = 36,
+ [0][1][2][1][RTW89_MKK][5] = 68,
+ [0][1][2][1][RTW89_IC][5] = 78,
[0][1][2][1][RTW89_ACMA][5] = 36,
- [0][1][2][1][RTW89_FCC][6] = 76,
- [0][1][2][1][RTW89_ETSI][6] = 38,
- [0][1][2][1][RTW89_MKK][6] = 58,
+ [0][1][2][1][RTW89_FCC][6] = 66,
+ [0][1][2][1][RTW89_ETSI][6] = 36,
+ [0][1][2][1][RTW89_MKK][6] = 68,
[0][1][2][1][RTW89_IC][6] = 76,
[0][1][2][1][RTW89_ACMA][6] = 36,
- [0][1][2][1][RTW89_FCC][7] = 68,
- [0][1][2][1][RTW89_ETSI][7] = 38,
- [0][1][2][1][RTW89_MKK][7] = 58,
+ [0][1][2][1][RTW89_FCC][7] = 58,
+ [0][1][2][1][RTW89_ETSI][7] = 36,
+ [0][1][2][1][RTW89_MKK][7] = 68,
[0][1][2][1][RTW89_IC][7] = 68,
[0][1][2][1][RTW89_ACMA][7] = 36,
- [0][1][2][1][RTW89_FCC][8] = 64,
- [0][1][2][1][RTW89_ETSI][8] = 38,
- [0][1][2][1][RTW89_MKK][8] = 58,
+ [0][1][2][1][RTW89_FCC][8] = 54,
+ [0][1][2][1][RTW89_ETSI][8] = 36,
+ [0][1][2][1][RTW89_MKK][8] = 68,
[0][1][2][1][RTW89_IC][8] = 64,
[0][1][2][1][RTW89_ACMA][8] = 36,
- [0][1][2][1][RTW89_FCC][9] = 60,
- [0][1][2][1][RTW89_ETSI][9] = 38,
- [0][1][2][1][RTW89_MKK][9] = 58,
+ [0][1][2][1][RTW89_FCC][9] = 50,
+ [0][1][2][1][RTW89_ETSI][9] = 36,
+ [0][1][2][1][RTW89_MKK][9] = 68,
[0][1][2][1][RTW89_IC][9] = 60,
[0][1][2][1][RTW89_ACMA][9] = 36,
- [0][1][2][1][RTW89_FCC][10] = 60,
- [0][1][2][1][RTW89_ETSI][10] = 38,
- [0][1][2][1][RTW89_MKK][10] = 58,
+ [0][1][2][1][RTW89_FCC][10] = 50,
+ [0][1][2][1][RTW89_ETSI][10] = 36,
+ [0][1][2][1][RTW89_MKK][10] = 68,
[0][1][2][1][RTW89_IC][10] = 60,
[0][1][2][1][RTW89_ACMA][10] = 36,
- [0][1][2][1][RTW89_FCC][11] = 48,
- [0][1][2][1][RTW89_ETSI][11] = 38,
- [0][1][2][1][RTW89_MKK][11] = 58,
+ [0][1][2][1][RTW89_FCC][11] = 38,
+ [0][1][2][1][RTW89_ETSI][11] = 36,
+ [0][1][2][1][RTW89_MKK][11] = 68,
[0][1][2][1][RTW89_IC][11] = 48,
[0][1][2][1][RTW89_ACMA][11] = 36,
- [0][1][2][1][RTW89_FCC][12] = 44,
- [0][1][2][1][RTW89_ETSI][12] = 38,
- [0][1][2][1][RTW89_MKK][12] = 58,
+ [0][1][2][1][RTW89_FCC][12] = 34,
+ [0][1][2][1][RTW89_ETSI][12] = 36,
+ [0][1][2][1][RTW89_MKK][12] = 68,
[0][1][2][1][RTW89_IC][12] = 44,
[0][1][2][1][RTW89_ACMA][12] = 36,
[0][1][2][1][RTW89_FCC][13] = 127,
@@ -14653,49 +14653,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_MKK][1] = 127,
[1][0][2][0][RTW89_IC][1] = 127,
[1][0][2][0][RTW89_ACMA][1] = 127,
- [1][0][2][0][RTW89_FCC][2] = 72,
+ [1][0][2][0][RTW89_FCC][2] = 62,
[1][0][2][0][RTW89_ETSI][2] = 60,
- [1][0][2][0][RTW89_MKK][2] = 72,
+ [1][0][2][0][RTW89_MKK][2] = 74,
[1][0][2][0][RTW89_IC][2] = 72,
[1][0][2][0][RTW89_ACMA][2] = 60,
- [1][0][2][0][RTW89_FCC][3] = 72,
+ [1][0][2][0][RTW89_FCC][3] = 62,
[1][0][2][0][RTW89_ETSI][3] = 60,
- [1][0][2][0][RTW89_MKK][3] = 72,
+ [1][0][2][0][RTW89_MKK][3] = 74,
[1][0][2][0][RTW89_IC][3] = 72,
[1][0][2][0][RTW89_ACMA][3] = 60,
- [1][0][2][0][RTW89_FCC][4] = 74,
+ [1][0][2][0][RTW89_FCC][4] = 64,
[1][0][2][0][RTW89_ETSI][4] = 60,
- [1][0][2][0][RTW89_MKK][4] = 72,
+ [1][0][2][0][RTW89_MKK][4] = 74,
[1][0][2][0][RTW89_IC][4] = 74,
[1][0][2][0][RTW89_ACMA][4] = 60,
- [1][0][2][0][RTW89_FCC][5] = 74,
+ [1][0][2][0][RTW89_FCC][5] = 64,
[1][0][2][0][RTW89_ETSI][5] = 60,
- [1][0][2][0][RTW89_MKK][5] = 72,
+ [1][0][2][0][RTW89_MKK][5] = 74,
[1][0][2][0][RTW89_IC][5] = 74,
[1][0][2][0][RTW89_ACMA][5] = 60,
- [1][0][2][0][RTW89_FCC][6] = 74,
+ [1][0][2][0][RTW89_FCC][6] = 64,
[1][0][2][0][RTW89_ETSI][6] = 60,
- [1][0][2][0][RTW89_MKK][6] = 72,
+ [1][0][2][0][RTW89_MKK][6] = 74,
[1][0][2][0][RTW89_IC][6] = 74,
[1][0][2][0][RTW89_ACMA][6] = 60,
- [1][0][2][0][RTW89_FCC][7] = 70,
+ [1][0][2][0][RTW89_FCC][7] = 60,
[1][0][2][0][RTW89_ETSI][7] = 60,
- [1][0][2][0][RTW89_MKK][7] = 72,
+ [1][0][2][0][RTW89_MKK][7] = 74,
[1][0][2][0][RTW89_IC][7] = 70,
[1][0][2][0][RTW89_ACMA][7] = 60,
- [1][0][2][0][RTW89_FCC][8] = 70,
+ [1][0][2][0][RTW89_FCC][8] = 60,
[1][0][2][0][RTW89_ETSI][8] = 60,
- [1][0][2][0][RTW89_MKK][8] = 72,
+ [1][0][2][0][RTW89_MKK][8] = 74,
[1][0][2][0][RTW89_IC][8] = 70,
[1][0][2][0][RTW89_ACMA][8] = 60,
- [1][0][2][0][RTW89_FCC][9] = 70,
+ [1][0][2][0][RTW89_FCC][9] = 60,
[1][0][2][0][RTW89_ETSI][9] = 60,
- [1][0][2][0][RTW89_MKK][9] = 72,
+ [1][0][2][0][RTW89_MKK][9] = 74,
[1][0][2][0][RTW89_IC][9] = 70,
[1][0][2][0][RTW89_ACMA][9] = 60,
- [1][0][2][0][RTW89_FCC][10] = 68,
+ [1][0][2][0][RTW89_FCC][10] = 58,
[1][0][2][0][RTW89_ETSI][10] = 60,
- [1][0][2][0][RTW89_MKK][10] = 72,
+ [1][0][2][0][RTW89_MKK][10] = 74,
[1][0][2][0][RTW89_IC][10] = 68,
[1][0][2][0][RTW89_ACMA][10] = 60,
[1][0][2][0][RTW89_FCC][11] = 127,
@@ -14723,49 +14723,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_MKK][1] = 127,
[1][1][2][0][RTW89_IC][1] = 127,
[1][1][2][0][RTW89_ACMA][1] = 127,
- [1][1][2][0][RTW89_FCC][2] = 56,
+ [1][1][2][0][RTW89_FCC][2] = 46,
[1][1][2][0][RTW89_ETSI][2] = 48,
- [1][1][2][0][RTW89_MKK][2] = 70,
+ [1][1][2][0][RTW89_MKK][2] = 68,
[1][1][2][0][RTW89_IC][2] = 56,
[1][1][2][0][RTW89_ACMA][2] = 48,
- [1][1][2][0][RTW89_FCC][3] = 56,
+ [1][1][2][0][RTW89_FCC][3] = 46,
[1][1][2][0][RTW89_ETSI][3] = 48,
- [1][1][2][0][RTW89_MKK][3] = 70,
+ [1][1][2][0][RTW89_MKK][3] = 68,
[1][1][2][0][RTW89_IC][3] = 56,
[1][1][2][0][RTW89_ACMA][3] = 48,
- [1][1][2][0][RTW89_FCC][4] = 60,
+ [1][1][2][0][RTW89_FCC][4] = 50,
[1][1][2][0][RTW89_ETSI][4] = 48,
- [1][1][2][0][RTW89_MKK][4] = 70,
+ [1][1][2][0][RTW89_MKK][4] = 68,
[1][1][2][0][RTW89_IC][4] = 60,
[1][1][2][0][RTW89_ACMA][4] = 48,
- [1][1][2][0][RTW89_FCC][5] = 68,
+ [1][1][2][0][RTW89_FCC][5] = 58,
[1][1][2][0][RTW89_ETSI][5] = 48,
- [1][1][2][0][RTW89_MKK][5] = 70,
+ [1][1][2][0][RTW89_MKK][5] = 68,
[1][1][2][0][RTW89_IC][5] = 68,
[1][1][2][0][RTW89_ACMA][5] = 48,
- [1][1][2][0][RTW89_FCC][6] = 60,
+ [1][1][2][0][RTW89_FCC][6] = 50,
[1][1][2][0][RTW89_ETSI][6] = 48,
- [1][1][2][0][RTW89_MKK][6] = 70,
+ [1][1][2][0][RTW89_MKK][6] = 68,
[1][1][2][0][RTW89_IC][6] = 60,
[1][1][2][0][RTW89_ACMA][6] = 48,
- [1][1][2][0][RTW89_FCC][7] = 56,
+ [1][1][2][0][RTW89_FCC][7] = 46,
[1][1][2][0][RTW89_ETSI][7] = 48,
- [1][1][2][0][RTW89_MKK][7] = 70,
+ [1][1][2][0][RTW89_MKK][7] = 68,
[1][1][2][0][RTW89_IC][7] = 56,
[1][1][2][0][RTW89_ACMA][7] = 48,
- [1][1][2][0][RTW89_FCC][8] = 56,
+ [1][1][2][0][RTW89_FCC][8] = 46,
[1][1][2][0][RTW89_ETSI][8] = 48,
- [1][1][2][0][RTW89_MKK][8] = 70,
+ [1][1][2][0][RTW89_MKK][8] = 68,
[1][1][2][0][RTW89_IC][8] = 56,
[1][1][2][0][RTW89_ACMA][8] = 48,
- [1][1][2][0][RTW89_FCC][9] = 44,
+ [1][1][2][0][RTW89_FCC][9] = 34,
[1][1][2][0][RTW89_ETSI][9] = 48,
- [1][1][2][0][RTW89_MKK][9] = 70,
+ [1][1][2][0][RTW89_MKK][9] = 68,
[1][1][2][0][RTW89_IC][9] = 44,
[1][1][2][0][RTW89_ACMA][9] = 48,
- [1][1][2][0][RTW89_FCC][10] = 40,
+ [1][1][2][0][RTW89_FCC][10] = 30,
[1][1][2][0][RTW89_ETSI][10] = 48,
- [1][1][2][0][RTW89_MKK][10] = 70,
+ [1][1][2][0][RTW89_MKK][10] = 68,
[1][1][2][0][RTW89_IC][10] = 40,
[1][1][2][0][RTW89_ACMA][10] = 48,
[1][1][2][0][RTW89_FCC][11] = 127,
@@ -14793,49 +14793,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_MKK][1] = 127,
[1][1][2][1][RTW89_IC][1] = 127,
[1][1][2][1][RTW89_ACMA][1] = 127,
- [1][1][2][1][RTW89_FCC][2] = 56,
- [1][1][2][1][RTW89_ETSI][2] = 38,
- [1][1][2][1][RTW89_MKK][2] = 58,
+ [1][1][2][1][RTW89_FCC][2] = 46,
+ [1][1][2][1][RTW89_ETSI][2] = 36,
+ [1][1][2][1][RTW89_MKK][2] = 68,
[1][1][2][1][RTW89_IC][2] = 56,
[1][1][2][1][RTW89_ACMA][2] = 36,
- [1][1][2][1][RTW89_FCC][3] = 56,
- [1][1][2][1][RTW89_ETSI][3] = 38,
- [1][1][2][1][RTW89_MKK][3] = 58,
+ [1][1][2][1][RTW89_FCC][3] = 46,
+ [1][1][2][1][RTW89_ETSI][3] = 36,
+ [1][1][2][1][RTW89_MKK][3] = 68,
[1][1][2][1][RTW89_IC][3] = 56,
[1][1][2][1][RTW89_ACMA][3] = 36,
- [1][1][2][1][RTW89_FCC][4] = 60,
- [1][1][2][1][RTW89_ETSI][4] = 38,
- [1][1][2][1][RTW89_MKK][4] = 58,
+ [1][1][2][1][RTW89_FCC][4] = 50,
+ [1][1][2][1][RTW89_ETSI][4] = 36,
+ [1][1][2][1][RTW89_MKK][4] = 68,
[1][1][2][1][RTW89_IC][4] = 60,
[1][1][2][1][RTW89_ACMA][4] = 36,
- [1][1][2][1][RTW89_FCC][5] = 68,
- [1][1][2][1][RTW89_ETSI][5] = 38,
- [1][1][2][1][RTW89_MKK][5] = 58,
+ [1][1][2][1][RTW89_FCC][5] = 58,
+ [1][1][2][1][RTW89_ETSI][5] = 36,
+ [1][1][2][1][RTW89_MKK][5] = 68,
[1][1][2][1][RTW89_IC][5] = 68,
[1][1][2][1][RTW89_ACMA][5] = 36,
- [1][1][2][1][RTW89_FCC][6] = 60,
- [1][1][2][1][RTW89_ETSI][6] = 38,
- [1][1][2][1][RTW89_MKK][6] = 58,
+ [1][1][2][1][RTW89_FCC][6] = 50,
+ [1][1][2][1][RTW89_ETSI][6] = 36,
+ [1][1][2][1][RTW89_MKK][6] = 68,
[1][1][2][1][RTW89_IC][6] = 60,
[1][1][2][1][RTW89_ACMA][6] = 36,
- [1][1][2][1][RTW89_FCC][7] = 56,
- [1][1][2][1][RTW89_ETSI][7] = 38,
- [1][1][2][1][RTW89_MKK][7] = 58,
+ [1][1][2][1][RTW89_FCC][7] = 46,
+ [1][1][2][1][RTW89_ETSI][7] = 36,
+ [1][1][2][1][RTW89_MKK][7] = 68,
[1][1][2][1][RTW89_IC][7] = 56,
[1][1][2][1][RTW89_ACMA][7] = 36,
- [1][1][2][1][RTW89_FCC][8] = 56,
- [1][1][2][1][RTW89_ETSI][8] = 38,
- [1][1][2][1][RTW89_MKK][8] = 58,
+ [1][1][2][1][RTW89_FCC][8] = 46,
+ [1][1][2][1][RTW89_ETSI][8] = 36,
+ [1][1][2][1][RTW89_MKK][8] = 68,
[1][1][2][1][RTW89_IC][8] = 56,
[1][1][2][1][RTW89_ACMA][8] = 36,
- [1][1][2][1][RTW89_FCC][9] = 44,
- [1][1][2][1][RTW89_ETSI][9] = 38,
- [1][1][2][1][RTW89_MKK][9] = 58,
+ [1][1][2][1][RTW89_FCC][9] = 34,
+ [1][1][2][1][RTW89_ETSI][9] = 36,
+ [1][1][2][1][RTW89_MKK][9] = 68,
[1][1][2][1][RTW89_IC][9] = 44,
[1][1][2][1][RTW89_ACMA][9] = 36,
- [1][1][2][1][RTW89_FCC][10] = 40,
- [1][1][2][1][RTW89_ETSI][10] = 38,
- [1][1][2][1][RTW89_MKK][10] = 58,
+ [1][1][2][1][RTW89_FCC][10] = 30,
+ [1][1][2][1][RTW89_ETSI][10] = 36,
+ [1][1][2][1][RTW89_MKK][10] = 68,
[1][1][2][1][RTW89_IC][10] = 40,
[1][1][2][1][RTW89_ACMA][10] = 36,
[1][1][2][1][RTW89_FCC][11] = 127,
@@ -14871,21 +14871,21 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][19] = 60,
[0][0][1][0][RTW89_WW][21] = 60,
[0][0][1][0][RTW89_WW][23] = 60,
- [0][0][1][0][RTW89_WW][25] = 60,
- [0][0][1][0][RTW89_WW][27] = 60,
- [0][0][1][0][RTW89_WW][29] = 60,
+ [0][0][1][0][RTW89_WW][25] = 66,
+ [0][0][1][0][RTW89_WW][27] = 66,
+ [0][0][1][0][RTW89_WW][29] = 66,
[0][0][1][0][RTW89_WW][31] = 60,
[0][0][1][0][RTW89_WW][33] = 60,
[0][0][1][0][RTW89_WW][35] = 60,
- [0][0][1][0][RTW89_WW][37] = 78,
+ [0][0][1][0][RTW89_WW][37] = 70,
[0][0][1][0][RTW89_WW][38] = 30,
[0][0][1][0][RTW89_WW][40] = 30,
[0][0][1][0][RTW89_WW][42] = 30,
[0][0][1][0][RTW89_WW][44] = 30,
[0][0][1][0][RTW89_WW][46] = 30,
- [0][0][1][0][RTW89_WW][48] = 80,
- [0][0][1][0][RTW89_WW][50] = 80,
- [0][0][1][0][RTW89_WW][52] = 80,
+ [0][0][1][0][RTW89_WW][48] = 70,
+ [0][0][1][0][RTW89_WW][50] = 70,
+ [0][0][1][0][RTW89_WW][52] = 70,
[0][1][1][0][RTW89_WW][0] = 42,
[0][1][1][0][RTW89_WW][2] = 42,
[0][1][1][0][RTW89_WW][4] = 42,
@@ -14899,26 +14899,26 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_WW][19] = 48,
[0][1][1][0][RTW89_WW][21] = 48,
[0][1][1][0][RTW89_WW][23] = 48,
- [0][1][1][0][RTW89_WW][25] = 48,
- [0][1][1][0][RTW89_WW][27] = 48,
- [0][1][1][0][RTW89_WW][29] = 48,
+ [0][1][1][0][RTW89_WW][25] = 54,
+ [0][1][1][0][RTW89_WW][27] = 54,
+ [0][1][1][0][RTW89_WW][29] = 54,
[0][1][1][0][RTW89_WW][31] = 48,
[0][1][1][0][RTW89_WW][33] = 48,
[0][1][1][0][RTW89_WW][35] = 48,
- [0][1][1][0][RTW89_WW][37] = 70,
+ [0][1][1][0][RTW89_WW][37] = 60,
[0][1][1][0][RTW89_WW][38] = 18,
[0][1][1][0][RTW89_WW][40] = 16,
[0][1][1][0][RTW89_WW][42] = 18,
[0][1][1][0][RTW89_WW][44] = 16,
[0][1][1][0][RTW89_WW][46] = 18,
- [0][1][1][0][RTW89_WW][48] = 58,
- [0][1][1][0][RTW89_WW][50] = 58,
- [0][1][1][0][RTW89_WW][52] = 58,
+ [0][1][1][0][RTW89_WW][48] = 48,
+ [0][1][1][0][RTW89_WW][50] = 48,
+ [0][1][1][0][RTW89_WW][52] = 48,
[0][0][2][0][RTW89_WW][0] = 62,
[0][0][2][0][RTW89_WW][2] = 62,
[0][0][2][0][RTW89_WW][4] = 62,
- [0][0][2][0][RTW89_WW][6] = 62,
- [0][0][2][0][RTW89_WW][8] = 62,
+ [0][0][2][0][RTW89_WW][6] = 60,
+ [0][0][2][0][RTW89_WW][8] = 58,
[0][0][2][0][RTW89_WW][10] = 62,
[0][0][2][0][RTW89_WW][12] = 62,
[0][0][2][0][RTW89_WW][14] = 62,
@@ -14927,26 +14927,26 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_WW][19] = 62,
[0][0][2][0][RTW89_WW][21] = 62,
[0][0][2][0][RTW89_WW][23] = 62,
- [0][0][2][0][RTW89_WW][25] = 62,
- [0][0][2][0][RTW89_WW][27] = 62,
- [0][0][2][0][RTW89_WW][29] = 62,
+ [0][0][2][0][RTW89_WW][25] = 66,
+ [0][0][2][0][RTW89_WW][27] = 66,
+ [0][0][2][0][RTW89_WW][29] = 66,
[0][0][2][0][RTW89_WW][31] = 62,
[0][0][2][0][RTW89_WW][33] = 62,
[0][0][2][0][RTW89_WW][35] = 62,
- [0][0][2][0][RTW89_WW][37] = 78,
+ [0][0][2][0][RTW89_WW][37] = 70,
[0][0][2][0][RTW89_WW][38] = 30,
[0][0][2][0][RTW89_WW][40] = 30,
[0][0][2][0][RTW89_WW][42] = 30,
[0][0][2][0][RTW89_WW][44] = 30,
[0][0][2][0][RTW89_WW][46] = 30,
- [0][0][2][0][RTW89_WW][48] = 80,
- [0][0][2][0][RTW89_WW][50] = 80,
- [0][0][2][0][RTW89_WW][52] = 80,
+ [0][0][2][0][RTW89_WW][48] = 70,
+ [0][0][2][0][RTW89_WW][50] = 70,
+ [0][0][2][0][RTW89_WW][52] = 70,
[0][1][2][0][RTW89_WW][0] = 44,
[0][1][2][0][RTW89_WW][2] = 44,
[0][1][2][0][RTW89_WW][4] = 44,
[0][1][2][0][RTW89_WW][6] = 44,
- [0][1][2][0][RTW89_WW][8] = 50,
+ [0][1][2][0][RTW89_WW][8] = 42,
[0][1][2][0][RTW89_WW][10] = 50,
[0][1][2][0][RTW89_WW][12] = 50,
[0][1][2][0][RTW89_WW][14] = 50,
@@ -14955,21 +14955,21 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_WW][19] = 50,
[0][1][2][0][RTW89_WW][21] = 50,
[0][1][2][0][RTW89_WW][23] = 50,
- [0][1][2][0][RTW89_WW][25] = 50,
- [0][1][2][0][RTW89_WW][27] = 50,
- [0][1][2][0][RTW89_WW][29] = 50,
+ [0][1][2][0][RTW89_WW][25] = 54,
+ [0][1][2][0][RTW89_WW][27] = 54,
+ [0][1][2][0][RTW89_WW][29] = 54,
[0][1][2][0][RTW89_WW][31] = 50,
[0][1][2][0][RTW89_WW][33] = 50,
[0][1][2][0][RTW89_WW][35] = 50,
- [0][1][2][0][RTW89_WW][37] = 72,
+ [0][1][2][0][RTW89_WW][37] = 62,
[0][1][2][0][RTW89_WW][38] = 18,
[0][1][2][0][RTW89_WW][40] = 18,
[0][1][2][0][RTW89_WW][42] = 18,
[0][1][2][0][RTW89_WW][44] = 18,
[0][1][2][0][RTW89_WW][46] = 18,
- [0][1][2][0][RTW89_WW][48] = 60,
- [0][1][2][0][RTW89_WW][50] = 60,
- [0][1][2][0][RTW89_WW][52] = 60,
+ [0][1][2][0][RTW89_WW][48] = 50,
+ [0][1][2][0][RTW89_WW][50] = 50,
+ [0][1][2][0][RTW89_WW][52] = 50,
[0][1][2][1][RTW89_WW][0] = 38,
[0][1][2][1][RTW89_WW][2] = 38,
[0][1][2][1][RTW89_WW][4] = 38,
@@ -14983,1149 +14983,1149 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_WW][19] = 38,
[0][1][2][1][RTW89_WW][21] = 38,
[0][1][2][1][RTW89_WW][23] = 38,
- [0][1][2][1][RTW89_WW][25] = 42,
- [0][1][2][1][RTW89_WW][27] = 42,
- [0][1][2][1][RTW89_WW][29] = 42,
+ [0][1][2][1][RTW89_WW][25] = 40,
+ [0][1][2][1][RTW89_WW][27] = 40,
+ [0][1][2][1][RTW89_WW][29] = 40,
[0][1][2][1][RTW89_WW][31] = 38,
[0][1][2][1][RTW89_WW][33] = 38,
[0][1][2][1][RTW89_WW][35] = 38,
- [0][1][2][1][RTW89_WW][37] = 70,
- [0][1][2][1][RTW89_WW][38] = 8,
- [0][1][2][1][RTW89_WW][40] = 8,
- [0][1][2][1][RTW89_WW][42] = 8,
- [0][1][2][1][RTW89_WW][44] = 8,
- [0][1][2][1][RTW89_WW][46] = 8,
- [0][1][2][1][RTW89_WW][48] = 60,
- [0][1][2][1][RTW89_WW][50] = 60,
- [0][1][2][1][RTW89_WW][52] = 60,
- [1][0][2][0][RTW89_WW][1] = 66,
+ [0][1][2][1][RTW89_WW][37] = 60,
+ [0][1][2][1][RTW89_WW][38] = 6,
+ [0][1][2][1][RTW89_WW][40] = 6,
+ [0][1][2][1][RTW89_WW][42] = 6,
+ [0][1][2][1][RTW89_WW][44] = 6,
+ [0][1][2][1][RTW89_WW][46] = 6,
+ [0][1][2][1][RTW89_WW][48] = 50,
+ [0][1][2][1][RTW89_WW][50] = 50,
+ [0][1][2][1][RTW89_WW][52] = 50,
+ [1][0][2][0][RTW89_WW][1] = 58,
[1][0][2][0][RTW89_WW][5] = 66,
[1][0][2][0][RTW89_WW][9] = 66,
- [1][0][2][0][RTW89_WW][13] = 66,
- [1][0][2][0][RTW89_WW][16] = 66,
+ [1][0][2][0][RTW89_WW][13] = 58,
+ [1][0][2][0][RTW89_WW][16] = 56,
[1][0][2][0][RTW89_WW][20] = 66,
[1][0][2][0][RTW89_WW][24] = 66,
[1][0][2][0][RTW89_WW][28] = 66,
[1][0][2][0][RTW89_WW][32] = 66,
- [1][0][2][0][RTW89_WW][36] = 76,
+ [1][0][2][0][RTW89_WW][36] = 66,
[1][0][2][0][RTW89_WW][39] = 30,
[1][0][2][0][RTW89_WW][43] = 30,
- [1][0][2][0][RTW89_WW][47] = 80,
- [1][0][2][0][RTW89_WW][51] = 72,
- [1][1][2][0][RTW89_WW][1] = 54,
- [1][1][2][0][RTW89_WW][5] = 54,
- [1][1][2][0][RTW89_WW][9] = 54,
- [1][1][2][0][RTW89_WW][13] = 54,
- [1][1][2][0][RTW89_WW][16] = 54,
+ [1][0][2][0][RTW89_WW][47] = 68,
+ [1][0][2][0][RTW89_WW][51] = 68,
+ [1][1][2][0][RTW89_WW][1] = 48,
+ [1][1][2][0][RTW89_WW][5] = 52,
+ [1][1][2][0][RTW89_WW][9] = 52,
+ [1][1][2][0][RTW89_WW][13] = 52,
+ [1][1][2][0][RTW89_WW][16] = 48,
[1][1][2][0][RTW89_WW][20] = 54,
[1][1][2][0][RTW89_WW][24] = 54,
[1][1][2][0][RTW89_WW][28] = 54,
[1][1][2][0][RTW89_WW][32] = 54,
- [1][1][2][0][RTW89_WW][36] = 72,
+ [1][1][2][0][RTW89_WW][36] = 66,
[1][1][2][0][RTW89_WW][39] = 18,
[1][1][2][0][RTW89_WW][43] = 18,
- [1][1][2][0][RTW89_WW][47] = 70,
- [1][1][2][0][RTW89_WW][51] = 68,
- [1][1][2][1][RTW89_WW][1] = 42,
- [1][1][2][1][RTW89_WW][5] = 42,
- [1][1][2][1][RTW89_WW][9] = 42,
- [1][1][2][1][RTW89_WW][13] = 42,
- [1][1][2][1][RTW89_WW][16] = 42,
- [1][1][2][1][RTW89_WW][20] = 42,
- [1][1][2][1][RTW89_WW][24] = 42,
- [1][1][2][1][RTW89_WW][28] = 42,
- [1][1][2][1][RTW89_WW][32] = 42,
- [1][1][2][1][RTW89_WW][36] = 70,
- [1][1][2][1][RTW89_WW][39] = 8,
- [1][1][2][1][RTW89_WW][43] = 8,
- [1][1][2][1][RTW89_WW][47] = 70,
- [1][1][2][1][RTW89_WW][51] = 68,
- [2][0][2][0][RTW89_WW][3] = 64,
- [2][0][2][0][RTW89_WW][11] = 66,
- [2][0][2][0][RTW89_WW][18] = 64,
- [2][0][2][0][RTW89_WW][26] = 66,
- [2][0][2][0][RTW89_WW][34] = 72,
+ [1][1][2][0][RTW89_WW][47] = 60,
+ [1][1][2][0][RTW89_WW][51] = 58,
+ [1][1][2][1][RTW89_WW][1] = 40,
+ [1][1][2][1][RTW89_WW][5] = 40,
+ [1][1][2][1][RTW89_WW][9] = 40,
+ [1][1][2][1][RTW89_WW][13] = 40,
+ [1][1][2][1][RTW89_WW][16] = 40,
+ [1][1][2][1][RTW89_WW][20] = 40,
+ [1][1][2][1][RTW89_WW][24] = 40,
+ [1][1][2][1][RTW89_WW][28] = 40,
+ [1][1][2][1][RTW89_WW][32] = 40,
+ [1][1][2][1][RTW89_WW][36] = 60,
+ [1][1][2][1][RTW89_WW][39] = 6,
+ [1][1][2][1][RTW89_WW][43] = 6,
+ [1][1][2][1][RTW89_WW][47] = 60,
+ [1][1][2][1][RTW89_WW][51] = 58,
+ [2][0][2][0][RTW89_WW][3] = 56,
+ [2][0][2][0][RTW89_WW][11] = 58,
+ [2][0][2][0][RTW89_WW][18] = 54,
+ [2][0][2][0][RTW89_WW][26] = 60,
+ [2][0][2][0][RTW89_WW][34] = 60,
[2][0][2][0][RTW89_WW][41] = 30,
- [2][0][2][0][RTW89_WW][49] = 66,
- [2][1][2][0][RTW89_WW][3] = 54,
- [2][1][2][0][RTW89_WW][11] = 54,
- [2][1][2][0][RTW89_WW][18] = 54,
+ [2][0][2][0][RTW89_WW][49] = 56,
+ [2][1][2][0][RTW89_WW][3] = 48,
+ [2][1][2][0][RTW89_WW][11] = 52,
+ [2][1][2][0][RTW89_WW][18] = 48,
[2][1][2][0][RTW89_WW][26] = 54,
- [2][1][2][0][RTW89_WW][34] = 72,
+ [2][1][2][0][RTW89_WW][34] = 60,
[2][1][2][0][RTW89_WW][41] = 18,
- [2][1][2][0][RTW89_WW][49] = 60,
- [2][1][2][1][RTW89_WW][3] = 42,
- [2][1][2][1][RTW89_WW][11] = 42,
- [2][1][2][1][RTW89_WW][18] = 42,
- [2][1][2][1][RTW89_WW][26] = 44,
- [2][1][2][1][RTW89_WW][34] = 70,
- [2][1][2][1][RTW89_WW][41] = 8,
- [2][1][2][1][RTW89_WW][49] = 60,
- [3][0][2][0][RTW89_WW][7] = 56,
- [3][0][2][0][RTW89_WW][22] = 56,
- [3][0][2][0][RTW89_WW][45] = 56,
- [3][1][2][0][RTW89_WW][7] = 44,
- [3][1][2][0][RTW89_WW][22] = 44,
- [3][1][2][0][RTW89_WW][45] = 44,
- [3][1][2][1][RTW89_WW][7] = 32,
- [3][1][2][1][RTW89_WW][22] = 32,
- [3][1][2][1][RTW89_WW][45] = 32,
- [0][0][1][0][RTW89_FCC][0] = 80,
- [0][0][1][0][RTW89_ETSI][0] = 60,
- [0][0][1][0][RTW89_MKK][0] = 62,
+ [2][1][2][0][RTW89_WW][49] = 50,
+ [2][1][2][1][RTW89_WW][3] = 40,
+ [2][1][2][1][RTW89_WW][11] = 40,
+ [2][1][2][1][RTW89_WW][18] = 40,
+ [2][1][2][1][RTW89_WW][26] = 42,
+ [2][1][2][1][RTW89_WW][34] = 60,
+ [2][1][2][1][RTW89_WW][41] = 6,
+ [2][1][2][1][RTW89_WW][49] = 50,
+ [3][0][2][0][RTW89_WW][7] = 38,
+ [3][0][2][0][RTW89_WW][22] = 50,
+ [3][0][2][0][RTW89_WW][45] = 0,
+ [3][1][2][0][RTW89_WW][7] = 26,
+ [3][1][2][0][RTW89_WW][22] = 42,
+ [3][1][2][0][RTW89_WW][45] = 0,
+ [3][1][2][1][RTW89_WW][7] = 14,
+ [3][1][2][1][RTW89_WW][22] = 30,
+ [3][1][2][1][RTW89_WW][45] = 0,
+ [0][0][1][0][RTW89_FCC][0] = 70,
+ [0][0][1][0][RTW89_ETSI][0] = 66,
+ [0][0][1][0][RTW89_MKK][0] = 66,
[0][0][1][0][RTW89_IC][0] = 62,
[0][0][1][0][RTW89_ACMA][0] = 60,
- [0][0][1][0][RTW89_FCC][2] = 80,
- [0][0][1][0][RTW89_ETSI][2] = 60,
- [0][0][1][0][RTW89_MKK][2] = 62,
+ [0][0][1][0][RTW89_FCC][2] = 70,
+ [0][0][1][0][RTW89_ETSI][2] = 66,
+ [0][0][1][0][RTW89_MKK][2] = 66,
[0][0][1][0][RTW89_IC][2] = 62,
[0][0][1][0][RTW89_ACMA][2] = 60,
- [0][0][1][0][RTW89_FCC][4] = 80,
- [0][0][1][0][RTW89_ETSI][4] = 60,
- [0][0][1][0][RTW89_MKK][4] = 62,
+ [0][0][1][0][RTW89_FCC][4] = 70,
+ [0][0][1][0][RTW89_ETSI][4] = 66,
+ [0][0][1][0][RTW89_MKK][4] = 66,
[0][0][1][0][RTW89_IC][4] = 62,
[0][0][1][0][RTW89_ACMA][4] = 60,
- [0][0][1][0][RTW89_FCC][6] = 80,
- [0][0][1][0][RTW89_ETSI][6] = 60,
- [0][0][1][0][RTW89_MKK][6] = 62,
+ [0][0][1][0][RTW89_FCC][6] = 70,
+ [0][0][1][0][RTW89_ETSI][6] = 66,
+ [0][0][1][0][RTW89_MKK][6] = 66,
[0][0][1][0][RTW89_IC][6] = 62,
[0][0][1][0][RTW89_ACMA][6] = 60,
- [0][0][1][0][RTW89_FCC][8] = 80,
- [0][0][1][0][RTW89_ETSI][8] = 60,
- [0][0][1][0][RTW89_MKK][8] = 64,
+ [0][0][1][0][RTW89_FCC][8] = 70,
+ [0][0][1][0][RTW89_ETSI][8] = 66,
+ [0][0][1][0][RTW89_MKK][8] = 66,
[0][0][1][0][RTW89_IC][8] = 66,
[0][0][1][0][RTW89_ACMA][8] = 60,
- [0][0][1][0][RTW89_FCC][10] = 80,
- [0][0][1][0][RTW89_ETSI][10] = 60,
- [0][0][1][0][RTW89_MKK][10] = 64,
+ [0][0][1][0][RTW89_FCC][10] = 70,
+ [0][0][1][0][RTW89_ETSI][10] = 66,
+ [0][0][1][0][RTW89_MKK][10] = 66,
[0][0][1][0][RTW89_IC][10] = 66,
[0][0][1][0][RTW89_ACMA][10] = 60,
- [0][0][1][0][RTW89_FCC][12] = 80,
- [0][0][1][0][RTW89_ETSI][12] = 60,
- [0][0][1][0][RTW89_MKK][12] = 64,
+ [0][0][1][0][RTW89_FCC][12] = 70,
+ [0][0][1][0][RTW89_ETSI][12] = 66,
+ [0][0][1][0][RTW89_MKK][12] = 66,
[0][0][1][0][RTW89_IC][12] = 66,
[0][0][1][0][RTW89_ACMA][12] = 60,
- [0][0][1][0][RTW89_FCC][14] = 80,
- [0][0][1][0][RTW89_ETSI][14] = 60,
- [0][0][1][0][RTW89_MKK][14] = 62,
+ [0][0][1][0][RTW89_FCC][14] = 70,
+ [0][0][1][0][RTW89_ETSI][14] = 66,
+ [0][0][1][0][RTW89_MKK][14] = 66,
[0][0][1][0][RTW89_IC][14] = 66,
[0][0][1][0][RTW89_ACMA][14] = 60,
- [0][0][1][0][RTW89_FCC][15] = 78,
- [0][0][1][0][RTW89_ETSI][15] = 60,
- [0][0][1][0][RTW89_MKK][15] = 78,
- [0][0][1][0][RTW89_IC][15] = 78,
+ [0][0][1][0][RTW89_FCC][15] = 68,
+ [0][0][1][0][RTW89_ETSI][15] = 66,
+ [0][0][1][0][RTW89_MKK][15] = 70,
+ [0][0][1][0][RTW89_IC][15] = 70,
[0][0][1][0][RTW89_ACMA][15] = 60,
- [0][0][1][0][RTW89_FCC][17] = 80,
- [0][0][1][0][RTW89_ETSI][17] = 60,
- [0][0][1][0][RTW89_MKK][17] = 78,
- [0][0][1][0][RTW89_IC][17] = 80,
+ [0][0][1][0][RTW89_FCC][17] = 70,
+ [0][0][1][0][RTW89_ETSI][17] = 66,
+ [0][0][1][0][RTW89_MKK][17] = 70,
+ [0][0][1][0][RTW89_IC][17] = 70,
[0][0][1][0][RTW89_ACMA][17] = 60,
- [0][0][1][0][RTW89_FCC][19] = 80,
- [0][0][1][0][RTW89_ETSI][19] = 60,
- [0][0][1][0][RTW89_MKK][19] = 78,
- [0][0][1][0][RTW89_IC][19] = 80,
+ [0][0][1][0][RTW89_FCC][19] = 70,
+ [0][0][1][0][RTW89_ETSI][19] = 66,
+ [0][0][1][0][RTW89_MKK][19] = 70,
+ [0][0][1][0][RTW89_IC][19] = 70,
[0][0][1][0][RTW89_ACMA][19] = 60,
- [0][0][1][0][RTW89_FCC][21] = 80,
- [0][0][1][0][RTW89_ETSI][21] = 60,
- [0][0][1][0][RTW89_MKK][21] = 78,
- [0][0][1][0][RTW89_IC][21] = 80,
+ [0][0][1][0][RTW89_FCC][21] = 70,
+ [0][0][1][0][RTW89_ETSI][21] = 66,
+ [0][0][1][0][RTW89_MKK][21] = 70,
+ [0][0][1][0][RTW89_IC][21] = 70,
[0][0][1][0][RTW89_ACMA][21] = 60,
- [0][0][1][0][RTW89_FCC][23] = 80,
- [0][0][1][0][RTW89_ETSI][23] = 60,
- [0][0][1][0][RTW89_MKK][23] = 78,
- [0][0][1][0][RTW89_IC][23] = 80,
+ [0][0][1][0][RTW89_FCC][23] = 70,
+ [0][0][1][0][RTW89_ETSI][23] = 66,
+ [0][0][1][0][RTW89_MKK][23] = 70,
+ [0][0][1][0][RTW89_IC][23] = 70,
[0][0][1][0][RTW89_ACMA][23] = 60,
- [0][0][1][0][RTW89_FCC][25] = 80,
- [0][0][1][0][RTW89_ETSI][25] = 60,
- [0][0][1][0][RTW89_MKK][25] = 78,
+ [0][0][1][0][RTW89_FCC][25] = 70,
+ [0][0][1][0][RTW89_ETSI][25] = 66,
+ [0][0][1][0][RTW89_MKK][25] = 70,
[0][0][1][0][RTW89_IC][25] = 127,
[0][0][1][0][RTW89_ACMA][25] = 127,
- [0][0][1][0][RTW89_FCC][27] = 80,
- [0][0][1][0][RTW89_ETSI][27] = 60,
- [0][0][1][0][RTW89_MKK][27] = 78,
+ [0][0][1][0][RTW89_FCC][27] = 70,
+ [0][0][1][0][RTW89_ETSI][27] = 66,
+ [0][0][1][0][RTW89_MKK][27] = 70,
[0][0][1][0][RTW89_IC][27] = 127,
[0][0][1][0][RTW89_ACMA][27] = 127,
- [0][0][1][0][RTW89_FCC][29] = 80,
- [0][0][1][0][RTW89_ETSI][29] = 60,
- [0][0][1][0][RTW89_MKK][29] = 78,
+ [0][0][1][0][RTW89_FCC][29] = 70,
+ [0][0][1][0][RTW89_ETSI][29] = 66,
+ [0][0][1][0][RTW89_MKK][29] = 70,
[0][0][1][0][RTW89_IC][29] = 127,
[0][0][1][0][RTW89_ACMA][29] = 127,
- [0][0][1][0][RTW89_FCC][31] = 80,
- [0][0][1][0][RTW89_ETSI][31] = 60,
- [0][0][1][0][RTW89_MKK][31] = 78,
- [0][0][1][0][RTW89_IC][31] = 80,
+ [0][0][1][0][RTW89_FCC][31] = 70,
+ [0][0][1][0][RTW89_ETSI][31] = 66,
+ [0][0][1][0][RTW89_MKK][31] = 70,
+ [0][0][1][0][RTW89_IC][31] = 70,
[0][0][1][0][RTW89_ACMA][31] = 60,
- [0][0][1][0][RTW89_FCC][33] = 80,
- [0][0][1][0][RTW89_ETSI][33] = 60,
- [0][0][1][0][RTW89_MKK][33] = 78,
- [0][0][1][0][RTW89_IC][33] = 80,
+ [0][0][1][0][RTW89_FCC][33] = 70,
+ [0][0][1][0][RTW89_ETSI][33] = 66,
+ [0][0][1][0][RTW89_MKK][33] = 70,
+ [0][0][1][0][RTW89_IC][33] = 70,
[0][0][1][0][RTW89_ACMA][33] = 60,
- [0][0][1][0][RTW89_FCC][35] = 72,
- [0][0][1][0][RTW89_ETSI][35] = 60,
- [0][0][1][0][RTW89_MKK][35] = 78,
- [0][0][1][0][RTW89_IC][35] = 72,
+ [0][0][1][0][RTW89_FCC][35] = 62,
+ [0][0][1][0][RTW89_ETSI][35] = 66,
+ [0][0][1][0][RTW89_MKK][35] = 70,
+ [0][0][1][0][RTW89_IC][35] = 70,
[0][0][1][0][RTW89_ACMA][35] = 60,
- [0][0][1][0][RTW89_FCC][37] = 80,
+ [0][0][1][0][RTW89_FCC][37] = 70,
[0][0][1][0][RTW89_ETSI][37] = 127,
- [0][0][1][0][RTW89_MKK][37] = 78,
- [0][0][1][0][RTW89_IC][37] = 80,
- [0][0][1][0][RTW89_ACMA][37] = 78,
- [0][0][1][0][RTW89_FCC][38] = 80,
+ [0][0][1][0][RTW89_MKK][37] = 70,
+ [0][0][1][0][RTW89_IC][37] = 70,
+ [0][0][1][0][RTW89_ACMA][37] = 70,
+ [0][0][1][0][RTW89_FCC][38] = 70,
[0][0][1][0][RTW89_ETSI][38] = 30,
[0][0][1][0][RTW89_MKK][38] = 127,
- [0][0][1][0][RTW89_IC][38] = 80,
- [0][0][1][0][RTW89_ACMA][38] = 78,
- [0][0][1][0][RTW89_FCC][40] = 80,
+ [0][0][1][0][RTW89_IC][38] = 70,
+ [0][0][1][0][RTW89_ACMA][38] = 70,
+ [0][0][1][0][RTW89_FCC][40] = 70,
[0][0][1][0][RTW89_ETSI][40] = 30,
[0][0][1][0][RTW89_MKK][40] = 127,
- [0][0][1][0][RTW89_IC][40] = 80,
- [0][0][1][0][RTW89_ACMA][40] = 78,
- [0][0][1][0][RTW89_FCC][42] = 80,
+ [0][0][1][0][RTW89_IC][40] = 70,
+ [0][0][1][0][RTW89_ACMA][40] = 70,
+ [0][0][1][0][RTW89_FCC][42] = 70,
[0][0][1][0][RTW89_ETSI][42] = 30,
[0][0][1][0][RTW89_MKK][42] = 127,
- [0][0][1][0][RTW89_IC][42] = 80,
- [0][0][1][0][RTW89_ACMA][42] = 78,
- [0][0][1][0][RTW89_FCC][44] = 80,
+ [0][0][1][0][RTW89_IC][42] = 70,
+ [0][0][1][0][RTW89_ACMA][42] = 70,
+ [0][0][1][0][RTW89_FCC][44] = 70,
[0][0][1][0][RTW89_ETSI][44] = 30,
[0][0][1][0][RTW89_MKK][44] = 127,
- [0][0][1][0][RTW89_IC][44] = 80,
- [0][0][1][0][RTW89_ACMA][44] = 78,
- [0][0][1][0][RTW89_FCC][46] = 80,
+ [0][0][1][0][RTW89_IC][44] = 70,
+ [0][0][1][0][RTW89_ACMA][44] = 70,
+ [0][0][1][0][RTW89_FCC][46] = 70,
[0][0][1][0][RTW89_ETSI][46] = 30,
[0][0][1][0][RTW89_MKK][46] = 127,
- [0][0][1][0][RTW89_IC][46] = 80,
- [0][0][1][0][RTW89_ACMA][46] = 78,
- [0][0][1][0][RTW89_FCC][48] = 80,
+ [0][0][1][0][RTW89_IC][46] = 70,
+ [0][0][1][0][RTW89_ACMA][46] = 70,
+ [0][0][1][0][RTW89_FCC][48] = 70,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
[0][0][1][0][RTW89_IC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
- [0][0][1][0][RTW89_FCC][50] = 80,
+ [0][0][1][0][RTW89_FCC][50] = 70,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
[0][0][1][0][RTW89_IC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
- [0][0][1][0][RTW89_FCC][52] = 80,
+ [0][0][1][0][RTW89_FCC][52] = 70,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
[0][0][1][0][RTW89_IC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
- [0][1][1][0][RTW89_FCC][0] = 70,
- [0][1][1][0][RTW89_ETSI][0] = 48,
- [0][1][1][0][RTW89_MKK][0] = 50,
+ [0][1][1][0][RTW89_FCC][0] = 60,
+ [0][1][1][0][RTW89_ETSI][0] = 54,
+ [0][1][1][0][RTW89_MKK][0] = 54,
[0][1][1][0][RTW89_IC][0] = 42,
[0][1][1][0][RTW89_ACMA][0] = 48,
- [0][1][1][0][RTW89_FCC][2] = 70,
- [0][1][1][0][RTW89_ETSI][2] = 48,
- [0][1][1][0][RTW89_MKK][2] = 50,
+ [0][1][1][0][RTW89_FCC][2] = 60,
+ [0][1][1][0][RTW89_ETSI][2] = 54,
+ [0][1][1][0][RTW89_MKK][2] = 54,
[0][1][1][0][RTW89_IC][2] = 42,
[0][1][1][0][RTW89_ACMA][2] = 48,
- [0][1][1][0][RTW89_FCC][4] = 70,
- [0][1][1][0][RTW89_ETSI][4] = 48,
- [0][1][1][0][RTW89_MKK][4] = 50,
+ [0][1][1][0][RTW89_FCC][4] = 60,
+ [0][1][1][0][RTW89_ETSI][4] = 54,
+ [0][1][1][0][RTW89_MKK][4] = 54,
[0][1][1][0][RTW89_IC][4] = 42,
[0][1][1][0][RTW89_ACMA][4] = 48,
- [0][1][1][0][RTW89_FCC][6] = 70,
- [0][1][1][0][RTW89_ETSI][6] = 48,
- [0][1][1][0][RTW89_MKK][6] = 50,
+ [0][1][1][0][RTW89_FCC][6] = 60,
+ [0][1][1][0][RTW89_ETSI][6] = 54,
+ [0][1][1][0][RTW89_MKK][6] = 54,
[0][1][1][0][RTW89_IC][6] = 42,
[0][1][1][0][RTW89_ACMA][6] = 48,
- [0][1][1][0][RTW89_FCC][8] = 70,
- [0][1][1][0][RTW89_ETSI][8] = 48,
- [0][1][1][0][RTW89_MKK][8] = 50,
+ [0][1][1][0][RTW89_FCC][8] = 60,
+ [0][1][1][0][RTW89_ETSI][8] = 54,
+ [0][1][1][0][RTW89_MKK][8] = 52,
[0][1][1][0][RTW89_IC][8] = 54,
[0][1][1][0][RTW89_ACMA][8] = 48,
- [0][1][1][0][RTW89_FCC][10] = 70,
- [0][1][1][0][RTW89_ETSI][10] = 48,
- [0][1][1][0][RTW89_MKK][10] = 50,
+ [0][1][1][0][RTW89_FCC][10] = 60,
+ [0][1][1][0][RTW89_ETSI][10] = 54,
+ [0][1][1][0][RTW89_MKK][10] = 54,
[0][1][1][0][RTW89_IC][10] = 54,
[0][1][1][0][RTW89_ACMA][10] = 48,
- [0][1][1][0][RTW89_FCC][12] = 70,
- [0][1][1][0][RTW89_ETSI][12] = 48,
- [0][1][1][0][RTW89_MKK][12] = 50,
+ [0][1][1][0][RTW89_FCC][12] = 60,
+ [0][1][1][0][RTW89_ETSI][12] = 54,
+ [0][1][1][0][RTW89_MKK][12] = 54,
[0][1][1][0][RTW89_IC][12] = 54,
[0][1][1][0][RTW89_ACMA][12] = 48,
- [0][1][1][0][RTW89_FCC][14] = 70,
- [0][1][1][0][RTW89_ETSI][14] = 48,
- [0][1][1][0][RTW89_MKK][14] = 50,
+ [0][1][1][0][RTW89_FCC][14] = 60,
+ [0][1][1][0][RTW89_ETSI][14] = 54,
+ [0][1][1][0][RTW89_MKK][14] = 54,
[0][1][1][0][RTW89_IC][14] = 54,
[0][1][1][0][RTW89_ACMA][14] = 48,
- [0][1][1][0][RTW89_FCC][15] = 68,
- [0][1][1][0][RTW89_ETSI][15] = 48,
+ [0][1][1][0][RTW89_FCC][15] = 58,
+ [0][1][1][0][RTW89_ETSI][15] = 54,
[0][1][1][0][RTW89_MKK][15] = 70,
[0][1][1][0][RTW89_IC][15] = 68,
[0][1][1][0][RTW89_ACMA][15] = 48,
- [0][1][1][0][RTW89_FCC][17] = 70,
- [0][1][1][0][RTW89_ETSI][17] = 48,
- [0][1][1][0][RTW89_MKK][17] = 72,
+ [0][1][1][0][RTW89_FCC][17] = 60,
+ [0][1][1][0][RTW89_ETSI][17] = 54,
+ [0][1][1][0][RTW89_MKK][17] = 70,
[0][1][1][0][RTW89_IC][17] = 70,
[0][1][1][0][RTW89_ACMA][17] = 48,
- [0][1][1][0][RTW89_FCC][19] = 70,
- [0][1][1][0][RTW89_ETSI][19] = 48,
- [0][1][1][0][RTW89_MKK][19] = 72,
+ [0][1][1][0][RTW89_FCC][19] = 60,
+ [0][1][1][0][RTW89_ETSI][19] = 54,
+ [0][1][1][0][RTW89_MKK][19] = 70,
[0][1][1][0][RTW89_IC][19] = 70,
[0][1][1][0][RTW89_ACMA][19] = 48,
- [0][1][1][0][RTW89_FCC][21] = 70,
- [0][1][1][0][RTW89_ETSI][21] = 48,
- [0][1][1][0][RTW89_MKK][21] = 72,
+ [0][1][1][0][RTW89_FCC][21] = 60,
+ [0][1][1][0][RTW89_ETSI][21] = 54,
+ [0][1][1][0][RTW89_MKK][21] = 70,
[0][1][1][0][RTW89_IC][21] = 70,
[0][1][1][0][RTW89_ACMA][21] = 48,
- [0][1][1][0][RTW89_FCC][23] = 70,
- [0][1][1][0][RTW89_ETSI][23] = 48,
- [0][1][1][0][RTW89_MKK][23] = 72,
+ [0][1][1][0][RTW89_FCC][23] = 60,
+ [0][1][1][0][RTW89_ETSI][23] = 54,
+ [0][1][1][0][RTW89_MKK][23] = 70,
[0][1][1][0][RTW89_IC][23] = 70,
[0][1][1][0][RTW89_ACMA][23] = 48,
- [0][1][1][0][RTW89_FCC][25] = 70,
- [0][1][1][0][RTW89_ETSI][25] = 48,
+ [0][1][1][0][RTW89_FCC][25] = 60,
+ [0][1][1][0][RTW89_ETSI][25] = 54,
[0][1][1][0][RTW89_MKK][25] = 70,
[0][1][1][0][RTW89_IC][25] = 127,
[0][1][1][0][RTW89_ACMA][25] = 127,
- [0][1][1][0][RTW89_FCC][27] = 70,
- [0][1][1][0][RTW89_ETSI][27] = 48,
- [0][1][1][0][RTW89_MKK][27] = 72,
+ [0][1][1][0][RTW89_FCC][27] = 60,
+ [0][1][1][0][RTW89_ETSI][27] = 54,
+ [0][1][1][0][RTW89_MKK][27] = 70,
[0][1][1][0][RTW89_IC][27] = 127,
[0][1][1][0][RTW89_ACMA][27] = 127,
- [0][1][1][0][RTW89_FCC][29] = 70,
- [0][1][1][0][RTW89_ETSI][29] = 48,
- [0][1][1][0][RTW89_MKK][29] = 72,
+ [0][1][1][0][RTW89_FCC][29] = 60,
+ [0][1][1][0][RTW89_ETSI][29] = 54,
+ [0][1][1][0][RTW89_MKK][29] = 70,
[0][1][1][0][RTW89_IC][29] = 127,
[0][1][1][0][RTW89_ACMA][29] = 127,
- [0][1][1][0][RTW89_FCC][31] = 70,
- [0][1][1][0][RTW89_ETSI][31] = 48,
- [0][1][1][0][RTW89_MKK][31] = 72,
+ [0][1][1][0][RTW89_FCC][31] = 60,
+ [0][1][1][0][RTW89_ETSI][31] = 54,
+ [0][1][1][0][RTW89_MKK][31] = 70,
[0][1][1][0][RTW89_IC][31] = 70,
[0][1][1][0][RTW89_ACMA][31] = 48,
- [0][1][1][0][RTW89_FCC][33] = 70,
- [0][1][1][0][RTW89_ETSI][33] = 48,
- [0][1][1][0][RTW89_MKK][33] = 72,
+ [0][1][1][0][RTW89_FCC][33] = 60,
+ [0][1][1][0][RTW89_ETSI][33] = 54,
+ [0][1][1][0][RTW89_MKK][33] = 70,
[0][1][1][0][RTW89_IC][33] = 70,
[0][1][1][0][RTW89_ACMA][33] = 48,
- [0][1][1][0][RTW89_FCC][35] = 68,
- [0][1][1][0][RTW89_ETSI][35] = 48,
- [0][1][1][0][RTW89_MKK][35] = 72,
+ [0][1][1][0][RTW89_FCC][35] = 58,
+ [0][1][1][0][RTW89_ETSI][35] = 54,
+ [0][1][1][0][RTW89_MKK][35] = 70,
[0][1][1][0][RTW89_IC][35] = 68,
[0][1][1][0][RTW89_ACMA][35] = 48,
- [0][1][1][0][RTW89_FCC][37] = 70,
+ [0][1][1][0][RTW89_FCC][37] = 60,
[0][1][1][0][RTW89_ETSI][37] = 127,
- [0][1][1][0][RTW89_MKK][37] = 72,
+ [0][1][1][0][RTW89_MKK][37] = 70,
[0][1][1][0][RTW89_IC][37] = 70,
- [0][1][1][0][RTW89_ACMA][37] = 72,
- [0][1][1][0][RTW89_FCC][38] = 80,
+ [0][1][1][0][RTW89_ACMA][37] = 70,
+ [0][1][1][0][RTW89_FCC][38] = 70,
[0][1][1][0][RTW89_ETSI][38] = 18,
[0][1][1][0][RTW89_MKK][38] = 127,
- [0][1][1][0][RTW89_IC][38] = 80,
- [0][1][1][0][RTW89_ACMA][38] = 74,
- [0][1][1][0][RTW89_FCC][40] = 80,
+ [0][1][1][0][RTW89_IC][38] = 70,
+ [0][1][1][0][RTW89_ACMA][38] = 70,
+ [0][1][1][0][RTW89_FCC][40] = 70,
[0][1][1][0][RTW89_ETSI][40] = 18,
[0][1][1][0][RTW89_MKK][40] = 127,
- [0][1][1][0][RTW89_IC][40] = 80,
+ [0][1][1][0][RTW89_IC][40] = 70,
[0][1][1][0][RTW89_ACMA][40] = 16,
- [0][1][1][0][RTW89_FCC][42] = 80,
+ [0][1][1][0][RTW89_FCC][42] = 70,
[0][1][1][0][RTW89_ETSI][42] = 18,
[0][1][1][0][RTW89_MKK][42] = 127,
- [0][1][1][0][RTW89_IC][42] = 80,
- [0][1][1][0][RTW89_ACMA][42] = 78,
- [0][1][1][0][RTW89_FCC][44] = 80,
+ [0][1][1][0][RTW89_IC][42] = 70,
+ [0][1][1][0][RTW89_ACMA][42] = 70,
+ [0][1][1][0][RTW89_FCC][44] = 70,
[0][1][1][0][RTW89_ETSI][44] = 18,
[0][1][1][0][RTW89_MKK][44] = 127,
- [0][1][1][0][RTW89_IC][44] = 80,
+ [0][1][1][0][RTW89_IC][44] = 70,
[0][1][1][0][RTW89_ACMA][44] = 16,
- [0][1][1][0][RTW89_FCC][46] = 80,
+ [0][1][1][0][RTW89_FCC][46] = 70,
[0][1][1][0][RTW89_ETSI][46] = 18,
[0][1][1][0][RTW89_MKK][46] = 127,
- [0][1][1][0][RTW89_IC][46] = 80,
- [0][1][1][0][RTW89_ACMA][46] = 78,
- [0][1][1][0][RTW89_FCC][48] = 58,
+ [0][1][1][0][RTW89_IC][46] = 70,
+ [0][1][1][0][RTW89_ACMA][46] = 70,
+ [0][1][1][0][RTW89_FCC][48] = 48,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
[0][1][1][0][RTW89_IC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
- [0][1][1][0][RTW89_FCC][50] = 58,
+ [0][1][1][0][RTW89_FCC][50] = 48,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
[0][1][1][0][RTW89_IC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
- [0][1][1][0][RTW89_FCC][52] = 58,
+ [0][1][1][0][RTW89_FCC][52] = 48,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
[0][1][1][0][RTW89_IC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
- [0][0][2][0][RTW89_FCC][0] = 80,
- [0][0][2][0][RTW89_ETSI][0] = 62,
- [0][0][2][0][RTW89_MKK][0] = 64,
+ [0][0][2][0][RTW89_FCC][0] = 70,
+ [0][0][2][0][RTW89_ETSI][0] = 66,
+ [0][0][2][0][RTW89_MKK][0] = 68,
[0][0][2][0][RTW89_IC][0] = 66,
[0][0][2][0][RTW89_ACMA][0] = 62,
- [0][0][2][0][RTW89_FCC][2] = 80,
- [0][0][2][0][RTW89_ETSI][2] = 62,
- [0][0][2][0][RTW89_MKK][2] = 64,
+ [0][0][2][0][RTW89_FCC][2] = 70,
+ [0][0][2][0][RTW89_ETSI][2] = 66,
+ [0][0][2][0][RTW89_MKK][2] = 68,
[0][0][2][0][RTW89_IC][2] = 66,
[0][0][2][0][RTW89_ACMA][2] = 62,
- [0][0][2][0][RTW89_FCC][4] = 80,
- [0][0][2][0][RTW89_ETSI][4] = 62,
- [0][0][2][0][RTW89_MKK][4] = 64,
+ [0][0][2][0][RTW89_FCC][4] = 70,
+ [0][0][2][0][RTW89_ETSI][4] = 66,
+ [0][0][2][0][RTW89_MKK][4] = 68,
[0][0][2][0][RTW89_IC][4] = 66,
[0][0][2][0][RTW89_ACMA][4] = 62,
- [0][0][2][0][RTW89_FCC][6] = 80,
- [0][0][2][0][RTW89_ETSI][6] = 62,
- [0][0][2][0][RTW89_MKK][6] = 64,
+ [0][0][2][0][RTW89_FCC][6] = 70,
+ [0][0][2][0][RTW89_ETSI][6] = 66,
+ [0][0][2][0][RTW89_MKK][6] = 60,
[0][0][2][0][RTW89_IC][6] = 66,
[0][0][2][0][RTW89_ACMA][6] = 62,
- [0][0][2][0][RTW89_FCC][8] = 80,
- [0][0][2][0][RTW89_ETSI][8] = 62,
- [0][0][2][0][RTW89_MKK][8] = 64,
+ [0][0][2][0][RTW89_FCC][8] = 70,
+ [0][0][2][0][RTW89_ETSI][8] = 66,
+ [0][0][2][0][RTW89_MKK][8] = 58,
[0][0][2][0][RTW89_IC][8] = 66,
[0][0][2][0][RTW89_ACMA][8] = 62,
- [0][0][2][0][RTW89_FCC][10] = 80,
- [0][0][2][0][RTW89_ETSI][10] = 62,
- [0][0][2][0][RTW89_MKK][10] = 64,
+ [0][0][2][0][RTW89_FCC][10] = 70,
+ [0][0][2][0][RTW89_ETSI][10] = 66,
+ [0][0][2][0][RTW89_MKK][10] = 70,
[0][0][2][0][RTW89_IC][10] = 66,
[0][0][2][0][RTW89_ACMA][10] = 62,
- [0][0][2][0][RTW89_FCC][12] = 80,
- [0][0][2][0][RTW89_ETSI][12] = 62,
- [0][0][2][0][RTW89_MKK][12] = 64,
+ [0][0][2][0][RTW89_FCC][12] = 70,
+ [0][0][2][0][RTW89_ETSI][12] = 66,
+ [0][0][2][0][RTW89_MKK][12] = 70,
[0][0][2][0][RTW89_IC][12] = 66,
[0][0][2][0][RTW89_ACMA][12] = 62,
- [0][0][2][0][RTW89_FCC][14] = 80,
- [0][0][2][0][RTW89_ETSI][14] = 62,
- [0][0][2][0][RTW89_MKK][14] = 64,
+ [0][0][2][0][RTW89_FCC][14] = 70,
+ [0][0][2][0][RTW89_ETSI][14] = 66,
+ [0][0][2][0][RTW89_MKK][14] = 70,
[0][0][2][0][RTW89_IC][14] = 66,
[0][0][2][0][RTW89_ACMA][14] = 62,
- [0][0][2][0][RTW89_FCC][15] = 76,
- [0][0][2][0][RTW89_ETSI][15] = 62,
- [0][0][2][0][RTW89_MKK][15] = 78,
- [0][0][2][0][RTW89_IC][15] = 76,
+ [0][0][2][0][RTW89_FCC][15] = 66,
+ [0][0][2][0][RTW89_ETSI][15] = 66,
+ [0][0][2][0][RTW89_MKK][15] = 70,
+ [0][0][2][0][RTW89_IC][15] = 70,
[0][0][2][0][RTW89_ACMA][15] = 62,
- [0][0][2][0][RTW89_FCC][17] = 80,
- [0][0][2][0][RTW89_ETSI][17] = 62,
- [0][0][2][0][RTW89_MKK][17] = 78,
- [0][0][2][0][RTW89_IC][17] = 80,
+ [0][0][2][0][RTW89_FCC][17] = 70,
+ [0][0][2][0][RTW89_ETSI][17] = 66,
+ [0][0][2][0][RTW89_MKK][17] = 70,
+ [0][0][2][0][RTW89_IC][17] = 70,
[0][0][2][0][RTW89_ACMA][17] = 62,
- [0][0][2][0][RTW89_FCC][19] = 80,
- [0][0][2][0][RTW89_ETSI][19] = 62,
- [0][0][2][0][RTW89_MKK][19] = 78,
- [0][0][2][0][RTW89_IC][19] = 80,
+ [0][0][2][0][RTW89_FCC][19] = 70,
+ [0][0][2][0][RTW89_ETSI][19] = 66,
+ [0][0][2][0][RTW89_MKK][19] = 70,
+ [0][0][2][0][RTW89_IC][19] = 70,
[0][0][2][0][RTW89_ACMA][19] = 62,
- [0][0][2][0][RTW89_FCC][21] = 80,
- [0][0][2][0][RTW89_ETSI][21] = 62,
- [0][0][2][0][RTW89_MKK][21] = 78,
- [0][0][2][0][RTW89_IC][21] = 80,
+ [0][0][2][0][RTW89_FCC][21] = 70,
+ [0][0][2][0][RTW89_ETSI][21] = 66,
+ [0][0][2][0][RTW89_MKK][21] = 70,
+ [0][0][2][0][RTW89_IC][21] = 70,
[0][0][2][0][RTW89_ACMA][21] = 62,
- [0][0][2][0][RTW89_FCC][23] = 80,
- [0][0][2][0][RTW89_ETSI][23] = 62,
- [0][0][2][0][RTW89_MKK][23] = 78,
- [0][0][2][0][RTW89_IC][23] = 80,
+ [0][0][2][0][RTW89_FCC][23] = 70,
+ [0][0][2][0][RTW89_ETSI][23] = 66,
+ [0][0][2][0][RTW89_MKK][23] = 70,
+ [0][0][2][0][RTW89_IC][23] = 70,
[0][0][2][0][RTW89_ACMA][23] = 62,
- [0][0][2][0][RTW89_FCC][25] = 80,
- [0][0][2][0][RTW89_ETSI][25] = 62,
- [0][0][2][0][RTW89_MKK][25] = 78,
+ [0][0][2][0][RTW89_FCC][25] = 70,
+ [0][0][2][0][RTW89_ETSI][25] = 66,
+ [0][0][2][0][RTW89_MKK][25] = 70,
[0][0][2][0][RTW89_IC][25] = 127,
[0][0][2][0][RTW89_ACMA][25] = 127,
- [0][0][2][0][RTW89_FCC][27] = 80,
- [0][0][2][0][RTW89_ETSI][27] = 62,
- [0][0][2][0][RTW89_MKK][27] = 78,
+ [0][0][2][0][RTW89_FCC][27] = 70,
+ [0][0][2][0][RTW89_ETSI][27] = 66,
+ [0][0][2][0][RTW89_MKK][27] = 70,
[0][0][2][0][RTW89_IC][27] = 127,
[0][0][2][0][RTW89_ACMA][27] = 127,
- [0][0][2][0][RTW89_FCC][29] = 80,
- [0][0][2][0][RTW89_ETSI][29] = 62,
- [0][0][2][0][RTW89_MKK][29] = 78,
+ [0][0][2][0][RTW89_FCC][29] = 70,
+ [0][0][2][0][RTW89_ETSI][29] = 66,
+ [0][0][2][0][RTW89_MKK][29] = 70,
[0][0][2][0][RTW89_IC][29] = 127,
[0][0][2][0][RTW89_ACMA][29] = 127,
- [0][0][2][0][RTW89_FCC][31] = 80,
- [0][0][2][0][RTW89_ETSI][31] = 62,
- [0][0][2][0][RTW89_MKK][31] = 78,
- [0][0][2][0][RTW89_IC][31] = 80,
+ [0][0][2][0][RTW89_FCC][31] = 70,
+ [0][0][2][0][RTW89_ETSI][31] = 66,
+ [0][0][2][0][RTW89_MKK][31] = 70,
+ [0][0][2][0][RTW89_IC][31] = 70,
[0][0][2][0][RTW89_ACMA][31] = 62,
- [0][0][2][0][RTW89_FCC][33] = 80,
- [0][0][2][0][RTW89_ETSI][33] = 62,
- [0][0][2][0][RTW89_MKK][33] = 78,
- [0][0][2][0][RTW89_IC][33] = 80,
+ [0][0][2][0][RTW89_FCC][33] = 70,
+ [0][0][2][0][RTW89_ETSI][33] = 66,
+ [0][0][2][0][RTW89_MKK][33] = 70,
+ [0][0][2][0][RTW89_IC][33] = 70,
[0][0][2][0][RTW89_ACMA][33] = 62,
- [0][0][2][0][RTW89_FCC][35] = 72,
- [0][0][2][0][RTW89_ETSI][35] = 62,
- [0][0][2][0][RTW89_MKK][35] = 78,
- [0][0][2][0][RTW89_IC][35] = 72,
+ [0][0][2][0][RTW89_FCC][35] = 62,
+ [0][0][2][0][RTW89_ETSI][35] = 66,
+ [0][0][2][0][RTW89_MKK][35] = 70,
+ [0][0][2][0][RTW89_IC][35] = 70,
[0][0][2][0][RTW89_ACMA][35] = 62,
- [0][0][2][0][RTW89_FCC][37] = 80,
+ [0][0][2][0][RTW89_FCC][37] = 70,
[0][0][2][0][RTW89_ETSI][37] = 127,
- [0][0][2][0][RTW89_MKK][37] = 78,
- [0][0][2][0][RTW89_IC][37] = 80,
- [0][0][2][0][RTW89_ACMA][37] = 78,
- [0][0][2][0][RTW89_FCC][38] = 80,
+ [0][0][2][0][RTW89_MKK][37] = 70,
+ [0][0][2][0][RTW89_IC][37] = 70,
+ [0][0][2][0][RTW89_ACMA][37] = 70,
+ [0][0][2][0][RTW89_FCC][38] = 70,
[0][0][2][0][RTW89_ETSI][38] = 30,
[0][0][2][0][RTW89_MKK][38] = 127,
- [0][0][2][0][RTW89_IC][38] = 80,
- [0][0][2][0][RTW89_ACMA][38] = 78,
- [0][0][2][0][RTW89_FCC][40] = 80,
+ [0][0][2][0][RTW89_IC][38] = 70,
+ [0][0][2][0][RTW89_ACMA][38] = 70,
+ [0][0][2][0][RTW89_FCC][40] = 70,
[0][0][2][0][RTW89_ETSI][40] = 30,
[0][0][2][0][RTW89_MKK][40] = 127,
- [0][0][2][0][RTW89_IC][40] = 80,
- [0][0][2][0][RTW89_ACMA][40] = 78,
- [0][0][2][0][RTW89_FCC][42] = 80,
+ [0][0][2][0][RTW89_IC][40] = 70,
+ [0][0][2][0][RTW89_ACMA][40] = 70,
+ [0][0][2][0][RTW89_FCC][42] = 70,
[0][0][2][0][RTW89_ETSI][42] = 30,
[0][0][2][0][RTW89_MKK][42] = 127,
- [0][0][2][0][RTW89_IC][42] = 80,
- [0][0][2][0][RTW89_ACMA][42] = 78,
- [0][0][2][0][RTW89_FCC][44] = 80,
+ [0][0][2][0][RTW89_IC][42] = 70,
+ [0][0][2][0][RTW89_ACMA][42] = 70,
+ [0][0][2][0][RTW89_FCC][44] = 70,
[0][0][2][0][RTW89_ETSI][44] = 30,
[0][0][2][0][RTW89_MKK][44] = 127,
- [0][0][2][0][RTW89_IC][44] = 80,
- [0][0][2][0][RTW89_ACMA][44] = 78,
- [0][0][2][0][RTW89_FCC][46] = 80,
+ [0][0][2][0][RTW89_IC][44] = 70,
+ [0][0][2][0][RTW89_ACMA][44] = 70,
+ [0][0][2][0][RTW89_FCC][46] = 70,
[0][0][2][0][RTW89_ETSI][46] = 30,
[0][0][2][0][RTW89_MKK][46] = 127,
- [0][0][2][0][RTW89_IC][46] = 80,
- [0][0][2][0][RTW89_ACMA][46] = 78,
- [0][0][2][0][RTW89_FCC][48] = 80,
+ [0][0][2][0][RTW89_IC][46] = 70,
+ [0][0][2][0][RTW89_ACMA][46] = 70,
+ [0][0][2][0][RTW89_FCC][48] = 70,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
[0][0][2][0][RTW89_IC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
- [0][0][2][0][RTW89_FCC][50] = 80,
+ [0][0][2][0][RTW89_FCC][50] = 70,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
[0][0][2][0][RTW89_IC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
- [0][0][2][0][RTW89_FCC][52] = 80,
+ [0][0][2][0][RTW89_FCC][52] = 70,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
[0][0][2][0][RTW89_IC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
- [0][1][2][0][RTW89_FCC][0] = 72,
- [0][1][2][0][RTW89_ETSI][0] = 50,
- [0][1][2][0][RTW89_MKK][0] = 52,
+ [0][1][2][0][RTW89_FCC][0] = 62,
+ [0][1][2][0][RTW89_ETSI][0] = 54,
+ [0][1][2][0][RTW89_MKK][0] = 54,
[0][1][2][0][RTW89_IC][0] = 44,
[0][1][2][0][RTW89_ACMA][0] = 50,
- [0][1][2][0][RTW89_FCC][2] = 72,
- [0][1][2][0][RTW89_ETSI][2] = 50,
- [0][1][2][0][RTW89_MKK][2] = 52,
+ [0][1][2][0][RTW89_FCC][2] = 62,
+ [0][1][2][0][RTW89_ETSI][2] = 54,
+ [0][1][2][0][RTW89_MKK][2] = 54,
[0][1][2][0][RTW89_IC][2] = 44,
[0][1][2][0][RTW89_ACMA][2] = 50,
- [0][1][2][0][RTW89_FCC][4] = 72,
- [0][1][2][0][RTW89_ETSI][4] = 50,
- [0][1][2][0][RTW89_MKK][4] = 52,
+ [0][1][2][0][RTW89_FCC][4] = 62,
+ [0][1][2][0][RTW89_ETSI][4] = 54,
+ [0][1][2][0][RTW89_MKK][4] = 54,
[0][1][2][0][RTW89_IC][4] = 44,
[0][1][2][0][RTW89_ACMA][4] = 50,
- [0][1][2][0][RTW89_FCC][6] = 72,
- [0][1][2][0][RTW89_ETSI][6] = 50,
- [0][1][2][0][RTW89_MKK][6] = 52,
+ [0][1][2][0][RTW89_FCC][6] = 62,
+ [0][1][2][0][RTW89_ETSI][6] = 54,
+ [0][1][2][0][RTW89_MKK][6] = 50,
[0][1][2][0][RTW89_IC][6] = 44,
[0][1][2][0][RTW89_ACMA][6] = 50,
- [0][1][2][0][RTW89_FCC][8] = 72,
- [0][1][2][0][RTW89_ETSI][8] = 50,
- [0][1][2][0][RTW89_MKK][8] = 52,
+ [0][1][2][0][RTW89_FCC][8] = 62,
+ [0][1][2][0][RTW89_ETSI][8] = 54,
+ [0][1][2][0][RTW89_MKK][8] = 42,
[0][1][2][0][RTW89_IC][8] = 54,
[0][1][2][0][RTW89_ACMA][8] = 50,
- [0][1][2][0][RTW89_FCC][10] = 72,
- [0][1][2][0][RTW89_ETSI][10] = 50,
- [0][1][2][0][RTW89_MKK][10] = 52,
+ [0][1][2][0][RTW89_FCC][10] = 62,
+ [0][1][2][0][RTW89_ETSI][10] = 54,
+ [0][1][2][0][RTW89_MKK][10] = 54,
[0][1][2][0][RTW89_IC][10] = 54,
[0][1][2][0][RTW89_ACMA][10] = 50,
- [0][1][2][0][RTW89_FCC][12] = 72,
- [0][1][2][0][RTW89_ETSI][12] = 50,
- [0][1][2][0][RTW89_MKK][12] = 52,
+ [0][1][2][0][RTW89_FCC][12] = 62,
+ [0][1][2][0][RTW89_ETSI][12] = 54,
+ [0][1][2][0][RTW89_MKK][12] = 54,
[0][1][2][0][RTW89_IC][12] = 54,
[0][1][2][0][RTW89_ACMA][12] = 50,
- [0][1][2][0][RTW89_FCC][14] = 72,
- [0][1][2][0][RTW89_ETSI][14] = 50,
- [0][1][2][0][RTW89_MKK][14] = 52,
+ [0][1][2][0][RTW89_FCC][14] = 62,
+ [0][1][2][0][RTW89_ETSI][14] = 54,
+ [0][1][2][0][RTW89_MKK][14] = 54,
[0][1][2][0][RTW89_IC][14] = 54,
[0][1][2][0][RTW89_ACMA][14] = 50,
- [0][1][2][0][RTW89_FCC][15] = 70,
- [0][1][2][0][RTW89_ETSI][15] = 50,
- [0][1][2][0][RTW89_MKK][15] = 72,
+ [0][1][2][0][RTW89_FCC][15] = 60,
+ [0][1][2][0][RTW89_ETSI][15] = 54,
+ [0][1][2][0][RTW89_MKK][15] = 68,
[0][1][2][0][RTW89_IC][15] = 70,
[0][1][2][0][RTW89_ACMA][15] = 50,
- [0][1][2][0][RTW89_FCC][17] = 72,
- [0][1][2][0][RTW89_ETSI][17] = 50,
- [0][1][2][0][RTW89_MKK][17] = 72,
- [0][1][2][0][RTW89_IC][17] = 72,
+ [0][1][2][0][RTW89_FCC][17] = 62,
+ [0][1][2][0][RTW89_ETSI][17] = 54,
+ [0][1][2][0][RTW89_MKK][17] = 68,
+ [0][1][2][0][RTW89_IC][17] = 70,
[0][1][2][0][RTW89_ACMA][17] = 50,
- [0][1][2][0][RTW89_FCC][19] = 72,
- [0][1][2][0][RTW89_ETSI][19] = 50,
- [0][1][2][0][RTW89_MKK][19] = 72,
- [0][1][2][0][RTW89_IC][19] = 72,
+ [0][1][2][0][RTW89_FCC][19] = 62,
+ [0][1][2][0][RTW89_ETSI][19] = 54,
+ [0][1][2][0][RTW89_MKK][19] = 68,
+ [0][1][2][0][RTW89_IC][19] = 70,
[0][1][2][0][RTW89_ACMA][19] = 50,
- [0][1][2][0][RTW89_FCC][21] = 72,
- [0][1][2][0][RTW89_ETSI][21] = 50,
- [0][1][2][0][RTW89_MKK][21] = 72,
- [0][1][2][0][RTW89_IC][21] = 72,
+ [0][1][2][0][RTW89_FCC][21] = 62,
+ [0][1][2][0][RTW89_ETSI][21] = 54,
+ [0][1][2][0][RTW89_MKK][21] = 68,
+ [0][1][2][0][RTW89_IC][21] = 70,
[0][1][2][0][RTW89_ACMA][21] = 50,
- [0][1][2][0][RTW89_FCC][23] = 72,
- [0][1][2][0][RTW89_ETSI][23] = 50,
- [0][1][2][0][RTW89_MKK][23] = 72,
- [0][1][2][0][RTW89_IC][23] = 72,
+ [0][1][2][0][RTW89_FCC][23] = 62,
+ [0][1][2][0][RTW89_ETSI][23] = 54,
+ [0][1][2][0][RTW89_MKK][23] = 68,
+ [0][1][2][0][RTW89_IC][23] = 70,
[0][1][2][0][RTW89_ACMA][23] = 50,
- [0][1][2][0][RTW89_FCC][25] = 72,
- [0][1][2][0][RTW89_ETSI][25] = 50,
- [0][1][2][0][RTW89_MKK][25] = 72,
+ [0][1][2][0][RTW89_FCC][25] = 62,
+ [0][1][2][0][RTW89_ETSI][25] = 54,
+ [0][1][2][0][RTW89_MKK][25] = 68,
[0][1][2][0][RTW89_IC][25] = 127,
[0][1][2][0][RTW89_ACMA][25] = 127,
- [0][1][2][0][RTW89_FCC][27] = 72,
- [0][1][2][0][RTW89_ETSI][27] = 50,
- [0][1][2][0][RTW89_MKK][27] = 72,
+ [0][1][2][0][RTW89_FCC][27] = 62,
+ [0][1][2][0][RTW89_ETSI][27] = 54,
+ [0][1][2][0][RTW89_MKK][27] = 68,
[0][1][2][0][RTW89_IC][27] = 127,
[0][1][2][0][RTW89_ACMA][27] = 127,
- [0][1][2][0][RTW89_FCC][29] = 72,
- [0][1][2][0][RTW89_ETSI][29] = 50,
- [0][1][2][0][RTW89_MKK][29] = 72,
+ [0][1][2][0][RTW89_FCC][29] = 62,
+ [0][1][2][0][RTW89_ETSI][29] = 54,
+ [0][1][2][0][RTW89_MKK][29] = 68,
[0][1][2][0][RTW89_IC][29] = 127,
[0][1][2][0][RTW89_ACMA][29] = 127,
- [0][1][2][0][RTW89_FCC][31] = 72,
- [0][1][2][0][RTW89_ETSI][31] = 50,
- [0][1][2][0][RTW89_MKK][31] = 72,
- [0][1][2][0][RTW89_IC][31] = 72,
+ [0][1][2][0][RTW89_FCC][31] = 62,
+ [0][1][2][0][RTW89_ETSI][31] = 54,
+ [0][1][2][0][RTW89_MKK][31] = 68,
+ [0][1][2][0][RTW89_IC][31] = 70,
[0][1][2][0][RTW89_ACMA][31] = 50,
- [0][1][2][0][RTW89_FCC][33] = 72,
- [0][1][2][0][RTW89_ETSI][33] = 50,
- [0][1][2][0][RTW89_MKK][33] = 72,
- [0][1][2][0][RTW89_IC][33] = 72,
+ [0][1][2][0][RTW89_FCC][33] = 62,
+ [0][1][2][0][RTW89_ETSI][33] = 54,
+ [0][1][2][0][RTW89_MKK][33] = 68,
+ [0][1][2][0][RTW89_IC][33] = 70,
[0][1][2][0][RTW89_ACMA][33] = 50,
- [0][1][2][0][RTW89_FCC][35] = 68,
- [0][1][2][0][RTW89_ETSI][35] = 50,
- [0][1][2][0][RTW89_MKK][35] = 72,
+ [0][1][2][0][RTW89_FCC][35] = 58,
+ [0][1][2][0][RTW89_ETSI][35] = 54,
+ [0][1][2][0][RTW89_MKK][35] = 68,
[0][1][2][0][RTW89_IC][35] = 68,
[0][1][2][0][RTW89_ACMA][35] = 50,
- [0][1][2][0][RTW89_FCC][37] = 72,
+ [0][1][2][0][RTW89_FCC][37] = 62,
[0][1][2][0][RTW89_ETSI][37] = 127,
- [0][1][2][0][RTW89_MKK][37] = 72,
- [0][1][2][0][RTW89_IC][37] = 72,
- [0][1][2][0][RTW89_ACMA][37] = 72,
- [0][1][2][0][RTW89_FCC][38] = 80,
+ [0][1][2][0][RTW89_MKK][37] = 68,
+ [0][1][2][0][RTW89_IC][37] = 70,
+ [0][1][2][0][RTW89_ACMA][37] = 70,
+ [0][1][2][0][RTW89_FCC][38] = 70,
[0][1][2][0][RTW89_ETSI][38] = 18,
[0][1][2][0][RTW89_MKK][38] = 127,
- [0][1][2][0][RTW89_IC][38] = 80,
- [0][1][2][0][RTW89_ACMA][38] = 76,
- [0][1][2][0][RTW89_FCC][40] = 80,
+ [0][1][2][0][RTW89_IC][38] = 70,
+ [0][1][2][0][RTW89_ACMA][38] = 70,
+ [0][1][2][0][RTW89_FCC][40] = 70,
[0][1][2][0][RTW89_ETSI][40] = 18,
[0][1][2][0][RTW89_MKK][40] = 127,
- [0][1][2][0][RTW89_IC][40] = 80,
- [0][1][2][0][RTW89_ACMA][40] = 76,
- [0][1][2][0][RTW89_FCC][42] = 80,
+ [0][1][2][0][RTW89_IC][40] = 70,
+ [0][1][2][0][RTW89_ACMA][40] = 70,
+ [0][1][2][0][RTW89_FCC][42] = 70,
[0][1][2][0][RTW89_ETSI][42] = 18,
[0][1][2][0][RTW89_MKK][42] = 127,
- [0][1][2][0][RTW89_IC][42] = 80,
- [0][1][2][0][RTW89_ACMA][42] = 78,
- [0][1][2][0][RTW89_FCC][44] = 80,
+ [0][1][2][0][RTW89_IC][42] = 70,
+ [0][1][2][0][RTW89_ACMA][42] = 70,
+ [0][1][2][0][RTW89_FCC][44] = 70,
[0][1][2][0][RTW89_ETSI][44] = 18,
[0][1][2][0][RTW89_MKK][44] = 127,
- [0][1][2][0][RTW89_IC][44] = 80,
- [0][1][2][0][RTW89_ACMA][44] = 78,
- [0][1][2][0][RTW89_FCC][46] = 80,
+ [0][1][2][0][RTW89_IC][44] = 70,
+ [0][1][2][0][RTW89_ACMA][44] = 70,
+ [0][1][2][0][RTW89_FCC][46] = 70,
[0][1][2][0][RTW89_ETSI][46] = 18,
[0][1][2][0][RTW89_MKK][46] = 127,
- [0][1][2][0][RTW89_IC][46] = 80,
- [0][1][2][0][RTW89_ACMA][46] = 78,
- [0][1][2][0][RTW89_FCC][48] = 60,
+ [0][1][2][0][RTW89_IC][46] = 70,
+ [0][1][2][0][RTW89_ACMA][46] = 70,
+ [0][1][2][0][RTW89_FCC][48] = 50,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
[0][1][2][0][RTW89_IC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
- [0][1][2][0][RTW89_FCC][50] = 60,
+ [0][1][2][0][RTW89_FCC][50] = 50,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
[0][1][2][0][RTW89_IC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
- [0][1][2][0][RTW89_FCC][52] = 60,
+ [0][1][2][0][RTW89_FCC][52] = 50,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
[0][1][2][0][RTW89_IC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
- [0][1][2][1][RTW89_FCC][0] = 70,
- [0][1][2][1][RTW89_ETSI][0] = 42,
- [0][1][2][1][RTW89_MKK][0] = 52,
+ [0][1][2][1][RTW89_FCC][0] = 60,
+ [0][1][2][1][RTW89_ETSI][0] = 40,
+ [0][1][2][1][RTW89_MKK][0] = 54,
[0][1][2][1][RTW89_IC][0] = 42,
[0][1][2][1][RTW89_ACMA][0] = 38,
- [0][1][2][1][RTW89_FCC][2] = 70,
- [0][1][2][1][RTW89_ETSI][2] = 42,
- [0][1][2][1][RTW89_MKK][2] = 52,
+ [0][1][2][1][RTW89_FCC][2] = 60,
+ [0][1][2][1][RTW89_ETSI][2] = 40,
+ [0][1][2][1][RTW89_MKK][2] = 54,
[0][1][2][1][RTW89_IC][2] = 42,
[0][1][2][1][RTW89_ACMA][2] = 38,
- [0][1][2][1][RTW89_FCC][4] = 70,
- [0][1][2][1][RTW89_ETSI][4] = 42,
- [0][1][2][1][RTW89_MKK][4] = 52,
+ [0][1][2][1][RTW89_FCC][4] = 60,
+ [0][1][2][1][RTW89_ETSI][4] = 40,
+ [0][1][2][1][RTW89_MKK][4] = 54,
[0][1][2][1][RTW89_IC][4] = 42,
[0][1][2][1][RTW89_ACMA][4] = 38,
- [0][1][2][1][RTW89_FCC][6] = 70,
- [0][1][2][1][RTW89_ETSI][6] = 42,
- [0][1][2][1][RTW89_MKK][6] = 52,
+ [0][1][2][1][RTW89_FCC][6] = 60,
+ [0][1][2][1][RTW89_ETSI][6] = 40,
+ [0][1][2][1][RTW89_MKK][6] = 50,
[0][1][2][1][RTW89_IC][6] = 42,
[0][1][2][1][RTW89_ACMA][6] = 38,
- [0][1][2][1][RTW89_FCC][8] = 70,
- [0][1][2][1][RTW89_ETSI][8] = 42,
- [0][1][2][1][RTW89_MKK][8] = 52,
+ [0][1][2][1][RTW89_FCC][8] = 60,
+ [0][1][2][1][RTW89_ETSI][8] = 40,
+ [0][1][2][1][RTW89_MKK][8] = 42,
[0][1][2][1][RTW89_IC][8] = 42,
[0][1][2][1][RTW89_ACMA][8] = 38,
- [0][1][2][1][RTW89_FCC][10] = 70,
- [0][1][2][1][RTW89_ETSI][10] = 42,
- [0][1][2][1][RTW89_MKK][10] = 52,
+ [0][1][2][1][RTW89_FCC][10] = 60,
+ [0][1][2][1][RTW89_ETSI][10] = 40,
+ [0][1][2][1][RTW89_MKK][10] = 66,
[0][1][2][1][RTW89_IC][10] = 42,
[0][1][2][1][RTW89_ACMA][10] = 38,
- [0][1][2][1][RTW89_FCC][12] = 70,
- [0][1][2][1][RTW89_ETSI][12] = 42,
- [0][1][2][1][RTW89_MKK][12] = 52,
+ [0][1][2][1][RTW89_FCC][12] = 60,
+ [0][1][2][1][RTW89_ETSI][12] = 40,
+ [0][1][2][1][RTW89_MKK][12] = 66,
[0][1][2][1][RTW89_IC][12] = 42,
[0][1][2][1][RTW89_ACMA][12] = 38,
- [0][1][2][1][RTW89_FCC][14] = 70,
- [0][1][2][1][RTW89_ETSI][14] = 42,
- [0][1][2][1][RTW89_MKK][14] = 52,
+ [0][1][2][1][RTW89_FCC][14] = 60,
+ [0][1][2][1][RTW89_ETSI][14] = 40,
+ [0][1][2][1][RTW89_MKK][14] = 66,
[0][1][2][1][RTW89_IC][14] = 42,
[0][1][2][1][RTW89_ACMA][14] = 38,
- [0][1][2][1][RTW89_FCC][15] = 70,
- [0][1][2][1][RTW89_ETSI][15] = 42,
- [0][1][2][1][RTW89_MKK][15] = 72,
+ [0][1][2][1][RTW89_FCC][15] = 60,
+ [0][1][2][1][RTW89_ETSI][15] = 40,
+ [0][1][2][1][RTW89_MKK][15] = 68,
[0][1][2][1][RTW89_IC][15] = 70,
[0][1][2][1][RTW89_ACMA][15] = 38,
- [0][1][2][1][RTW89_FCC][17] = 70,
- [0][1][2][1][RTW89_ETSI][17] = 42,
- [0][1][2][1][RTW89_MKK][17] = 72,
+ [0][1][2][1][RTW89_FCC][17] = 60,
+ [0][1][2][1][RTW89_ETSI][17] = 40,
+ [0][1][2][1][RTW89_MKK][17] = 68,
[0][1][2][1][RTW89_IC][17] = 70,
[0][1][2][1][RTW89_ACMA][17] = 38,
- [0][1][2][1][RTW89_FCC][19] = 70,
- [0][1][2][1][RTW89_ETSI][19] = 42,
- [0][1][2][1][RTW89_MKK][19] = 72,
+ [0][1][2][1][RTW89_FCC][19] = 60,
+ [0][1][2][1][RTW89_ETSI][19] = 40,
+ [0][1][2][1][RTW89_MKK][19] = 68,
[0][1][2][1][RTW89_IC][19] = 70,
[0][1][2][1][RTW89_ACMA][19] = 38,
- [0][1][2][1][RTW89_FCC][21] = 70,
- [0][1][2][1][RTW89_ETSI][21] = 42,
- [0][1][2][1][RTW89_MKK][21] = 72,
+ [0][1][2][1][RTW89_FCC][21] = 60,
+ [0][1][2][1][RTW89_ETSI][21] = 40,
+ [0][1][2][1][RTW89_MKK][21] = 68,
[0][1][2][1][RTW89_IC][21] = 70,
[0][1][2][1][RTW89_ACMA][21] = 38,
- [0][1][2][1][RTW89_FCC][23] = 70,
- [0][1][2][1][RTW89_ETSI][23] = 42,
- [0][1][2][1][RTW89_MKK][23] = 72,
+ [0][1][2][1][RTW89_FCC][23] = 60,
+ [0][1][2][1][RTW89_ETSI][23] = 40,
+ [0][1][2][1][RTW89_MKK][23] = 68,
[0][1][2][1][RTW89_IC][23] = 70,
[0][1][2][1][RTW89_ACMA][23] = 38,
- [0][1][2][1][RTW89_FCC][25] = 68,
- [0][1][2][1][RTW89_ETSI][25] = 42,
- [0][1][2][1][RTW89_MKK][25] = 72,
+ [0][1][2][1][RTW89_FCC][25] = 58,
+ [0][1][2][1][RTW89_ETSI][25] = 40,
+ [0][1][2][1][RTW89_MKK][25] = 68,
[0][1][2][1][RTW89_IC][25] = 127,
[0][1][2][1][RTW89_ACMA][25] = 127,
- [0][1][2][1][RTW89_FCC][27] = 68,
- [0][1][2][1][RTW89_ETSI][27] = 42,
- [0][1][2][1][RTW89_MKK][27] = 72,
+ [0][1][2][1][RTW89_FCC][27] = 58,
+ [0][1][2][1][RTW89_ETSI][27] = 40,
+ [0][1][2][1][RTW89_MKK][27] = 68,
[0][1][2][1][RTW89_IC][27] = 127,
[0][1][2][1][RTW89_ACMA][27] = 127,
- [0][1][2][1][RTW89_FCC][29] = 68,
- [0][1][2][1][RTW89_ETSI][29] = 42,
- [0][1][2][1][RTW89_MKK][29] = 72,
+ [0][1][2][1][RTW89_FCC][29] = 58,
+ [0][1][2][1][RTW89_ETSI][29] = 40,
+ [0][1][2][1][RTW89_MKK][29] = 68,
[0][1][2][1][RTW89_IC][29] = 127,
[0][1][2][1][RTW89_ACMA][29] = 127,
- [0][1][2][1][RTW89_FCC][31] = 68,
- [0][1][2][1][RTW89_ETSI][31] = 42,
- [0][1][2][1][RTW89_MKK][31] = 72,
+ [0][1][2][1][RTW89_FCC][31] = 58,
+ [0][1][2][1][RTW89_ETSI][31] = 40,
+ [0][1][2][1][RTW89_MKK][31] = 68,
[0][1][2][1][RTW89_IC][31] = 68,
[0][1][2][1][RTW89_ACMA][31] = 38,
- [0][1][2][1][RTW89_FCC][33] = 68,
- [0][1][2][1][RTW89_ETSI][33] = 42,
- [0][1][2][1][RTW89_MKK][33] = 72,
+ [0][1][2][1][RTW89_FCC][33] = 58,
+ [0][1][2][1][RTW89_ETSI][33] = 40,
+ [0][1][2][1][RTW89_MKK][33] = 68,
[0][1][2][1][RTW89_IC][33] = 68,
[0][1][2][1][RTW89_ACMA][33] = 38,
- [0][1][2][1][RTW89_FCC][35] = 68,
- [0][1][2][1][RTW89_ETSI][35] = 42,
- [0][1][2][1][RTW89_MKK][35] = 72,
+ [0][1][2][1][RTW89_FCC][35] = 58,
+ [0][1][2][1][RTW89_ETSI][35] = 40,
+ [0][1][2][1][RTW89_MKK][35] = 68,
[0][1][2][1][RTW89_IC][35] = 68,
[0][1][2][1][RTW89_ACMA][35] = 38,
- [0][1][2][1][RTW89_FCC][37] = 70,
+ [0][1][2][1][RTW89_FCC][37] = 60,
[0][1][2][1][RTW89_ETSI][37] = 127,
- [0][1][2][1][RTW89_MKK][37] = 72,
+ [0][1][2][1][RTW89_MKK][37] = 68,
[0][1][2][1][RTW89_IC][37] = 70,
- [0][1][2][1][RTW89_ACMA][37] = 72,
- [0][1][2][1][RTW89_FCC][38] = 80,
- [0][1][2][1][RTW89_ETSI][38] = 8,
+ [0][1][2][1][RTW89_ACMA][37] = 70,
+ [0][1][2][1][RTW89_FCC][38] = 70,
+ [0][1][2][1][RTW89_ETSI][38] = 6,
[0][1][2][1][RTW89_MKK][38] = 127,
- [0][1][2][1][RTW89_IC][38] = 80,
- [0][1][2][1][RTW89_ACMA][38] = 76,
- [0][1][2][1][RTW89_FCC][40] = 80,
- [0][1][2][1][RTW89_ETSI][40] = 8,
+ [0][1][2][1][RTW89_IC][38] = 70,
+ [0][1][2][1][RTW89_ACMA][38] = 70,
+ [0][1][2][1][RTW89_FCC][40] = 70,
+ [0][1][2][1][RTW89_ETSI][40] = 6,
[0][1][2][1][RTW89_MKK][40] = 127,
- [0][1][2][1][RTW89_IC][40] = 80,
- [0][1][2][1][RTW89_ACMA][40] = 76,
- [0][1][2][1][RTW89_FCC][42] = 80,
- [0][1][2][1][RTW89_ETSI][42] = 8,
+ [0][1][2][1][RTW89_IC][40] = 70,
+ [0][1][2][1][RTW89_ACMA][40] = 70,
+ [0][1][2][1][RTW89_FCC][42] = 70,
+ [0][1][2][1][RTW89_ETSI][42] = 6,
[0][1][2][1][RTW89_MKK][42] = 127,
- [0][1][2][1][RTW89_IC][42] = 80,
- [0][1][2][1][RTW89_ACMA][42] = 78,
- [0][1][2][1][RTW89_FCC][44] = 80,
- [0][1][2][1][RTW89_ETSI][44] = 8,
+ [0][1][2][1][RTW89_IC][42] = 70,
+ [0][1][2][1][RTW89_ACMA][42] = 70,
+ [0][1][2][1][RTW89_FCC][44] = 70,
+ [0][1][2][1][RTW89_ETSI][44] = 6,
[0][1][2][1][RTW89_MKK][44] = 127,
- [0][1][2][1][RTW89_IC][44] = 80,
- [0][1][2][1][RTW89_ACMA][44] = 78,
- [0][1][2][1][RTW89_FCC][46] = 80,
- [0][1][2][1][RTW89_ETSI][46] = 8,
+ [0][1][2][1][RTW89_IC][44] = 70,
+ [0][1][2][1][RTW89_ACMA][44] = 70,
+ [0][1][2][1][RTW89_FCC][46] = 70,
+ [0][1][2][1][RTW89_ETSI][46] = 6,
[0][1][2][1][RTW89_MKK][46] = 127,
- [0][1][2][1][RTW89_IC][46] = 80,
- [0][1][2][1][RTW89_ACMA][46] = 78,
- [0][1][2][1][RTW89_FCC][48] = 60,
+ [0][1][2][1][RTW89_IC][46] = 70,
+ [0][1][2][1][RTW89_ACMA][46] = 70,
+ [0][1][2][1][RTW89_FCC][48] = 50,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
[0][1][2][1][RTW89_IC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
- [0][1][2][1][RTW89_FCC][50] = 60,
+ [0][1][2][1][RTW89_FCC][50] = 50,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
[0][1][2][1][RTW89_IC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
- [0][1][2][1][RTW89_FCC][52] = 60,
+ [0][1][2][1][RTW89_FCC][52] = 50,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
[0][1][2][1][RTW89_IC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
- [1][0][2][0][RTW89_FCC][1] = 68,
+ [1][0][2][0][RTW89_FCC][1] = 58,
[1][0][2][0][RTW89_ETSI][1] = 66,
- [1][0][2][0][RTW89_MKK][1] = 72,
- [1][0][2][0][RTW89_IC][1] = 72,
- [1][0][2][0][RTW89_ACMA][1] = 72,
- [1][0][2][0][RTW89_FCC][5] = 80,
+ [1][0][2][0][RTW89_MKK][1] = 66,
+ [1][0][2][0][RTW89_IC][1] = 66,
+ [1][0][2][0][RTW89_ACMA][1] = 66,
+ [1][0][2][0][RTW89_FCC][5] = 68,
[1][0][2][0][RTW89_ETSI][5] = 66,
- [1][0][2][0][RTW89_MKK][5] = 72,
- [1][0][2][0][RTW89_IC][5] = 72,
- [1][0][2][0][RTW89_ACMA][5] = 72,
- [1][0][2][0][RTW89_FCC][9] = 80,
+ [1][0][2][0][RTW89_MKK][5] = 66,
+ [1][0][2][0][RTW89_IC][5] = 66,
+ [1][0][2][0][RTW89_ACMA][5] = 66,
+ [1][0][2][0][RTW89_FCC][9] = 68,
[1][0][2][0][RTW89_ETSI][9] = 66,
- [1][0][2][0][RTW89_MKK][9] = 72,
- [1][0][2][0][RTW89_IC][9] = 72,
- [1][0][2][0][RTW89_ACMA][9] = 72,
- [1][0][2][0][RTW89_FCC][13] = 68,
+ [1][0][2][0][RTW89_MKK][9] = 66,
+ [1][0][2][0][RTW89_IC][9] = 66,
+ [1][0][2][0][RTW89_ACMA][9] = 66,
+ [1][0][2][0][RTW89_FCC][13] = 58,
[1][0][2][0][RTW89_ETSI][13] = 66,
- [1][0][2][0][RTW89_MKK][13] = 72,
- [1][0][2][0][RTW89_IC][13] = 72,
- [1][0][2][0][RTW89_ACMA][13] = 72,
- [1][0][2][0][RTW89_FCC][16] = 66,
+ [1][0][2][0][RTW89_MKK][13] = 66,
+ [1][0][2][0][RTW89_IC][13] = 66,
+ [1][0][2][0][RTW89_ACMA][13] = 66,
+ [1][0][2][0][RTW89_FCC][16] = 56,
[1][0][2][0][RTW89_ETSI][16] = 66,
- [1][0][2][0][RTW89_MKK][16] = 76,
- [1][0][2][0][RTW89_IC][16] = 72,
- [1][0][2][0][RTW89_ACMA][16] = 72,
- [1][0][2][0][RTW89_FCC][20] = 80,
+ [1][0][2][0][RTW89_MKK][16] = 66,
+ [1][0][2][0][RTW89_IC][16] = 66,
+ [1][0][2][0][RTW89_ACMA][16] = 66,
+ [1][0][2][0][RTW89_FCC][20] = 68,
[1][0][2][0][RTW89_ETSI][20] = 66,
- [1][0][2][0][RTW89_MKK][20] = 76,
- [1][0][2][0][RTW89_IC][20] = 80,
- [1][0][2][0][RTW89_ACMA][20] = 72,
- [1][0][2][0][RTW89_FCC][24] = 80,
+ [1][0][2][0][RTW89_MKK][20] = 66,
+ [1][0][2][0][RTW89_IC][20] = 66,
+ [1][0][2][0][RTW89_ACMA][20] = 66,
+ [1][0][2][0][RTW89_FCC][24] = 68,
[1][0][2][0][RTW89_ETSI][24] = 66,
- [1][0][2][0][RTW89_MKK][24] = 76,
+ [1][0][2][0][RTW89_MKK][24] = 66,
[1][0][2][0][RTW89_IC][24] = 127,
[1][0][2][0][RTW89_ACMA][24] = 127,
- [1][0][2][0][RTW89_FCC][28] = 80,
+ [1][0][2][0][RTW89_FCC][28] = 68,
[1][0][2][0][RTW89_ETSI][28] = 66,
- [1][0][2][0][RTW89_MKK][28] = 76,
+ [1][0][2][0][RTW89_MKK][28] = 66,
[1][0][2][0][RTW89_IC][28] = 127,
[1][0][2][0][RTW89_ACMA][28] = 127,
- [1][0][2][0][RTW89_FCC][32] = 78,
+ [1][0][2][0][RTW89_FCC][32] = 68,
[1][0][2][0][RTW89_ETSI][32] = 66,
- [1][0][2][0][RTW89_MKK][32] = 76,
- [1][0][2][0][RTW89_IC][32] = 78,
+ [1][0][2][0][RTW89_MKK][32] = 66,
+ [1][0][2][0][RTW89_IC][32] = 66,
[1][0][2][0][RTW89_ACMA][32] = 66,
- [1][0][2][0][RTW89_FCC][36] = 80,
+ [1][0][2][0][RTW89_FCC][36] = 68,
[1][0][2][0][RTW89_ETSI][36] = 127,
- [1][0][2][0][RTW89_MKK][36] = 76,
- [1][0][2][0][RTW89_IC][36] = 80,
- [1][0][2][0][RTW89_ACMA][36] = 76,
- [1][0][2][0][RTW89_FCC][39] = 80,
+ [1][0][2][0][RTW89_MKK][36] = 66,
+ [1][0][2][0][RTW89_IC][36] = 66,
+ [1][0][2][0][RTW89_ACMA][36] = 66,
+ [1][0][2][0][RTW89_FCC][39] = 68,
[1][0][2][0][RTW89_ETSI][39] = 30,
[1][0][2][0][RTW89_MKK][39] = 127,
- [1][0][2][0][RTW89_IC][39] = 80,
- [1][0][2][0][RTW89_ACMA][39] = 76,
- [1][0][2][0][RTW89_FCC][43] = 80,
+ [1][0][2][0][RTW89_IC][39] = 66,
+ [1][0][2][0][RTW89_ACMA][39] = 66,
+ [1][0][2][0][RTW89_FCC][43] = 68,
[1][0][2][0][RTW89_ETSI][43] = 30,
[1][0][2][0][RTW89_MKK][43] = 127,
- [1][0][2][0][RTW89_IC][43] = 80,
- [1][0][2][0][RTW89_ACMA][43] = 76,
- [1][0][2][0][RTW89_FCC][47] = 80,
+ [1][0][2][0][RTW89_IC][43] = 66,
+ [1][0][2][0][RTW89_ACMA][43] = 66,
+ [1][0][2][0][RTW89_FCC][47] = 68,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
[1][0][2][0][RTW89_IC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
- [1][0][2][0][RTW89_FCC][51] = 72,
+ [1][0][2][0][RTW89_FCC][51] = 68,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
[1][0][2][0][RTW89_IC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
- [1][1][2][0][RTW89_FCC][1] = 64,
+ [1][1][2][0][RTW89_FCC][1] = 54,
[1][1][2][0][RTW89_ETSI][1] = 54,
- [1][1][2][0][RTW89_MKK][1] = 60,
+ [1][1][2][0][RTW89_MKK][1] = 48,
[1][1][2][0][RTW89_IC][1] = 60,
[1][1][2][0][RTW89_ACMA][1] = 60,
- [1][1][2][0][RTW89_FCC][5] = 78,
+ [1][1][2][0][RTW89_FCC][5] = 68,
[1][1][2][0][RTW89_ETSI][5] = 54,
- [1][1][2][0][RTW89_MKK][5] = 60,
+ [1][1][2][0][RTW89_MKK][5] = 52,
[1][1][2][0][RTW89_IC][5] = 60,
[1][1][2][0][RTW89_ACMA][5] = 60,
- [1][1][2][0][RTW89_FCC][9] = 78,
+ [1][1][2][0][RTW89_FCC][9] = 68,
[1][1][2][0][RTW89_ETSI][9] = 54,
- [1][1][2][0][RTW89_MKK][9] = 60,
+ [1][1][2][0][RTW89_MKK][9] = 52,
[1][1][2][0][RTW89_IC][9] = 60,
[1][1][2][0][RTW89_ACMA][9] = 60,
- [1][1][2][0][RTW89_FCC][13] = 64,
+ [1][1][2][0][RTW89_FCC][13] = 54,
[1][1][2][0][RTW89_ETSI][13] = 54,
- [1][1][2][0][RTW89_MKK][13] = 60,
+ [1][1][2][0][RTW89_MKK][13] = 52,
[1][1][2][0][RTW89_IC][13] = 60,
[1][1][2][0][RTW89_ACMA][13] = 60,
- [1][1][2][0][RTW89_FCC][16] = 58,
+ [1][1][2][0][RTW89_FCC][16] = 48,
[1][1][2][0][RTW89_ETSI][16] = 54,
- [1][1][2][0][RTW89_MKK][16] = 72,
+ [1][1][2][0][RTW89_MKK][16] = 66,
[1][1][2][0][RTW89_IC][16] = 58,
[1][1][2][0][RTW89_ACMA][16] = 60,
- [1][1][2][0][RTW89_FCC][20] = 78,
+ [1][1][2][0][RTW89_FCC][20] = 68,
[1][1][2][0][RTW89_ETSI][20] = 54,
- [1][1][2][0][RTW89_MKK][20] = 72,
- [1][1][2][0][RTW89_IC][20] = 78,
+ [1][1][2][0][RTW89_MKK][20] = 66,
+ [1][1][2][0][RTW89_IC][20] = 66,
[1][1][2][0][RTW89_ACMA][20] = 60,
- [1][1][2][0][RTW89_FCC][24] = 78,
+ [1][1][2][0][RTW89_FCC][24] = 68,
[1][1][2][0][RTW89_ETSI][24] = 54,
- [1][1][2][0][RTW89_MKK][24] = 72,
+ [1][1][2][0][RTW89_MKK][24] = 66,
[1][1][2][0][RTW89_IC][24] = 127,
[1][1][2][0][RTW89_ACMA][24] = 127,
- [1][1][2][0][RTW89_FCC][28] = 78,
+ [1][1][2][0][RTW89_FCC][28] = 68,
[1][1][2][0][RTW89_ETSI][28] = 54,
- [1][1][2][0][RTW89_MKK][28] = 72,
+ [1][1][2][0][RTW89_MKK][28] = 66,
[1][1][2][0][RTW89_IC][28] = 127,
[1][1][2][0][RTW89_ACMA][28] = 127,
- [1][1][2][0][RTW89_FCC][32] = 70,
+ [1][1][2][0][RTW89_FCC][32] = 60,
[1][1][2][0][RTW89_ETSI][32] = 54,
- [1][1][2][0][RTW89_MKK][32] = 72,
- [1][1][2][0][RTW89_IC][32] = 70,
+ [1][1][2][0][RTW89_MKK][32] = 66,
+ [1][1][2][0][RTW89_IC][32] = 66,
[1][1][2][0][RTW89_ACMA][32] = 54,
- [1][1][2][0][RTW89_FCC][36] = 78,
+ [1][1][2][0][RTW89_FCC][36] = 68,
[1][1][2][0][RTW89_ETSI][36] = 127,
- [1][1][2][0][RTW89_MKK][36] = 72,
- [1][1][2][0][RTW89_IC][36] = 78,
- [1][1][2][0][RTW89_ACMA][36] = 76,
- [1][1][2][0][RTW89_FCC][39] = 80,
+ [1][1][2][0][RTW89_MKK][36] = 66,
+ [1][1][2][0][RTW89_IC][36] = 66,
+ [1][1][2][0][RTW89_ACMA][36] = 66,
+ [1][1][2][0][RTW89_FCC][39] = 68,
[1][1][2][0][RTW89_ETSI][39] = 18,
[1][1][2][0][RTW89_MKK][39] = 127,
- [1][1][2][0][RTW89_IC][39] = 80,
- [1][1][2][0][RTW89_ACMA][39] = 74,
- [1][1][2][0][RTW89_FCC][43] = 80,
+ [1][1][2][0][RTW89_IC][39] = 66,
+ [1][1][2][0][RTW89_ACMA][39] = 66,
+ [1][1][2][0][RTW89_FCC][43] = 68,
[1][1][2][0][RTW89_ETSI][43] = 18,
[1][1][2][0][RTW89_MKK][43] = 127,
- [1][1][2][0][RTW89_IC][43] = 80,
- [1][1][2][0][RTW89_ACMA][43] = 76,
- [1][1][2][0][RTW89_FCC][47] = 70,
+ [1][1][2][0][RTW89_IC][43] = 66,
+ [1][1][2][0][RTW89_ACMA][43] = 66,
+ [1][1][2][0][RTW89_FCC][47] = 60,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
[1][1][2][0][RTW89_IC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
- [1][1][2][0][RTW89_FCC][51] = 68,
+ [1][1][2][0][RTW89_FCC][51] = 58,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
[1][1][2][0][RTW89_IC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
- [1][1][2][1][RTW89_FCC][1] = 64,
- [1][1][2][1][RTW89_ETSI][1] = 42,
- [1][1][2][1][RTW89_MKK][1] = 60,
+ [1][1][2][1][RTW89_FCC][1] = 54,
+ [1][1][2][1][RTW89_ETSI][1] = 40,
+ [1][1][2][1][RTW89_MKK][1] = 48,
[1][1][2][1][RTW89_IC][1] = 48,
[1][1][2][1][RTW89_ACMA][1] = 48,
- [1][1][2][1][RTW89_FCC][5] = 70,
- [1][1][2][1][RTW89_ETSI][5] = 42,
- [1][1][2][1][RTW89_MKK][5] = 60,
+ [1][1][2][1][RTW89_FCC][5] = 60,
+ [1][1][2][1][RTW89_ETSI][5] = 40,
+ [1][1][2][1][RTW89_MKK][5] = 52,
[1][1][2][1][RTW89_IC][5] = 48,
[1][1][2][1][RTW89_ACMA][5] = 48,
- [1][1][2][1][RTW89_FCC][9] = 70,
- [1][1][2][1][RTW89_ETSI][9] = 42,
- [1][1][2][1][RTW89_MKK][9] = 60,
+ [1][1][2][1][RTW89_FCC][9] = 60,
+ [1][1][2][1][RTW89_ETSI][9] = 40,
+ [1][1][2][1][RTW89_MKK][9] = 52,
[1][1][2][1][RTW89_IC][9] = 48,
[1][1][2][1][RTW89_ACMA][9] = 48,
- [1][1][2][1][RTW89_FCC][13] = 64,
- [1][1][2][1][RTW89_ETSI][13] = 42,
- [1][1][2][1][RTW89_MKK][13] = 60,
+ [1][1][2][1][RTW89_FCC][13] = 54,
+ [1][1][2][1][RTW89_ETSI][13] = 40,
+ [1][1][2][1][RTW89_MKK][13] = 52,
[1][1][2][1][RTW89_IC][13] = 48,
[1][1][2][1][RTW89_ACMA][13] = 48,
- [1][1][2][1][RTW89_FCC][16] = 58,
- [1][1][2][1][RTW89_ETSI][16] = 42,
- [1][1][2][1][RTW89_MKK][16] = 72,
+ [1][1][2][1][RTW89_FCC][16] = 48,
+ [1][1][2][1][RTW89_ETSI][16] = 40,
+ [1][1][2][1][RTW89_MKK][16] = 66,
[1][1][2][1][RTW89_IC][16] = 58,
[1][1][2][1][RTW89_ACMA][16] = 48,
- [1][1][2][1][RTW89_FCC][20] = 70,
- [1][1][2][1][RTW89_ETSI][20] = 42,
- [1][1][2][1][RTW89_MKK][20] = 72,
- [1][1][2][1][RTW89_IC][20] = 70,
+ [1][1][2][1][RTW89_FCC][20] = 60,
+ [1][1][2][1][RTW89_ETSI][20] = 40,
+ [1][1][2][1][RTW89_MKK][20] = 66,
+ [1][1][2][1][RTW89_IC][20] = 66,
[1][1][2][1][RTW89_ACMA][20] = 48,
- [1][1][2][1][RTW89_FCC][24] = 70,
- [1][1][2][1][RTW89_ETSI][24] = 42,
- [1][1][2][1][RTW89_MKK][24] = 72,
+ [1][1][2][1][RTW89_FCC][24] = 60,
+ [1][1][2][1][RTW89_ETSI][24] = 40,
+ [1][1][2][1][RTW89_MKK][24] = 66,
[1][1][2][1][RTW89_IC][24] = 127,
[1][1][2][1][RTW89_ACMA][24] = 127,
- [1][1][2][1][RTW89_FCC][28] = 70,
- [1][1][2][1][RTW89_ETSI][28] = 42,
- [1][1][2][1][RTW89_MKK][28] = 72,
+ [1][1][2][1][RTW89_FCC][28] = 60,
+ [1][1][2][1][RTW89_ETSI][28] = 40,
+ [1][1][2][1][RTW89_MKK][28] = 66,
[1][1][2][1][RTW89_IC][28] = 127,
[1][1][2][1][RTW89_ACMA][28] = 127,
- [1][1][2][1][RTW89_FCC][32] = 70,
- [1][1][2][1][RTW89_ETSI][32] = 42,
- [1][1][2][1][RTW89_MKK][32] = 72,
- [1][1][2][1][RTW89_IC][32] = 70,
+ [1][1][2][1][RTW89_FCC][32] = 60,
+ [1][1][2][1][RTW89_ETSI][32] = 40,
+ [1][1][2][1][RTW89_MKK][32] = 66,
+ [1][1][2][1][RTW89_IC][32] = 66,
[1][1][2][1][RTW89_ACMA][32] = 42,
- [1][1][2][1][RTW89_FCC][36] = 70,
+ [1][1][2][1][RTW89_FCC][36] = 60,
[1][1][2][1][RTW89_ETSI][36] = 127,
- [1][1][2][1][RTW89_MKK][36] = 72,
- [1][1][2][1][RTW89_IC][36] = 70,
- [1][1][2][1][RTW89_ACMA][36] = 72,
- [1][1][2][1][RTW89_FCC][39] = 80,
- [1][1][2][1][RTW89_ETSI][39] = 8,
+ [1][1][2][1][RTW89_MKK][36] = 66,
+ [1][1][2][1][RTW89_IC][36] = 66,
+ [1][1][2][1][RTW89_ACMA][36] = 66,
+ [1][1][2][1][RTW89_FCC][39] = 68,
+ [1][1][2][1][RTW89_ETSI][39] = 6,
[1][1][2][1][RTW89_MKK][39] = 127,
- [1][1][2][1][RTW89_IC][39] = 80,
- [1][1][2][1][RTW89_ACMA][39] = 74,
- [1][1][2][1][RTW89_FCC][43] = 80,
- [1][1][2][1][RTW89_ETSI][43] = 8,
+ [1][1][2][1][RTW89_IC][39] = 66,
+ [1][1][2][1][RTW89_ACMA][39] = 66,
+ [1][1][2][1][RTW89_FCC][43] = 68,
+ [1][1][2][1][RTW89_ETSI][43] = 6,
[1][1][2][1][RTW89_MKK][43] = 127,
- [1][1][2][1][RTW89_IC][43] = 80,
- [1][1][2][1][RTW89_ACMA][43] = 76,
- [1][1][2][1][RTW89_FCC][47] = 70,
+ [1][1][2][1][RTW89_IC][43] = 66,
+ [1][1][2][1][RTW89_ACMA][43] = 66,
+ [1][1][2][1][RTW89_FCC][47] = 60,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
[1][1][2][1][RTW89_IC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
- [1][1][2][1][RTW89_FCC][51] = 68,
+ [1][1][2][1][RTW89_FCC][51] = 58,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
[1][1][2][1][RTW89_IC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
- [2][0][2][0][RTW89_FCC][3] = 66,
- [2][0][2][0][RTW89_ETSI][3] = 66,
- [2][0][2][0][RTW89_MKK][3] = 66,
- [2][0][2][0][RTW89_IC][3] = 64,
- [2][0][2][0][RTW89_ACMA][3] = 66,
- [2][0][2][0][RTW89_FCC][11] = 68,
- [2][0][2][0][RTW89_ETSI][11] = 66,
- [2][0][2][0][RTW89_MKK][11] = 66,
- [2][0][2][0][RTW89_IC][11] = 66,
- [2][0][2][0][RTW89_ACMA][11] = 66,
- [2][0][2][0][RTW89_FCC][18] = 64,
- [2][0][2][0][RTW89_ETSI][18] = 66,
- [2][0][2][0][RTW89_MKK][18] = 72,
- [2][0][2][0][RTW89_IC][18] = 64,
- [2][0][2][0][RTW89_ACMA][18] = 66,
- [2][0][2][0][RTW89_FCC][26] = 76,
- [2][0][2][0][RTW89_ETSI][26] = 66,
- [2][0][2][0][RTW89_MKK][26] = 72,
+ [2][0][2][0][RTW89_FCC][3] = 56,
+ [2][0][2][0][RTW89_ETSI][3] = 60,
+ [2][0][2][0][RTW89_MKK][3] = 60,
+ [2][0][2][0][RTW89_IC][3] = 60,
+ [2][0][2][0][RTW89_ACMA][3] = 60,
+ [2][0][2][0][RTW89_FCC][11] = 58,
+ [2][0][2][0][RTW89_ETSI][11] = 60,
+ [2][0][2][0][RTW89_MKK][11] = 60,
+ [2][0][2][0][RTW89_IC][11] = 60,
+ [2][0][2][0][RTW89_ACMA][11] = 60,
+ [2][0][2][0][RTW89_FCC][18] = 54,
+ [2][0][2][0][RTW89_ETSI][18] = 60,
+ [2][0][2][0][RTW89_MKK][18] = 60,
+ [2][0][2][0][RTW89_IC][18] = 60,
+ [2][0][2][0][RTW89_ACMA][18] = 60,
+ [2][0][2][0][RTW89_FCC][26] = 62,
+ [2][0][2][0][RTW89_ETSI][26] = 60,
+ [2][0][2][0][RTW89_MKK][26] = 60,
[2][0][2][0][RTW89_IC][26] = 127,
[2][0][2][0][RTW89_ACMA][26] = 127,
- [2][0][2][0][RTW89_FCC][34] = 76,
+ [2][0][2][0][RTW89_FCC][34] = 62,
[2][0][2][0][RTW89_ETSI][34] = 127,
- [2][0][2][0][RTW89_MKK][34] = 72,
- [2][0][2][0][RTW89_IC][34] = 76,
- [2][0][2][0][RTW89_ACMA][34] = 72,
- [2][0][2][0][RTW89_FCC][41] = 76,
+ [2][0][2][0][RTW89_MKK][34] = 60,
+ [2][0][2][0][RTW89_IC][34] = 60,
+ [2][0][2][0][RTW89_ACMA][34] = 60,
+ [2][0][2][0][RTW89_FCC][41] = 62,
[2][0][2][0][RTW89_ETSI][41] = 30,
[2][0][2][0][RTW89_MKK][41] = 127,
- [2][0][2][0][RTW89_IC][41] = 76,
- [2][0][2][0][RTW89_ACMA][41] = 72,
- [2][0][2][0][RTW89_FCC][49] = 66,
+ [2][0][2][0][RTW89_IC][41] = 60,
+ [2][0][2][0][RTW89_ACMA][41] = 60,
+ [2][0][2][0][RTW89_FCC][49] = 56,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
[2][0][2][0][RTW89_IC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
- [2][1][2][0][RTW89_FCC][3] = 58,
+ [2][1][2][0][RTW89_FCC][3] = 48,
[2][1][2][0][RTW89_ETSI][3] = 54,
- [2][1][2][0][RTW89_MKK][3] = 54,
- [2][1][2][0][RTW89_IC][3] = 54,
- [2][1][2][0][RTW89_ACMA][3] = 54,
- [2][1][2][0][RTW89_FCC][11] = 64,
+ [2][1][2][0][RTW89_MKK][3] = 56,
+ [2][1][2][0][RTW89_IC][3] = 52,
+ [2][1][2][0][RTW89_ACMA][3] = 52,
+ [2][1][2][0][RTW89_FCC][11] = 54,
[2][1][2][0][RTW89_ETSI][11] = 54,
[2][1][2][0][RTW89_MKK][11] = 54,
- [2][1][2][0][RTW89_IC][11] = 54,
- [2][1][2][0][RTW89_ACMA][11] = 54,
- [2][1][2][0][RTW89_FCC][18] = 58,
+ [2][1][2][0][RTW89_IC][11] = 52,
+ [2][1][2][0][RTW89_ACMA][11] = 52,
+ [2][1][2][0][RTW89_FCC][18] = 48,
[2][1][2][0][RTW89_ETSI][18] = 54,
- [2][1][2][0][RTW89_MKK][18] = 72,
+ [2][1][2][0][RTW89_MKK][18] = 60,
[2][1][2][0][RTW89_IC][18] = 58,
- [2][1][2][0][RTW89_ACMA][18] = 54,
- [2][1][2][0][RTW89_FCC][26] = 72,
+ [2][1][2][0][RTW89_ACMA][18] = 52,
+ [2][1][2][0][RTW89_FCC][26] = 62,
[2][1][2][0][RTW89_ETSI][26] = 54,
- [2][1][2][0][RTW89_MKK][26] = 72,
+ [2][1][2][0][RTW89_MKK][26] = 56,
[2][1][2][0][RTW89_IC][26] = 127,
[2][1][2][0][RTW89_ACMA][26] = 127,
- [2][1][2][0][RTW89_FCC][34] = 76,
+ [2][1][2][0][RTW89_FCC][34] = 62,
[2][1][2][0][RTW89_ETSI][34] = 127,
- [2][1][2][0][RTW89_MKK][34] = 72,
- [2][1][2][0][RTW89_IC][34] = 76,
- [2][1][2][0][RTW89_ACMA][34] = 72,
- [2][1][2][0][RTW89_FCC][41] = 76,
+ [2][1][2][0][RTW89_MKK][34] = 60,
+ [2][1][2][0][RTW89_IC][34] = 60,
+ [2][1][2][0][RTW89_ACMA][34] = 60,
+ [2][1][2][0][RTW89_FCC][41] = 62,
[2][1][2][0][RTW89_ETSI][41] = 18,
[2][1][2][0][RTW89_MKK][41] = 127,
- [2][1][2][0][RTW89_IC][41] = 76,
- [2][1][2][0][RTW89_ACMA][41] = 72,
- [2][1][2][0][RTW89_FCC][49] = 60,
+ [2][1][2][0][RTW89_IC][41] = 60,
+ [2][1][2][0][RTW89_ACMA][41] = 60,
+ [2][1][2][0][RTW89_FCC][49] = 50,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
[2][1][2][0][RTW89_IC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
- [2][1][2][1][RTW89_FCC][3] = 58,
- [2][1][2][1][RTW89_ETSI][3] = 42,
- [2][1][2][1][RTW89_MKK][3] = 54,
- [2][1][2][1][RTW89_IC][3] = 42,
- [2][1][2][1][RTW89_ACMA][3] = 42,
- [2][1][2][1][RTW89_FCC][11] = 64,
- [2][1][2][1][RTW89_ETSI][11] = 42,
+ [2][1][2][1][RTW89_FCC][3] = 48,
+ [2][1][2][1][RTW89_ETSI][3] = 40,
+ [2][1][2][1][RTW89_MKK][3] = 56,
+ [2][1][2][1][RTW89_IC][3] = 40,
+ [2][1][2][1][RTW89_ACMA][3] = 40,
+ [2][1][2][1][RTW89_FCC][11] = 54,
+ [2][1][2][1][RTW89_ETSI][11] = 40,
[2][1][2][1][RTW89_MKK][11] = 54,
- [2][1][2][1][RTW89_IC][11] = 42,
- [2][1][2][1][RTW89_ACMA][11] = 42,
- [2][1][2][1][RTW89_FCC][18] = 58,
- [2][1][2][1][RTW89_ETSI][18] = 42,
- [2][1][2][1][RTW89_MKK][18] = 72,
+ [2][1][2][1][RTW89_IC][11] = 40,
+ [2][1][2][1][RTW89_ACMA][11] = 40,
+ [2][1][2][1][RTW89_FCC][18] = 48,
+ [2][1][2][1][RTW89_ETSI][18] = 40,
+ [2][1][2][1][RTW89_MKK][18] = 60,
[2][1][2][1][RTW89_IC][18] = 58,
- [2][1][2][1][RTW89_ACMA][18] = 42,
- [2][1][2][1][RTW89_FCC][26] = 70,
- [2][1][2][1][RTW89_ETSI][26] = 44,
- [2][1][2][1][RTW89_MKK][26] = 72,
+ [2][1][2][1][RTW89_ACMA][18] = 40,
+ [2][1][2][1][RTW89_FCC][26] = 60,
+ [2][1][2][1][RTW89_ETSI][26] = 42,
+ [2][1][2][1][RTW89_MKK][26] = 56,
[2][1][2][1][RTW89_IC][26] = 127,
[2][1][2][1][RTW89_ACMA][26] = 127,
- [2][1][2][1][RTW89_FCC][34] = 70,
+ [2][1][2][1][RTW89_FCC][34] = 60,
[2][1][2][1][RTW89_ETSI][34] = 127,
- [2][1][2][1][RTW89_MKK][34] = 72,
- [2][1][2][1][RTW89_IC][34] = 70,
- [2][1][2][1][RTW89_ACMA][34] = 72,
- [2][1][2][1][RTW89_FCC][41] = 76,
- [2][1][2][1][RTW89_ETSI][41] = 8,
+ [2][1][2][1][RTW89_MKK][34] = 60,
+ [2][1][2][1][RTW89_IC][34] = 60,
+ [2][1][2][1][RTW89_ACMA][34] = 60,
+ [2][1][2][1][RTW89_FCC][41] = 62,
+ [2][1][2][1][RTW89_ETSI][41] = 6,
[2][1][2][1][RTW89_MKK][41] = 127,
- [2][1][2][1][RTW89_IC][41] = 76,
- [2][1][2][1][RTW89_ACMA][41] = 72,
- [2][1][2][1][RTW89_FCC][49] = 60,
+ [2][1][2][1][RTW89_IC][41] = 60,
+ [2][1][2][1][RTW89_ACMA][41] = 60,
+ [2][1][2][1][RTW89_FCC][49] = 50,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
[2][1][2][1][RTW89_IC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
- [3][0][2][0][RTW89_FCC][7] = 56,
- [3][0][2][0][RTW89_ETSI][7] = 56,
- [3][0][2][0][RTW89_MKK][7] = 56,
- [3][0][2][0][RTW89_IC][7] = 56,
- [3][0][2][0][RTW89_ACMA][7] = 56,
- [3][0][2][0][RTW89_FCC][22] = 56,
- [3][0][2][0][RTW89_ETSI][22] = 56,
- [3][0][2][0][RTW89_MKK][22] = 56,
- [3][0][2][0][RTW89_IC][22] = 56,
- [3][0][2][0][RTW89_ACMA][22] = 56,
- [3][0][2][0][RTW89_FCC][45] = 56,
+ [3][0][2][0][RTW89_FCC][7] = 38,
+ [3][0][2][0][RTW89_ETSI][7] = 50,
+ [3][0][2][0][RTW89_MKK][7] = 50,
+ [3][0][2][0][RTW89_IC][7] = 50,
+ [3][0][2][0][RTW89_ACMA][7] = 50,
+ [3][0][2][0][RTW89_FCC][22] = 52,
+ [3][0][2][0][RTW89_ETSI][22] = 50,
+ [3][0][2][0][RTW89_MKK][22] = 50,
+ [3][0][2][0][RTW89_IC][22] = 50,
+ [3][0][2][0][RTW89_ACMA][22] = 50,
+ [3][0][2][0][RTW89_FCC][45] = 127,
[3][0][2][0][RTW89_ETSI][45] = 127,
[3][0][2][0][RTW89_MKK][45] = 127,
[3][0][2][0][RTW89_IC][45] = 127,
[3][0][2][0][RTW89_ACMA][45] = 127,
- [3][1][2][0][RTW89_FCC][7] = 44,
- [3][1][2][0][RTW89_ETSI][7] = 44,
- [3][1][2][0][RTW89_MKK][7] = 44,
+ [3][1][2][0][RTW89_FCC][7] = 26,
+ [3][1][2][0][RTW89_ETSI][7] = 50,
+ [3][1][2][0][RTW89_MKK][7] = 36,
[3][1][2][0][RTW89_IC][7] = 44,
[3][1][2][0][RTW89_ACMA][7] = 44,
- [3][1][2][0][RTW89_FCC][22] = 44,
- [3][1][2][0][RTW89_ETSI][22] = 44,
- [3][1][2][0][RTW89_MKK][22] = 44,
+ [3][1][2][0][RTW89_FCC][22] = 42,
+ [3][1][2][0][RTW89_ETSI][22] = 50,
+ [3][1][2][0][RTW89_MKK][22] = 48,
[3][1][2][0][RTW89_IC][22] = 44,
[3][1][2][0][RTW89_ACMA][22] = 44,
- [3][1][2][0][RTW89_FCC][45] = 44,
+ [3][1][2][0][RTW89_FCC][45] = 127,
[3][1][2][0][RTW89_ETSI][45] = 127,
[3][1][2][0][RTW89_MKK][45] = 127,
[3][1][2][0][RTW89_IC][45] = 127,
[3][1][2][0][RTW89_ACMA][45] = 127,
- [3][1][2][1][RTW89_FCC][7] = 32,
- [3][1][2][1][RTW89_ETSI][7] = 32,
- [3][1][2][1][RTW89_MKK][7] = 32,
+ [3][1][2][1][RTW89_FCC][7] = 14,
+ [3][1][2][1][RTW89_ETSI][7] = 42,
+ [3][1][2][1][RTW89_MKK][7] = 36,
[3][1][2][1][RTW89_IC][7] = 32,
[3][1][2][1][RTW89_ACMA][7] = 32,
- [3][1][2][1][RTW89_FCC][22] = 32,
- [3][1][2][1][RTW89_ETSI][22] = 32,
- [3][1][2][1][RTW89_MKK][22] = 32,
+ [3][1][2][1][RTW89_FCC][22] = 30,
+ [3][1][2][1][RTW89_ETSI][22] = 42,
+ [3][1][2][1][RTW89_MKK][22] = 48,
[3][1][2][1][RTW89_IC][22] = 32,
[3][1][2][1][RTW89_ACMA][22] = 32,
- [3][1][2][1][RTW89_FCC][45] = 32,
+ [3][1][2][1][RTW89_FCC][45] = 127,
[3][1][2][1][RTW89_ETSI][45] = 127,
[3][1][2][1][RTW89_MKK][45] = 127,
[3][1][2][1][RTW89_IC][45] = 127,
@@ -17127,7 +17127,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_WW][9] = 32,
[0][0][RTW89_WW][10] = 32,
[0][0][RTW89_WW][11] = 32,
- [0][0][RTW89_WW][12] = 32,
+ [0][0][RTW89_WW][12] = 24,
[0][0][RTW89_WW][13] = 0,
[0][1][RTW89_WW][0] = 20,
[0][1][RTW89_WW][1] = 22,
@@ -17154,8 +17154,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_WW][8] = 44,
[1][0][RTW89_WW][9] = 44,
[1][0][RTW89_WW][10] = 44,
- [1][0][RTW89_WW][11] = 44,
- [1][0][RTW89_WW][12] = 38,
+ [1][0][RTW89_WW][11] = 42,
+ [1][0][RTW89_WW][12] = 30,
[1][0][RTW89_WW][13] = 0,
[1][1][RTW89_WW][0] = 32,
[1][1][RTW89_WW][1] = 32,
@@ -17168,8 +17168,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_WW][8] = 32,
[1][1][RTW89_WW][9] = 32,
[1][1][RTW89_WW][10] = 32,
- [1][1][RTW89_WW][11] = 32,
- [1][1][RTW89_WW][12] = 32,
+ [1][1][RTW89_WW][11] = 30,
+ [1][1][RTW89_WW][12] = 24,
[1][1][RTW89_WW][13] = 0,
[2][0][RTW89_WW][0] = 56,
[2][0][RTW89_WW][1] = 56,
@@ -17182,8 +17182,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_WW][8] = 56,
[2][0][RTW89_WW][9] = 56,
[2][0][RTW89_WW][10] = 56,
- [2][0][RTW89_WW][11] = 56,
- [2][0][RTW89_WW][12] = 56,
+ [2][0][RTW89_WW][11] = 42,
+ [2][0][RTW89_WW][12] = 38,
[2][0][RTW89_WW][13] = 0,
[2][1][RTW89_WW][0] = 44,
[2][1][RTW89_WW][1] = 44,
@@ -17196,72 +17196,72 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_WW][8] = 44,
[2][1][RTW89_WW][9] = 44,
[2][1][RTW89_WW][10] = 44,
- [2][1][RTW89_WW][11] = 44,
- [2][1][RTW89_WW][12] = 42,
+ [2][1][RTW89_WW][11] = 30,
+ [2][1][RTW89_WW][12] = 26,
[2][1][RTW89_WW][13] = 0,
- [0][0][RTW89_FCC][0] = 68,
- [0][0][RTW89_ETSI][0] = 36,
- [0][0][RTW89_MKK][0] = 38,
+ [0][0][RTW89_FCC][0] = 60,
+ [0][0][RTW89_ETSI][0] = 34,
+ [0][0][RTW89_MKK][0] = 36,
[0][0][RTW89_IC][0] = 68,
[0][0][RTW89_ACMA][0] = 32,
- [0][0][RTW89_FCC][1] = 68,
- [0][0][RTW89_ETSI][1] = 40,
- [0][0][RTW89_MKK][1] = 44,
+ [0][0][RTW89_FCC][1] = 60,
+ [0][0][RTW89_ETSI][1] = 38,
+ [0][0][RTW89_MKK][1] = 40,
[0][0][RTW89_IC][1] = 68,
[0][0][RTW89_ACMA][1] = 32,
- [0][0][RTW89_FCC][2] = 72,
- [0][0][RTW89_ETSI][2] = 40,
- [0][0][RTW89_MKK][2] = 44,
+ [0][0][RTW89_FCC][2] = 64,
+ [0][0][RTW89_ETSI][2] = 38,
+ [0][0][RTW89_MKK][2] = 40,
[0][0][RTW89_IC][2] = 72,
[0][0][RTW89_ACMA][2] = 32,
- [0][0][RTW89_FCC][3] = 76,
- [0][0][RTW89_ETSI][3] = 40,
- [0][0][RTW89_MKK][3] = 44,
+ [0][0][RTW89_FCC][3] = 68,
+ [0][0][RTW89_ETSI][3] = 38,
+ [0][0][RTW89_MKK][3] = 40,
[0][0][RTW89_IC][3] = 76,
[0][0][RTW89_ACMA][3] = 32,
- [0][0][RTW89_FCC][4] = 76,
- [0][0][RTW89_ETSI][4] = 40,
- [0][0][RTW89_MKK][4] = 44,
+ [0][0][RTW89_FCC][4] = 68,
+ [0][0][RTW89_ETSI][4] = 38,
+ [0][0][RTW89_MKK][4] = 40,
[0][0][RTW89_IC][4] = 76,
[0][0][RTW89_ACMA][4] = 32,
- [0][0][RTW89_FCC][5] = 84,
- [0][0][RTW89_ETSI][5] = 40,
- [0][0][RTW89_MKK][5] = 44,
+ [0][0][RTW89_FCC][5] = 76,
+ [0][0][RTW89_ETSI][5] = 38,
+ [0][0][RTW89_MKK][5] = 40,
[0][0][RTW89_IC][5] = 84,
[0][0][RTW89_ACMA][5] = 32,
- [0][0][RTW89_FCC][6] = 74,
- [0][0][RTW89_ETSI][6] = 40,
- [0][0][RTW89_MKK][6] = 44,
+ [0][0][RTW89_FCC][6] = 66,
+ [0][0][RTW89_ETSI][6] = 38,
+ [0][0][RTW89_MKK][6] = 40,
[0][0][RTW89_IC][6] = 74,
[0][0][RTW89_ACMA][6] = 32,
- [0][0][RTW89_FCC][7] = 74,
- [0][0][RTW89_ETSI][7] = 40,
- [0][0][RTW89_MKK][7] = 44,
+ [0][0][RTW89_FCC][7] = 66,
+ [0][0][RTW89_ETSI][7] = 38,
+ [0][0][RTW89_MKK][7] = 40,
[0][0][RTW89_IC][7] = 74,
[0][0][RTW89_ACMA][7] = 32,
- [0][0][RTW89_FCC][8] = 70,
- [0][0][RTW89_ETSI][8] = 40,
- [0][0][RTW89_MKK][8] = 44,
+ [0][0][RTW89_FCC][8] = 62,
+ [0][0][RTW89_ETSI][8] = 38,
+ [0][0][RTW89_MKK][8] = 40,
[0][0][RTW89_IC][8] = 70,
[0][0][RTW89_ACMA][8] = 32,
- [0][0][RTW89_FCC][9] = 66,
- [0][0][RTW89_ETSI][9] = 40,
- [0][0][RTW89_MKK][9] = 44,
+ [0][0][RTW89_FCC][9] = 58,
+ [0][0][RTW89_ETSI][9] = 38,
+ [0][0][RTW89_MKK][9] = 40,
[0][0][RTW89_IC][9] = 66,
[0][0][RTW89_ACMA][9] = 32,
- [0][0][RTW89_FCC][10] = 66,
- [0][0][RTW89_ETSI][10] = 40,
- [0][0][RTW89_MKK][10] = 44,
+ [0][0][RTW89_FCC][10] = 58,
+ [0][0][RTW89_ETSI][10] = 38,
+ [0][0][RTW89_MKK][10] = 40,
[0][0][RTW89_IC][10] = 66,
[0][0][RTW89_ACMA][10] = 32,
- [0][0][RTW89_FCC][11] = 56,
- [0][0][RTW89_ETSI][11] = 40,
- [0][0][RTW89_MKK][11] = 44,
+ [0][0][RTW89_FCC][11] = 42,
+ [0][0][RTW89_ETSI][11] = 38,
+ [0][0][RTW89_MKK][11] = 40,
[0][0][RTW89_IC][11] = 56,
[0][0][RTW89_ACMA][11] = 32,
- [0][0][RTW89_FCC][12] = 32,
- [0][0][RTW89_ETSI][12] = 36,
- [0][0][RTW89_MKK][12] = 38,
+ [0][0][RTW89_FCC][12] = 24,
+ [0][0][RTW89_ETSI][12] = 34,
+ [0][0][RTW89_MKK][12] = 36,
[0][0][RTW89_IC][12] = 32,
[0][0][RTW89_ACMA][12] = 32,
[0][0][RTW89_FCC][13] = 127,
@@ -17269,69 +17269,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_MKK][13] = 127,
[0][0][RTW89_IC][13] = 127,
[0][0][RTW89_ACMA][13] = 127,
- [0][1][RTW89_FCC][0] = 62,
- [0][1][RTW89_ETSI][0] = 24,
- [0][1][RTW89_MKK][0] = 26,
+ [0][1][RTW89_FCC][0] = 46,
+ [0][1][RTW89_ETSI][0] = 22,
+ [0][1][RTW89_MKK][0] = 24,
[0][1][RTW89_IC][0] = 62,
[0][1][RTW89_ACMA][0] = 20,
- [0][1][RTW89_FCC][1] = 62,
- [0][1][RTW89_ETSI][1] = 26,
- [0][1][RTW89_MKK][1] = 32,
+ [0][1][RTW89_FCC][1] = 46,
+ [0][1][RTW89_ETSI][1] = 24,
+ [0][1][RTW89_MKK][1] = 30,
[0][1][RTW89_IC][1] = 62,
[0][1][RTW89_ACMA][1] = 22,
- [0][1][RTW89_FCC][2] = 66,
- [0][1][RTW89_ETSI][2] = 26,
- [0][1][RTW89_MKK][2] = 32,
+ [0][1][RTW89_FCC][2] = 50,
+ [0][1][RTW89_ETSI][2] = 24,
+ [0][1][RTW89_MKK][2] = 30,
[0][1][RTW89_IC][2] = 66,
[0][1][RTW89_ACMA][2] = 22,
- [0][1][RTW89_FCC][3] = 70,
- [0][1][RTW89_ETSI][3] = 26,
- [0][1][RTW89_MKK][3] = 32,
+ [0][1][RTW89_FCC][3] = 54,
+ [0][1][RTW89_ETSI][3] = 24,
+ [0][1][RTW89_MKK][3] = 30,
[0][1][RTW89_IC][3] = 70,
[0][1][RTW89_ACMA][3] = 22,
- [0][1][RTW89_FCC][4] = 74,
- [0][1][RTW89_ETSI][4] = 26,
- [0][1][RTW89_MKK][4] = 32,
+ [0][1][RTW89_FCC][4] = 58,
+ [0][1][RTW89_ETSI][4] = 24,
+ [0][1][RTW89_MKK][4] = 30,
[0][1][RTW89_IC][4] = 74,
[0][1][RTW89_ACMA][4] = 22,
- [0][1][RTW89_FCC][5] = 74,
- [0][1][RTW89_ETSI][5] = 26,
- [0][1][RTW89_MKK][5] = 32,
+ [0][1][RTW89_FCC][5] = 66,
+ [0][1][RTW89_ETSI][5] = 24,
+ [0][1][RTW89_MKK][5] = 30,
[0][1][RTW89_IC][5] = 74,
[0][1][RTW89_ACMA][5] = 22,
- [0][1][RTW89_FCC][6] = 72,
- [0][1][RTW89_ETSI][6] = 26,
- [0][1][RTW89_MKK][6] = 32,
+ [0][1][RTW89_FCC][6] = 58,
+ [0][1][RTW89_ETSI][6] = 24,
+ [0][1][RTW89_MKK][6] = 30,
[0][1][RTW89_IC][6] = 72,
[0][1][RTW89_ACMA][6] = 22,
- [0][1][RTW89_FCC][7] = 68,
- [0][1][RTW89_ETSI][7] = 26,
- [0][1][RTW89_MKK][7] = 32,
+ [0][1][RTW89_FCC][7] = 54,
+ [0][1][RTW89_ETSI][7] = 24,
+ [0][1][RTW89_MKK][7] = 30,
[0][1][RTW89_IC][7] = 68,
[0][1][RTW89_ACMA][7] = 22,
- [0][1][RTW89_FCC][8] = 64,
- [0][1][RTW89_ETSI][8] = 26,
- [0][1][RTW89_MKK][8] = 32,
+ [0][1][RTW89_FCC][8] = 50,
+ [0][1][RTW89_ETSI][8] = 24,
+ [0][1][RTW89_MKK][8] = 30,
[0][1][RTW89_IC][8] = 64,
[0][1][RTW89_ACMA][8] = 22,
- [0][1][RTW89_FCC][9] = 60,
- [0][1][RTW89_ETSI][9] = 26,
- [0][1][RTW89_MKK][9] = 32,
+ [0][1][RTW89_FCC][9] = 46,
+ [0][1][RTW89_ETSI][9] = 24,
+ [0][1][RTW89_MKK][9] = 30,
[0][1][RTW89_IC][9] = 60,
[0][1][RTW89_ACMA][9] = 22,
- [0][1][RTW89_FCC][10] = 60,
- [0][1][RTW89_ETSI][10] = 26,
- [0][1][RTW89_MKK][10] = 32,
+ [0][1][RTW89_FCC][10] = 46,
+ [0][1][RTW89_ETSI][10] = 24,
+ [0][1][RTW89_MKK][10] = 30,
[0][1][RTW89_IC][10] = 60,
[0][1][RTW89_ACMA][10] = 22,
- [0][1][RTW89_FCC][11] = 52,
- [0][1][RTW89_ETSI][11] = 26,
- [0][1][RTW89_MKK][11] = 32,
+ [0][1][RTW89_FCC][11] = 30,
+ [0][1][RTW89_ETSI][11] = 24,
+ [0][1][RTW89_MKK][11] = 30,
[0][1][RTW89_IC][11] = 52,
[0][1][RTW89_ACMA][11] = 22,
- [0][1][RTW89_FCC][12] = 30,
- [0][1][RTW89_ETSI][12] = 22,
- [0][1][RTW89_MKK][12] = 26,
+ [0][1][RTW89_FCC][12] = 22,
+ [0][1][RTW89_ETSI][12] = 20,
+ [0][1][RTW89_MKK][12] = 24,
[0][1][RTW89_IC][12] = 30,
[0][1][RTW89_ACMA][12] = 20,
[0][1][RTW89_FCC][13] = 127,
@@ -17339,69 +17339,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_MKK][13] = 127,
[0][1][RTW89_IC][13] = 127,
[0][1][RTW89_ACMA][13] = 127,
- [1][0][RTW89_FCC][0] = 78,
- [1][0][RTW89_ETSI][0] = 48,
+ [1][0][RTW89_FCC][0] = 64,
+ [1][0][RTW89_ETSI][0] = 46,
[1][0][RTW89_MKK][0] = 48,
[1][0][RTW89_IC][0] = 78,
[1][0][RTW89_ACMA][0] = 42,
- [1][0][RTW89_FCC][1] = 78,
- [1][0][RTW89_ETSI][1] = 48,
+ [1][0][RTW89_FCC][1] = 64,
+ [1][0][RTW89_ETSI][1] = 46,
[1][0][RTW89_MKK][1] = 48,
[1][0][RTW89_IC][1] = 78,
[1][0][RTW89_ACMA][1] = 44,
- [1][0][RTW89_FCC][2] = 82,
- [1][0][RTW89_ETSI][2] = 48,
+ [1][0][RTW89_FCC][2] = 68,
+ [1][0][RTW89_ETSI][2] = 46,
[1][0][RTW89_MKK][2] = 48,
[1][0][RTW89_IC][2] = 82,
[1][0][RTW89_ACMA][2] = 44,
- [1][0][RTW89_FCC][3] = 84,
- [1][0][RTW89_ETSI][3] = 48,
+ [1][0][RTW89_FCC][3] = 70,
+ [1][0][RTW89_ETSI][3] = 46,
[1][0][RTW89_MKK][3] = 48,
[1][0][RTW89_IC][3] = 84,
[1][0][RTW89_ACMA][3] = 44,
- [1][0][RTW89_FCC][4] = 84,
- [1][0][RTW89_ETSI][4] = 48,
+ [1][0][RTW89_FCC][4] = 70,
+ [1][0][RTW89_ETSI][4] = 46,
[1][0][RTW89_MKK][4] = 48,
[1][0][RTW89_IC][4] = 84,
[1][0][RTW89_ACMA][4] = 44,
- [1][0][RTW89_FCC][5] = 84,
- [1][0][RTW89_ETSI][5] = 48,
+ [1][0][RTW89_FCC][5] = 76,
+ [1][0][RTW89_ETSI][5] = 46,
[1][0][RTW89_MKK][5] = 48,
[1][0][RTW89_IC][5] = 84,
[1][0][RTW89_ACMA][5] = 44,
- [1][0][RTW89_FCC][6] = 78,
- [1][0][RTW89_ETSI][6] = 46,
+ [1][0][RTW89_FCC][6] = 64,
+ [1][0][RTW89_ETSI][6] = 44,
[1][0][RTW89_MKK][6] = 48,
[1][0][RTW89_IC][6] = 78,
[1][0][RTW89_ACMA][6] = 44,
- [1][0][RTW89_FCC][7] = 78,
- [1][0][RTW89_ETSI][7] = 48,
+ [1][0][RTW89_FCC][7] = 64,
+ [1][0][RTW89_ETSI][7] = 46,
[1][0][RTW89_MKK][7] = 48,
[1][0][RTW89_IC][7] = 78,
[1][0][RTW89_ACMA][7] = 44,
- [1][0][RTW89_FCC][8] = 78,
- [1][0][RTW89_ETSI][8] = 48,
+ [1][0][RTW89_FCC][8] = 64,
+ [1][0][RTW89_ETSI][8] = 46,
[1][0][RTW89_MKK][8] = 48,
[1][0][RTW89_IC][8] = 78,
[1][0][RTW89_ACMA][8] = 44,
- [1][0][RTW89_FCC][9] = 74,
- [1][0][RTW89_ETSI][9] = 48,
+ [1][0][RTW89_FCC][9] = 60,
+ [1][0][RTW89_ETSI][9] = 46,
[1][0][RTW89_MKK][9] = 48,
[1][0][RTW89_IC][9] = 74,
[1][0][RTW89_ACMA][9] = 44,
- [1][0][RTW89_FCC][10] = 74,
- [1][0][RTW89_ETSI][10] = 48,
+ [1][0][RTW89_FCC][10] = 60,
+ [1][0][RTW89_ETSI][10] = 46,
[1][0][RTW89_MKK][10] = 48,
[1][0][RTW89_IC][10] = 74,
[1][0][RTW89_ACMA][10] = 44,
- [1][0][RTW89_FCC][11] = 72,
- [1][0][RTW89_ETSI][11] = 48,
+ [1][0][RTW89_FCC][11] = 42,
+ [1][0][RTW89_ETSI][11] = 46,
[1][0][RTW89_MKK][11] = 48,
[1][0][RTW89_IC][11] = 72,
[1][0][RTW89_ACMA][11] = 44,
- [1][0][RTW89_FCC][12] = 38,
- [1][0][RTW89_ETSI][12] = 48,
- [1][0][RTW89_MKK][12] = 48,
+ [1][0][RTW89_FCC][12] = 30,
+ [1][0][RTW89_ETSI][12] = 46,
+ [1][0][RTW89_MKK][12] = 46,
[1][0][RTW89_IC][12] = 38,
[1][0][RTW89_ACMA][12] = 42,
[1][0][RTW89_FCC][13] = 127,
@@ -17409,69 +17409,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_MKK][13] = 127,
[1][0][RTW89_IC][13] = 127,
[1][0][RTW89_ACMA][13] = 127,
- [1][1][RTW89_FCC][0] = 66,
- [1][1][RTW89_ETSI][0] = 34,
- [1][1][RTW89_MKK][0] = 36,
+ [1][1][RTW89_FCC][0] = 46,
+ [1][1][RTW89_ETSI][0] = 32,
+ [1][1][RTW89_MKK][0] = 34,
[1][1][RTW89_IC][0] = 66,
[1][1][RTW89_ACMA][0] = 32,
- [1][1][RTW89_FCC][1] = 66,
- [1][1][RTW89_ETSI][1] = 36,
- [1][1][RTW89_MKK][1] = 36,
+ [1][1][RTW89_FCC][1] = 46,
+ [1][1][RTW89_ETSI][1] = 34,
+ [1][1][RTW89_MKK][1] = 34,
[1][1][RTW89_IC][1] = 66,
[1][1][RTW89_ACMA][1] = 32,
- [1][1][RTW89_FCC][2] = 70,
- [1][1][RTW89_ETSI][2] = 36,
- [1][1][RTW89_MKK][2] = 36,
+ [1][1][RTW89_FCC][2] = 50,
+ [1][1][RTW89_ETSI][2] = 34,
+ [1][1][RTW89_MKK][2] = 34,
[1][1][RTW89_IC][2] = 70,
[1][1][RTW89_ACMA][2] = 32,
- [1][1][RTW89_FCC][3] = 74,
- [1][1][RTW89_ETSI][3] = 36,
- [1][1][RTW89_MKK][3] = 36,
+ [1][1][RTW89_FCC][3] = 54,
+ [1][1][RTW89_ETSI][3] = 34,
+ [1][1][RTW89_MKK][3] = 34,
[1][1][RTW89_IC][3] = 74,
[1][1][RTW89_ACMA][3] = 32,
- [1][1][RTW89_FCC][4] = 74,
- [1][1][RTW89_ETSI][4] = 36,
- [1][1][RTW89_MKK][4] = 36,
+ [1][1][RTW89_FCC][4] = 58,
+ [1][1][RTW89_ETSI][4] = 34,
+ [1][1][RTW89_MKK][4] = 34,
[1][1][RTW89_IC][4] = 74,
[1][1][RTW89_ACMA][4] = 32,
- [1][1][RTW89_FCC][5] = 74,
- [1][1][RTW89_ETSI][5] = 36,
- [1][1][RTW89_MKK][5] = 36,
+ [1][1][RTW89_FCC][5] = 66,
+ [1][1][RTW89_ETSI][5] = 34,
+ [1][1][RTW89_MKK][5] = 34,
[1][1][RTW89_IC][5] = 74,
[1][1][RTW89_ACMA][5] = 32,
- [1][1][RTW89_FCC][6] = 74,
- [1][1][RTW89_ETSI][6] = 36,
- [1][1][RTW89_MKK][6] = 36,
+ [1][1][RTW89_FCC][6] = 58,
+ [1][1][RTW89_ETSI][6] = 34,
+ [1][1][RTW89_MKK][6] = 34,
[1][1][RTW89_IC][6] = 74,
[1][1][RTW89_ACMA][6] = 32,
- [1][1][RTW89_FCC][7] = 74,
- [1][1][RTW89_ETSI][7] = 36,
- [1][1][RTW89_MKK][7] = 36,
+ [1][1][RTW89_FCC][7] = 54,
+ [1][1][RTW89_ETSI][7] = 34,
+ [1][1][RTW89_MKK][7] = 34,
[1][1][RTW89_IC][7] = 74,
[1][1][RTW89_ACMA][7] = 32,
- [1][1][RTW89_FCC][8] = 70,
- [1][1][RTW89_ETSI][8] = 36,
- [1][1][RTW89_MKK][8] = 36,
+ [1][1][RTW89_FCC][8] = 50,
+ [1][1][RTW89_ETSI][8] = 34,
+ [1][1][RTW89_MKK][8] = 34,
[1][1][RTW89_IC][8] = 70,
[1][1][RTW89_ACMA][8] = 32,
- [1][1][RTW89_FCC][9] = 66,
- [1][1][RTW89_ETSI][9] = 36,
- [1][1][RTW89_MKK][9] = 36,
+ [1][1][RTW89_FCC][9] = 46,
+ [1][1][RTW89_ETSI][9] = 34,
+ [1][1][RTW89_MKK][9] = 34,
[1][1][RTW89_IC][9] = 66,
[1][1][RTW89_ACMA][9] = 32,
- [1][1][RTW89_FCC][10] = 66,
- [1][1][RTW89_ETSI][10] = 36,
- [1][1][RTW89_MKK][10] = 36,
+ [1][1][RTW89_FCC][10] = 46,
+ [1][1][RTW89_ETSI][10] = 34,
+ [1][1][RTW89_MKK][10] = 34,
[1][1][RTW89_IC][10] = 66,
[1][1][RTW89_ACMA][10] = 32,
- [1][1][RTW89_FCC][11] = 48,
- [1][1][RTW89_ETSI][11] = 36,
- [1][1][RTW89_MKK][11] = 36,
+ [1][1][RTW89_FCC][11] = 30,
+ [1][1][RTW89_ETSI][11] = 34,
+ [1][1][RTW89_MKK][11] = 34,
[1][1][RTW89_IC][11] = 48,
[1][1][RTW89_ACMA][11] = 32,
- [1][1][RTW89_FCC][12] = 32,
- [1][1][RTW89_ETSI][12] = 36,
- [1][1][RTW89_MKK][12] = 36,
+ [1][1][RTW89_FCC][12] = 24,
+ [1][1][RTW89_ETSI][12] = 34,
+ [1][1][RTW89_MKK][12] = 34,
[1][1][RTW89_IC][12] = 32,
[1][1][RTW89_ACMA][12] = 32,
[1][1][RTW89_FCC][13] = 127,
@@ -17479,69 +17479,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_MKK][13] = 127,
[1][1][RTW89_IC][13] = 127,
[1][1][RTW89_ACMA][13] = 127,
- [2][0][RTW89_FCC][0] = 78,
- [2][0][RTW89_ETSI][0] = 60,
- [2][0][RTW89_MKK][0] = 60,
+ [2][0][RTW89_FCC][0] = 64,
+ [2][0][RTW89_ETSI][0] = 58,
+ [2][0][RTW89_MKK][0] = 58,
[2][0][RTW89_IC][0] = 78,
[2][0][RTW89_ACMA][0] = 56,
- [2][0][RTW89_FCC][1] = 78,
- [2][0][RTW89_ETSI][1] = 60,
- [2][0][RTW89_MKK][1] = 60,
+ [2][0][RTW89_FCC][1] = 64,
+ [2][0][RTW89_ETSI][1] = 58,
+ [2][0][RTW89_MKK][1] = 58,
[2][0][RTW89_IC][1] = 78,
[2][0][RTW89_ACMA][1] = 56,
- [2][0][RTW89_FCC][2] = 80,
- [2][0][RTW89_ETSI][2] = 60,
- [2][0][RTW89_MKK][2] = 60,
+ [2][0][RTW89_FCC][2] = 66,
+ [2][0][RTW89_ETSI][2] = 58,
+ [2][0][RTW89_MKK][2] = 58,
[2][0][RTW89_IC][2] = 80,
[2][0][RTW89_ACMA][2] = 56,
- [2][0][RTW89_FCC][3] = 80,
- [2][0][RTW89_ETSI][3] = 60,
- [2][0][RTW89_MKK][3] = 60,
+ [2][0][RTW89_FCC][3] = 66,
+ [2][0][RTW89_ETSI][3] = 58,
+ [2][0][RTW89_MKK][3] = 58,
[2][0][RTW89_IC][3] = 80,
[2][0][RTW89_ACMA][3] = 56,
- [2][0][RTW89_FCC][4] = 80,
- [2][0][RTW89_ETSI][4] = 60,
- [2][0][RTW89_MKK][4] = 60,
+ [2][0][RTW89_FCC][4] = 66,
+ [2][0][RTW89_ETSI][4] = 58,
+ [2][0][RTW89_MKK][4] = 58,
[2][0][RTW89_IC][4] = 80,
[2][0][RTW89_ACMA][4] = 56,
- [2][0][RTW89_FCC][5] = 84,
- [2][0][RTW89_ETSI][5] = 60,
- [2][0][RTW89_MKK][5] = 60,
+ [2][0][RTW89_FCC][5] = 76,
+ [2][0][RTW89_ETSI][5] = 58,
+ [2][0][RTW89_MKK][5] = 58,
[2][0][RTW89_IC][5] = 84,
[2][0][RTW89_ACMA][5] = 56,
- [2][0][RTW89_FCC][6] = 76,
- [2][0][RTW89_ETSI][6] = 58,
- [2][0][RTW89_MKK][6] = 60,
+ [2][0][RTW89_FCC][6] = 62,
+ [2][0][RTW89_ETSI][6] = 56,
+ [2][0][RTW89_MKK][6] = 58,
[2][0][RTW89_IC][6] = 76,
[2][0][RTW89_ACMA][6] = 56,
- [2][0][RTW89_FCC][7] = 76,
- [2][0][RTW89_ETSI][7] = 60,
- [2][0][RTW89_MKK][7] = 60,
+ [2][0][RTW89_FCC][7] = 62,
+ [2][0][RTW89_ETSI][7] = 58,
+ [2][0][RTW89_MKK][7] = 58,
[2][0][RTW89_IC][7] = 76,
[2][0][RTW89_ACMA][7] = 56,
- [2][0][RTW89_FCC][8] = 76,
- [2][0][RTW89_ETSI][8] = 60,
- [2][0][RTW89_MKK][8] = 60,
+ [2][0][RTW89_FCC][8] = 62,
+ [2][0][RTW89_ETSI][8] = 58,
+ [2][0][RTW89_MKK][8] = 58,
[2][0][RTW89_IC][8] = 76,
[2][0][RTW89_ACMA][8] = 56,
- [2][0][RTW89_FCC][9] = 74,
- [2][0][RTW89_ETSI][9] = 60,
- [2][0][RTW89_MKK][9] = 60,
+ [2][0][RTW89_FCC][9] = 60,
+ [2][0][RTW89_ETSI][9] = 58,
+ [2][0][RTW89_MKK][9] = 58,
[2][0][RTW89_IC][9] = 74,
[2][0][RTW89_ACMA][9] = 56,
- [2][0][RTW89_FCC][10] = 74,
- [2][0][RTW89_ETSI][10] = 60,
- [2][0][RTW89_MKK][10] = 60,
+ [2][0][RTW89_FCC][10] = 60,
+ [2][0][RTW89_ETSI][10] = 58,
+ [2][0][RTW89_MKK][10] = 58,
[2][0][RTW89_IC][10] = 74,
[2][0][RTW89_ACMA][10] = 56,
- [2][0][RTW89_FCC][11] = 66,
- [2][0][RTW89_ETSI][11] = 60,
- [2][0][RTW89_MKK][11] = 60,
+ [2][0][RTW89_FCC][11] = 42,
+ [2][0][RTW89_ETSI][11] = 58,
+ [2][0][RTW89_MKK][11] = 58,
[2][0][RTW89_IC][11] = 66,
[2][0][RTW89_ACMA][11] = 56,
- [2][0][RTW89_FCC][12] = 56,
- [2][0][RTW89_ETSI][12] = 60,
- [2][0][RTW89_MKK][12] = 60,
+ [2][0][RTW89_FCC][12] = 38,
+ [2][0][RTW89_ETSI][12] = 58,
+ [2][0][RTW89_MKK][12] = 58,
[2][0][RTW89_IC][12] = 56,
[2][0][RTW89_ACMA][12] = 56,
[2][0][RTW89_FCC][13] = 127,
@@ -17549,69 +17549,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_MKK][13] = 127,
[2][0][RTW89_IC][13] = 127,
[2][0][RTW89_ACMA][13] = 127,
- [2][1][RTW89_FCC][0] = 70,
- [2][1][RTW89_ETSI][0] = 48,
- [2][1][RTW89_MKK][0] = 48,
+ [2][1][RTW89_FCC][0] = 46,
+ [2][1][RTW89_ETSI][0] = 46,
+ [2][1][RTW89_MKK][0] = 46,
[2][1][RTW89_IC][0] = 70,
[2][1][RTW89_ACMA][0] = 44,
- [2][1][RTW89_FCC][1] = 70,
- [2][1][RTW89_ETSI][1] = 48,
- [2][1][RTW89_MKK][1] = 48,
+ [2][1][RTW89_FCC][1] = 46,
+ [2][1][RTW89_ETSI][1] = 46,
+ [2][1][RTW89_MKK][1] = 46,
[2][1][RTW89_IC][1] = 70,
[2][1][RTW89_ACMA][1] = 44,
- [2][1][RTW89_FCC][2] = 74,
- [2][1][RTW89_ETSI][2] = 48,
- [2][1][RTW89_MKK][2] = 48,
+ [2][1][RTW89_FCC][2] = 50,
+ [2][1][RTW89_ETSI][2] = 46,
+ [2][1][RTW89_MKK][2] = 46,
[2][1][RTW89_IC][2] = 74,
[2][1][RTW89_ACMA][2] = 44,
- [2][1][RTW89_FCC][3] = 78,
- [2][1][RTW89_ETSI][3] = 48,
- [2][1][RTW89_MKK][3] = 48,
+ [2][1][RTW89_FCC][3] = 54,
+ [2][1][RTW89_ETSI][3] = 46,
+ [2][1][RTW89_MKK][3] = 46,
[2][1][RTW89_IC][3] = 78,
[2][1][RTW89_ACMA][3] = 44,
- [2][1][RTW89_FCC][4] = 80,
- [2][1][RTW89_ETSI][4] = 48,
- [2][1][RTW89_MKK][4] = 48,
+ [2][1][RTW89_FCC][4] = 56,
+ [2][1][RTW89_ETSI][4] = 46,
+ [2][1][RTW89_MKK][4] = 46,
[2][1][RTW89_IC][4] = 80,
[2][1][RTW89_ACMA][4] = 44,
- [2][1][RTW89_FCC][5] = 80,
- [2][1][RTW89_ETSI][5] = 48,
- [2][1][RTW89_MKK][5] = 48,
+ [2][1][RTW89_FCC][5] = 72,
+ [2][1][RTW89_ETSI][5] = 46,
+ [2][1][RTW89_MKK][5] = 46,
[2][1][RTW89_IC][5] = 80,
[2][1][RTW89_ACMA][5] = 44,
- [2][1][RTW89_FCC][6] = 78,
- [2][1][RTW89_ETSI][6] = 46,
- [2][1][RTW89_MKK][6] = 48,
+ [2][1][RTW89_FCC][6] = 54,
+ [2][1][RTW89_ETSI][6] = 44,
+ [2][1][RTW89_MKK][6] = 46,
[2][1][RTW89_IC][6] = 78,
[2][1][RTW89_ACMA][6] = 44,
- [2][1][RTW89_FCC][7] = 78,
- [2][1][RTW89_ETSI][7] = 48,
- [2][1][RTW89_MKK][7] = 48,
+ [2][1][RTW89_FCC][7] = 54,
+ [2][1][RTW89_ETSI][7] = 46,
+ [2][1][RTW89_MKK][7] = 46,
[2][1][RTW89_IC][7] = 78,
[2][1][RTW89_ACMA][7] = 44,
- [2][1][RTW89_FCC][8] = 74,
- [2][1][RTW89_ETSI][8] = 48,
- [2][1][RTW89_MKK][8] = 48,
+ [2][1][RTW89_FCC][8] = 50,
+ [2][1][RTW89_ETSI][8] = 46,
+ [2][1][RTW89_MKK][8] = 46,
[2][1][RTW89_IC][8] = 74,
[2][1][RTW89_ACMA][8] = 44,
- [2][1][RTW89_FCC][9] = 70,
- [2][1][RTW89_ETSI][9] = 48,
- [2][1][RTW89_MKK][9] = 48,
+ [2][1][RTW89_FCC][9] = 46,
+ [2][1][RTW89_ETSI][9] = 46,
+ [2][1][RTW89_MKK][9] = 46,
[2][1][RTW89_IC][9] = 70,
[2][1][RTW89_ACMA][9] = 44,
- [2][1][RTW89_FCC][10] = 70,
- [2][1][RTW89_ETSI][10] = 48,
- [2][1][RTW89_MKK][10] = 48,
+ [2][1][RTW89_FCC][10] = 46,
+ [2][1][RTW89_ETSI][10] = 46,
+ [2][1][RTW89_MKK][10] = 46,
[2][1][RTW89_IC][10] = 70,
[2][1][RTW89_ACMA][10] = 44,
- [2][1][RTW89_FCC][11] = 60,
- [2][1][RTW89_ETSI][11] = 48,
- [2][1][RTW89_MKK][11] = 48,
+ [2][1][RTW89_FCC][11] = 30,
+ [2][1][RTW89_ETSI][11] = 46,
+ [2][1][RTW89_MKK][11] = 46,
[2][1][RTW89_IC][11] = 60,
[2][1][RTW89_ACMA][11] = 44,
- [2][1][RTW89_FCC][12] = 44,
- [2][1][RTW89_ETSI][12] = 46,
- [2][1][RTW89_MKK][12] = 48,
+ [2][1][RTW89_FCC][12] = 26,
+ [2][1][RTW89_ETSI][12] = 44,
+ [2][1][RTW89_MKK][12] = 46,
[2][1][RTW89_IC][12] = 44,
[2][1][RTW89_ACMA][12] = 42,
[2][1][RTW89_FCC][13] = 127,
@@ -17625,10 +17625,10 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
[0][0][RTW89_WW][0] = 24,
[0][0][RTW89_WW][2] = 24,
- [0][0][RTW89_WW][4] = 24,
- [0][0][RTW89_WW][6] = 24,
- [0][0][RTW89_WW][8] = 24,
- [0][0][RTW89_WW][10] = 24,
+ [0][0][RTW89_WW][4] = 22,
+ [0][0][RTW89_WW][6] = 22,
+ [0][0][RTW89_WW][8] = 18,
+ [0][0][RTW89_WW][10] = 18,
[0][0][RTW89_WW][12] = 24,
[0][0][RTW89_WW][14] = 24,
[0][0][RTW89_WW][15] = 24,
@@ -17636,21 +17636,21 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_WW][19] = 24,
[0][0][RTW89_WW][21] = 24,
[0][0][RTW89_WW][23] = 24,
- [0][0][RTW89_WW][25] = 32,
- [0][0][RTW89_WW][27] = 32,
- [0][0][RTW89_WW][29] = 32,
+ [0][0][RTW89_WW][25] = 30,
+ [0][0][RTW89_WW][27] = 30,
+ [0][0][RTW89_WW][29] = 30,
[0][0][RTW89_WW][31] = 24,
[0][0][RTW89_WW][33] = 24,
[0][0][RTW89_WW][35] = 24,
[0][0][RTW89_WW][37] = 44,
- [0][0][RTW89_WW][38] = 30,
- [0][0][RTW89_WW][40] = 30,
- [0][0][RTW89_WW][42] = 30,
- [0][0][RTW89_WW][44] = 30,
- [0][0][RTW89_WW][46] = 30,
- [0][0][RTW89_WW][48] = 32,
- [0][0][RTW89_WW][50] = 32,
- [0][0][RTW89_WW][52] = 32,
+ [0][0][RTW89_WW][38] = 28,
+ [0][0][RTW89_WW][40] = 28,
+ [0][0][RTW89_WW][42] = 28,
+ [0][0][RTW89_WW][44] = 28,
+ [0][0][RTW89_WW][46] = 28,
+ [0][0][RTW89_WW][48] = 24,
+ [0][0][RTW89_WW][50] = 24,
+ [0][0][RTW89_WW][52] = 24,
[0][1][RTW89_WW][0] = 0,
[0][1][RTW89_WW][2] = 4,
[0][1][RTW89_WW][4] = 0,
@@ -17664,21 +17664,21 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_WW][19] = 12,
[0][1][RTW89_WW][21] = 12,
[0][1][RTW89_WW][23] = 12,
- [0][1][RTW89_WW][25] = 20,
- [0][1][RTW89_WW][27] = 18,
- [0][1][RTW89_WW][29] = 18,
+ [0][1][RTW89_WW][25] = 18,
+ [0][1][RTW89_WW][27] = 16,
+ [0][1][RTW89_WW][29] = 16,
[0][1][RTW89_WW][31] = 12,
[0][1][RTW89_WW][33] = 12,
[0][1][RTW89_WW][35] = 12,
- [0][1][RTW89_WW][37] = 34,
- [0][1][RTW89_WW][38] = 18,
- [0][1][RTW89_WW][40] = 18,
- [0][1][RTW89_WW][42] = 18,
- [0][1][RTW89_WW][44] = 18,
- [0][1][RTW89_WW][46] = 18,
- [0][1][RTW89_WW][48] = 20,
- [0][1][RTW89_WW][50] = 20,
- [0][1][RTW89_WW][52] = 20,
+ [0][1][RTW89_WW][37] = 30,
+ [0][1][RTW89_WW][38] = 16,
+ [0][1][RTW89_WW][40] = 16,
+ [0][1][RTW89_WW][42] = 16,
+ [0][1][RTW89_WW][44] = 16,
+ [0][1][RTW89_WW][46] = 16,
+ [0][1][RTW89_WW][48] = 12,
+ [0][1][RTW89_WW][50] = 12,
+ [0][1][RTW89_WW][52] = 12,
[1][0][RTW89_WW][0] = 34,
[1][0][RTW89_WW][2] = 34,
[1][0][RTW89_WW][4] = 34,
@@ -17692,21 +17692,21 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_WW][19] = 34,
[1][0][RTW89_WW][21] = 34,
[1][0][RTW89_WW][23] = 34,
- [1][0][RTW89_WW][25] = 42,
- [1][0][RTW89_WW][27] = 44,
- [1][0][RTW89_WW][29] = 44,
+ [1][0][RTW89_WW][25] = 40,
+ [1][0][RTW89_WW][27] = 42,
+ [1][0][RTW89_WW][29] = 42,
[1][0][RTW89_WW][31] = 34,
[1][0][RTW89_WW][33] = 34,
[1][0][RTW89_WW][35] = 34,
- [1][0][RTW89_WW][37] = 52,
- [1][0][RTW89_WW][38] = 30,
- [1][0][RTW89_WW][40] = 30,
- [1][0][RTW89_WW][42] = 30,
- [1][0][RTW89_WW][44] = 30,
- [1][0][RTW89_WW][46] = 30,
- [1][0][RTW89_WW][48] = 44,
- [1][0][RTW89_WW][50] = 44,
- [1][0][RTW89_WW][52] = 44,
+ [1][0][RTW89_WW][37] = 56,
+ [1][0][RTW89_WW][38] = 28,
+ [1][0][RTW89_WW][40] = 28,
+ [1][0][RTW89_WW][42] = 28,
+ [1][0][RTW89_WW][44] = 28,
+ [1][0][RTW89_WW][46] = 28,
+ [1][0][RTW89_WW][48] = 36,
+ [1][0][RTW89_WW][50] = 36,
+ [1][0][RTW89_WW][52] = 36,
[1][1][RTW89_WW][0] = 10,
[1][1][RTW89_WW][2] = 14,
[1][1][RTW89_WW][4] = 10,
@@ -17720,55 +17720,55 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_WW][19] = 22,
[1][1][RTW89_WW][21] = 22,
[1][1][RTW89_WW][23] = 22,
- [1][1][RTW89_WW][25] = 30,
- [1][1][RTW89_WW][27] = 32,
- [1][1][RTW89_WW][29] = 32,
+ [1][1][RTW89_WW][25] = 28,
+ [1][1][RTW89_WW][27] = 30,
+ [1][1][RTW89_WW][29] = 30,
[1][1][RTW89_WW][31] = 22,
[1][1][RTW89_WW][33] = 22,
[1][1][RTW89_WW][35] = 22,
- [1][1][RTW89_WW][37] = 42,
- [1][1][RTW89_WW][38] = 18,
- [1][1][RTW89_WW][40] = 18,
- [1][1][RTW89_WW][42] = 18,
- [1][1][RTW89_WW][44] = 18,
- [1][1][RTW89_WW][46] = 18,
- [1][1][RTW89_WW][48] = 32,
- [1][1][RTW89_WW][50] = 32,
- [1][1][RTW89_WW][52] = 32,
+ [1][1][RTW89_WW][37] = 40,
+ [1][1][RTW89_WW][38] = 16,
+ [1][1][RTW89_WW][40] = 16,
+ [1][1][RTW89_WW][42] = 16,
+ [1][1][RTW89_WW][44] = 16,
+ [1][1][RTW89_WW][46] = 16,
+ [1][1][RTW89_WW][48] = 24,
+ [1][1][RTW89_WW][50] = 24,
+ [1][1][RTW89_WW][52] = 24,
[2][0][RTW89_WW][0] = 46,
[2][0][RTW89_WW][2] = 46,
[2][0][RTW89_WW][4] = 46,
[2][0][RTW89_WW][6] = 46,
- [2][0][RTW89_WW][8] = 48,
- [2][0][RTW89_WW][10] = 48,
- [2][0][RTW89_WW][12] = 46,
- [2][0][RTW89_WW][14] = 46,
+ [2][0][RTW89_WW][8] = 44,
+ [2][0][RTW89_WW][10] = 44,
+ [2][0][RTW89_WW][12] = 48,
+ [2][0][RTW89_WW][14] = 48,
[2][0][RTW89_WW][15] = 48,
[2][0][RTW89_WW][17] = 48,
[2][0][RTW89_WW][19] = 48,
[2][0][RTW89_WW][21] = 48,
[2][0][RTW89_WW][23] = 48,
- [2][0][RTW89_WW][25] = 54,
- [2][0][RTW89_WW][27] = 54,
- [2][0][RTW89_WW][29] = 54,
+ [2][0][RTW89_WW][25] = 52,
+ [2][0][RTW89_WW][27] = 52,
+ [2][0][RTW89_WW][29] = 52,
[2][0][RTW89_WW][31] = 48,
[2][0][RTW89_WW][33] = 48,
[2][0][RTW89_WW][35] = 48,
- [2][0][RTW89_WW][37] = 66,
- [2][0][RTW89_WW][38] = 30,
- [2][0][RTW89_WW][40] = 30,
- [2][0][RTW89_WW][42] = 30,
- [2][0][RTW89_WW][44] = 30,
- [2][0][RTW89_WW][46] = 30,
- [2][0][RTW89_WW][48] = 56,
- [2][0][RTW89_WW][50] = 56,
- [2][0][RTW89_WW][52] = 56,
+ [2][0][RTW89_WW][37] = 62,
+ [2][0][RTW89_WW][38] = 28,
+ [2][0][RTW89_WW][40] = 28,
+ [2][0][RTW89_WW][42] = 28,
+ [2][0][RTW89_WW][44] = 28,
+ [2][0][RTW89_WW][46] = 28,
+ [2][0][RTW89_WW][48] = 48,
+ [2][0][RTW89_WW][50] = 48,
+ [2][0][RTW89_WW][52] = 48,
[2][1][RTW89_WW][0] = 20,
[2][1][RTW89_WW][2] = 18,
[2][1][RTW89_WW][4] = 22,
[2][1][RTW89_WW][6] = 22,
- [2][1][RTW89_WW][8] = 34,
- [2][1][RTW89_WW][10] = 34,
+ [2][1][RTW89_WW][8] = 32,
+ [2][1][RTW89_WW][10] = 32,
[2][1][RTW89_WW][12] = 36,
[2][1][RTW89_WW][14] = 36,
[2][1][RTW89_WW][15] = 36,
@@ -17776,857 +17776,857 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_WW][19] = 36,
[2][1][RTW89_WW][21] = 36,
[2][1][RTW89_WW][23] = 36,
- [2][1][RTW89_WW][25] = 42,
- [2][1][RTW89_WW][27] = 42,
- [2][1][RTW89_WW][29] = 42,
+ [2][1][RTW89_WW][25] = 40,
+ [2][1][RTW89_WW][27] = 40,
+ [2][1][RTW89_WW][29] = 40,
[2][1][RTW89_WW][31] = 36,
[2][1][RTW89_WW][33] = 36,
[2][1][RTW89_WW][35] = 36,
- [2][1][RTW89_WW][37] = 50,
- [2][1][RTW89_WW][38] = 18,
- [2][1][RTW89_WW][40] = 18,
- [2][1][RTW89_WW][42] = 18,
- [2][1][RTW89_WW][44] = 18,
- [2][1][RTW89_WW][46] = 18,
- [2][1][RTW89_WW][48] = 44,
- [2][1][RTW89_WW][50] = 44,
- [2][1][RTW89_WW][52] = 44,
- [0][0][RTW89_FCC][0] = 52,
- [0][0][RTW89_ETSI][0] = 32,
- [0][0][RTW89_MKK][0] = 26,
+ [2][1][RTW89_WW][37] = 42,
+ [2][1][RTW89_WW][38] = 16,
+ [2][1][RTW89_WW][40] = 16,
+ [2][1][RTW89_WW][42] = 16,
+ [2][1][RTW89_WW][44] = 16,
+ [2][1][RTW89_WW][46] = 16,
+ [2][1][RTW89_WW][48] = 36,
+ [2][1][RTW89_WW][50] = 36,
+ [2][1][RTW89_WW][52] = 36,
+ [0][0][RTW89_FCC][0] = 44,
+ [0][0][RTW89_ETSI][0] = 30,
+ [0][0][RTW89_MKK][0] = 36,
[0][0][RTW89_IC][0] = 24,
[0][0][RTW89_ACMA][0] = 24,
- [0][0][RTW89_FCC][2] = 52,
- [0][0][RTW89_ETSI][2] = 32,
- [0][0][RTW89_MKK][2] = 26,
+ [0][0][RTW89_FCC][2] = 44,
+ [0][0][RTW89_ETSI][2] = 30,
+ [0][0][RTW89_MKK][2] = 36,
[0][0][RTW89_IC][2] = 24,
[0][0][RTW89_ACMA][2] = 24,
- [0][0][RTW89_FCC][4] = 52,
- [0][0][RTW89_ETSI][4] = 32,
- [0][0][RTW89_MKK][4] = 26,
+ [0][0][RTW89_FCC][4] = 44,
+ [0][0][RTW89_ETSI][4] = 30,
+ [0][0][RTW89_MKK][4] = 22,
[0][0][RTW89_IC][4] = 24,
[0][0][RTW89_ACMA][4] = 24,
- [0][0][RTW89_FCC][6] = 52,
- [0][0][RTW89_ETSI][6] = 32,
- [0][0][RTW89_MKK][6] = 26,
+ [0][0][RTW89_FCC][6] = 44,
+ [0][0][RTW89_ETSI][6] = 30,
+ [0][0][RTW89_MKK][6] = 22,
[0][0][RTW89_IC][6] = 24,
[0][0][RTW89_ACMA][6] = 24,
- [0][0][RTW89_FCC][8] = 52,
- [0][0][RTW89_ETSI][8] = 30,
- [0][0][RTW89_MKK][8] = 26,
+ [0][0][RTW89_FCC][8] = 44,
+ [0][0][RTW89_ETSI][8] = 28,
+ [0][0][RTW89_MKK][8] = 18,
[0][0][RTW89_IC][8] = 52,
[0][0][RTW89_ACMA][8] = 24,
- [0][0][RTW89_FCC][10] = 52,
- [0][0][RTW89_ETSI][10] = 30,
- [0][0][RTW89_MKK][10] = 26,
+ [0][0][RTW89_FCC][10] = 44,
+ [0][0][RTW89_ETSI][10] = 28,
+ [0][0][RTW89_MKK][10] = 18,
[0][0][RTW89_IC][10] = 52,
[0][0][RTW89_ACMA][10] = 24,
- [0][0][RTW89_FCC][12] = 52,
- [0][0][RTW89_ETSI][12] = 30,
- [0][0][RTW89_MKK][12] = 24,
+ [0][0][RTW89_FCC][12] = 44,
+ [0][0][RTW89_ETSI][12] = 28,
+ [0][0][RTW89_MKK][12] = 34,
[0][0][RTW89_IC][12] = 52,
[0][0][RTW89_ACMA][12] = 24,
- [0][0][RTW89_FCC][14] = 52,
- [0][0][RTW89_ETSI][14] = 30,
- [0][0][RTW89_MKK][14] = 24,
+ [0][0][RTW89_FCC][14] = 44,
+ [0][0][RTW89_ETSI][14] = 28,
+ [0][0][RTW89_MKK][14] = 34,
[0][0][RTW89_IC][14] = 52,
[0][0][RTW89_ACMA][14] = 24,
- [0][0][RTW89_FCC][15] = 52,
- [0][0][RTW89_ETSI][15] = 32,
- [0][0][RTW89_MKK][15] = 46,
+ [0][0][RTW89_FCC][15] = 44,
+ [0][0][RTW89_ETSI][15] = 30,
+ [0][0][RTW89_MKK][15] = 56,
[0][0][RTW89_IC][15] = 52,
[0][0][RTW89_ACMA][15] = 24,
- [0][0][RTW89_FCC][17] = 52,
- [0][0][RTW89_ETSI][17] = 32,
- [0][0][RTW89_MKK][17] = 48,
+ [0][0][RTW89_FCC][17] = 44,
+ [0][0][RTW89_ETSI][17] = 30,
+ [0][0][RTW89_MKK][17] = 58,
[0][0][RTW89_IC][17] = 52,
[0][0][RTW89_ACMA][17] = 24,
- [0][0][RTW89_FCC][19] = 52,
- [0][0][RTW89_ETSI][19] = 32,
- [0][0][RTW89_MKK][19] = 48,
+ [0][0][RTW89_FCC][19] = 44,
+ [0][0][RTW89_ETSI][19] = 30,
+ [0][0][RTW89_MKK][19] = 58,
[0][0][RTW89_IC][19] = 52,
[0][0][RTW89_ACMA][19] = 24,
- [0][0][RTW89_FCC][21] = 52,
- [0][0][RTW89_ETSI][21] = 32,
- [0][0][RTW89_MKK][21] = 48,
+ [0][0][RTW89_FCC][21] = 44,
+ [0][0][RTW89_ETSI][21] = 30,
+ [0][0][RTW89_MKK][21] = 58,
[0][0][RTW89_IC][21] = 52,
[0][0][RTW89_ACMA][21] = 24,
- [0][0][RTW89_FCC][23] = 52,
- [0][0][RTW89_ETSI][23] = 32,
- [0][0][RTW89_MKK][23] = 48,
+ [0][0][RTW89_FCC][23] = 44,
+ [0][0][RTW89_ETSI][23] = 30,
+ [0][0][RTW89_MKK][23] = 58,
[0][0][RTW89_IC][23] = 52,
[0][0][RTW89_ACMA][23] = 24,
- [0][0][RTW89_FCC][25] = 52,
- [0][0][RTW89_ETSI][25] = 32,
- [0][0][RTW89_MKK][25] = 48,
+ [0][0][RTW89_FCC][25] = 44,
+ [0][0][RTW89_ETSI][25] = 30,
+ [0][0][RTW89_MKK][25] = 58,
[0][0][RTW89_IC][25] = 127,
[0][0][RTW89_ACMA][25] = 127,
- [0][0][RTW89_FCC][27] = 52,
- [0][0][RTW89_ETSI][27] = 32,
- [0][0][RTW89_MKK][27] = 48,
+ [0][0][RTW89_FCC][27] = 44,
+ [0][0][RTW89_ETSI][27] = 30,
+ [0][0][RTW89_MKK][27] = 58,
[0][0][RTW89_IC][27] = 127,
[0][0][RTW89_ACMA][27] = 127,
- [0][0][RTW89_FCC][29] = 52,
- [0][0][RTW89_ETSI][29] = 32,
- [0][0][RTW89_MKK][29] = 48,
+ [0][0][RTW89_FCC][29] = 44,
+ [0][0][RTW89_ETSI][29] = 30,
+ [0][0][RTW89_MKK][29] = 58,
[0][0][RTW89_IC][29] = 127,
[0][0][RTW89_ACMA][29] = 127,
- [0][0][RTW89_FCC][31] = 52,
- [0][0][RTW89_ETSI][31] = 32,
- [0][0][RTW89_MKK][31] = 48,
+ [0][0][RTW89_FCC][31] = 44,
+ [0][0][RTW89_ETSI][31] = 30,
+ [0][0][RTW89_MKK][31] = 58,
[0][0][RTW89_IC][31] = 52,
[0][0][RTW89_ACMA][31] = 24,
- [0][0][RTW89_FCC][33] = 52,
- [0][0][RTW89_ETSI][33] = 32,
- [0][0][RTW89_MKK][33] = 48,
+ [0][0][RTW89_FCC][33] = 44,
+ [0][0][RTW89_ETSI][33] = 30,
+ [0][0][RTW89_MKK][33] = 58,
[0][0][RTW89_IC][33] = 52,
[0][0][RTW89_ACMA][33] = 24,
- [0][0][RTW89_FCC][35] = 52,
- [0][0][RTW89_ETSI][35] = 32,
- [0][0][RTW89_MKK][35] = 48,
+ [0][0][RTW89_FCC][35] = 44,
+ [0][0][RTW89_ETSI][35] = 30,
+ [0][0][RTW89_MKK][35] = 58,
[0][0][RTW89_IC][35] = 52,
[0][0][RTW89_ACMA][35] = 24,
- [0][0][RTW89_FCC][37] = 52,
+ [0][0][RTW89_FCC][37] = 44,
[0][0][RTW89_ETSI][37] = 127,
- [0][0][RTW89_MKK][37] = 44,
+ [0][0][RTW89_MKK][37] = 58,
[0][0][RTW89_IC][37] = 52,
[0][0][RTW89_ACMA][37] = 52,
- [0][0][RTW89_FCC][38] = 84,
- [0][0][RTW89_ETSI][38] = 30,
+ [0][0][RTW89_FCC][38] = 76,
+ [0][0][RTW89_ETSI][38] = 28,
[0][0][RTW89_MKK][38] = 127,
[0][0][RTW89_IC][38] = 84,
[0][0][RTW89_ACMA][38] = 84,
- [0][0][RTW89_FCC][40] = 84,
- [0][0][RTW89_ETSI][40] = 30,
+ [0][0][RTW89_FCC][40] = 76,
+ [0][0][RTW89_ETSI][40] = 28,
[0][0][RTW89_MKK][40] = 127,
[0][0][RTW89_IC][40] = 84,
[0][0][RTW89_ACMA][40] = 84,
- [0][0][RTW89_FCC][42] = 84,
- [0][0][RTW89_ETSI][42] = 30,
+ [0][0][RTW89_FCC][42] = 76,
+ [0][0][RTW89_ETSI][42] = 28,
[0][0][RTW89_MKK][42] = 127,
[0][0][RTW89_IC][42] = 84,
[0][0][RTW89_ACMA][42] = 84,
- [0][0][RTW89_FCC][44] = 84,
- [0][0][RTW89_ETSI][44] = 30,
+ [0][0][RTW89_FCC][44] = 76,
+ [0][0][RTW89_ETSI][44] = 28,
[0][0][RTW89_MKK][44] = 127,
[0][0][RTW89_IC][44] = 84,
[0][0][RTW89_ACMA][44] = 84,
- [0][0][RTW89_FCC][46] = 84,
- [0][0][RTW89_ETSI][46] = 30,
+ [0][0][RTW89_FCC][46] = 76,
+ [0][0][RTW89_ETSI][46] = 28,
[0][0][RTW89_MKK][46] = 127,
[0][0][RTW89_IC][46] = 84,
[0][0][RTW89_ACMA][46] = 84,
- [0][0][RTW89_FCC][48] = 32,
+ [0][0][RTW89_FCC][48] = 24,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
[0][0][RTW89_IC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
- [0][0][RTW89_FCC][50] = 32,
+ [0][0][RTW89_FCC][50] = 24,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
[0][0][RTW89_IC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
- [0][0][RTW89_FCC][52] = 32,
+ [0][0][RTW89_FCC][52] = 24,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
[0][0][RTW89_IC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
- [0][1][RTW89_FCC][0] = 34,
- [0][1][RTW89_ETSI][0] = 20,
- [0][1][RTW89_MKK][0] = 12,
+ [0][1][RTW89_FCC][0] = 26,
+ [0][1][RTW89_ETSI][0] = 18,
+ [0][1][RTW89_MKK][0] = 20,
[0][1][RTW89_IC][0] = 0,
[0][1][RTW89_ACMA][0] = 12,
- [0][1][RTW89_FCC][2] = 38,
- [0][1][RTW89_ETSI][2] = 20,
- [0][1][RTW89_MKK][2] = 12,
+ [0][1][RTW89_FCC][2] = 30,
+ [0][1][RTW89_ETSI][2] = 18,
+ [0][1][RTW89_MKK][2] = 20,
[0][1][RTW89_IC][2] = 4,
[0][1][RTW89_ACMA][2] = 12,
- [0][1][RTW89_FCC][4] = 34,
- [0][1][RTW89_ETSI][4] = 20,
- [0][1][RTW89_MKK][4] = 14,
+ [0][1][RTW89_FCC][4] = 26,
+ [0][1][RTW89_ETSI][4] = 18,
+ [0][1][RTW89_MKK][4] = 8,
[0][1][RTW89_IC][4] = 0,
[0][1][RTW89_ACMA][4] = 12,
- [0][1][RTW89_FCC][6] = 34,
- [0][1][RTW89_ETSI][6] = 20,
- [0][1][RTW89_MKK][6] = 14,
+ [0][1][RTW89_FCC][6] = 26,
+ [0][1][RTW89_ETSI][6] = 18,
+ [0][1][RTW89_MKK][6] = 8,
[0][1][RTW89_IC][6] = 0,
[0][1][RTW89_ACMA][6] = 12,
- [0][1][RTW89_FCC][8] = 34,
- [0][1][RTW89_ETSI][8] = 18,
- [0][1][RTW89_MKK][8] = 14,
+ [0][1][RTW89_FCC][8] = 26,
+ [0][1][RTW89_ETSI][8] = 16,
+ [0][1][RTW89_MKK][8] = 20,
[0][1][RTW89_IC][8] = 34,
[0][1][RTW89_ACMA][8] = 12,
- [0][1][RTW89_FCC][10] = 34,
- [0][1][RTW89_ETSI][10] = 18,
- [0][1][RTW89_MKK][10] = 14,
+ [0][1][RTW89_FCC][10] = 26,
+ [0][1][RTW89_ETSI][10] = 16,
+ [0][1][RTW89_MKK][10] = 20,
[0][1][RTW89_IC][10] = 34,
[0][1][RTW89_ACMA][10] = 12,
- [0][1][RTW89_FCC][12] = 38,
- [0][1][RTW89_ETSI][12] = 18,
- [0][1][RTW89_MKK][12] = 12,
+ [0][1][RTW89_FCC][12] = 30,
+ [0][1][RTW89_ETSI][12] = 16,
+ [0][1][RTW89_MKK][12] = 34,
[0][1][RTW89_IC][12] = 38,
[0][1][RTW89_ACMA][12] = 12,
- [0][1][RTW89_FCC][14] = 34,
- [0][1][RTW89_ETSI][14] = 18,
- [0][1][RTW89_MKK][14] = 12,
+ [0][1][RTW89_FCC][14] = 26,
+ [0][1][RTW89_ETSI][14] = 16,
+ [0][1][RTW89_MKK][14] = 34,
[0][1][RTW89_IC][14] = 34,
[0][1][RTW89_ACMA][14] = 12,
- [0][1][RTW89_FCC][15] = 34,
- [0][1][RTW89_ETSI][15] = 20,
- [0][1][RTW89_MKK][15] = 32,
+ [0][1][RTW89_FCC][15] = 26,
+ [0][1][RTW89_ETSI][15] = 18,
+ [0][1][RTW89_MKK][15] = 44,
[0][1][RTW89_IC][15] = 34,
[0][1][RTW89_ACMA][15] = 12,
- [0][1][RTW89_FCC][17] = 34,
- [0][1][RTW89_ETSI][17] = 20,
- [0][1][RTW89_MKK][17] = 34,
+ [0][1][RTW89_FCC][17] = 26,
+ [0][1][RTW89_ETSI][17] = 18,
+ [0][1][RTW89_MKK][17] = 44,
[0][1][RTW89_IC][17] = 34,
[0][1][RTW89_ACMA][17] = 12,
- [0][1][RTW89_FCC][19] = 38,
- [0][1][RTW89_ETSI][19] = 20,
- [0][1][RTW89_MKK][19] = 34,
+ [0][1][RTW89_FCC][19] = 30,
+ [0][1][RTW89_ETSI][19] = 18,
+ [0][1][RTW89_MKK][19] = 44,
[0][1][RTW89_IC][19] = 38,
[0][1][RTW89_ACMA][19] = 12,
- [0][1][RTW89_FCC][21] = 38,
- [0][1][RTW89_ETSI][21] = 20,
- [0][1][RTW89_MKK][21] = 34,
+ [0][1][RTW89_FCC][21] = 30,
+ [0][1][RTW89_ETSI][21] = 18,
+ [0][1][RTW89_MKK][21] = 44,
[0][1][RTW89_IC][21] = 38,
[0][1][RTW89_ACMA][21] = 12,
- [0][1][RTW89_FCC][23] = 38,
- [0][1][RTW89_ETSI][23] = 20,
- [0][1][RTW89_MKK][23] = 34,
+ [0][1][RTW89_FCC][23] = 30,
+ [0][1][RTW89_ETSI][23] = 18,
+ [0][1][RTW89_MKK][23] = 44,
[0][1][RTW89_IC][23] = 38,
[0][1][RTW89_ACMA][23] = 12,
- [0][1][RTW89_FCC][25] = 38,
- [0][1][RTW89_ETSI][25] = 20,
- [0][1][RTW89_MKK][25] = 34,
+ [0][1][RTW89_FCC][25] = 30,
+ [0][1][RTW89_ETSI][25] = 18,
+ [0][1][RTW89_MKK][25] = 44,
[0][1][RTW89_IC][25] = 127,
[0][1][RTW89_ACMA][25] = 127,
- [0][1][RTW89_FCC][27] = 38,
- [0][1][RTW89_ETSI][27] = 18,
- [0][1][RTW89_MKK][27] = 34,
+ [0][1][RTW89_FCC][27] = 30,
+ [0][1][RTW89_ETSI][27] = 16,
+ [0][1][RTW89_MKK][27] = 44,
[0][1][RTW89_IC][27] = 127,
[0][1][RTW89_ACMA][27] = 127,
- [0][1][RTW89_FCC][29] = 38,
- [0][1][RTW89_ETSI][29] = 18,
- [0][1][RTW89_MKK][29] = 34,
+ [0][1][RTW89_FCC][29] = 30,
+ [0][1][RTW89_ETSI][29] = 16,
+ [0][1][RTW89_MKK][29] = 44,
[0][1][RTW89_IC][29] = 127,
[0][1][RTW89_ACMA][29] = 127,
- [0][1][RTW89_FCC][31] = 38,
- [0][1][RTW89_ETSI][31] = 18,
- [0][1][RTW89_MKK][31] = 34,
+ [0][1][RTW89_FCC][31] = 30,
+ [0][1][RTW89_ETSI][31] = 16,
+ [0][1][RTW89_MKK][31] = 44,
[0][1][RTW89_IC][31] = 34,
[0][1][RTW89_ACMA][31] = 12,
- [0][1][RTW89_FCC][33] = 34,
- [0][1][RTW89_ETSI][33] = 18,
- [0][1][RTW89_MKK][33] = 34,
+ [0][1][RTW89_FCC][33] = 26,
+ [0][1][RTW89_ETSI][33] = 16,
+ [0][1][RTW89_MKK][33] = 44,
[0][1][RTW89_IC][33] = 34,
[0][1][RTW89_ACMA][33] = 12,
- [0][1][RTW89_FCC][35] = 34,
- [0][1][RTW89_ETSI][35] = 18,
- [0][1][RTW89_MKK][35] = 34,
+ [0][1][RTW89_FCC][35] = 26,
+ [0][1][RTW89_ETSI][35] = 16,
+ [0][1][RTW89_MKK][35] = 44,
[0][1][RTW89_IC][35] = 34,
[0][1][RTW89_ACMA][35] = 12,
- [0][1][RTW89_FCC][37] = 38,
+ [0][1][RTW89_FCC][37] = 30,
[0][1][RTW89_ETSI][37] = 127,
- [0][1][RTW89_MKK][37] = 34,
+ [0][1][RTW89_MKK][37] = 44,
[0][1][RTW89_IC][37] = 38,
[0][1][RTW89_ACMA][37] = 38,
- [0][1][RTW89_FCC][38] = 82,
- [0][1][RTW89_ETSI][38] = 18,
+ [0][1][RTW89_FCC][38] = 74,
+ [0][1][RTW89_ETSI][38] = 16,
[0][1][RTW89_MKK][38] = 127,
[0][1][RTW89_IC][38] = 82,
[0][1][RTW89_ACMA][38] = 84,
- [0][1][RTW89_FCC][40] = 82,
- [0][1][RTW89_ETSI][40] = 18,
+ [0][1][RTW89_FCC][40] = 74,
+ [0][1][RTW89_ETSI][40] = 16,
[0][1][RTW89_MKK][40] = 127,
[0][1][RTW89_IC][40] = 82,
[0][1][RTW89_ACMA][40] = 84,
- [0][1][RTW89_FCC][42] = 82,
- [0][1][RTW89_ETSI][42] = 18,
+ [0][1][RTW89_FCC][42] = 74,
+ [0][1][RTW89_ETSI][42] = 16,
[0][1][RTW89_MKK][42] = 127,
[0][1][RTW89_IC][42] = 82,
[0][1][RTW89_ACMA][42] = 84,
- [0][1][RTW89_FCC][44] = 82,
- [0][1][RTW89_ETSI][44] = 18,
+ [0][1][RTW89_FCC][44] = 74,
+ [0][1][RTW89_ETSI][44] = 16,
[0][1][RTW89_MKK][44] = 127,
[0][1][RTW89_IC][44] = 82,
[0][1][RTW89_ACMA][44] = 84,
- [0][1][RTW89_FCC][46] = 82,
- [0][1][RTW89_ETSI][46] = 18,
+ [0][1][RTW89_FCC][46] = 74,
+ [0][1][RTW89_ETSI][46] = 16,
[0][1][RTW89_MKK][46] = 127,
[0][1][RTW89_IC][46] = 82,
[0][1][RTW89_ACMA][46] = 84,
- [0][1][RTW89_FCC][48] = 20,
+ [0][1][RTW89_FCC][48] = 12,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
[0][1][RTW89_IC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
- [0][1][RTW89_FCC][50] = 20,
+ [0][1][RTW89_FCC][50] = 12,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
[0][1][RTW89_IC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
- [0][1][RTW89_FCC][52] = 20,
+ [0][1][RTW89_FCC][52] = 12,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
[0][1][RTW89_IC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
- [1][0][RTW89_FCC][0] = 62,
- [1][0][RTW89_ETSI][0] = 42,
- [1][0][RTW89_MKK][0] = 36,
+ [1][0][RTW89_FCC][0] = 54,
+ [1][0][RTW89_ETSI][0] = 40,
+ [1][0][RTW89_MKK][0] = 48,
[1][0][RTW89_IC][0] = 36,
[1][0][RTW89_ACMA][0] = 34,
- [1][0][RTW89_FCC][2] = 62,
- [1][0][RTW89_ETSI][2] = 42,
- [1][0][RTW89_MKK][2] = 36,
+ [1][0][RTW89_FCC][2] = 54,
+ [1][0][RTW89_ETSI][2] = 40,
+ [1][0][RTW89_MKK][2] = 48,
[1][0][RTW89_IC][2] = 36,
[1][0][RTW89_ACMA][2] = 34,
- [1][0][RTW89_FCC][4] = 62,
- [1][0][RTW89_ETSI][4] = 42,
- [1][0][RTW89_MKK][4] = 34,
+ [1][0][RTW89_FCC][4] = 54,
+ [1][0][RTW89_ETSI][4] = 40,
+ [1][0][RTW89_MKK][4] = 40,
[1][0][RTW89_IC][4] = 36,
[1][0][RTW89_ACMA][4] = 34,
- [1][0][RTW89_FCC][6] = 62,
- [1][0][RTW89_ETSI][6] = 42,
- [1][0][RTW89_MKK][6] = 34,
+ [1][0][RTW89_FCC][6] = 54,
+ [1][0][RTW89_ETSI][6] = 40,
+ [1][0][RTW89_MKK][6] = 40,
[1][0][RTW89_IC][6] = 36,
[1][0][RTW89_ACMA][6] = 34,
- [1][0][RTW89_FCC][8] = 62,
- [1][0][RTW89_ETSI][8] = 42,
- [1][0][RTW89_MKK][8] = 36,
+ [1][0][RTW89_FCC][8] = 54,
+ [1][0][RTW89_ETSI][8] = 40,
+ [1][0][RTW89_MKK][8] = 34,
[1][0][RTW89_IC][8] = 62,
[1][0][RTW89_ACMA][8] = 34,
- [1][0][RTW89_FCC][10] = 62,
- [1][0][RTW89_ETSI][10] = 42,
- [1][0][RTW89_MKK][10] = 36,
+ [1][0][RTW89_FCC][10] = 54,
+ [1][0][RTW89_ETSI][10] = 40,
+ [1][0][RTW89_MKK][10] = 34,
[1][0][RTW89_IC][10] = 62,
[1][0][RTW89_ACMA][10] = 34,
- [1][0][RTW89_FCC][12] = 64,
- [1][0][RTW89_ETSI][12] = 42,
- [1][0][RTW89_MKK][12] = 36,
+ [1][0][RTW89_FCC][12] = 56,
+ [1][0][RTW89_ETSI][12] = 40,
+ [1][0][RTW89_MKK][12] = 46,
[1][0][RTW89_IC][12] = 64,
[1][0][RTW89_ACMA][12] = 34,
- [1][0][RTW89_FCC][14] = 62,
- [1][0][RTW89_ETSI][14] = 42,
- [1][0][RTW89_MKK][14] = 36,
+ [1][0][RTW89_FCC][14] = 54,
+ [1][0][RTW89_ETSI][14] = 40,
+ [1][0][RTW89_MKK][14] = 46,
[1][0][RTW89_IC][14] = 62,
[1][0][RTW89_ACMA][14] = 34,
- [1][0][RTW89_FCC][15] = 62,
- [1][0][RTW89_ETSI][15] = 42,
- [1][0][RTW89_MKK][15] = 54,
+ [1][0][RTW89_FCC][15] = 54,
+ [1][0][RTW89_ETSI][15] = 40,
+ [1][0][RTW89_MKK][15] = 62,
[1][0][RTW89_IC][15] = 62,
[1][0][RTW89_ACMA][15] = 34,
- [1][0][RTW89_FCC][17] = 62,
- [1][0][RTW89_ETSI][17] = 42,
- [1][0][RTW89_MKK][17] = 58,
+ [1][0][RTW89_FCC][17] = 54,
+ [1][0][RTW89_ETSI][17] = 40,
+ [1][0][RTW89_MKK][17] = 68,
[1][0][RTW89_IC][17] = 62,
[1][0][RTW89_ACMA][17] = 34,
- [1][0][RTW89_FCC][19] = 62,
- [1][0][RTW89_ETSI][19] = 42,
- [1][0][RTW89_MKK][19] = 58,
+ [1][0][RTW89_FCC][19] = 54,
+ [1][0][RTW89_ETSI][19] = 40,
+ [1][0][RTW89_MKK][19] = 68,
[1][0][RTW89_IC][19] = 62,
[1][0][RTW89_ACMA][19] = 34,
- [1][0][RTW89_FCC][21] = 62,
- [1][0][RTW89_ETSI][21] = 42,
- [1][0][RTW89_MKK][21] = 58,
+ [1][0][RTW89_FCC][21] = 54,
+ [1][0][RTW89_ETSI][21] = 40,
+ [1][0][RTW89_MKK][21] = 68,
[1][0][RTW89_IC][21] = 62,
[1][0][RTW89_ACMA][21] = 34,
- [1][0][RTW89_FCC][23] = 62,
- [1][0][RTW89_ETSI][23] = 42,
- [1][0][RTW89_MKK][23] = 58,
+ [1][0][RTW89_FCC][23] = 54,
+ [1][0][RTW89_ETSI][23] = 40,
+ [1][0][RTW89_MKK][23] = 68,
[1][0][RTW89_IC][23] = 62,
[1][0][RTW89_ACMA][23] = 34,
- [1][0][RTW89_FCC][25] = 62,
- [1][0][RTW89_ETSI][25] = 42,
- [1][0][RTW89_MKK][25] = 58,
+ [1][0][RTW89_FCC][25] = 54,
+ [1][0][RTW89_ETSI][25] = 40,
+ [1][0][RTW89_MKK][25] = 68,
[1][0][RTW89_IC][25] = 127,
[1][0][RTW89_ACMA][25] = 127,
- [1][0][RTW89_FCC][27] = 62,
- [1][0][RTW89_ETSI][27] = 44,
- [1][0][RTW89_MKK][27] = 58,
+ [1][0][RTW89_FCC][27] = 54,
+ [1][0][RTW89_ETSI][27] = 42,
+ [1][0][RTW89_MKK][27] = 68,
[1][0][RTW89_IC][27] = 127,
[1][0][RTW89_ACMA][27] = 127,
- [1][0][RTW89_FCC][29] = 62,
- [1][0][RTW89_ETSI][29] = 44,
- [1][0][RTW89_MKK][29] = 58,
+ [1][0][RTW89_FCC][29] = 54,
+ [1][0][RTW89_ETSI][29] = 42,
+ [1][0][RTW89_MKK][29] = 68,
[1][0][RTW89_IC][29] = 127,
[1][0][RTW89_ACMA][29] = 127,
- [1][0][RTW89_FCC][31] = 62,
- [1][0][RTW89_ETSI][31] = 44,
- [1][0][RTW89_MKK][31] = 58,
+ [1][0][RTW89_FCC][31] = 54,
+ [1][0][RTW89_ETSI][31] = 42,
+ [1][0][RTW89_MKK][31] = 68,
[1][0][RTW89_IC][31] = 62,
[1][0][RTW89_ACMA][31] = 34,
- [1][0][RTW89_FCC][33] = 62,
- [1][0][RTW89_ETSI][33] = 44,
- [1][0][RTW89_MKK][33] = 58,
+ [1][0][RTW89_FCC][33] = 54,
+ [1][0][RTW89_ETSI][33] = 42,
+ [1][0][RTW89_MKK][33] = 68,
[1][0][RTW89_IC][33] = 62,
[1][0][RTW89_ACMA][33] = 34,
- [1][0][RTW89_FCC][35] = 62,
- [1][0][RTW89_ETSI][35] = 44,
- [1][0][RTW89_MKK][35] = 58,
+ [1][0][RTW89_FCC][35] = 54,
+ [1][0][RTW89_ETSI][35] = 42,
+ [1][0][RTW89_MKK][35] = 68,
[1][0][RTW89_IC][35] = 62,
[1][0][RTW89_ACMA][35] = 34,
- [1][0][RTW89_FCC][37] = 64,
+ [1][0][RTW89_FCC][37] = 56,
[1][0][RTW89_ETSI][37] = 127,
- [1][0][RTW89_MKK][37] = 52,
+ [1][0][RTW89_MKK][37] = 68,
[1][0][RTW89_IC][37] = 64,
[1][0][RTW89_ACMA][37] = 64,
- [1][0][RTW89_FCC][38] = 84,
- [1][0][RTW89_ETSI][38] = 30,
+ [1][0][RTW89_FCC][38] = 76,
+ [1][0][RTW89_ETSI][38] = 28,
[1][0][RTW89_MKK][38] = 127,
[1][0][RTW89_IC][38] = 84,
[1][0][RTW89_ACMA][38] = 84,
- [1][0][RTW89_FCC][40] = 84,
- [1][0][RTW89_ETSI][40] = 30,
+ [1][0][RTW89_FCC][40] = 76,
+ [1][0][RTW89_ETSI][40] = 28,
[1][0][RTW89_MKK][40] = 127,
[1][0][RTW89_IC][40] = 84,
[1][0][RTW89_ACMA][40] = 84,
- [1][0][RTW89_FCC][42] = 84,
- [1][0][RTW89_ETSI][42] = 30,
+ [1][0][RTW89_FCC][42] = 76,
+ [1][0][RTW89_ETSI][42] = 28,
[1][0][RTW89_MKK][42] = 127,
[1][0][RTW89_IC][42] = 84,
[1][0][RTW89_ACMA][42] = 84,
- [1][0][RTW89_FCC][44] = 84,
- [1][0][RTW89_ETSI][44] = 30,
+ [1][0][RTW89_FCC][44] = 76,
+ [1][0][RTW89_ETSI][44] = 28,
[1][0][RTW89_MKK][44] = 127,
[1][0][RTW89_IC][44] = 84,
[1][0][RTW89_ACMA][44] = 84,
- [1][0][RTW89_FCC][46] = 84,
- [1][0][RTW89_ETSI][46] = 30,
+ [1][0][RTW89_FCC][46] = 76,
+ [1][0][RTW89_ETSI][46] = 28,
[1][0][RTW89_MKK][46] = 127,
[1][0][RTW89_IC][46] = 84,
[1][0][RTW89_ACMA][46] = 84,
- [1][0][RTW89_FCC][48] = 44,
+ [1][0][RTW89_FCC][48] = 36,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
[1][0][RTW89_IC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
- [1][0][RTW89_FCC][50] = 44,
+ [1][0][RTW89_FCC][50] = 36,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
[1][0][RTW89_IC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
- [1][0][RTW89_FCC][52] = 44,
+ [1][0][RTW89_FCC][52] = 36,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
[1][0][RTW89_IC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
- [1][1][RTW89_FCC][0] = 42,
- [1][1][RTW89_ETSI][0] = 32,
- [1][1][RTW89_MKK][0] = 22,
+ [1][1][RTW89_FCC][0] = 34,
+ [1][1][RTW89_ETSI][0] = 30,
+ [1][1][RTW89_MKK][0] = 34,
[1][1][RTW89_IC][0] = 10,
[1][1][RTW89_ACMA][0] = 22,
- [1][1][RTW89_FCC][2] = 44,
- [1][1][RTW89_ETSI][2] = 32,
- [1][1][RTW89_MKK][2] = 22,
+ [1][1][RTW89_FCC][2] = 36,
+ [1][1][RTW89_ETSI][2] = 30,
+ [1][1][RTW89_MKK][2] = 34,
[1][1][RTW89_IC][2] = 14,
[1][1][RTW89_ACMA][2] = 22,
- [1][1][RTW89_FCC][4] = 42,
- [1][1][RTW89_ETSI][4] = 32,
- [1][1][RTW89_MKK][4] = 20,
+ [1][1][RTW89_FCC][4] = 34,
+ [1][1][RTW89_ETSI][4] = 30,
+ [1][1][RTW89_MKK][4] = 26,
[1][1][RTW89_IC][4] = 10,
[1][1][RTW89_ACMA][4] = 22,
- [1][1][RTW89_FCC][6] = 42,
- [1][1][RTW89_ETSI][6] = 32,
- [1][1][RTW89_MKK][6] = 20,
+ [1][1][RTW89_FCC][6] = 34,
+ [1][1][RTW89_ETSI][6] = 30,
+ [1][1][RTW89_MKK][6] = 26,
[1][1][RTW89_IC][6] = 10,
[1][1][RTW89_ACMA][6] = 22,
- [1][1][RTW89_FCC][8] = 44,
- [1][1][RTW89_ETSI][8] = 32,
+ [1][1][RTW89_FCC][8] = 36,
+ [1][1][RTW89_ETSI][8] = 30,
[1][1][RTW89_MKK][8] = 20,
[1][1][RTW89_IC][8] = 44,
[1][1][RTW89_ACMA][8] = 22,
- [1][1][RTW89_FCC][10] = 44,
- [1][1][RTW89_ETSI][10] = 32,
+ [1][1][RTW89_FCC][10] = 36,
+ [1][1][RTW89_ETSI][10] = 30,
[1][1][RTW89_MKK][10] = 20,
[1][1][RTW89_IC][10] = 44,
[1][1][RTW89_ACMA][10] = 22,
- [1][1][RTW89_FCC][12] = 46,
- [1][1][RTW89_ETSI][12] = 32,
- [1][1][RTW89_MKK][12] = 22,
+ [1][1][RTW89_FCC][12] = 38,
+ [1][1][RTW89_ETSI][12] = 30,
+ [1][1][RTW89_MKK][12] = 34,
[1][1][RTW89_IC][12] = 46,
[1][1][RTW89_ACMA][12] = 22,
- [1][1][RTW89_FCC][14] = 42,
- [1][1][RTW89_ETSI][14] = 32,
- [1][1][RTW89_MKK][14] = 22,
+ [1][1][RTW89_FCC][14] = 34,
+ [1][1][RTW89_ETSI][14] = 30,
+ [1][1][RTW89_MKK][14] = 34,
[1][1][RTW89_IC][14] = 40,
[1][1][RTW89_ACMA][14] = 22,
- [1][1][RTW89_FCC][15] = 42,
- [1][1][RTW89_ETSI][15] = 30,
- [1][1][RTW89_MKK][15] = 42,
+ [1][1][RTW89_FCC][15] = 34,
+ [1][1][RTW89_ETSI][15] = 28,
+ [1][1][RTW89_MKK][15] = 56,
[1][1][RTW89_IC][15] = 42,
[1][1][RTW89_ACMA][15] = 22,
- [1][1][RTW89_FCC][17] = 42,
- [1][1][RTW89_ETSI][17] = 30,
- [1][1][RTW89_MKK][17] = 44,
+ [1][1][RTW89_FCC][17] = 34,
+ [1][1][RTW89_ETSI][17] = 28,
+ [1][1][RTW89_MKK][17] = 58,
[1][1][RTW89_IC][17] = 42,
[1][1][RTW89_ACMA][17] = 22,
- [1][1][RTW89_FCC][19] = 42,
- [1][1][RTW89_ETSI][19] = 30,
- [1][1][RTW89_MKK][19] = 44,
+ [1][1][RTW89_FCC][19] = 34,
+ [1][1][RTW89_ETSI][19] = 28,
+ [1][1][RTW89_MKK][19] = 58,
[1][1][RTW89_IC][19] = 42,
[1][1][RTW89_ACMA][19] = 22,
- [1][1][RTW89_FCC][21] = 42,
- [1][1][RTW89_ETSI][21] = 30,
- [1][1][RTW89_MKK][21] = 44,
+ [1][1][RTW89_FCC][21] = 34,
+ [1][1][RTW89_ETSI][21] = 28,
+ [1][1][RTW89_MKK][21] = 58,
[1][1][RTW89_IC][21] = 42,
[1][1][RTW89_ACMA][21] = 22,
- [1][1][RTW89_FCC][23] = 42,
- [1][1][RTW89_ETSI][23] = 30,
- [1][1][RTW89_MKK][23] = 44,
+ [1][1][RTW89_FCC][23] = 34,
+ [1][1][RTW89_ETSI][23] = 28,
+ [1][1][RTW89_MKK][23] = 58,
[1][1][RTW89_IC][23] = 42,
[1][1][RTW89_ACMA][23] = 22,
- [1][1][RTW89_FCC][25] = 42,
- [1][1][RTW89_ETSI][25] = 30,
- [1][1][RTW89_MKK][25] = 44,
+ [1][1][RTW89_FCC][25] = 34,
+ [1][1][RTW89_ETSI][25] = 28,
+ [1][1][RTW89_MKK][25] = 58,
[1][1][RTW89_IC][25] = 127,
[1][1][RTW89_ACMA][25] = 127,
- [1][1][RTW89_FCC][27] = 42,
- [1][1][RTW89_ETSI][27] = 32,
- [1][1][RTW89_MKK][27] = 44,
+ [1][1][RTW89_FCC][27] = 34,
+ [1][1][RTW89_ETSI][27] = 30,
+ [1][1][RTW89_MKK][27] = 58,
[1][1][RTW89_IC][27] = 127,
[1][1][RTW89_ACMA][27] = 127,
- [1][1][RTW89_FCC][29] = 42,
- [1][1][RTW89_ETSI][29] = 32,
- [1][1][RTW89_MKK][29] = 44,
+ [1][1][RTW89_FCC][29] = 34,
+ [1][1][RTW89_ETSI][29] = 30,
+ [1][1][RTW89_MKK][29] = 58,
[1][1][RTW89_IC][29] = 127,
[1][1][RTW89_ACMA][29] = 127,
- [1][1][RTW89_FCC][31] = 42,
- [1][1][RTW89_ETSI][31] = 32,
- [1][1][RTW89_MKK][31] = 44,
+ [1][1][RTW89_FCC][31] = 34,
+ [1][1][RTW89_ETSI][31] = 30,
+ [1][1][RTW89_MKK][31] = 58,
[1][1][RTW89_IC][31] = 38,
[1][1][RTW89_ACMA][31] = 22,
- [1][1][RTW89_FCC][33] = 40,
- [1][1][RTW89_ETSI][33] = 32,
- [1][1][RTW89_MKK][33] = 44,
+ [1][1][RTW89_FCC][33] = 32,
+ [1][1][RTW89_ETSI][33] = 30,
+ [1][1][RTW89_MKK][33] = 58,
[1][1][RTW89_IC][33] = 38,
[1][1][RTW89_ACMA][33] = 22,
- [1][1][RTW89_FCC][35] = 40,
- [1][1][RTW89_ETSI][35] = 32,
- [1][1][RTW89_MKK][35] = 44,
+ [1][1][RTW89_FCC][35] = 32,
+ [1][1][RTW89_ETSI][35] = 30,
+ [1][1][RTW89_MKK][35] = 58,
[1][1][RTW89_IC][35] = 38,
[1][1][RTW89_ACMA][35] = 22,
- [1][1][RTW89_FCC][37] = 48,
+ [1][1][RTW89_FCC][37] = 40,
[1][1][RTW89_ETSI][37] = 127,
- [1][1][RTW89_MKK][37] = 42,
+ [1][1][RTW89_MKK][37] = 58,
[1][1][RTW89_IC][37] = 48,
[1][1][RTW89_ACMA][37] = 48,
- [1][1][RTW89_FCC][38] = 84,
- [1][1][RTW89_ETSI][38] = 18,
+ [1][1][RTW89_FCC][38] = 76,
+ [1][1][RTW89_ETSI][38] = 16,
[1][1][RTW89_MKK][38] = 127,
[1][1][RTW89_IC][38] = 84,
[1][1][RTW89_ACMA][38] = 82,
- [1][1][RTW89_FCC][40] = 84,
- [1][1][RTW89_ETSI][40] = 18,
+ [1][1][RTW89_FCC][40] = 76,
+ [1][1][RTW89_ETSI][40] = 16,
[1][1][RTW89_MKK][40] = 127,
[1][1][RTW89_IC][40] = 84,
[1][1][RTW89_ACMA][40] = 82,
- [1][1][RTW89_FCC][42] = 84,
- [1][1][RTW89_ETSI][42] = 18,
+ [1][1][RTW89_FCC][42] = 76,
+ [1][1][RTW89_ETSI][42] = 16,
[1][1][RTW89_MKK][42] = 127,
[1][1][RTW89_IC][42] = 84,
[1][1][RTW89_ACMA][42] = 84,
- [1][1][RTW89_FCC][44] = 84,
- [1][1][RTW89_ETSI][44] = 18,
+ [1][1][RTW89_FCC][44] = 76,
+ [1][1][RTW89_ETSI][44] = 16,
[1][1][RTW89_MKK][44] = 127,
[1][1][RTW89_IC][44] = 84,
[1][1][RTW89_ACMA][44] = 84,
- [1][1][RTW89_FCC][46] = 84,
- [1][1][RTW89_ETSI][46] = 18,
+ [1][1][RTW89_FCC][46] = 76,
+ [1][1][RTW89_ETSI][46] = 16,
[1][1][RTW89_MKK][46] = 127,
[1][1][RTW89_IC][46] = 84,
[1][1][RTW89_ACMA][46] = 84,
- [1][1][RTW89_FCC][48] = 32,
+ [1][1][RTW89_FCC][48] = 24,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
[1][1][RTW89_IC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
- [1][1][RTW89_FCC][50] = 32,
+ [1][1][RTW89_FCC][50] = 24,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
[1][1][RTW89_IC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
- [1][1][RTW89_FCC][52] = 32,
+ [1][1][RTW89_FCC][52] = 24,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
[1][1][RTW89_IC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
- [2][0][RTW89_FCC][0] = 70,
- [2][0][RTW89_ETSI][0] = 54,
- [2][0][RTW89_MKK][0] = 48,
+ [2][0][RTW89_FCC][0] = 62,
+ [2][0][RTW89_ETSI][0] = 52,
+ [2][0][RTW89_MKK][0] = 60,
[2][0][RTW89_IC][0] = 46,
[2][0][RTW89_ACMA][0] = 48,
- [2][0][RTW89_FCC][2] = 70,
- [2][0][RTW89_ETSI][2] = 54,
- [2][0][RTW89_MKK][2] = 48,
+ [2][0][RTW89_FCC][2] = 62,
+ [2][0][RTW89_ETSI][2] = 52,
+ [2][0][RTW89_MKK][2] = 60,
[2][0][RTW89_IC][2] = 46,
[2][0][RTW89_ACMA][2] = 48,
- [2][0][RTW89_FCC][4] = 70,
- [2][0][RTW89_ETSI][4] = 54,
- [2][0][RTW89_MKK][4] = 48,
+ [2][0][RTW89_FCC][4] = 62,
+ [2][0][RTW89_ETSI][4] = 52,
+ [2][0][RTW89_MKK][4] = 50,
[2][0][RTW89_IC][4] = 46,
[2][0][RTW89_ACMA][4] = 48,
- [2][0][RTW89_FCC][6] = 70,
- [2][0][RTW89_ETSI][6] = 54,
- [2][0][RTW89_MKK][6] = 48,
+ [2][0][RTW89_FCC][6] = 62,
+ [2][0][RTW89_ETSI][6] = 52,
+ [2][0][RTW89_MKK][6] = 50,
[2][0][RTW89_IC][6] = 46,
[2][0][RTW89_ACMA][6] = 48,
- [2][0][RTW89_FCC][8] = 70,
- [2][0][RTW89_ETSI][8] = 54,
- [2][0][RTW89_MKK][8] = 48,
+ [2][0][RTW89_FCC][8] = 62,
+ [2][0][RTW89_ETSI][8] = 52,
+ [2][0][RTW89_MKK][8] = 44,
[2][0][RTW89_IC][8] = 66,
[2][0][RTW89_ACMA][8] = 48,
- [2][0][RTW89_FCC][10] = 70,
- [2][0][RTW89_ETSI][10] = 54,
- [2][0][RTW89_MKK][10] = 48,
+ [2][0][RTW89_FCC][10] = 62,
+ [2][0][RTW89_ETSI][10] = 52,
+ [2][0][RTW89_MKK][10] = 44,
[2][0][RTW89_IC][10] = 66,
[2][0][RTW89_ACMA][10] = 48,
- [2][0][RTW89_FCC][12] = 70,
- [2][0][RTW89_ETSI][12] = 54,
- [2][0][RTW89_MKK][12] = 46,
+ [2][0][RTW89_FCC][12] = 62,
+ [2][0][RTW89_ETSI][12] = 52,
+ [2][0][RTW89_MKK][12] = 58,
[2][0][RTW89_IC][12] = 66,
[2][0][RTW89_ACMA][12] = 48,
- [2][0][RTW89_FCC][14] = 70,
- [2][0][RTW89_ETSI][14] = 54,
- [2][0][RTW89_MKK][14] = 46,
+ [2][0][RTW89_FCC][14] = 62,
+ [2][0][RTW89_ETSI][14] = 52,
+ [2][0][RTW89_MKK][14] = 58,
[2][0][RTW89_IC][14] = 66,
[2][0][RTW89_ACMA][14] = 48,
- [2][0][RTW89_FCC][15] = 70,
- [2][0][RTW89_ETSI][15] = 54,
+ [2][0][RTW89_FCC][15] = 62,
+ [2][0][RTW89_ETSI][15] = 52,
[2][0][RTW89_MKK][15] = 68,
[2][0][RTW89_IC][15] = 70,
[2][0][RTW89_ACMA][15] = 48,
- [2][0][RTW89_FCC][17] = 70,
- [2][0][RTW89_ETSI][17] = 54,
- [2][0][RTW89_MKK][17] = 70,
+ [2][0][RTW89_FCC][17] = 62,
+ [2][0][RTW89_ETSI][17] = 52,
+ [2][0][RTW89_MKK][17] = 74,
[2][0][RTW89_IC][17] = 70,
[2][0][RTW89_ACMA][17] = 48,
- [2][0][RTW89_FCC][19] = 70,
- [2][0][RTW89_ETSI][19] = 54,
- [2][0][RTW89_MKK][19] = 70,
+ [2][0][RTW89_FCC][19] = 62,
+ [2][0][RTW89_ETSI][19] = 52,
+ [2][0][RTW89_MKK][19] = 74,
[2][0][RTW89_IC][19] = 70,
[2][0][RTW89_ACMA][19] = 48,
- [2][0][RTW89_FCC][21] = 70,
- [2][0][RTW89_ETSI][21] = 54,
- [2][0][RTW89_MKK][21] = 70,
+ [2][0][RTW89_FCC][21] = 62,
+ [2][0][RTW89_ETSI][21] = 52,
+ [2][0][RTW89_MKK][21] = 74,
[2][0][RTW89_IC][21] = 70,
[2][0][RTW89_ACMA][21] = 48,
- [2][0][RTW89_FCC][23] = 70,
- [2][0][RTW89_ETSI][23] = 54,
- [2][0][RTW89_MKK][23] = 70,
+ [2][0][RTW89_FCC][23] = 62,
+ [2][0][RTW89_ETSI][23] = 52,
+ [2][0][RTW89_MKK][23] = 74,
[2][0][RTW89_IC][23] = 70,
[2][0][RTW89_ACMA][23] = 48,
- [2][0][RTW89_FCC][25] = 70,
- [2][0][RTW89_ETSI][25] = 54,
- [2][0][RTW89_MKK][25] = 70,
+ [2][0][RTW89_FCC][25] = 62,
+ [2][0][RTW89_ETSI][25] = 52,
+ [2][0][RTW89_MKK][25] = 74,
[2][0][RTW89_IC][25] = 127,
[2][0][RTW89_ACMA][25] = 127,
- [2][0][RTW89_FCC][27] = 70,
- [2][0][RTW89_ETSI][27] = 54,
- [2][0][RTW89_MKK][27] = 70,
+ [2][0][RTW89_FCC][27] = 62,
+ [2][0][RTW89_ETSI][27] = 52,
+ [2][0][RTW89_MKK][27] = 74,
[2][0][RTW89_IC][27] = 127,
[2][0][RTW89_ACMA][27] = 127,
- [2][0][RTW89_FCC][29] = 70,
- [2][0][RTW89_ETSI][29] = 54,
- [2][0][RTW89_MKK][29] = 70,
+ [2][0][RTW89_FCC][29] = 62,
+ [2][0][RTW89_ETSI][29] = 52,
+ [2][0][RTW89_MKK][29] = 74,
[2][0][RTW89_IC][29] = 127,
[2][0][RTW89_ACMA][29] = 127,
- [2][0][RTW89_FCC][31] = 70,
- [2][0][RTW89_ETSI][31] = 54,
- [2][0][RTW89_MKK][31] = 70,
+ [2][0][RTW89_FCC][31] = 62,
+ [2][0][RTW89_ETSI][31] = 52,
+ [2][0][RTW89_MKK][31] = 74,
[2][0][RTW89_IC][31] = 72,
[2][0][RTW89_ACMA][31] = 48,
- [2][0][RTW89_FCC][33] = 72,
- [2][0][RTW89_ETSI][33] = 54,
- [2][0][RTW89_MKK][33] = 70,
+ [2][0][RTW89_FCC][33] = 64,
+ [2][0][RTW89_ETSI][33] = 52,
+ [2][0][RTW89_MKK][33] = 74,
[2][0][RTW89_IC][33] = 72,
[2][0][RTW89_ACMA][33] = 48,
- [2][0][RTW89_FCC][35] = 72,
- [2][0][RTW89_ETSI][35] = 54,
- [2][0][RTW89_MKK][35] = 70,
+ [2][0][RTW89_FCC][35] = 64,
+ [2][0][RTW89_ETSI][35] = 52,
+ [2][0][RTW89_MKK][35] = 74,
[2][0][RTW89_IC][35] = 72,
[2][0][RTW89_ACMA][35] = 48,
- [2][0][RTW89_FCC][37] = 70,
+ [2][0][RTW89_FCC][37] = 62,
[2][0][RTW89_ETSI][37] = 127,
- [2][0][RTW89_MKK][37] = 66,
+ [2][0][RTW89_MKK][37] = 74,
[2][0][RTW89_IC][37] = 70,
[2][0][RTW89_ACMA][37] = 76,
- [2][0][RTW89_FCC][38] = 84,
- [2][0][RTW89_ETSI][38] = 30,
+ [2][0][RTW89_FCC][38] = 76,
+ [2][0][RTW89_ETSI][38] = 28,
[2][0][RTW89_MKK][38] = 127,
[2][0][RTW89_IC][38] = 84,
[2][0][RTW89_ACMA][38] = 84,
- [2][0][RTW89_FCC][40] = 84,
- [2][0][RTW89_ETSI][40] = 30,
+ [2][0][RTW89_FCC][40] = 76,
+ [2][0][RTW89_ETSI][40] = 28,
[2][0][RTW89_MKK][40] = 127,
[2][0][RTW89_IC][40] = 84,
[2][0][RTW89_ACMA][40] = 84,
- [2][0][RTW89_FCC][42] = 84,
- [2][0][RTW89_ETSI][42] = 30,
+ [2][0][RTW89_FCC][42] = 76,
+ [2][0][RTW89_ETSI][42] = 28,
[2][0][RTW89_MKK][42] = 127,
[2][0][RTW89_IC][42] = 84,
[2][0][RTW89_ACMA][42] = 84,
- [2][0][RTW89_FCC][44] = 84,
- [2][0][RTW89_ETSI][44] = 30,
+ [2][0][RTW89_FCC][44] = 76,
+ [2][0][RTW89_ETSI][44] = 28,
[2][0][RTW89_MKK][44] = 127,
[2][0][RTW89_IC][44] = 84,
[2][0][RTW89_ACMA][44] = 84,
- [2][0][RTW89_FCC][46] = 84,
- [2][0][RTW89_ETSI][46] = 30,
+ [2][0][RTW89_FCC][46] = 76,
+ [2][0][RTW89_ETSI][46] = 28,
[2][0][RTW89_MKK][46] = 127,
[2][0][RTW89_IC][46] = 84,
[2][0][RTW89_ACMA][46] = 84,
- [2][0][RTW89_FCC][48] = 56,
+ [2][0][RTW89_FCC][48] = 48,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
[2][0][RTW89_IC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
- [2][0][RTW89_FCC][50] = 56,
+ [2][0][RTW89_FCC][50] = 48,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
[2][0][RTW89_IC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
- [2][0][RTW89_FCC][52] = 56,
+ [2][0][RTW89_FCC][52] = 48,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
[2][0][RTW89_IC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
- [2][1][RTW89_FCC][0] = 50,
- [2][1][RTW89_ETSI][0] = 42,
- [2][1][RTW89_MKK][0] = 36,
+ [2][1][RTW89_FCC][0] = 42,
+ [2][1][RTW89_ETSI][0] = 40,
+ [2][1][RTW89_MKK][0] = 44,
[2][1][RTW89_IC][0] = 20,
[2][1][RTW89_ACMA][0] = 36,
- [2][1][RTW89_FCC][2] = 50,
- [2][1][RTW89_ETSI][2] = 42,
- [2][1][RTW89_MKK][2] = 36,
+ [2][1][RTW89_FCC][2] = 42,
+ [2][1][RTW89_ETSI][2] = 40,
+ [2][1][RTW89_MKK][2] = 44,
[2][1][RTW89_IC][2] = 18,
[2][1][RTW89_ACMA][2] = 36,
- [2][1][RTW89_FCC][4] = 50,
- [2][1][RTW89_ETSI][4] = 42,
+ [2][1][RTW89_FCC][4] = 42,
+ [2][1][RTW89_ETSI][4] = 40,
[2][1][RTW89_MKK][4] = 36,
[2][1][RTW89_IC][4] = 22,
[2][1][RTW89_ACMA][4] = 36,
- [2][1][RTW89_FCC][6] = 50,
- [2][1][RTW89_ETSI][6] = 42,
+ [2][1][RTW89_FCC][6] = 42,
+ [2][1][RTW89_ETSI][6] = 40,
[2][1][RTW89_MKK][6] = 36,
[2][1][RTW89_IC][6] = 22,
[2][1][RTW89_ACMA][6] = 36,
- [2][1][RTW89_FCC][8] = 50,
- [2][1][RTW89_ETSI][8] = 42,
- [2][1][RTW89_MKK][8] = 34,
+ [2][1][RTW89_FCC][8] = 42,
+ [2][1][RTW89_ETSI][8] = 40,
+ [2][1][RTW89_MKK][8] = 32,
[2][1][RTW89_IC][8] = 50,
[2][1][RTW89_ACMA][8] = 36,
- [2][1][RTW89_FCC][10] = 50,
- [2][1][RTW89_ETSI][10] = 42,
- [2][1][RTW89_MKK][10] = 34,
+ [2][1][RTW89_FCC][10] = 42,
+ [2][1][RTW89_ETSI][10] = 40,
+ [2][1][RTW89_MKK][10] = 32,
[2][1][RTW89_IC][10] = 50,
[2][1][RTW89_ACMA][10] = 36,
- [2][1][RTW89_FCC][12] = 52,
- [2][1][RTW89_ETSI][12] = 42,
- [2][1][RTW89_MKK][12] = 36,
+ [2][1][RTW89_FCC][12] = 44,
+ [2][1][RTW89_ETSI][12] = 40,
+ [2][1][RTW89_MKK][12] = 44,
[2][1][RTW89_IC][12] = 52,
[2][1][RTW89_ACMA][12] = 36,
- [2][1][RTW89_FCC][14] = 52,
- [2][1][RTW89_ETSI][14] = 42,
- [2][1][RTW89_MKK][14] = 36,
+ [2][1][RTW89_FCC][14] = 44,
+ [2][1][RTW89_ETSI][14] = 40,
+ [2][1][RTW89_MKK][14] = 44,
[2][1][RTW89_IC][14] = 52,
[2][1][RTW89_ACMA][14] = 36,
- [2][1][RTW89_FCC][15] = 50,
- [2][1][RTW89_ETSI][15] = 42,
- [2][1][RTW89_MKK][15] = 54,
+ [2][1][RTW89_FCC][15] = 42,
+ [2][1][RTW89_ETSI][15] = 40,
+ [2][1][RTW89_MKK][15] = 66,
[2][1][RTW89_IC][15] = 50,
[2][1][RTW89_ACMA][15] = 36,
- [2][1][RTW89_FCC][17] = 50,
- [2][1][RTW89_ETSI][17] = 42,
- [2][1][RTW89_MKK][17] = 56,
+ [2][1][RTW89_FCC][17] = 42,
+ [2][1][RTW89_ETSI][17] = 40,
+ [2][1][RTW89_MKK][17] = 66,
[2][1][RTW89_IC][17] = 50,
[2][1][RTW89_ACMA][17] = 36,
- [2][1][RTW89_FCC][19] = 50,
- [2][1][RTW89_ETSI][19] = 42,
- [2][1][RTW89_MKK][19] = 56,
+ [2][1][RTW89_FCC][19] = 42,
+ [2][1][RTW89_ETSI][19] = 40,
+ [2][1][RTW89_MKK][19] = 66,
[2][1][RTW89_IC][19] = 50,
[2][1][RTW89_ACMA][19] = 36,
- [2][1][RTW89_FCC][21] = 50,
- [2][1][RTW89_ETSI][21] = 42,
- [2][1][RTW89_MKK][21] = 56,
+ [2][1][RTW89_FCC][21] = 42,
+ [2][1][RTW89_ETSI][21] = 40,
+ [2][1][RTW89_MKK][21] = 66,
[2][1][RTW89_IC][21] = 50,
[2][1][RTW89_ACMA][21] = 36,
- [2][1][RTW89_FCC][23] = 50,
- [2][1][RTW89_ETSI][23] = 42,
- [2][1][RTW89_MKK][23] = 56,
+ [2][1][RTW89_FCC][23] = 42,
+ [2][1][RTW89_ETSI][23] = 40,
+ [2][1][RTW89_MKK][23] = 66,
[2][1][RTW89_IC][23] = 50,
[2][1][RTW89_ACMA][23] = 36,
- [2][1][RTW89_FCC][25] = 50,
- [2][1][RTW89_ETSI][25] = 42,
- [2][1][RTW89_MKK][25] = 56,
+ [2][1][RTW89_FCC][25] = 42,
+ [2][1][RTW89_ETSI][25] = 40,
+ [2][1][RTW89_MKK][25] = 66,
[2][1][RTW89_IC][25] = 127,
[2][1][RTW89_ACMA][25] = 127,
- [2][1][RTW89_FCC][27] = 50,
- [2][1][RTW89_ETSI][27] = 42,
- [2][1][RTW89_MKK][27] = 56,
+ [2][1][RTW89_FCC][27] = 42,
+ [2][1][RTW89_ETSI][27] = 40,
+ [2][1][RTW89_MKK][27] = 66,
[2][1][RTW89_IC][27] = 127,
[2][1][RTW89_ACMA][27] = 127,
- [2][1][RTW89_FCC][29] = 50,
- [2][1][RTW89_ETSI][29] = 42,
- [2][1][RTW89_MKK][29] = 56,
+ [2][1][RTW89_FCC][29] = 42,
+ [2][1][RTW89_ETSI][29] = 40,
+ [2][1][RTW89_MKK][29] = 66,
[2][1][RTW89_IC][29] = 127,
[2][1][RTW89_ACMA][29] = 127,
- [2][1][RTW89_FCC][31] = 50,
- [2][1][RTW89_ETSI][31] = 42,
- [2][1][RTW89_MKK][31] = 56,
+ [2][1][RTW89_FCC][31] = 42,
+ [2][1][RTW89_ETSI][31] = 40,
+ [2][1][RTW89_MKK][31] = 66,
[2][1][RTW89_IC][31] = 50,
[2][1][RTW89_ACMA][31] = 36,
- [2][1][RTW89_FCC][33] = 50,
- [2][1][RTW89_ETSI][33] = 42,
- [2][1][RTW89_MKK][33] = 56,
+ [2][1][RTW89_FCC][33] = 42,
+ [2][1][RTW89_ETSI][33] = 40,
+ [2][1][RTW89_MKK][33] = 66,
[2][1][RTW89_IC][33] = 50,
[2][1][RTW89_ACMA][33] = 36,
- [2][1][RTW89_FCC][35] = 50,
- [2][1][RTW89_ETSI][35] = 42,
- [2][1][RTW89_MKK][35] = 56,
+ [2][1][RTW89_FCC][35] = 42,
+ [2][1][RTW89_ETSI][35] = 40,
+ [2][1][RTW89_MKK][35] = 66,
[2][1][RTW89_IC][35] = 50,
[2][1][RTW89_ACMA][35] = 36,
- [2][1][RTW89_FCC][37] = 50,
+ [2][1][RTW89_FCC][37] = 42,
[2][1][RTW89_ETSI][37] = 127,
- [2][1][RTW89_MKK][37] = 54,
+ [2][1][RTW89_MKK][37] = 66,
[2][1][RTW89_IC][37] = 50,
[2][1][RTW89_ACMA][37] = 60,
- [2][1][RTW89_FCC][38] = 84,
- [2][1][RTW89_ETSI][38] = 18,
+ [2][1][RTW89_FCC][38] = 76,
+ [2][1][RTW89_ETSI][38] = 16,
[2][1][RTW89_MKK][38] = 127,
[2][1][RTW89_IC][38] = 84,
[2][1][RTW89_ACMA][38] = 84,
- [2][1][RTW89_FCC][40] = 84,
- [2][1][RTW89_ETSI][40] = 18,
+ [2][1][RTW89_FCC][40] = 76,
+ [2][1][RTW89_ETSI][40] = 16,
[2][1][RTW89_MKK][40] = 127,
[2][1][RTW89_IC][40] = 84,
[2][1][RTW89_ACMA][40] = 84,
- [2][1][RTW89_FCC][42] = 84,
- [2][1][RTW89_ETSI][42] = 18,
+ [2][1][RTW89_FCC][42] = 76,
+ [2][1][RTW89_ETSI][42] = 16,
[2][1][RTW89_MKK][42] = 127,
[2][1][RTW89_IC][42] = 84,
[2][1][RTW89_ACMA][42] = 84,
- [2][1][RTW89_FCC][44] = 84,
- [2][1][RTW89_ETSI][44] = 18,
+ [2][1][RTW89_FCC][44] = 76,
+ [2][1][RTW89_ETSI][44] = 16,
[2][1][RTW89_MKK][44] = 127,
[2][1][RTW89_IC][44] = 84,
[2][1][RTW89_ACMA][44] = 84,
- [2][1][RTW89_FCC][46] = 84,
- [2][1][RTW89_ETSI][46] = 18,
+ [2][1][RTW89_FCC][46] = 76,
+ [2][1][RTW89_ETSI][46] = 16,
[2][1][RTW89_MKK][46] = 127,
[2][1][RTW89_IC][46] = 84,
[2][1][RTW89_ACMA][46] = 84,
- [2][1][RTW89_FCC][48] = 44,
+ [2][1][RTW89_FCC][48] = 36,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
[2][1][RTW89_IC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
- [2][1][RTW89_FCC][50] = 44,
+ [2][1][RTW89_FCC][50] = 36,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
[2][1][RTW89_IC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
- [2][1][RTW89_FCC][52] = 44,
+ [2][1][RTW89_FCC][52] = 36,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
[2][1][RTW89_IC][52] = 127,
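
Note on the rtw89 hunks above: they only retune table data. Each entry appears to encode a per-channel transmit-power limit indexed roughly as [bandwidth][transmit-chain count][regulatory body][channel index], with values in 0.5 dBm steps and 127 apparently serving as a "no limit defined / not allowed" sentinel. A minimal, hypothetical decoding sketch; the array shape and scaling here are assumptions for illustration, not the driver's exact layout:

        #include <stdio.h>

        #define RTW_PWR_INVALID 127      /* assumed sentinel: entry not usable */

        /* tbl[bw][ntx][regd][ch]: dimensions are illustrative only */
        static double limit_dbm(const signed char tbl[3][2][5][53],
                                int bw, int ntx, int regd, int ch)
        {
                signed char raw = tbl[bw][ntx][regd][ch];

                return raw == RTW_PWR_INVALID ? -1.0 : raw / 2.0; /* half-dBm units */
        }

        int main(void)
        {
                static const signed char tbl[3][2][5][53] = { [1][0][2][23] = 68 };

                printf("limit: %.1f dBm\n", limit_dbm(tbl, 1, 0, 2, 23));
                return 0;
        }
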
diff --git a/drivers/net/wireless/silabs/wfx/bh.c b/drivers/net/wireless/silabs/wfx/bh.c
index bcea9d5b119c..21dfdcf9cc27 100644
--- a/drivers/net/wireless/silabs/wfx/bh.c
+++ b/drivers/net/wireless/silabs/wfx/bh.c
@@ -267,7 +267,7 @@ void wfx_bh_request_rx(struct wfx_dev *wdev)
wfx_control_reg_read(wdev, &cur);
prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
complete(&wdev->hif.ctrl_ready);
- queue_work(system_highpri_wq, &wdev->hif.bh);
+ queue_work(wdev->bh_wq, &wdev->hif.bh);
if (!(cur & CTRL_NEXT_LEN_MASK))
dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
@@ -280,7 +280,7 @@ void wfx_bh_request_rx(struct wfx_dev *wdev)
/* Driver want to send data */
void wfx_bh_request_tx(struct wfx_dev *wdev)
{
- queue_work(system_highpri_wq, &wdev->hif.bh);
+ queue_work(wdev->bh_wq, &wdev->hif.bh);
}
/* If IRQ is not available, this function allow to manually poll the control register and simulate
@@ -295,7 +295,7 @@ void wfx_bh_poll_irq(struct wfx_dev *wdev)
u32 reg;
WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ");
- flush_workqueue(system_highpri_wq);
+ flush_workqueue(wdev->bh_wq);
start = ktime_get();
for (;;) {
wfx_control_reg_read(wdev, &reg);
diff --git a/drivers/net/wireless/silabs/wfx/data_rx.c b/drivers/net/wireless/silabs/wfx/data_rx.c
index a4b5ffe158e4..e099a9e65bae 100644
--- a/drivers/net/wireless/silabs/wfx/data_rx.c
+++ b/drivers/net/wireless/silabs/wfx/data_rx.c
@@ -15,6 +15,7 @@
static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
int params, tid;
if (wfx_api_older_than(wvif->wdev, 3, 6))
@@ -24,12 +25,12 @@ static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
case WLAN_ACTION_ADDBA_REQ:
params = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
tid = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
- ieee80211_start_rx_ba_session_offl(wvif->vif, mgmt->sa, tid);
+ ieee80211_start_rx_ba_session_offl(vif, mgmt->sa, tid);
break;
case WLAN_ACTION_DELBA:
params = le16_to_cpu(mgmt->u.action.u.delba.params);
tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
- ieee80211_stop_rx_ba_session_offl(wvif->vif, mgmt->sa, tid);
+ ieee80211_stop_rx_ba_session_offl(vif, mgmt->sa, tid);
break;
}
}
diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c
index e07381b2ff4d..6a5e52a96d18 100644
--- a/drivers/net/wireless/silabs/wfx/data_tx.c
+++ b/drivers/net/wireless/silabs/wfx/data_tx.c
@@ -212,11 +212,12 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr)
{
struct wfx_sta_priv *sta_priv = sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
const u8 *da = ieee80211_get_DA(hdr);
if (sta_priv && sta_priv->link_id)
return sta_priv->link_id;
- if (wvif->vif->type != NL80211_IFTYPE_AP)
+ if (vif->type != NL80211_IFTYPE_AP)
return 0;
if (is_multicast_ether_addr(da))
return 0;
diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c
index ae3cc5919dcd..2b92c227efbc 100644
--- a/drivers/net/wireless/silabs/wfx/hif_tx.c
+++ b/drivers/net/wireless/silabs/wfx/hif_tx.c
@@ -73,7 +73,7 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request,
if (no_reply) {
/* Chip won't reply. Ensure the wq has send the buffer before to continue. */
- flush_workqueue(system_highpri_wq);
+ flush_workqueue(wdev->bh_wq);
ret = 0;
goto end;
}
diff --git a/drivers/net/wireless/silabs/wfx/key.c b/drivers/net/wireless/silabs/wfx/key.c
index 8f23e8d42bd4..196d64ef68f3 100644
--- a/drivers/net/wireless/silabs/wfx/key.c
+++ b/drivers/net/wireless/silabs/wfx/key.c
@@ -156,6 +156,7 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct wfx_dev *wdev = wvif->wdev;
int idx = wfx_alloc_key(wvif->wdev);
bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
WARN(key->flags & IEEE80211_KEY_FLAG_PAIRWISE && !sta, "inconsistent data");
ieee80211_get_key_rx_seq(key, 0, &seq);
@@ -174,7 +175,7 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key, sta->addr);
else
k.type = fill_tkip_group(&k.key.tkip_group_key, key, &seq,
- wvif->vif->type);
+ vif->type);
} else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) {
if (pairwise)
k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key, sta->addr);
@@ -224,4 +225,3 @@ int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_
mutex_unlock(&wvif->wdev->conf_mutex);
return ret;
}
-
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index b93b16b900c8..bbfd3fa51921 100644
--- a/drivers/net/wireless/silabs/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -345,6 +345,10 @@ int wfx_probe(struct wfx_dev *wdev)
wdev->pdata.gpio_wakeup = NULL;
wdev->poll_irq = true;
+ wdev->bh_wq = alloc_workqueue("wfx_bh_wq", WQ_HIGHPRI, 0);
+ if (!wdev->bh_wq)
+ return -ENOMEM;
+
wfx_bh_register(wdev);
err = wfx_init_device(wdev);
@@ -458,6 +462,7 @@ irq_unsubscribe:
wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
bh_unregister:
wfx_bh_unregister(wdev);
+ destroy_workqueue(wdev->bh_wq);
return err;
}
@@ -467,6 +472,7 @@ void wfx_release(struct wfx_dev *wdev)
wfx_hif_shutdown(wdev);
wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
wfx_bh_unregister(wdev);
+ destroy_workqueue(wdev->bh_wq);
}
static int __init wfx_core_init(void)
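
Note on the wfx workqueue hunks (bh.c, hif_tx.c, main.c, wfx.h): the driver stops queueing and, more importantly, stops flushing the shared system_highpri_wq. Flushing a system-wide queue waits on unrelated work items from other users, whereas a driver-private queue only waits on the driver's own work, and its lifetime can be tied to probe/release. A minimal sketch of that lifecycle, mirroring the pattern applied above (names are illustrative):

        #include <linux/errno.h>
        #include <linux/workqueue.h>

        struct my_dev {
                struct workqueue_struct *wq;
                struct work_struct work;
        };

        static void my_work_fn(struct work_struct *work) { /* bottom-half body */ }

        static int my_probe(struct my_dev *d)
        {
                d->wq = alloc_workqueue("my_wq", WQ_HIGHPRI, 0);
                if (!d->wq)
                        return -ENOMEM;
                INIT_WORK(&d->work, my_work_fn);
                return 0;
        }

        static void my_kick(struct my_dev *d)
        {
                queue_work(d->wq, &d->work);      /* only this driver's work lands here */
        }

        static void my_remove(struct my_dev *d)
        {
                flush_workqueue(d->wq);           /* waits only on our own work items */
                destroy_workqueue(d->wq);
        }
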
diff --git a/drivers/net/wireless/silabs/wfx/queue.c b/drivers/net/wireless/silabs/wfx/queue.c
index 729825230db2..37f492e5d3be 100644
--- a/drivers/net/wireless/silabs/wfx/queue.c
+++ b/drivers/net/wireless/silabs/wfx/queue.c
@@ -205,9 +205,10 @@ unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *
bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
int i;
- if (wvif->vif->type != NL80211_IFTYPE_AP)
+ if (vif->type != NL80211_IFTYPE_AP)
return false;
for (i = 0; i < IEEE80211_NUM_ACS; ++i)
/* Note: since only AP can have mcast frames in queue and only one vif can be AP,
diff --git a/drivers/net/wireless/silabs/wfx/scan.c b/drivers/net/wireless/silabs/wfx/scan.c
index 7f34f0d322f9..16f619ed22e0 100644
--- a/drivers/net/wireless/silabs/wfx/scan.c
+++ b/drivers/net/wireless/silabs/wfx/scan.c
@@ -23,9 +23,11 @@ static void wfx_ieee80211_scan_completed_compat(struct ieee80211_hw *hw, bool ab
static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request *req)
{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct sk_buff *skb;
- skb = ieee80211_probereq_get(wvif->wdev->hw, wvif->vif->addr, NULL, 0, req->ie_len);
+ skb = ieee80211_probereq_get(wvif->wdev->hw, vif->addr, NULL, 0,
+ req->ie_len);
if (!skb)
return -ENOMEM;
@@ -37,8 +39,9 @@ static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request
static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int start_idx)
{
- int i, ret;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct ieee80211_channel *ch_start, *ch_cur;
+ int i, ret;
for (i = start_idx; i < req->n_channels; i++) {
ch_start = req->channels[start_idx];
@@ -75,8 +78,8 @@ static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req
} else {
ret = wvif->scan_nb_chan_done;
}
- if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
- wfx_hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
+ if (req->channels[start_idx]->max_power != vif->bss_conf.txpower)
+ wfx_hif_set_output_power(wvif, vif->bss_conf.txpower);
wfx_tx_unlock(wvif->wdev);
return ret;
}
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 3297d73c327a..e551fa284a43 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -98,9 +98,10 @@ static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon)
void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *total_flags, u64 unused)
{
- struct wfx_vif *wvif = NULL;
- struct wfx_dev *wdev = hw->priv;
bool filter_bssid, filter_prbreq, filter_beacon;
+ struct ieee80211_vif *vif = NULL;
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = NULL;
/* Notes:
* - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
@@ -131,8 +132,9 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
else
filter_bssid = true;
+ vif = wvif_to_vif(wvif);
/* In AP mode, chip can reply to probe request itself */
- if (*total_flags & FIF_PROBE_REQ && wvif->vif->type == NL80211_IFTYPE_AP) {
+ if (*total_flags & FIF_PROBE_REQ && vif->type == NL80211_IFTYPE_AP) {
dev_dbg(wdev->dev, "do not forward probe request in AP mode\n");
*total_flags &= ~FIF_PROBE_REQ;
}
@@ -152,19 +154,28 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
{
struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
- WARN(!wvif->vif->bss_conf.assoc && enable_ps,
+ WARN(!vif->bss_conf.assoc && enable_ps,
"enable_ps is reliable only if associated");
- if (wdev_to_wvif(wvif->wdev, 0))
- chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan;
- if (wdev_to_wvif(wvif->wdev, 1))
- chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan;
- if (chan0 && chan1 && wvif->vif->type != NL80211_IFTYPE_AP) {
+ if (wdev_to_wvif(wvif->wdev, 0)) {
+ struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0);
+ struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0);
+
+ chan0 = vif_ch0->bss_conf.chandef.chan;
+ }
+ if (wdev_to_wvif(wvif->wdev, 1)) {
+ struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1);
+ struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1);
+
+ chan1 = vif_ch1->bss_conf.chandef.chan;
+ }
+ if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) {
if (chan0->hw_value == chan1->hw_value) {
/* It is useless to enable PS if channels are the same. */
if (enable_ps)
*enable_ps = false;
- if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps)
+ if (vif->bss_conf.assoc && vif->bss_conf.ps)
dev_info(wvif->wdev->dev, "ignoring requested PS mode");
return -1;
}
@@ -177,8 +188,8 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
return 30;
}
if (enable_ps)
- *enable_ps = wvif->vif->bss_conf.ps;
- if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps)
+ *enable_ps = vif->bss_conf.ps;
+ if (vif->bss_conf.assoc && vif->bss_conf.ps)
return conf->dynamic_ps_timeout;
else
return -1;
@@ -186,10 +197,11 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
int wfx_update_pm(struct wfx_vif *wvif)
{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
int ps_timeout;
bool ps;
- if (!wvif->vif->bss_conf.assoc)
+ if (!vif->bss_conf.assoc)
return 0;
ps_timeout = wfx_get_ps_timeout(wvif, &ps);
if (!ps)
@@ -215,7 +227,8 @@ int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&wdev->conf_mutex);
assign_bit(queue, &wvif->uapsd_mask, params->uapsd);
wfx_hif_set_edca_queue_params(wvif, queue, params);
- if (wvif->vif->type == NL80211_IFTYPE_STATION && old_uapsd != wvif->uapsd_mask) {
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ old_uapsd != wvif->uapsd_mask) {
wfx_hif_set_uapsd_info(wvif, wvif->uapsd_mask);
wfx_update_pm(wvif);
}
@@ -238,24 +251,26 @@ void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
* RSSI = RCPI / 2 - 110
*/
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
int rcpi_rssi;
int cqm_evt;
rcpi_rssi = raw_rcpi_rssi / 2 - 110;
- if (rcpi_rssi <= wvif->vif->bss_conf.cqm_rssi_thold)
+ if (rcpi_rssi <= vif->bss_conf.cqm_rssi_thold)
cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
- ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
+ ieee80211_cqm_rssi_notify(vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
}
static void wfx_beacon_loss_work(struct work_struct *work)
{
struct wfx_vif *wvif = container_of(to_delayed_work(work), struct wfx_vif,
beacon_loss_work);
- struct ieee80211_bss_conf *bss_conf = &wvif->vif->bss_conf;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- ieee80211_beacon_loss(wvif->vif);
+ ieee80211_beacon_loss(vif);
schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(bss_conf->beacon_int));
}
@@ -321,15 +336,16 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ie
static int wfx_upload_ap_templates(struct wfx_vif *wvif)
{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct sk_buff *skb;
- skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
+ skb = ieee80211_beacon_get(wvif->wdev->hw, vif);
if (!skb)
return -ENOMEM;
wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN, API_RATE_INDEX_B_1MBPS);
dev_kfree_skb(skb);
- skb = ieee80211_proberesp_get(wvif->wdev->hw, wvif->vif);
+ skb = ieee80211_proberesp_get(wvif->wdev->hw, vif);
if (!skb)
return -ENOMEM;
wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES, API_RATE_INDEX_B_1MBPS);
@@ -339,7 +355,8 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif)
static void wfx_set_mfp_ap(struct wfx_vif *wvif)
{
- struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif);
const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
@@ -388,12 +405,13 @@ void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static void wfx_join(struct wfx_vif *wvif)
{
- int ret;
- struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
+ struct ieee80211_bss_conf *conf = &vif->bss_conf;
struct cfg80211_bss *bss = NULL;
u8 ssid[IEEE80211_MAX_SSID_LEN];
const u8 *ssidie = NULL;
int ssidlen = 0;
+ int ret;
wfx_tx_lock_flush(wvif->wdev);
@@ -420,7 +438,7 @@ static void wfx_join(struct wfx_vif *wvif)
wvif->join_in_progress = true;
ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
if (ret) {
- ieee80211_connection_loss(wvif->vif);
+ ieee80211_connection_loss(vif);
wfx_reset(wvif);
} else {
/* Due to beacon filtering it is possible that the AP's beacon is not known for the
@@ -434,13 +452,14 @@ static void wfx_join(struct wfx_vif *wvif)
static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *info)
{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct ieee80211_sta *sta = NULL;
int ampdu_density = 0;
bool greenfield = false;
rcu_read_lock(); /* protect sta */
if (info->bssid && !info->ibss_joined)
- sta = ieee80211_find_sta(wvif->vif, info->bssid);
+ sta = ieee80211_find_sta(vif, info->bssid);
if (sta && sta->deflink.ht_cap.ht_supported)
ampdu_density = sta->deflink.ht_cap.ampdu_density;
if (sta && sta->deflink.ht_cap.ht_supported &&
@@ -561,11 +580,13 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static int wfx_update_tim(struct wfx_vif *wvif)
{
+ struct ieee80211_vif *vif = wvif_to_vif(wvif);
struct sk_buff *skb;
u16 tim_offset, tim_length;
u8 *tim_ptr;
- skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif, &tim_offset, &tim_length);
+ skb = ieee80211_beacon_get_tim(wvif->wdev->hw, vif, &tim_offset,
+ &tim_length);
if (!skb)
return -ENOENT;
tim_ptr = skb->data + tim_offset;
@@ -707,8 +728,6 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return -EOPNOTSUPP;
}
- /* FIXME: prefer use of container_of() to get vif */
- wvif->vif = vif;
wvif->wdev = wdev;
wvif->link_id_map = 1; /* link-id 0 is reserved for multicast */
@@ -767,7 +786,6 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
cancel_delayed_work_sync(&wvif->beacon_loss_work);
wdev->vif[wvif->id] = NULL;
- wvif->vif = NULL;
mutex_unlock(&wdev->conf_mutex);
diff --git a/drivers/net/wireless/silabs/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h
index 6594cc647c2f..13ba84b3b2c3 100644
--- a/drivers/net/wireless/silabs/wfx/wfx.h
+++ b/drivers/net/wireless/silabs/wfx/wfx.h
@@ -57,11 +57,11 @@ struct wfx_dev {
struct mutex rx_stats_lock;
struct wfx_hif_tx_power_loop_info tx_power_loop_info;
struct mutex tx_power_loop_info_lock;
+ struct workqueue_struct *bh_wq;
};
struct wfx_vif {
struct wfx_dev *wdev;
- struct ieee80211_vif *vif;
struct ieee80211_channel *channel;
int id;
@@ -91,6 +91,11 @@ struct wfx_vif {
struct completion set_pm_mode_complete;
};
+static inline struct ieee80211_vif *wvif_to_vif(struct wfx_vif *wvif)
+{
+ return container_of((void *)wvif, struct ieee80211_vif, drv_priv);
+}
+
static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
{
if (vif_id >= ARRAY_SIZE(wdev->vif)) {
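
Note on the wvif_to_vif() change running through the wfx hunks: mac80211 embeds the driver's private area (drv_priv) inside struct ieee80211_vif, so the owning vif can always be recovered with container_of() and the stored wvif->vif back-pointer (plus the window where it was NULL between add/remove) becomes unnecessary. A minimal sketch of the same recover-the-container pattern with stand-in types:

        #include <linux/kernel.h>        /* container_of() */
        #include <linux/types.h>

        struct wrapper {                 /* illustrative stand-ins, not the wfx types */
                int state;
                u8 priv[64];             /* private area handed to a lower layer */
        };

        static struct wrapper *priv_to_wrapper(void *priv)
        {
                /* Recover the enclosing object from a pointer to its member. */
                return container_of(priv, struct wrapper, priv);
        }
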
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index e6d426edab56..e945aafd88ee 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -169,11 +169,9 @@ int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms)
msleep(1);
/* read from both event fields */
- wl1251_mem_read(wl, wl->mbox_ptr[0], &events_vector,
- sizeof(events_vector));
+ events_vector = wl1251_mem_read32(wl, wl->mbox_ptr[0]);
event = events_vector & mask;
- wl1251_mem_read(wl, wl->mbox_ptr[1], &events_vector,
- sizeof(events_vector));
+ events_vector = wl1251_mem_read32(wl, wl->mbox_ptr[1]);
event |= events_vector & mask;
} while (!event);
@@ -202,7 +200,7 @@ void wl1251_event_mbox_config(struct wl1251 *wl)
int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num)
{
- struct event_mailbox mbox;
+ struct event_mailbox *mbox;
int ret;
wl1251_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
@@ -210,12 +208,20 @@ int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num)
if (mbox_num > 1)
return -EINVAL;
+ mbox = kmalloc(sizeof(*mbox), GFP_KERNEL);
+ if (!mbox) {
+ wl1251_error("can not allocate mbox buffer");
+ return -ENOMEM;
+ }
+
/* first we read the mbox descriptor */
- wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox,
- sizeof(struct event_mailbox));
+ wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], mbox,
+ sizeof(*mbox));
/* process the descriptor */
- ret = wl1251_event_process(wl, &mbox);
+ ret = wl1251_event_process(wl, mbox);
+ kfree(mbox);
+
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/ti/wl1251/io.c b/drivers/net/wireless/ti/wl1251/io.c
index 5ebe7958ed5c..e8d567af74b4 100644
--- a/drivers/net/wireless/ti/wl1251/io.c
+++ b/drivers/net/wireless/ti/wl1251/io.c
@@ -121,7 +121,13 @@ void wl1251_set_partition(struct wl1251 *wl,
u32 mem_start, u32 mem_size,
u32 reg_start, u32 reg_size)
{
- struct wl1251_partition partition[2];
+ struct wl1251_partition_set *partition;
+
+ partition = kmalloc(sizeof(*partition), GFP_KERNEL);
+ if (!partition) {
+ wl1251_error("can not allocate partition buffer");
+ return;
+ }
wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
mem_start, mem_size);
@@ -164,10 +170,10 @@ void wl1251_set_partition(struct wl1251 *wl,
reg_start, reg_size);
}
- partition[0].start = mem_start;
- partition[0].size = mem_size;
- partition[1].start = reg_start;
- partition[1].size = reg_size;
+ partition->mem.start = mem_start;
+ partition->mem.size = mem_size;
+ partition->reg.start = reg_start;
+ partition->reg.size = reg_size;
wl->physical_mem_addr = mem_start;
wl->physical_reg_addr = reg_start;
@@ -176,5 +182,7 @@ void wl1251_set_partition(struct wl1251 *wl,
wl->virtual_reg_addr = mem_size;
wl->if_ops->write(wl, HW_ACCESS_PART0_SIZE_ADDR, partition,
- sizeof(partition));
+ sizeof(*partition));
+
+ kfree(partition);
}
diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 98cd39619d57..e9dc3c72bb11 100644
--- a/drivers/net/wireless/ti/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
@@ -443,19 +443,25 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
void wl1251_tx_complete(struct wl1251 *wl)
{
int i, result_index, num_complete = 0, queue_len;
- struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
+ struct tx_result *result, *result_ptr;
unsigned long flags;
if (unlikely(wl->state != WL1251_STATE_ON))
return;
+ result = kmalloc_array(FW_TX_CMPLT_BLOCK_SIZE, sizeof(*result), GFP_KERNEL);
+ if (!result) {
+ wl1251_error("can not allocate result buffer");
+ return;
+ }
+
/* First we read the result */
- wl1251_mem_read(wl, wl->data_path->tx_complete_addr,
- result, sizeof(result));
+ wl1251_mem_read(wl, wl->data_path->tx_complete_addr, result,
+ FW_TX_CMPLT_BLOCK_SIZE * sizeof(*result));
result_index = wl->next_tx_complete;
- for (i = 0; i < ARRAY_SIZE(result); i++) {
+ for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) {
result_ptr = &result[result_index];
if (result_ptr->done_1 == 1 &&
@@ -538,6 +544,7 @@ void wl1251_tx_complete(struct wl1251 *wl)
}
+ kfree(result);
wl->next_tx_complete = result_index;
}
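
Note on the wl1251 hunks above (event.c, io.c, tx.c): buffers that end up in bus transfers are moved off the stack into kmalloc'd memory. Stack memory is not a safe target for DMA (with VMAP_STACK it may not even be physically contiguous), and the tx_result array was also large for a kernel stack frame. A minimal sketch of the pattern, assuming a read helper with a similar shape (my_bus_read is a stand-in, not a real API):

        #include <linux/slab.h>
        #include <linux/types.h>

        struct result { __le32 status; };

        /* Stand-in for the real bus accessor (e.g. an SDIO/SPI read helper). */
        int my_bus_read(void *bus, u32 addr, void *buf, size_t len);

        static int read_results(void *bus, u32 addr, int n)
        {
                struct result *res;
                int ret;

                res = kmalloc_array(n, sizeof(*res), GFP_KERNEL);
                if (!res)
                        return -ENOMEM;

                ret = my_bus_read(bus, addr, res, n * sizeof(*res)); /* heap buffer is DMA-safe */
                /* ... process res[0..n-1] here ... */

                kfree(res);
                return ret;
        }
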
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.h b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
index 0809ba664276..3da5ec75e0f0 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_coredump.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
@@ -14,9 +14,6 @@
/* Max buffer allocated to receive coredump data */
#define MAX_DATA_SIZE 0x00010000
-/* Max number of file entries */
-#define MAX_NOF_ENTRY 256
-
/* Max length */
#define MAX_SIZE_LEN 32
@@ -38,7 +35,7 @@ struct iosm_cd_list_entry {
*/
struct iosm_cd_list {
__le32 num_entries;
- struct iosm_cd_list_entry entry[MAX_NOF_ENTRY];
+ struct iosm_cd_list_entry entry[];
} __packed;
/**
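
Note on the iosm_ipc_coredump.h hunk: dropping the fixed MAX_NOF_ENTRY bound and declaring entry[] as a C99 flexible array member means the structure only describes the wire layout, while the real entry count comes from num_entries at runtime. A small sketch of allocating such a structure with struct_size() (the types here are illustrative):

        #include <linux/overflow.h>
        #include <linux/slab.h>
        #include <linux/types.h>

        struct cd_entry { __le32 size; };            /* illustrative entry */

        struct cd_list {
                __le32 num_entries;
                struct cd_entry entry[];             /* flexible array member */
        } __packed;

        static struct cd_list *cd_list_alloc(u32 n)
        {
                struct cd_list *list;

                /* struct_size() = sizeof(*list) + n * sizeof(list->entry[0]),
                 * with overflow checking.
                 */
                list = kzalloc(struct_size(list, entry, n), GFP_KERNEL);
                if (list)
                        list->num_entries = cpu_to_le32(n);
                return list;
        }
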
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
index c6b032f95d2e..4627847c6daa 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
@@ -372,8 +372,6 @@ bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
struct ipc_pipe *pipe)
{
- u32 tail =
- le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
struct ipc_protocol_td *p_td;
struct sk_buff *skb;
@@ -403,14 +401,6 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
goto ret;
}
- if (!IPC_CB(skb)) {
- dev_err(ipc_protocol->dev, "pipe# %d, tail: %d skb_cb is NULL",
- pipe->pipe_nr, tail);
- ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
- skb = NULL;
- goto ret;
- }
-
if (p_td->buffer.address != IPC_CB(skb)->mapping) {
dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
(unsigned long long)p_td->buffer.address, skb->data);
diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_dpmaif.c
index c8bf6929af51..6d3edadecbec 100644
--- a/drivers/net/wwan/t7xx/t7xx_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c
@@ -1043,15 +1043,13 @@ unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info,
return value & DPMAIF_DL_RD_WR_IDX_MSK;
}
-static bool t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info)
+static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info)
{
u32 value;
- int ret;
- ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD,
+ return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD,
value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
- return ret;
}
int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt)
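
Note on the t7xx_dl_add_timedout() hunk above: the poll helper evaluates to 0 on success or a negative error on timeout, and returning that through a bool flattened the error code; returning the helper's result directly lets -ETIMEDOUT propagate to callers. A small sketch of the same idiom using readl_poll_timeout_atomic(), assumed equivalent for illustration:

        #include <linux/bits.h>
        #include <linux/iopoll.h>

        /* Returns 0 once bit 31 clears, or -ETIMEDOUT after 2 ms of polling. */
        static int wait_until_ready(void __iomem *reg)
        {
                u32 val;

                return readl_poll_timeout_atomic(reg, val, !(val & BIT(31)), 0, 2000);
        }
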
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 46066dcd2607..6ff30cb8eb16 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -91,14 +91,13 @@ static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_p
}
static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
- size_t size)
+ size_t size, gfp_t gfp_mask)
{
- req->skb = __dev_alloc_skb(size, GFP_KERNEL);
+ req->skb = __dev_alloc_skb(size, gfp_mask);
if (!req->skb)
return -ENOMEM;
- req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data,
- skb_data_area_size(req->skb), DMA_FROM_DEVICE);
+ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
dev_kfree_skb_any(req->skb);
req->skb = NULL;
@@ -154,7 +153,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
if (req->mapped_buff) {
dma_unmap_single(md_ctrl->dev, req->mapped_buff,
- skb_data_area_size(skb), DMA_FROM_DEVICE);
+ queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
req->mapped_buff = 0;
}
@@ -175,7 +174,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
spin_unlock_irqrestore(&queue->ring_lock, flags);
req = queue->rx_refill;
- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
if (ret)
return ret;
@@ -376,7 +375,7 @@ static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
if (req_cur->mapped_buff && req_cur->skb) {
dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
- skb_data_area_size(req_cur->skb), tx_rx);
+ ring->pkt_size, tx_rx);
req_cur->mapped_buff = 0;
}
@@ -403,7 +402,7 @@ static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, s
if (!req->gpd)
goto err_free_req;
- val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
+ val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
if (val)
goto err_free_pool;
@@ -802,7 +801,7 @@ static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
if (req->skb)
continue;
- ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
+ ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
if (ret)
break;
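
Note on the cldma hunks above: the DMA mapping length now matches the size that was actually requested for the skb (skb_data_area_size() is being retired by this series), and t7xx_cldma_alloc_and_map_skb() gains a gfp_t argument so callers that cannot sleep can pass GFP_ATOMIC while the normal refill paths keep GFP_KERNEL. A minimal sketch of threading the allocation context through such a helper (names are illustrative):

        #include <linux/dma-mapping.h>
        #include <linux/skbuff.h>

        /* Allocate an rx skb and map exactly 'size' bytes; the caller picks the
         * allocation context (GFP_KERNEL normally, GFP_ATOMIC when it cannot sleep).
         */
        static int rx_alloc_and_map(struct device *dev, size_t size, gfp_t gfp,
                                    struct sk_buff **skb_out, dma_addr_t *addr_out)
        {
                struct sk_buff *skb = __dev_alloc_skb(size, gfp);

                if (!skb)
                        return -ENOMEM;

                *addr_out = dma_map_single(dev, skb->data, size, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, *addr_out)) {
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
                *skb_out = skb;
                return 0;
        }
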
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index 35a8a0d7c1ee..91a0eb19e0d8 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -151,14 +151,12 @@ static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
{
dma_addr_t data_bus_addr;
struct sk_buff *skb;
- size_t data_len;
skb = __dev_alloc_skb(size, GFP_KERNEL);
if (!skb)
return false;
- data_len = skb_data_area_size(skb);
- data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE);
+ data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
dev_kfree_skb_any(skb);
@@ -167,7 +165,7 @@ static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
cur_skb->skb = skb;
cur_skb->data_bus_addr = data_bus_addr;
- cur_skb->data_len = data_len;
+ cur_skb->data_len = size;
return true;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 1056ad9bf34f..3458af31e864 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -458,9 +458,9 @@ static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl
enum t7xx_fsm_event_state event_id,
enum t7xx_fsm_event_state err_detect)
{
+ struct t7xx_fsm_event *event = NULL, *event_next;
struct t7xx_sys_info *core_info = &md->core_md;
struct device *dev = &md->t7xx_dev->pdev->dev;
- struct t7xx_fsm_event *event, *event_next;
unsigned long flags;
int ret;
@@ -493,7 +493,7 @@ static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl
goto err_free_event;
}
- if (ctl->exp_flg)
+ if (!event || ctl->exp_flg)
goto err_free_event;
ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 5f1bb8d6afb6..871f2a27a398 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -568,7 +568,7 @@ static const struct dev_pm_ops t7xx_pci_pm_ops = {
static int t7xx_request_irq(struct pci_dev *pdev)
{
struct t7xx_pci_dev *t7xx_dev;
- int ret, i;
+ int ret = 0, i;
t7xx_dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 7d2c0e81e33d..d4de047ff0d4 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -350,9 +350,6 @@ static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *s
u16 seq_num, channel;
int ret;
- if (!skb)
- return -EINVAL;
-
channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index a491db46e3bd..d9f6367b9993 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -2787,13 +2787,14 @@ void pn53x_common_clean(struct pn533 *priv)
{
struct pn533_cmd *cmd, *n;
+ /* delete the timer before cleanup the worker */
+ del_timer_sync(&priv->listen_timer);
+
flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq);
skb_queue_purge(&priv->resp_q);
- del_timer(&priv->listen_timer);
-
list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
list_del(&cmd->queue);
kfree(cmd);
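
Note on the pn533 hunk above: it is a teardown-ordering fix. The listen timer can still fire and queue poll work, so it has to be stopped synchronously with del_timer_sync() before the poll work is flushed and the workqueue destroyed; the old del_timer() call came after the queue was already gone, leaving a window for work to be queued on freed infrastructure. A minimal sketch of the safe ordering with stand-in types:

        #include <linux/skbuff.h>
        #include <linux/timer.h>
        #include <linux/workqueue.h>

        struct my_priv {                              /* illustrative fields */
                struct timer_list timer;
                struct delayed_work poll_work;
                struct workqueue_struct *wq;
                struct sk_buff_head resp_q;
        };

        static void my_clean(struct my_priv *priv)
        {
                del_timer_sync(&priv->timer);         /* 1. timer can no longer queue work */
                flush_delayed_work(&priv->poll_work); /* 2. let in-flight work finish */
                destroy_workqueue(priv->wq);          /* 3. now it is safe to drop the wq */
                skb_queue_purge(&priv->resp_q);       /* 4. free whatever is left */
        }
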
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 6ab90891801d..816028c0f6ed 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1550,6 +1550,11 @@ static const struct qcom_pcie_cfg sc7280_cfg = {
.pipe_clk_need_muxing = true,
};
+static const struct qcom_pcie_cfg sc8180x_cfg = {
+ .ops = &ops_1_9_0,
+ .has_tbu_clk = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1656,7 +1661,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
- { .compatible = "qcom,pcie-sc8180x", .data = &sm8250_cfg },
+ { .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 09d9bf465d72..ffec82c8a523 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -272,7 +272,6 @@ struct advk_pcie {
u32 actions;
} wins[OB_WIN_COUNT];
u8 wins_count;
- int irq;
struct irq_domain *rp_irq_domain;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
@@ -1570,26 +1569,21 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
}
}
-static void advk_pcie_irq_handler(struct irq_desc *desc)
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
- struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 val, mask, status;
+ struct advk_pcie *pcie = arg;
+ u32 status;
- chained_irq_enter(chip, desc);
+ status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+ if (!(status & PCIE_IRQ_CORE_INT))
+ return IRQ_NONE;
- val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
- mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
- status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
+ advk_pcie_handle_int(pcie);
- if (status & PCIE_IRQ_CORE_INT) {
- advk_pcie_handle_int(pcie);
+ /* Clear interrupt */
+ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
- /* Clear interrupt */
- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
- }
-
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
}
static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -1669,7 +1663,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
- int ret;
+ int ret, irq;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
if (!bridge)
@@ -1755,9 +1749,17 @@ static int advk_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- pcie->irq = platform_get_irq(pdev, 0);
- if (pcie->irq < 0)
- return pcie->irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+ pcie);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
"reset-gpios", 0,
@@ -1814,15 +1816,12 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
-
bridge->sysdata = pcie;
bridge->ops = &advk_pcie_ops;
bridge->map_irq = advk_pcie_map_irq;
ret = pci_host_probe(bridge);
if (ret < 0) {
- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
@@ -1871,9 +1870,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
- /* Remove IRQ handler */
- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
-
/* Remove IRQ domains */
advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
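
Note on the pci-aardvark hunks above: the chained interrupt handler is replaced by an ordinary handler registered with devm_request_irq() as a shared, non-threaded IRQ. The handler now reports IRQ_NONE when the status register shows the interrupt was not ours, and the managed registration removes the manual irq_set_chained_handler_and_data(..., NULL, NULL) teardown from the error and remove paths. A minimal sketch of the registration side with illustrative names and stubbed helpers:

        #include <linux/interrupt.h>
        #include <linux/platform_device.h>

        struct my_pcie { void __iomem *base; };                            /* illustrative */

        static bool my_irq_pending(struct my_pcie *pcie) { return true; }  /* stub */
        static void my_handle_int(struct my_pcie *pcie) { }                /* stub */

        static irqreturn_t my_irq_handler(int irq, void *arg)
        {
                struct my_pcie *pcie = arg;

                if (!my_irq_pending(pcie))
                        return IRQ_NONE;               /* shared line: not our interrupt */

                my_handle_int(pcie);
                return IRQ_HANDLED;
        }

        static int my_register_irq(struct platform_device *pdev, struct my_pcie *pcie)
        {
                int irq = platform_get_irq(pdev, 0);

                if (irq < 0)
                        return irq;
                /* Managed registration: the IRQ is released automatically on unbind,
                 * so no explicit teardown in error/remove paths.
                 */
                return devm_request_irq(&pdev->dev, irq, my_irq_handler,
                                        IRQF_SHARED | IRQF_NO_THREAD, "my-pcie", pcie);
        }
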
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9ecce435fb3f..d25122fbe98a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2920,6 +2920,16 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
},
+ /*
+ * Downstream device is not accessible after putting a root port
+ * into D3cold and back into D0 on Elo i2.
+ */
+ .ident = "Elo i2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
+ },
},
#endif
{ }
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index a3fa03bcd9a3..80838dc54b3a 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -1236,18 +1236,17 @@ FUNC_GROUP_DECL(SALT8, AA12);
FUNC_GROUP_DECL(WDTRST4, AA12);
#define AE12 196
-SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
- SIG_DESC_SET(SCU438, 4));
+SIG_EXPR_LIST_DECL_SESG(AE12, FWSPIQ2, FWQSPI, SIG_DESC_SET(SCU438, 4));
SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
+PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIQ2),
SIG_EXPR_LIST_PTR(AE12, GPIOY4));
#define AF12 197
-SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
- SIG_DESC_SET(SCU438, 5));
+SIG_EXPR_LIST_DECL_SESG(AF12, FWSPIQ3, FWQSPI, SIG_DESC_SET(SCU438, 5));
SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
+PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIQ3),
SIG_EXPR_LIST_PTR(AF12, GPIOY5));
+FUNC_GROUP_DECL(FWQSPI, AE12, AF12);
#define AC12 198
SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
@@ -1520,9 +1519,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
-GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
-FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
+FUNC_DECL_1(FWSPID, FWSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
@@ -1918,7 +1916,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(FSI2),
ASPEED_PINCTRL_GROUP(FWSPIABR),
ASPEED_PINCTRL_GROUP(FWSPID),
- ASPEED_PINCTRL_GROUP(FWQSPID),
+ ASPEED_PINCTRL_GROUP(FWQSPI),
ASPEED_PINCTRL_GROUP(FWSPIWP),
ASPEED_PINCTRL_GROUP(GPIT0),
ASPEED_PINCTRL_GROUP(GPIT1),
@@ -2160,6 +2158,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(FSI2),
ASPEED_PINCTRL_FUNC(FWSPIABR),
ASPEED_PINCTRL_FUNC(FWSPID),
+ ASPEED_PINCTRL_FUNC(FWQSPI),
ASPEED_PINCTRL_FUNC(FWSPIWP),
ASPEED_PINCTRL_FUNC(GPIT0),
ASPEED_PINCTRL_FUNC(GPIT1),
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 08e429a06922..cb258e1448d5 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -239,73 +239,97 @@ static int wait_for_boot_status_ready(struct idtcm *idtcm)
return -EBUSY;
}
-static int _idtcm_set_scsr_read_trig(struct idtcm_channel *channel,
- enum scsr_read_trig_sel trig, u8 ref)
+static int arm_tod_read_trig_sel_refclk(struct idtcm_channel *channel, u8 ref)
{
struct idtcm *idtcm = channel->idtcm;
- u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
- u8 val;
+ u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_SECONDARY_CMD);
+ u8 val = 0;
int err;
- if (trig == SCSR_TOD_READ_TRIG_SEL_REFCLK) {
- err = idtcm_read(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val));
- if (err)
- return err;
-
- val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT);
- val |= (ref << WR_REF_INDEX_SHIFT);
-
- err = idtcm_write(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val));
- if (err)
- return err;
- }
+ val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT);
+ val |= (ref << WR_REF_INDEX_SHIFT);
- err = idtcm_read(idtcm, channel->tod_read_primary,
- tod_read_cmd, &val, sizeof(val));
+ err = idtcm_write(idtcm, channel->tod_read_secondary,
+ TOD_READ_SECONDARY_SEL_CFG_0, &val, sizeof(val));
if (err)
return err;
- val &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
- val |= (trig << TOD_READ_TRIGGER_SHIFT);
- val &= ~TOD_READ_TRIGGER_MODE; /* single shot */
+ val = 0 | (SCSR_TOD_READ_TRIG_SEL_REFCLK << TOD_READ_TRIGGER_SHIFT);
+
+ err = idtcm_write(idtcm, channel->tod_read_secondary, tod_read_cmd,
+ &val, sizeof(val));
+ if (err)
+ dev_err(idtcm->dev, "%s: err = %d", __func__, err);
- err = idtcm_write(idtcm, channel->tod_read_primary,
- tod_read_cmd, &val, sizeof(val));
return err;
}
-static int idtcm_enable_extts(struct idtcm_channel *channel, u8 todn, u8 ref,
- bool enable)
+static bool is_single_shot(u8 mask)
{
- struct idtcm *idtcm = channel->idtcm;
- u8 old_mask = idtcm->extts_mask;
- u8 mask = 1 << todn;
+ /* Treat single bit ToD masks as continuous trigger */
+ return mask <= 8 && is_power_of_2(mask);
+}
+
+static int idtcm_extts_enable(struct idtcm_channel *channel,
+ struct ptp_clock_request *rq, int on)
+{
+ u8 index = rq->extts.index;
+ struct idtcm *idtcm;
+ u8 mask = 1 << index;
int err = 0;
+ u8 old_mask;
+ int ref;
- if (todn >= MAX_TOD)
+ idtcm = channel->idtcm;
+ old_mask = idtcm->extts_mask;
+
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ /* Reject requests to enable time stamping on falling edge */
+ if ((rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_FALLING_EDGE))
+ return -EOPNOTSUPP;
+
+ if (index >= MAX_TOD)
return -EINVAL;
- if (enable) {
- if (ref > 0xF) /* E_REF_CLK15 */
- return -EINVAL;
- if (idtcm->extts_mask & mask)
- return 0;
- err = _idtcm_set_scsr_read_trig(&idtcm->channel[todn],
- SCSR_TOD_READ_TRIG_SEL_REFCLK,
- ref);
+ if (on) {
+ /* Support triggering more than one TOD_0/1/2/3 by same pin */
+ /* Use the pin configured for the channel */
+ ref = ptp_find_pin(channel->ptp_clock, PTP_PF_EXTTS, channel->tod);
+
+ if (ref < 0) {
+ dev_err(idtcm->dev, "%s: No valid pin found for TOD%d!\n",
+ __func__, channel->tod);
+ return -EBUSY;
+ }
+
+ err = arm_tod_read_trig_sel_refclk(&idtcm->channel[index], ref);
+
if (err == 0) {
idtcm->extts_mask |= mask;
- idtcm->event_channel[todn] = channel;
- idtcm->channel[todn].refn = ref;
+ idtcm->event_channel[index] = channel;
+ idtcm->channel[index].refn = ref;
+ idtcm->extts_single_shot = is_single_shot(idtcm->extts_mask);
+
+ if (old_mask)
+ return 0;
+
+ schedule_delayed_work(&idtcm->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
}
- } else
+ } else {
idtcm->extts_mask &= ~mask;
+ idtcm->extts_single_shot = is_single_shot(idtcm->extts_mask);
- if (old_mask == 0 && idtcm->extts_mask)
- schedule_delayed_work(&idtcm->extts_work,
- msecs_to_jiffies(EXTTS_PERIOD_MS));
+ if (idtcm->extts_mask == 0)
+ cancel_delayed_work(&idtcm->extts_work);
+ }
return err;
}
@@ -371,6 +395,31 @@ static void wait_for_chip_ready(struct idtcm *idtcm)
"Continuing while SYS APLL/DPLL is not locked");
}
+static int _idtcm_gettime_triggered(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_SECONDARY_CMD);
+ u8 buf[TOD_BYTE_COUNT];
+ u8 trigger;
+ int err;
+
+ err = idtcm_read(idtcm, channel->tod_read_secondary,
+ tod_read_cmd, &trigger, sizeof(trigger));
+ if (err)
+ return err;
+
+ if (trigger & TOD_READ_TRIGGER_MASK)
+ return -EBUSY;
+
+ err = idtcm_read(idtcm, channel->tod_read_secondary,
+ TOD_READ_SECONDARY_BASE, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ return char_array_to_timespec(buf, sizeof(buf), ts);
+}
+
static int _idtcm_gettime(struct idtcm_channel *channel,
struct timespec64 *ts, u8 timeout)
{
@@ -396,7 +445,7 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
} while (trigger & TOD_READ_TRIGGER_MASK);
err = idtcm_read(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY, buf, sizeof(buf));
+ TOD_READ_PRIMARY_BASE, buf, sizeof(buf));
if (err)
return err;
@@ -415,67 +464,38 @@ static int idtcm_extts_check_channel(struct idtcm *idtcm, u8 todn)
extts_channel = &idtcm->channel[todn];
ptp_channel = idtcm->event_channel[todn];
+
if (extts_channel == ptp_channel)
dco_delay = ptp_channel->dco_delay;
- err = _idtcm_gettime(extts_channel, &ts, 1);
- if (err == 0) {
- event.type = PTP_CLOCK_EXTTS;
- event.index = todn;
- event.timestamp = timespec64_to_ns(&ts) - dco_delay;
- ptp_clock_event(ptp_channel->ptp_clock, &event);
- }
- return err;
-}
+ err = _idtcm_gettime_triggered(extts_channel, &ts);
+ if (err)
+ return err;
-static u8 idtcm_enable_extts_mask(struct idtcm_channel *channel,
- u8 extts_mask, bool enable)
-{
- struct idtcm *idtcm = channel->idtcm;
- int i, err;
+ /* Triggered - save timestamp */
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = todn;
+ event.timestamp = timespec64_to_ns(&ts) - dco_delay;
+ ptp_clock_event(ptp_channel->ptp_clock, &event);
- for (i = 0; i < MAX_TOD; i++) {
- u8 mask = 1 << i;
- u8 refn = idtcm->channel[i].refn;
-
- if (extts_mask & mask) {
- /* check extts before disabling it */
- if (enable == false) {
- err = idtcm_extts_check_channel(idtcm, i);
- /* trigger happened so we won't re-enable it */
- if (err == 0)
- extts_mask &= ~mask;
- }
- (void)idtcm_enable_extts(channel, i, refn, enable);
- }
- }
-
- return extts_mask;
+ return err;
}
static int _idtcm_gettime_immediate(struct idtcm_channel *channel,
struct timespec64 *ts)
{
struct idtcm *idtcm = channel->idtcm;
- u8 extts_mask = 0;
- int err;
- /* Disable extts */
- if (idtcm->extts_mask) {
- extts_mask = idtcm_enable_extts_mask(channel, idtcm->extts_mask,
- false);
- }
-
- err = _idtcm_set_scsr_read_trig(channel,
- SCSR_TOD_READ_TRIG_SEL_IMMEDIATE, 0);
- if (err == 0)
- err = _idtcm_gettime(channel, ts, 10);
+ u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
+ u8 val = (SCSR_TOD_READ_TRIG_SEL_IMMEDIATE << TOD_READ_TRIGGER_SHIFT);
+ int err;
- /* Re-enable extts */
- if (extts_mask)
- idtcm_enable_extts_mask(channel, extts_mask, true);
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ tod_read_cmd, &val, sizeof(val));
+ if (err)
+ return err;
- return err;
+ return _idtcm_gettime(channel, ts, 10);
}
static int _sync_pll_output(struct idtcm *idtcm,
@@ -1332,43 +1352,15 @@ static int idtcm_output_enable(struct idtcm_channel *channel,
return idtcm_write(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val));
}
-static int idtcm_output_mask_enable(struct idtcm_channel *channel,
- bool enable)
-{
- u16 mask;
- int err;
- u8 outn;
-
- mask = channel->output_mask;
- outn = 0;
-
- while (mask) {
- if (mask & 0x1) {
- err = idtcm_output_enable(channel, enable, outn);
- if (err)
- return err;
- }
-
- mask >>= 0x1;
- outn++;
- }
-
- return 0;
-}
-
static int idtcm_perout_enable(struct idtcm_channel *channel,
struct ptp_perout_request *perout,
bool enable)
{
struct idtcm *idtcm = channel->idtcm;
- unsigned int flags = perout->flags;
struct timespec64 ts = {0, 0};
int err;
- if (flags == PEROUT_ENABLE_OUTPUT_MASK)
- err = idtcm_output_mask_enable(channel, enable);
- else
- err = idtcm_output_enable(channel, enable, perout->index);
+ err = idtcm_output_enable(channel, enable, perout->index);
if (err) {
dev_err(idtcm->dev, "Unable to set output enable");
@@ -1702,6 +1694,9 @@ static int initialize_dco_operating_mode(struct idtcm_channel *channel)
/*
* Maximum absolute value for write phase offset in picoseconds
*
+ * @channel: channel
+ * @delta_ns: delta in nanoseconds
+ *
* Destination signed register is 32-bit register in resolution of 50ps
*
* 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
@@ -1869,7 +1864,7 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
int err;
if (channel->phase_pull_in == true)
- return 0;
+ return -EBUSY;
mutex_lock(idtcm->lock);
@@ -1958,8 +1953,7 @@ static int idtcm_enable(struct ptp_clock_info *ptp,
err = idtcm_perout_enable(channel, &rq->perout, true);
break;
case PTP_CLK_REQ_EXTTS:
- err = idtcm_enable_extts(channel, rq->extts.index,
- rq->extts.rsv[0], on);
+ err = idtcm_extts_enable(channel, rq, on);
break;
default:
break;
@@ -1982,13 +1976,6 @@ static int idtcm_enable_tod(struct idtcm_channel *channel)
u8 cfg;
int err;
- /* STEELAI-366 - Temporary workaround for ts2phc compatibility */
- if (0) {
- err = idtcm_output_mask_enable(channel, false);
- if (err)
- return err;
- }
-
/*
* Start the TOD clock ticking.
*/
@@ -2038,17 +2025,35 @@ static void idtcm_set_version_info(struct idtcm *idtcm)
product_id, hw_rev_id, config_select);
}
+static int idtcm_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static struct ptp_pin_desc pin_config[MAX_TOD][MAX_REF_CLK];
+
static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
.n_ext_ts = MAX_TOD,
+ .n_pins = MAX_REF_CLK,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime,
.enable = &idtcm_enable,
+ .verify = &idtcm_verify_pin,
.do_aux_work = &idtcm_work_handler,
};
@@ -2057,12 +2062,14 @@ static const struct ptp_clock_info idtcm_caps_deprecated = {
.max_adj = 244000,
.n_per_out = 12,
.n_ext_ts = MAX_TOD,
+ .n_pins = MAX_REF_CLK,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime_deprecated,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime_deprecated,
.enable = &idtcm_enable,
+ .verify = &idtcm_verify_pin,
.do_aux_work = &idtcm_work_handler,
};
@@ -2174,8 +2181,9 @@ static u32 idtcm_get_dco_delay(struct idtcm_channel *channel)
n = 1;
fodFreq = (u32)div_u64(m, n);
+
if (fodFreq >= 500000000)
- return 18 * (u32)div_u64(NSEC_PER_SEC, fodFreq);
+ return (u32)div_u64(18 * (u64)NSEC_PER_SEC, fodFreq);
return 0;
}
@@ -2188,24 +2196,28 @@ static int configure_channel_tod(struct idtcm_channel *channel, u32 index)
switch (index) {
case 0:
channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_0);
+ channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_0);
channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_0);
channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_0);
channel->sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
break;
case 1:
channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_1);
+ channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_1);
channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_1);
channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_1);
channel->sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
break;
case 2:
channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_2);
+ channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_2);
channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_2);
channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_2);
channel->sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
break;
case 3:
channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_3);
+ channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_3);
channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_3);
channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_3);
channel->sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
@@ -2221,6 +2233,7 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
{
struct idtcm_channel *channel;
int err;
+ int i;
if (!(index < MAX_TOD))
return -EINVAL;
@@ -2248,6 +2261,17 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
snprintf(channel->caps.name, sizeof(channel->caps.name),
"IDT CM TOD%u", index);
+ channel->caps.pin_config = pin_config[index];
+
+ for (i = 0; i < channel->caps.n_pins; ++i) {
+ struct ptp_pin_desc *ppd = &channel->caps.pin_config[i];
+
+ snprintf(ppd->name, sizeof(ppd->name), "input_ref%d", i);
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ ppd->chan = index;
+ }
+
err = initialize_dco_operating_mode(channel);
if (err)
return err;
@@ -2302,26 +2326,40 @@ static int idtcm_enable_extts_channel(struct idtcm *idtcm, u32 index)
static void idtcm_extts_check(struct work_struct *work)
{
struct idtcm *idtcm = container_of(work, struct idtcm, extts_work.work);
- int err, i;
+ struct idtcm_channel *channel;
+ u8 mask;
+ int err;
+ int i;
if (idtcm->extts_mask == 0)
return;
mutex_lock(idtcm->lock);
+
for (i = 0; i < MAX_TOD; i++) {
- u8 mask = 1 << i;
+ mask = 1 << i;
+
+ if ((idtcm->extts_mask & mask) == 0)
+ continue;
- if (idtcm->extts_mask & mask) {
- err = idtcm_extts_check_channel(idtcm, i);
+ err = idtcm_extts_check_channel(idtcm, i);
+
+ if (err == 0) {
/* trigger clears itself, so clear the mask */
- if (err == 0)
+ if (idtcm->extts_single_shot) {
idtcm->extts_mask &= ~mask;
+ } else {
+ /* Re-arm */
+ channel = &idtcm->channel[i];
+ arm_tod_read_trig_sel_refclk(channel, channel->refn);
+ }
}
}
if (idtcm->extts_mask)
schedule_delayed_work(&idtcm->extts_work,
msecs_to_jiffies(EXTTS_PERIOD_MS));
+
mutex_unlock(idtcm->lock);
}
@@ -2342,6 +2380,11 @@ static void set_default_masks(struct idtcm *idtcm)
idtcm->tod_mask = DEFAULT_TOD_MASK;
idtcm->extts_mask = 0;
+ idtcm->channel[0].tod = 0;
+ idtcm->channel[1].tod = 1;
+ idtcm->channel[2].tod = 2;
+ idtcm->channel[3].tod = 3;
+
idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL;
idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL;
idtcm->channel[2].pll = DEFAULT_TOD2_PTP_PLL;
@@ -2420,8 +2463,8 @@ static int idtcm_remove(struct platform_device *pdev)
{
struct idtcm *idtcm = platform_get_drvdata(pdev);
+ idtcm->extts_mask = 0;
ptp_clock_unregister_all(idtcm);
-
cancel_delayed_work_sync(&idtcm->extts_work);
return 0;
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index 0f3059ae1fff..bf1e49409844 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -10,11 +10,13 @@
#include <linux/ktime.h>
#include <linux/mfd/idt8a340_reg.h>
+#include <linux/ptp_clock.h>
#include <linux/regmap.h>
#define FW_FILENAME "idtcm.bin"
#define MAX_TOD (4)
#define MAX_PLL (8)
+#define MAX_REF_CLK (16)
#define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL)
@@ -52,8 +54,6 @@
#define LOCK_TIMEOUT_MS (2000)
#define LOCK_POLL_INTERVAL_MS (10)
-#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
-
#define IDTCM_MAX_WRITE_COUNT (512)
#define PHASE_PULL_IN_MAX_PPB (144000)
@@ -90,6 +90,7 @@ struct idtcm_channel {
u16 dpll_ctrl_n;
u16 dpll_phase_pull_in;
u16 tod_read_primary;
+ u16 tod_read_secondary;
u16 tod_write;
u16 tod_n;
u16 hw_dpll_n;
@@ -105,6 +106,7 @@ struct idtcm_channel {
/* last input trigger for extts */
u8 refn;
u8 pll;
+ u8 tod;
u16 output_mask;
};
@@ -116,6 +118,7 @@ struct idtcm {
enum fw_version fw_ver;
/* Polls for external time stamps */
u8 extts_mask;
+ bool extts_single_shot;
struct delayed_work extts_work;
/* Remember the ptp channel to report extts */
struct idtcm_channel *event_channel[MAX_TOD];
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index dd45471f6780..860672d6a03c 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -300,7 +300,7 @@ struct ptp_ocp {
struct platform_device *spi_flash;
struct clk_hw *i2c_clk;
struct timer_list watchdog;
- const struct ocp_attr_group *attr_tbl;
+ const struct attribute_group **attr_group;
const struct ptp_ocp_eeprom_map *eeprom_map;
struct dentry *debug_root;
time64_t gnss_lost;
@@ -841,7 +841,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
}
static void
-ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, s64 delta_ns)
{
struct timespec64 ts;
unsigned long flags;
@@ -850,7 +850,8 @@ ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
spin_lock_irqsave(&bp->lock, flags);
err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
if (likely(!err)) {
- timespec64_add_ns(&ts, delta_ns);
+ set_normalized_timespec64(&ts, ts.tv_sec,
+ ts.tv_nsec + delta_ns);
__ptp_ocp_settime_locked(bp, &ts);
}
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1836,6 +1837,42 @@ ptp_ocp_signal_init(struct ptp_ocp *bp)
}
static void
+ptp_ocp_attr_group_del(struct ptp_ocp *bp)
+{
+ sysfs_remove_groups(&bp->dev.kobj, bp->attr_group);
+ kfree(bp->attr_group);
+}
+
+static int
+ptp_ocp_attr_group_add(struct ptp_ocp *bp,
+ const struct ocp_attr_group *attr_tbl)
+{
+ int count, i;
+ int err;
+
+ count = 0;
+ for (i = 0; attr_tbl[i].cap; i++)
+ if (attr_tbl[i].cap & bp->fw_cap)
+ count++;
+
+ bp->attr_group = kcalloc(count + 1, sizeof(struct attribute_group *),
+ GFP_KERNEL);
+ if (!bp->attr_group)
+ return -ENOMEM;
+
+ count = 0;
+ for (i = 0; attr_tbl[i].cap; i++)
+ if (attr_tbl[i].cap & bp->fw_cap)
+ bp->attr_group[count++] = attr_tbl[i].group;
+
+ err = sysfs_create_groups(&bp->dev.kobj, bp->attr_group);
+ if (err)
+ bp->attr_group[0] = NULL;
+
+ return err;
+}
+
+static void
ptp_ocp_sma_init(struct ptp_ocp *bp)
{
u32 reg;
@@ -1904,7 +1941,6 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->flash_start = 1024 * 4096;
bp->eeprom_map = fb_eeprom_map;
bp->fw_version = ioread32(&bp->image->version);
- bp->attr_tbl = fb_timecard_groups;
bp->fw_cap = OCP_CAP_BASIC;
ver = bp->fw_version & 0xffff;
@@ -1918,6 +1954,10 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
ptp_ocp_sma_init(bp);
ptp_ocp_signal_init(bp);
+ err = ptp_ocp_attr_group_add(bp, fb_timecard_groups);
+ if (err)
+ return err;
+
err = ptp_ocp_fb_set_pins(bp);
if (err)
return err;
@@ -3388,7 +3428,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
{
struct pps_device *pps;
char buf[32];
- int i, err;
if (bp->gnss_port != -1) {
sprintf(buf, "ttyS%d", bp->gnss_port);
@@ -3413,14 +3452,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
if (pps)
ptp_ocp_symlink(bp, pps->dev, "pps");
- for (i = 0; bp->attr_tbl[i].cap; i++) {
- if (!(bp->attr_tbl[i].cap & bp->fw_cap))
- continue;
- err = sysfs_create_group(&bp->dev.kobj, bp->attr_tbl[i].group);
- if (err)
- return err;
- }
-
ptp_ocp_debugfs_add_device(bp);
return 0;
@@ -3492,15 +3523,11 @@ static void
ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
{
struct device *dev = &bp->dev;
- int i;
sysfs_remove_link(&dev->kobj, "ttyGNSS");
sysfs_remove_link(&dev->kobj, "ttyMAC");
sysfs_remove_link(&dev->kobj, "ptp");
sysfs_remove_link(&dev->kobj, "pps");
- if (bp->attr_tbl)
- for (i = 0; bp->attr_tbl[i].cap; i++)
- sysfs_remove_group(&dev->kobj, bp->attr_tbl[i].group);
}
static void
@@ -3510,6 +3537,7 @@ ptp_ocp_detach(struct ptp_ocp *bp)
ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp);
+ ptp_ocp_attr_group_del(bp);
if (timer_pending(&bp->watchdog))
del_timer_sync(&bp->watchdog);
if (bp->ts0)
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 37d06f993b76..1d9be771f3ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_OPTIMAL:
case SCSI_ACCESS_STATE_ACTIVE:
case SCSI_ACCESS_STATE_LBA:
- return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING:
- return BLK_STS_AGAIN;
+ return BLK_STS_OK;
default:
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 44ca6110213c..79b2827e4081 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -667,7 +667,7 @@ static void fcoe_netdev_features_change(struct fc_lport *lport,
if (netdev->features & NETIF_F_FSO) {
lport->seq_offload = 1;
- lport->lso_max = netdev->gso_max_size;
+ lport->lso_max = min(netdev->gso_max_size, GSO_LEGACY_MAX_SIZE);
FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
lport->lso_max);
} else {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ef6e8cd8c26a..872a26376ccb 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1330,7 +1330,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
/* FLOGI needs to be 3 for WQE FCFI */
- ct = ((SLI4_CT_FCFI >> 1) & 1) | (SLI4_CT_FCFI & 1);
+ ct = SLI4_CT_FCFI;
bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
/* Set the fcfi to the fcfi we registered with */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index bda2a7ba4e77..6adaf79e67cc 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -10720,10 +10720,10 @@ __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
/* Words 0 - 2 */
bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
- bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
- bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->addr_low = bpl->addr_low;
+ bde->addr_high = bpl->addr_high;
bde->type_size = cpu_to_le32(xmit_len);
- bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
/* Word 3 */
cmdwqe->gen_req.request_payload_len = xmit_len;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 85dbf81f3204..6dfcfd8e7337 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3826,6 +3826,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index f04b961b96cd..ec58091fc948 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -510,9 +510,9 @@ static int qcom_slim_probe(struct platform_device *pdev)
}
ctrl->irq = platform_get_irq(pdev, 0);
- if (!ctrl->irq) {
+ if (ctrl->irq < 0) {
dev_err(&pdev->dev, "no slimbus IRQ\n");
- return -ENODEV;
+ return ctrl->irq;
}
sctrl = &ctrl->ctrl;
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 148bcb99c212..493bebbba521 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -914,7 +914,6 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
err = 0;
goto out_free;
}
- pr_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n");
}
}
err = sprom_extract(bus, sprom, buf, bus->sprom_size);
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index d97f496bab9b..79931ddc582a 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -194,12 +194,31 @@ static int int3400_thermal_run_osc(acpi_handle handle, char *uuid_str, int *enab
return result;
}
+static int set_os_uuid_mask(struct int3400_thermal_priv *priv, u32 mask)
+{
+ int cap = 0;
+
+ /*
+ * Capability bits:
+ * Bit 0: set to 1 to indicate DPTF is active
+ * Bit 1: set to 1 if active cooling is supported by user space daemon
+ * Bit 2: set to 1 if passive cooling is supported by user space daemon
+ * Bit 3: set to 1 if critical trip is handled by user space daemon
+ */
+ if (mask)
+ cap = (priv->os_uuid_mask << 1) | 0x01;
+
+ return int3400_thermal_run_osc(priv->adev->handle,
+ "b23ba85d-c8b7-3542-88de-8de2ffcfd698",
+ &cap);
+}
+
static ssize_t current_uuid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
- int i;
+ int ret, i;
for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; ++i) {
if (!strncmp(buf, int3400_thermal_uuids[i],
@@ -231,19 +250,7 @@ static ssize_t current_uuid_store(struct device *dev,
}
if (priv->os_uuid_mask) {
- int cap, ret;
-
- /*
- * Capability bits:
- * Bit 0: set to 1 to indicate DPTF is active
- * Bi1 1: set to 1 to active cooling is supported by user space daemon
- * Bit 2: set to 1 to passive cooling is supported by user space daemon
- * Bit 3: set to 1 to critical trip is handled by user space daemon
- */
- cap = ((priv->os_uuid_mask << 1) | 0x01);
- ret = int3400_thermal_run_osc(priv->adev->handle,
- "b23ba85d-c8b7-3542-88de-8de2ffcfd698",
- &cap);
+ ret = set_os_uuid_mask(priv, priv->os_uuid_mask);
if (ret)
return ret;
}
@@ -469,17 +476,26 @@ static int int3400_thermal_change_mode(struct thermal_zone_device *thermal,
if (mode != thermal->mode) {
int enabled;
+ enabled = mode == THERMAL_DEVICE_ENABLED;
+
+ if (priv->os_uuid_mask) {
+ if (!enabled) {
+ priv->os_uuid_mask = 0;
+ result = set_os_uuid_mask(priv, priv->os_uuid_mask);
+ }
+ goto eval_odvp;
+ }
+
if (priv->current_uuid_index < 0 ||
priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID)
return -EINVAL;
- enabled = (mode == THERMAL_DEVICE_ENABLED);
result = int3400_thermal_run_osc(priv->adev->handle,
int3400_thermal_uuids[priv->current_uuid_index],
&enabled);
}
-
+eval_odvp:
evaluate_odvp(priv);
return result;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index a38b922bcbc1..fd8b86dde525 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,7 @@ struct gsm_dlci {
int retries;
/* Uplink tty if active */
struct tty_port port; /* The tty bound to this DLCI if there is one */
+#define TX_SIZE 4096 /* Must be power of 2. */
struct kfifo fifo; /* Queue fifo for the DLCI */
int adaption; /* Adaption layer in use */
int prev_adaption;
@@ -1658,6 +1659,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
if (len == 0)
return;
}
+ len--;
slen++;
tty = tty_port_tty_get(port);
if (tty) {
@@ -1730,7 +1732,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
return NULL;
spin_lock_init(&dlci->lock);
mutex_init(&dlci->mutex);
- if (kfifo_alloc(&dlci->fifo, 4096, GFP_KERNEL) < 0) {
+ if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) {
kfree(dlci);
return NULL;
}
@@ -2351,6 +2353,7 @@ static void gsm_copy_config_values(struct gsm_mux *gsm,
static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
{
+ int ret = 0;
int need_close = 0;
int need_restart = 0;
@@ -2418,10 +2421,13 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
* FIXME: We need to separate activation/deactivation from adding
* and removing from the mux array
*/
- if (need_restart)
- gsm_activate_mux(gsm);
- if (gsm->initiator && need_close)
- gsm_dlci_begin_open(gsm->dlci[0]);
+ if (gsm->dead) {
+ ret = gsm_activate_mux(gsm);
+ if (ret)
+ return ret;
+ if (gsm->initiator)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ }
return 0;
}
@@ -2971,8 +2977,6 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
* Virtual tty side
*/
-#define TX_SIZE 512
-
/**
* gsm_modem_upd_via_data - send modem bits via convergence layer
* @dlci: channel
@@ -3212,7 +3216,7 @@ static unsigned int gsmtty_write_room(struct tty_struct *tty)
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return 0;
- return TX_SIZE - kfifo_len(&dlci->fifo);
+ return kfifo_avail(&dlci->fifo);
}
static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index f4a0caa56f84..21053db93ff1 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -37,6 +37,7 @@
#define MTK_UART_IER_RTSI 0x40 /* Enable RTS Modem status interrupt */
#define MTK_UART_IER_CTSI 0x80 /* Enable CTS Modem status interrupt */
+#define MTK_UART_EFR 38 /* I/O: Extended Features Register */
#define MTK_UART_EFR_EN 0x10 /* Enable enhancement feature */
#define MTK_UART_EFR_RTS 0x40 /* Enable hardware rx flow control */
#define MTK_UART_EFR_CTS 0x80 /* Enable hardware tx flow control */
@@ -53,6 +54,12 @@
#define MTK_UART_TX_TRIGGER 1
#define MTK_UART_RX_TRIGGER MTK_UART_RX_SIZE
+#define MTK_UART_FEATURE_SEL 39 /* Feature Selection register */
+#define MTK_UART_FEAT_NEWRMAP BIT(0) /* Use new register map */
+
+#define MTK_UART_XON1 40 /* I/O: Xon character 1 */
+#define MTK_UART_XOFF1 42 /* I/O: Xoff character 1 */
+
#ifdef CONFIG_SERIAL_8250_DMA
enum dma_rx_status {
DMA_RX_START = 0,
@@ -169,7 +176,7 @@ static void mtk8250_dma_enable(struct uart_8250_port *up)
MTK_UART_DMA_EN_RX | MTK_UART_DMA_EN_TX);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr);
if (dmaengine_slave_config(dma->rxchan, &dma->rxconf) != 0)
@@ -232,7 +239,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
int lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, MTK_UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, lcr);
lcr = serial_in(up, UART_LCR);
@@ -241,7 +248,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, MTK_UART_ESCAPE_DAT, MTK_UART_ESCAPE_CHAR);
serial_out(up, MTK_UART_ESCAPE_EN, 0x00);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK)));
serial_out(up, UART_LCR, lcr);
mtk8250_disable_intrs(up, MTK_UART_IER_XOFFI |
@@ -255,8 +262,8 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/*enable hw flow control*/
- serial_out(up, UART_EFR, MTK_UART_EFR_HW_FC |
- (serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, MTK_UART_EFR_HW_FC |
+ (serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
serial_out(up, UART_LCR, lcr);
@@ -270,12 +277,12 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
/*enable sw flow control */
- serial_out(up, UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
- (serial_in(up, UART_EFR) &
+ serial_out(up, MTK_UART_EFR, MTK_UART_EFR_XON1_XOFF1 |
+ (serial_in(up, MTK_UART_EFR) &
(~(MTK_UART_EFR_HW_FC | MTK_UART_EFR_SW_FC_MASK))));
- serial_out(up, UART_XON1, START_CHAR(port->state->port.tty));
- serial_out(up, UART_XOFF1, STOP_CHAR(port->state->port.tty));
+ serial_out(up, MTK_UART_XON1, START_CHAR(port->state->port.tty));
+ serial_out(up, MTK_UART_XOFF1, STOP_CHAR(port->state->port.tty));
serial_out(up, UART_LCR, lcr);
mtk8250_disable_intrs(up, MTK_UART_IER_CTSI|MTK_UART_IER_RTSI);
mtk8250_enable_intrs(up, MTK_UART_IER_XOFFI);
@@ -568,6 +575,10 @@ static int mtk8250_probe(struct platform_device *pdev)
uart.dma = data->dma;
#endif
+ /* Set AP UART new register map */
+ writel(MTK_UART_FEAT_NEWRMAP, uart.port.membase +
+ (MTK_UART_FEATURE_SEL << uart.port.regshift));
+
/* Disable Rate Fix function */
writel(0x0, uart.port.membase +
(MTK_UART_RATE_FIX << uart.port.regshift));
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 6d70fea76bb3..e37a917b9dbb 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -471,11 +471,10 @@ static int digicolor_uart_probe(struct platform_device *pdev)
if (IS_ERR(uart_clk))
return PTR_ERR(uart_clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dp->port.mapbase = res->start;
- dp->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ dp->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dp->port.membase))
return PTR_ERR(dp->port.membase);
+ dp->port.mapbase = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 87789872f400..be12fee94db5 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2664,6 +2664,7 @@ static int lpuart_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct lpuart_port *sport;
struct resource *res;
+ irq_handler_t handler;
int ret;
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
@@ -2741,17 +2742,11 @@ static int lpuart_probe(struct platform_device *pdev)
if (lpuart_is_32(sport)) {
lpuart_reg.cons = LPUART32_CONSOLE;
- ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
- DRIVER_NAME, sport);
+ handler = lpuart32_int;
} else {
lpuart_reg.cons = LPUART_CONSOLE;
- ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
- DRIVER_NAME, sport);
+ handler = lpuart_int;
}
-
- if (ret)
- goto failed_irq_request;
-
ret = uart_add_one_port(&lpuart_reg, &sport->port);
if (ret)
goto failed_attach_port;
@@ -2773,13 +2768,18 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485_config(&sport->port, &sport->port.rs485);
+ ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
+ DRIVER_NAME, sport);
+ if (ret)
+ goto failed_irq_request;
+
return 0;
+failed_irq_request:
failed_get_rs485:
failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
-failed_irq_request:
lpuart_disable_clks(sport);
failed_clock_enable:
failed_out_of_range:
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 7f2c83f299d3..eebe782380fb 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -774,6 +774,7 @@ static int wdm_release(struct inode *inode, struct file *file)
poison_urbs(desc);
spin_lock_irq(&desc->iuspin);
desc->resp_count = 0;
+ clear_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
desc->manage_power(desc->intf, 0);
unpoison_urbs(desc);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 71bb5e477dba..d37965867b23 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -890,13 +890,37 @@ static void uvc_function_unbind(struct usb_configuration *c,
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
+ long wait_ret = 1;
uvcg_info(f, "%s()\n", __func__);
+ /* If we know we're connected via v4l2, then there should be a cleanup
+ * of the device from userspace either via UVC_EVENT_DISCONNECT or
+ * through the video device removal uevent. Allow some time for the
+ * application to close out before things get deleted.
+ */
+ if (uvc->func_connected) {
+ uvcg_dbg(f, "waiting for clean disconnect\n");
+ wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
+ uvc->func_connected == false, msecs_to_jiffies(500));
+ uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret);
+ }
+
device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev);
v4l2_device_unregister(&uvc->v4l2_dev);
+ if (uvc->func_connected) {
+ /* Wait for the release to occur to ensure there are no longer any
+ * pending operations that may cause panics when resources are cleaned
+ * up.
+ */
+ uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__);
+ wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue,
+ uvc->func_connected == false, msecs_to_jiffies(1000));
+ uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret);
+ }
+
usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
kfree(uvc->control_buf);
@@ -915,6 +939,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
mutex_init(&uvc->video.mutex);
uvc->state = UVC_STATE_DISCONNECTED;
+ init_waitqueue_head(&uvc->func_connected_queue);
opts = fi_to_f_uvc_opts(fi);
mutex_lock(&opts->lock);
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index c3607a32b986..886103a1fe9b 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/usb/composite.h>
#include <linux/videodev2.h>
+#include <linux/wait.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
@@ -129,6 +130,7 @@ struct uvc_device {
struct usb_function func;
struct uvc_video video;
bool func_connected;
+ wait_queue_head_t func_connected_queue;
/* Descriptors */
struct {
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index a2c78690c5c2..fd8f73bb726d 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -253,10 +253,11 @@ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
static void uvc_v4l2_disable(struct uvc_device *uvc)
{
- uvc->func_connected = false;
uvc_function_disconnect(uvc);
uvcg_video_enable(&uvc->video, 0);
uvcg_free_buffers(&uvc->video.queue);
+ uvc->func_connected = false;
+ wake_up_interruptible(&uvc->func_connected_queue);
}
static int
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 8d40a1f2ec57..e9440f7bf019 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -145,6 +145,7 @@ enum dev_state {
STATE_DEV_INVALID = 0,
STATE_DEV_OPENED,
STATE_DEV_INITIALIZED,
+ STATE_DEV_REGISTERING,
STATE_DEV_RUNNING,
STATE_DEV_CLOSED,
STATE_DEV_FAILED
@@ -508,6 +509,7 @@ static int raw_ioctl_run(struct raw_dev *dev, unsigned long value)
ret = -EINVAL;
goto out_unlock;
}
+ dev->state = STATE_DEV_REGISTERING;
spin_unlock_irqrestore(&dev->lock, flags);
ret = usb_gadget_probe_driver(&dev->driver);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index f3139ce7b0a9..06a6b19acaae 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -19,11 +19,6 @@
#define HS_BW_BOUNDARY 6144
/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188
-/*
- * max number of microframes for split transfer,
- * for fs isoc in : 1 ss + 1 idle + 7 cs
- */
-#define TT_MICROFRAMES_MAX 9
#define DBG_BUF_EN 64
@@ -242,28 +237,17 @@ static void drop_tt(struct usb_device *udev)
static struct mu3h_sch_ep_info *
create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
- struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
+ struct usb_host_endpoint *ep)
{
struct mu3h_sch_ep_info *sch_ep;
struct mu3h_sch_bw_info *bw_info;
struct mu3h_sch_tt *tt = NULL;
- u32 len_bw_budget_table;
bw_info = get_bw_info(mtk, udev, ep);
if (!bw_info)
return ERR_PTR(-ENODEV);
- if (is_fs_or_ls(udev->speed))
- len_bw_budget_table = TT_MICROFRAMES_MAX;
- else if ((udev->speed >= USB_SPEED_SUPER)
- && usb_endpoint_xfer_isoc(&ep->desc))
- len_bw_budget_table = get_esit(ep_ctx);
- else
- len_bw_budget_table = 1;
-
- sch_ep = kzalloc(struct_size(sch_ep, bw_budget_table,
- len_bw_budget_table),
- GFP_KERNEL);
+ sch_ep = kzalloc(sizeof(*sch_ep), GFP_KERNEL);
if (!sch_ep)
return ERR_PTR(-ENOMEM);
@@ -295,8 +279,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
u32 mult;
u32 esit_pkts;
u32 max_esit_payload;
- u32 *bwb_table = sch_ep->bw_budget_table;
- int i;
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
@@ -332,7 +314,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
*/
sch_ep->pkts = max_burst + 1;
sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
- bwb_table[0] = sch_ep->bw_cost_per_microframe;
} else if (sch_ep->speed >= USB_SPEED_SUPER) {
/* usb3_r1 spec section4.4.7 & 4.4.8 */
sch_ep->cs_count = 0;
@@ -349,7 +330,6 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
sch_ep->pkts = esit_pkts;
sch_ep->num_budget_microframes = 1;
- bwb_table[0] = maxpkt * sch_ep->pkts;
}
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
@@ -366,15 +346,8 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
- sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
-
- for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
- bwb_table[i] = sch_ep->bw_cost_per_microframe;
-
- /* last one <= bw_cost_per_microframe */
- bwb_table[i] = maxpkt * esit_pkts
- - i * sch_ep->bw_cost_per_microframe;
}
+ sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
} else if (is_fs_or_ls(sch_ep->speed)) {
sch_ep->pkts = 1; /* at most one packet for each microframe */
@@ -384,28 +357,7 @@ static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
*/
sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
sch_ep->num_budget_microframes = sch_ep->cs_count;
- sch_ep->bw_cost_per_microframe =
- (maxpkt < FS_PAYLOAD_MAX) ? maxpkt : FS_PAYLOAD_MAX;
-
- /* init budget table */
- if (ep_type == ISOC_OUT_EP) {
- for (i = 0; i < sch_ep->num_budget_microframes; i++)
- bwb_table[i] = sch_ep->bw_cost_per_microframe;
- } else if (ep_type == INT_OUT_EP) {
- /* only first one consumes bandwidth, others as zero */
- bwb_table[0] = sch_ep->bw_cost_per_microframe;
- } else { /* INT_IN_EP or ISOC_IN_EP */
- bwb_table[0] = 0; /* start split */
- bwb_table[1] = 0; /* idle */
- /*
- * due to cs_count will be updated according to cs
- * position, assign all remainder budget array
- * elements as @bw_cost_per_microframe, but only first
- * @num_budget_microframes elements will be used later
- */
- for (i = 2; i < TT_MICROFRAMES_MAX; i++)
- bwb_table[i] = sch_ep->bw_cost_per_microframe;
- }
+ sch_ep->bw_cost_per_microframe = min_t(u32, maxpkt, FS_PAYLOAD_MAX);
}
}
@@ -422,7 +374,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j);
- bw = sch_bw->bus_bw[k] + sch_ep->bw_budget_table[j];
+ bw = sch_bw->bus_bw[k] + sch_ep->bw_cost_per_microframe;
if (bw > max_bw)
max_bw = bw;
}
@@ -433,18 +385,16 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
struct mu3h_sch_ep_info *sch_ep, bool used)
{
+ int bw_updated;
u32 base;
- int i, j, k;
+ int i, j;
+
+ bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
- for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- k = XHCI_MTK_BW_INDEX(base + j);
- if (used)
- sch_bw->bus_bw[k] += sch_ep->bw_budget_table[j];
- else
- sch_bw->bus_bw[k] -= sch_ep->bw_budget_table[j];
- }
+ for (j = 0; j < sch_ep->num_budget_microframes; j++)
+ sch_bw->bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
}
}
@@ -464,7 +414,7 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
*/
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
k = XHCI_MTK_BW_INDEX(base + j);
- tmp = tt->fs_bus_bw[k] + sch_ep->bw_budget_table[j];
+ tmp = tt->fs_bus_bw[k] + sch_ep->bw_cost_per_microframe;
if (tmp > FS_PAYLOAD_MAX)
return -ESCH_BW_OVERFLOW;
}
@@ -538,19 +488,17 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
{
struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ int bw_updated;
u32 base;
- int i, j, k;
+ int i, j;
+
+ bw_updated = sch_ep->bw_cost_per_microframe * (used ? 1 : -1);
for (i = 0; i < sch_ep->num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
- for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- k = XHCI_MTK_BW_INDEX(base + j);
- if (used)
- tt->fs_bus_bw[k] += sch_ep->bw_budget_table[j];
- else
- tt->fs_bus_bw[k] -= sch_ep->bw_budget_table[j];
- }
+ for (j = 0; j < sch_ep->num_budget_microframes; j++)
+ tt->fs_bus_bw[XHCI_MTK_BW_INDEX(base + j)] += bw_updated;
}
if (used)
@@ -710,7 +658,7 @@ static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
- sch_ep = create_sch_ep(mtk, udev, ep, ep_ctx);
+ sch_ep = create_sch_ep(mtk, udev, ep);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index ffd4b493b4ba..1174a510dd38 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -83,7 +83,6 @@ struct mu3h_sch_bw_info {
* times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
* according to @pkts and @repeat. normal mode is used by
* default
- * @bw_budget_table: table to record bandwidth budget per microframe
*/
struct mu3h_sch_ep_info {
u32 esit;
@@ -109,7 +108,6 @@ struct mu3h_sch_ep_info {
u32 pkts;
u32 cs_count;
u32 burst_mode;
- u32 bw_budget_table[];
};
#define MU3C_U3_PORT_MAX 4
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1364ce7f0abf..152ad882657d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2123,10 +2123,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0106, 0xff) }, /* Fibocom MA510 (ECM mode w/ diag intf.) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 88b284d61681..1d878d05a658 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -106,6 +106,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM930_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index c5406452b774..732f9b13ad5d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -135,6 +135,7 @@
#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
#define HP_LD381_PRODUCT_ID 0x0f7f
+#define HP_LM930_PRODUCT_ID 0x0f9b
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c18bf8164bc2..586ef5551e76 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
{DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
+ {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */
+ {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index e07d26a3cd8e..f33e08eb7670 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -877,7 +877,7 @@ static int tcpci_remove(struct i2c_client *client)
/* Disable chip interrupts before unregistering port */
err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
if (err < 0)
- return err;
+ dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));
tcpci_unregister_port(chip->tcpci);
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c
index f1bd9e09bc87..8a952eaf9016 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6360.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c
@@ -15,6 +15,9 @@
#include "tcpci.h"
+#define MT6360_REG_PHYCTRL1 0x80
+#define MT6360_REG_PHYCTRL3 0x82
+#define MT6360_REG_PHYCTRL7 0x86
#define MT6360_REG_VCONNCTRL1 0x8C
#define MT6360_REG_MODECTRL2 0x8F
#define MT6360_REG_SWRESET 0xA0
@@ -22,6 +25,8 @@
#define MT6360_REG_DRPCTRL1 0xA2
#define MT6360_REG_DRPCTRL2 0xA3
#define MT6360_REG_I2CTORST 0xBF
+#define MT6360_REG_PHYCTRL11 0xCA
+#define MT6360_REG_RXCTRL1 0xCE
#define MT6360_REG_RXCTRL2 0xCF
#define MT6360_REG_CTDCTRL2 0xEC
@@ -106,6 +111,27 @@ static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata)
if (ret)
return ret;
+ /* BMC PHY */
+ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL1, 0x3A70);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_PHYCTRL3, 0x82);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_PHYCTRL7, 0x36);
+ if (ret)
+ return ret;
+
+ ret = mt6360_tcpc_write16(regmap, MT6360_REG_PHYCTRL11, 0x3C60);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, MT6360_REG_RXCTRL1, 0xE8);
+ if (ret)
+ return ret;
+
/* Set shipping mode off, AUTOIDLE on */
return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A);
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 79001301b383..e0de44000d92 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -161,6 +161,7 @@ struct mlx5_vdpa_net {
struct mlx5_flow_handle *rx_rule_mcast;
bool setup;
u32 cur_num_vqs;
+ u32 rqt_size;
struct notifier_block nb;
struct vdpa_callback config_cb;
struct mlx5_vdpa_wq_ent cvq_ent;
@@ -204,17 +205,12 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
}
-static inline u32 mlx5_vdpa_max_qps(int max_vqs)
-{
- return max_vqs / 2;
-}
-
static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
{
if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
return 2;
- return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
+ return mvdev->max_vqs;
}
static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
@@ -1236,25 +1232,13 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
__be32 *list;
- int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
- int num;
-
- if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
- num = 1;
- else
- num = ndev->cur_num_vqs / 2;
- max_rqt = min_t(int, roundup_pow_of_two(num),
- 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
- if (max_rqt < 1)
- return -EOPNOTSUPP;
-
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1263,12 +1247,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < max_rqt; i++, j += 2)
- list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
+ for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1282,19 +1266,13 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
{
__be32 *list;
- int max_rqt;
void *rqtc;
int inlen;
void *in;
int i, j;
int err;
- max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
- 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
- if (max_rqt < 1)
- return -EOPNOTSUPP;
-
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1305,10 +1283,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+ for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
@@ -1625,7 +1603,7 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
- newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
+ newqps > ndev->rqt_size)
break;
if (ndev->cur_num_vqs == 2 * newqps) {
@@ -1989,7 +1967,7 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
int err;
int i;
- for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
+ for (i = 0; i < mvdev->max_vqs; i++) {
err = setup_vq(ndev, &ndev->vqs[i]);
if (err)
goto err_vq;
@@ -2060,9 +2038,11 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
- ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+ ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
else
- ndev->cur_num_vqs = 2;
+ ndev->rqt_size = 1;
+
+ ndev->cur_num_vqs = 2 * ndev->rqt_size;
update_cvq_info(mvdev);
return err;
@@ -2529,7 +2509,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
struct mlx5_vdpa_virtqueue *mvq;
int i;
- for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
+ for (i = 0; i < ndev->mvdev.max_vqs; ++i) {
mvq = &ndev->vqs[i];
memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
mvq->index = i;
@@ -2671,7 +2651,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
return -EOPNOTSUPP;
}
- max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
+ max_vqs = min_t(int, MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues),
+ 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size));
if (max_vqs < 2) {
dev_warn(mdev->device,
"%d virtqueues are supported. At least 2 are required\n",
@@ -2742,7 +2723,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
}
- config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
+ config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2);
mvdev->vdev.dma_dev = &mdev->pdev->dev;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
@@ -2769,7 +2750,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
mvdev->vdev.mdev = &mgtdev->mgtdev;
- err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
+ err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
if (err)
goto err_reg;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 792ab5f23647..297b5db47454 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1450,13 +1450,9 @@ err:
return ERR_PTR(r);
}
-static struct ptr_ring *get_tap_ptr_ring(int fd)
+static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
struct ptr_ring *ring;
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
ring = tun_get_tx_ring(file);
if (!IS_ERR(ring))
goto out;
@@ -1465,7 +1461,6 @@ static struct ptr_ring *get_tap_ptr_ring(int fd)
goto out;
ring = NULL;
out:
- fput(file);
return ring;
}
@@ -1552,8 +1547,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
r = vhost_net_enable_vq(n, vq);
if (r)
goto err_used;
- if (index == VHOST_NET_VQ_RX)
- nvq->rx_ring = get_tap_ptr_ring(fd);
+ if (index == VHOST_NET_VQ_RX) {
+ if (sock)
+ nvq->rx_ring = get_tap_ptr_ring(sock->file);
+ else
+ nvq->rx_ring = NULL;
+ }
oldubufs = nvq->ubufs;
nvq->ubufs = ubufs;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 97eb0dee411c..a6bb0e438216 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1434,10 +1434,7 @@ fb_release(struct inode *inode, struct file *file)
__acquires(&info->lock)
__releases(&info->lock)
{
- struct fb_info * const info = file_fb_info(file);
-
- if (!info)
- return -ENODEV;
+ struct fb_info * const info = file->private_data;
lock_fb_info(info);
if (info->fbops->fb_release)
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 26892940c213..82e31a2d845e 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -80,6 +80,10 @@ void framebuffer_release(struct fb_info *info)
{
if (!info)
return;
+
+ if (WARN_ON(refcount_read(&info->count)))
+ return;
+
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ea42ba6445b2..b3d5f884c544 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -243,6 +243,10 @@ error:
static inline void efifb_show_boot_graphics(struct fb_info *info) {}
#endif
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
static void efifb_destroy(struct fb_info *info)
{
if (efifb_pci_dev)
@@ -254,10 +258,13 @@ static void efifb_destroy(struct fb_info *info)
else
memunmap(info->screen_base);
}
+
if (request_mem_succeeded)
release_mem_region(info->apertures->ranges[0].base,
info->apertures->ranges[0].size);
fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
}
static const struct fb_ops efifb_ops = {
@@ -620,9 +627,9 @@ static int efifb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
+ /* efifb_destroy takes care of info cleanup */
unregister_framebuffer(info);
sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
- framebuffer_release(info);
return 0;
}
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 94fc9c6d0411..2c198561c338 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -84,6 +84,10 @@ struct simplefb_par {
static void simplefb_clocks_destroy(struct simplefb_par *par);
static void simplefb_regulators_destroy(struct simplefb_par *par);
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
static void simplefb_destroy(struct fb_info *info)
{
struct simplefb_par *par = info->par;
@@ -94,6 +98,8 @@ static void simplefb_destroy(struct fb_info *info)
if (info->screen_base)
iounmap(info->screen_base);
+ framebuffer_release(info);
+
if (mem)
release_mem_region(mem->start, resource_size(mem));
}
@@ -545,8 +551,8 @@ static int simplefb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
+ /* simplefb_destroy takes care of info cleanup */
unregister_framebuffer(info);
- framebuffer_release(info);
return 0;
}
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index df6de5a9dd4c..e25e8de5ff67 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -179,6 +179,10 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
return err;
}
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
static void vesafb_destroy(struct fb_info *info)
{
struct vesafb_par *par = info->par;
@@ -188,6 +192,8 @@ static void vesafb_destroy(struct fb_info *info)
if (info->screen_base)
iounmap(info->screen_base);
release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
+
+ framebuffer_release(info);
}
static struct fb_ops vesafb_ops = {
@@ -484,10 +490,10 @@ static int vesafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
+ /* vesafb_destroy takes care of info cleanup */
unregister_framebuffer(info);
if (((struct vesafb_par *)(info->par))->region)
release_region(0x3c0, 32);
- framebuffer_release(info);
return 0;
}
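
The efifb, simplefb and vesafb conversions above all follow the same pattern: freeing the fb_info moves out of the platform driver's remove callback and into the fb_ops.fb_destroy hook, which the core runs from the final put_fb_info(). A minimal sketch of that pattern, assuming a hypothetical mydrvfb driver whose names are not taken from these patches:

#include <linux/module.h>
#include <linux/fb.h>
#include <linux/platform_device.h>

/* Illustrative only; mydrvfb_* is not a real driver. */
static void mydrvfb_destroy(struct fb_info *info)
{
	/* unmap and release driver-specific resources first, then ... */
	framebuffer_release(info);	/* the last put_fb_info() ends up here */
}

static const struct fb_ops mydrvfb_ops = {
	.owner		= THIS_MODULE,
	.fb_destroy	= mydrvfb_destroy,
};

static int mydrvfb_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);

	/* mydrvfb_destroy() now frees info; no framebuffer_release() here */
	unregister_framebuffer(info);
	return 0;
}

Because fb_destroy only runs once the reference count has dropped to zero, the WARN_ON added to framebuffer_release() in fbsysfs.c above stays quiet on this path.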
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 1d1a8debe472..933e67fcdab1 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -163,8 +163,11 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
return;
case -ECONNABORTED:
+ error = afs_abort_to_error(abort_code);
+ fallthrough;
+ case -ENETRESET: /* Responded, but we seem to have changed address */
e->responded = true;
- e->error = afs_abort_to_error(abort_code);
+ e->error = error;
return;
}
}
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 79e1a5f6701b..a840c3588ebb 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -292,6 +292,10 @@ bool afs_select_fileserver(struct afs_operation *op)
op->error = error;
goto iterate_address;
+ case -ENETRESET:
+ pr_warn("kAFS: Peer reset %s (op=%x)\n",
+ op->type ? op->type->name : "???", op->debug_id);
+ fallthrough;
case -ECONNRESET:
_debug("call reset");
op->error = error;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 23a1a92d64bb..a5434f3e57c6 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -537,6 +537,8 @@ static void afs_deliver_to_call(struct afs_call *call)
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
+ case -ENOMEM:
+ case -EFAULT:
abort_code = RXGEN_CC_UNMARSHAL;
if (state != AFS_CALL_CL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
@@ -544,7 +546,7 @@ static void afs_deliver_to_call(struct afs_call *call)
abort_code, ret, "KUM");
goto local_abort;
default:
- abort_code = RX_USER_ABORT;
+ abort_code = RX_CALL_DEAD;
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
abort_code, ret, "KER");
goto local_abort;
@@ -836,7 +838,7 @@ void afs_send_empty_reply(struct afs_call *call)
case -ENOMEM:
_debug("oom");
rxrpc_kernel_abort_call(net->socket, call->rxcall,
- RX_USER_ABORT, -ENOMEM, "KOO");
+ RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
fallthrough;
default:
_leave(" [error]");
@@ -878,7 +880,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
if (n == -ENOMEM) {
_debug("oom");
rxrpc_kernel_abort_call(net->socket, call->rxcall,
- RX_USER_ABORT, -ENOMEM, "KOO");
+ RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
}
_leave(" [error]");
}
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 4763132ca57e..c1bc52ac7de1 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -636,6 +636,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
case -EKEYEXPIRED:
case -EKEYREJECTED:
case -EKEYREVOKED:
+ case -ENETRESET:
afs_redirty_pages(wbc, mapping, start, len);
mapping_set_error(mapping, ret);
break;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index aa25bffd4823..b6edcf89a429 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -85,7 +85,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
if (folio_test_dirty(folio)) {
dout("%p dirty_folio %p idx %lu -- already dirty\n",
mapping->host, folio, folio->index);
- BUG_ON(!folio_get_private(folio));
+ VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
return false;
}
@@ -122,7 +122,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
* Reference snap context in folio->private. Also set
* PagePrivate so that we get invalidate_folio callback.
*/
- BUG_ON(folio_get_private(folio));
+ VM_BUG_ON_FOLIO(folio_test_private(folio), folio);
folio_attach_private(folio, snapc);
return ceph_fscache_dirty_folio(mapping, folio);
@@ -150,7 +150,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
}
WARN_ON(!folio_test_locked(folio));
- if (folio_get_private(folio)) {
+ if (folio_test_private(folio)) {
dout("%p invalidate_folio idx %lu full dirty page\n",
inode, folio->index);
@@ -729,8 +729,11 @@ static void writepages_finish(struct ceph_osd_request *req)
/* clean all pages */
for (i = 0; i < req->r_num_ops; i++) {
- if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
+ if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
+ pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
+ __func__, req->r_ops[i].op, req, i, req->r_tid);
break;
+ }
osd_data = osd_req_op_extent_osd_data(req, i);
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 6c9e837aa1d3..8c8226c0feac 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -629,9 +629,15 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
iinfo.change_attr = 1;
ceph_encode_timespec64(&iinfo.btime, &now);
- iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
- iinfo.xattr_data = xattr_buf;
- memset(iinfo.xattr_data, 0, iinfo.xattr_len);
+ if (req->r_pagelist) {
+ iinfo.xattr_len = req->r_pagelist->length;
+ iinfo.xattr_data = req->r_pagelist->mapped_tail;
+ } else {
+ /* fake it */
+ iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
+ iinfo.xattr_data = xattr_buf;
+ memset(iinfo.xattr_data, 0, iinfo.xattr_len);
+ }
in.ino = cpu_to_le64(vino.ino);
in.snapid = cpu_to_le64(CEPH_NOSNAP);
@@ -743,6 +749,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
err = ceph_security_init_secctx(dentry, mode, &as_ctx);
if (err < 0)
goto out_ctx;
+ /* Async create can't handle more than a page of xattrs */
+ if (as_ctx.pagelist &&
+ !list_is_singular(&as_ctx.pagelist->head))
+ try_async = false;
} else if (!d_in_lookup(dentry)) {
/* If it's not being looked up, it's negative */
return -ENOENT;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 39080b2d6cf8..b6697333bb2b 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1153,13 +1153,12 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
if (length != written && (iomap->flags & IOMAP_F_NEW)) {
/* Deallocate blocks that were just allocated. */
- loff_t blockmask = i_blocksize(inode) - 1;
- loff_t end = (pos + length) & ~blockmask;
+ loff_t hstart = round_up(pos + written, i_blocksize(inode));
+ loff_t hend = iomap->offset + iomap->length;
- pos = (pos + written + blockmask) & ~blockmask;
- if (pos < end) {
- truncate_pagecache_range(inode, pos, end - 1);
- punch_hole(ip, pos, end - pos);
+ if (hstart < hend) {
+ truncate_pagecache_range(inode, hstart, hend - 1);
+ punch_hole(ip, hstart, hend - hstart);
}
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 48f01323c37c..2556ae1f92ea 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -770,30 +770,27 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
return ret ? ret : ret1;
}
-static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
+static inline bool should_fault_in_pages(struct iov_iter *i,
+ struct kiocb *iocb,
size_t *prev_count,
size_t *window_size)
{
size_t count = iov_iter_count(i);
size_t size, offs;
- if (likely(!count))
- return false;
- if (ret <= 0 && ret != -EFAULT)
+ if (!count)
return false;
if (!iter_is_iovec(i))
return false;
size = PAGE_SIZE;
- offs = offset_in_page(i->iov[0].iov_base + i->iov_offset);
+ offs = offset_in_page(iocb->ki_pos);
if (*prev_count != count || !*window_size) {
size_t nr_dirtied;
- size = ALIGN(offs + count, PAGE_SIZE);
- size = min_t(size_t, size, SZ_1M);
nr_dirtied = max(current->nr_dirtied_pause -
current->nr_dirtied, 8);
- size = min(size, nr_dirtied << PAGE_SHIFT);
+ size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
}
*prev_count = count;
@@ -807,7 +804,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
struct file *file = iocb->ki_filp;
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
size_t prev_count = 0, window_size = 0;
- size_t written = 0;
+ size_t read = 0;
ssize_t ret;
/*
@@ -835,35 +832,31 @@ retry:
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
pagefault_disable();
to->nofault = true;
ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
- IOMAP_DIO_PARTIAL, written);
+ IOMAP_DIO_PARTIAL, read);
to->nofault = false;
pagefault_enable();
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
if (ret > 0)
- written = ret;
-
- if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
- size_t leftover;
+ read = ret;
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_writeable(to, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- if (gfs2_holder_queued(gh))
- goto retry_under_glock;
+ if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ window_size -= fault_in_iov_iter_writeable(to, window_size);
+ if (window_size)
goto retry;
- }
}
+out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
if (ret < 0)
return ret;
- return written;
+ return read;
}
static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
@@ -873,7 +866,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
struct inode *inode = file->f_mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
size_t prev_count = 0, window_size = 0;
- size_t read = 0;
+ size_t written = 0;
ssize_t ret;
/*
@@ -901,39 +894,35 @@ retry:
goto out_uninit;
/* Silently fall back to buffered I/O when writing beyond EOF */
if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
- goto out;
-retry_under_glock:
+ goto out_unlock;
from->nofault = true;
ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
- IOMAP_DIO_PARTIAL, read);
+ IOMAP_DIO_PARTIAL, written);
from->nofault = false;
-
- if (ret == -ENOTBLK)
- ret = 0;
+ if (ret <= 0) {
+ if (ret == -ENOTBLK)
+ ret = 0;
+ if (ret != -EFAULT)
+ goto out_unlock;
+ }
if (ret > 0)
- read = ret;
-
- if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
- size_t leftover;
+ written = ret;
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_readable(from, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- if (gfs2_holder_queued(gh))
- goto retry_under_glock;
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ window_size -= fault_in_iov_iter_readable(from, window_size);
+ if (window_size)
goto retry;
- }
}
-out:
+out_unlock:
if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
if (ret < 0)
return ret;
- return read;
+ return written;
}
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -941,7 +930,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct gfs2_inode *ip;
struct gfs2_holder gh;
size_t prev_count = 0, window_size = 0;
- size_t written = 0;
+ size_t read = 0;
ssize_t ret;
/*
@@ -962,7 +951,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (ret >= 0) {
if (!iov_iter_count(to))
return ret;
- written = ret;
+ read = ret;
} else if (ret != -EFAULT) {
if (ret != -EAGAIN)
return ret;
@@ -975,30 +964,26 @@ retry:
ret = gfs2_glock_nq(&gh);
if (ret)
goto out_uninit;
-retry_under_glock:
pagefault_disable();
ret = generic_file_read_iter(iocb, to);
pagefault_enable();
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
if (ret > 0)
- written += ret;
-
- if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
- size_t leftover;
+ read += ret;
- gfs2_holder_allow_demote(&gh);
- leftover = fault_in_iov_iter_writeable(to, window_size);
- gfs2_holder_disallow_demote(&gh);
- if (leftover != window_size) {
- if (gfs2_holder_queued(&gh))
- goto retry_under_glock;
+ if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(&gh);
+ window_size -= fault_in_iov_iter_writeable(to, window_size);
+ if (window_size)
goto retry;
- }
}
+out_unlock:
if (gfs2_holder_queued(&gh))
gfs2_glock_dq(&gh);
out_uninit:
gfs2_holder_uninit(&gh);
- return written ? written : ret;
+ return read ? read : ret;
}
static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
@@ -1012,7 +997,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
struct gfs2_holder *statfs_gh = NULL;
size_t prev_count = 0, window_size = 0;
size_t orig_count = iov_iter_count(from);
- size_t read = 0;
+ size_t written = 0;
ssize_t ret;
/*
@@ -1030,10 +1015,18 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
retry:
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+ window_size -= fault_in_iov_iter_readable(from, window_size);
+ if (!window_size) {
+ ret = -EFAULT;
+ goto out_uninit;
+ }
+ from->count = min(from->count, window_size);
+ }
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
+
if (inode == sdp->sd_rindex) {
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -1050,25 +1043,19 @@ retry_under_glock:
current->backing_dev_info = NULL;
if (ret > 0) {
iocb->ki_pos += ret;
- read += ret;
+ written += ret;
}
if (inode == sdp->sd_rindex)
gfs2_glock_dq_uninit(statfs_gh);
- from->count = orig_count - read;
- if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
- size_t leftover;
-
- gfs2_holder_allow_demote(gh);
- leftover = fault_in_iov_iter_readable(from, window_size);
- gfs2_holder_disallow_demote(gh);
- if (leftover != window_size) {
- from->count = min(from->count, window_size - leftover);
- if (gfs2_holder_queued(gh))
- goto retry_under_glock;
- goto retry;
- }
+ if (ret <= 0 && ret != -EFAULT)
+ goto out_unlock;
+
+ from->count = orig_count - written;
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+ gfs2_glock_dq(gh);
+ goto retry;
}
out_unlock:
if (gfs2_holder_queued(gh))
@@ -1077,8 +1064,8 @@ out_uninit:
gfs2_holder_uninit(gh);
if (statfs_gh)
kfree(statfs_gh);
- from->count = orig_count - read;
- return read ? read : ret;
+ from->count = orig_count - written;
+ return written ? written : ret;
}
/**
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 91de361ea9ab..e0823f58f795 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4481,6 +4481,9 @@ done:
if (ret < 0)
req_set_fail(req);
__io_req_complete(req, issue_flags, ret, 0);
+ /* put file to avoid an attempt to IOPOLL the req */
+ io_put_file(req->file);
+ req->file = NULL;
return 0;
}
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index e2d59bb5e6bb..9a16897e8dc6 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -517,7 +517,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
if (result.negated)
ctx->flags &= ~NFS_MOUNT_SOFTREVAL;
else
- ctx->flags &= NFS_MOUNT_SOFTREVAL;
+ ctx->flags |= NFS_MOUNT_SOFTREVAL;
break;
case Opt_posix:
if (result.negated)
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 172c86270b31..913bef0d2a36 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -72,7 +72,7 @@ out:
return 0;
}
-static int seq_fdinfo_open(struct inode *inode, struct file *file)
+static int proc_fdinfo_access_allowed(struct inode *inode)
{
bool allowed = false;
struct task_struct *task = get_proc_task(inode);
@@ -86,6 +86,16 @@ static int seq_fdinfo_open(struct inode *inode, struct file *file)
if (!allowed)
return -EACCES;
+ return 0;
+}
+
+static int seq_fdinfo_open(struct inode *inode, struct file *file)
+{
+ int ret = proc_fdinfo_access_allowed(inode);
+
+ if (ret)
+ return ret;
+
return single_open(file, seq_show, inode);
}
@@ -348,12 +358,23 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
proc_fdinfo_instantiate);
}
+static int proc_open_fdinfo(struct inode *inode, struct file *file)
+{
+ int ret = proc_fdinfo_access_allowed(inode);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
const struct inode_operations proc_fdinfo_inode_operations = {
.lookup = proc_lookupfdinfo,
.setattr = proc_setattr,
};
const struct file_operations proc_fdinfo_operations = {
+ .open = proc_open_fdinfo,
.read = generic_read_dir,
.iterate_shared = proc_readfdinfo,
.llseek = generic_file_llseek,
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 7ab8a58c29b6..9456a2032224 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -931,6 +931,38 @@ struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
}
EXPORT_SYMBOL(seq_list_next);
+struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos)
+{
+ struct list_head *lh;
+
+ list_for_each_rcu(lh, head)
+ if (pos-- == 0)
+ return lh;
+
+ return NULL;
+}
+EXPORT_SYMBOL(seq_list_start_rcu);
+
+struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos)
+{
+ if (!pos)
+ return head;
+
+ return seq_list_start_rcu(head, pos - 1);
+}
+EXPORT_SYMBOL(seq_list_start_head_rcu);
+
+struct list_head *seq_list_next_rcu(void *v, struct list_head *head,
+ loff_t *ppos)
+{
+ struct list_head *lh;
+
+ lh = list_next_rcu((struct list_head *)v);
+ ++*ppos;
+ return lh == head ? NULL : lh;
+}
+EXPORT_SYMBOL(seq_list_next_rcu);
+
/**
* seq_hlist_start - start an iteration of a hlist
* @head: the head of the hlist
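
The new seq_list_start_rcu(), seq_list_start_head_rcu() and seq_list_next_rcu() helpers mirror the existing seq_list_start()/seq_list_next() pair but step through the list with rcu_dereference(), so a seq_file can walk an RCU-protected list without holding the writer's lock. A minimal sketch of an iterator built on them; my_list, struct my_entry and the my_seq_* callbacks are hypothetical names, not taken from this patch:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>

struct my_entry {
	struct list_head node;
	int value;
};

static LIST_HEAD(my_list);	/* writers use list_add_rcu()/list_del_rcu() */

static void *my_seq_start(struct seq_file *m, loff_t *pos)
{
	rcu_read_lock();	/* held until my_seq_stop() */
	return seq_list_start_head_rcu(&my_list, *pos);
}

static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next_rcu(v, &my_list, pos);
}

static void my_seq_stop(struct seq_file *m, void *v)
{
	rcu_read_unlock();
}

static int my_seq_show(struct seq_file *m, void *v)
{
	if (v == &my_list) {	/* list head returned for *pos == 0 */
		seq_puts(m, "value\n");
		return 0;
	}
	seq_printf(m, "%d\n", list_entry(v, struct my_entry, node)->value);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start	= my_seq_start,
	.next	= my_seq_next,
	.stop	= my_seq_stop,
	.show	= my_seq_show,
};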
diff --git a/include/linux/audit.h b/include/linux/audit.h
index d06134ac6245..cece70231138 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -339,7 +339,7 @@ static inline void audit_uring_entry(u8 op)
}
static inline void audit_uring_exit(int success, long code)
{
- if (unlikely(!audit_dummy_context()))
+ if (unlikely(audit_context()))
__audit_uring_exit(success, code);
}
static inline void audit_syscall_entry(int major, unsigned long a0,
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index c2ea47f30046..e22dc03c850e 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -17,7 +17,6 @@
#include <linux/can.h>
#include <linux/can/bittiming.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/can/length.h>
#include <linux/can/netlink.h>
#include <linux/can/skb.h>
@@ -85,15 +84,6 @@ struct can_priv {
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
-
-#ifdef CONFIG_CAN_LEDS
- struct led_trigger *tx_led_trig;
- char tx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rx_led_trig;
- char rx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rxtx_led_trig;
- char rxtx_led_trig_name[CAN_LED_NAME_SZ];
-#endif
};
static inline bool can_tdc_is_enabled(const struct can_priv *priv)
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
deleted file mode 100644
index 7c3cfd798c56..000000000000
--- a/include/linux/can/led.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- */
-
-#ifndef _CAN_LED_H
-#define _CAN_LED_H
-
-#include <linux/if.h>
-#include <linux/leds.h>
-#include <linux/netdevice.h>
-
-enum can_led_event {
- CAN_LED_EVENT_OPEN,
- CAN_LED_EVENT_STOP,
- CAN_LED_EVENT_TX,
- CAN_LED_EVENT_RX,
-};
-
-#ifdef CONFIG_CAN_LEDS
-
-/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
- * suffix and null terminator
- */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
-
-void can_led_event(struct net_device *netdev, enum can_led_event event);
-void devm_can_led_init(struct net_device *netdev);
-int __init can_led_notifier_init(void);
-void __exit can_led_notifier_exit(void);
-
-#else
-
-static inline void can_led_event(struct net_device *netdev,
- enum can_led_event event)
-{
-}
-static inline void devm_can_led_init(struct net_device *netdev)
-{
-}
-static inline int can_led_notifier_init(void)
-{
- return 0;
-}
-static inline void can_led_notifier_exit(void)
-{
-}
-
-#endif
-
-#endif /* !_CAN_LED_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ec5ca392eaa3..38c8203d52cb 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -145,6 +145,7 @@ struct inet6_skb_parm {
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
+#define IP6SKB_FAKEJUMBO 512
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/linux/list.h b/include/linux/list.h
index c147eeb2d39d..57e8b559cdf6 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -606,6 +606,16 @@ static inline void list_splice_tail_init(struct list_head *list,
for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference((head)->next); \
+ !list_is_head(pos, (head)); \
+ pos = rcu_dereference(pos->next))
+
+/**
* list_for_each_continue - continue iteration over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
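
list_for_each_rcu() is the cursor-based counterpart of list_for_each_entry_rcu(); it was added so the seq_list_*_rcu() helpers in fs/seq_file.c above can walk a bare list_head chain under RCU, but it can also be used directly. A short sketch with hypothetical names:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct my_entry {		/* illustrative, not from the patch */
	struct list_head node;
	int value;
};

static LIST_HEAD(my_list);	/* updates protected by a writer-side lock */

static int my_sum(void)
{
	struct list_head *pos;
	int sum = 0;

	rcu_read_lock();	/* readers stay inside an RCU read-side section */
	list_for_each_rcu(pos, &my_list)
		sum += list_entry(pos, struct my_entry, node)->value;
	rcu_read_unlock();

	return sum;
}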
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
index a18c1539a152..0c706085c205 100644
--- a/include/linux/mfd/idt8a340_reg.h
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -407,7 +407,7 @@
#define TOD_READ_PRIMARY_0 0xcc40
#define TOD_READ_PRIMARY_0_V520 0xcc50
/* 8-bit subns, 32-bit ns, 48-bit seconds */
-#define TOD_READ_PRIMARY 0x0000
+#define TOD_READ_PRIMARY_BASE 0x0000
/* Counter increments after TOD write is completed */
#define TOD_READ_PRIMARY_COUNTER 0x000b
/* Read trigger configuration */
@@ -424,6 +424,16 @@
#define TOD_READ_SECONDARY_0 0xcc90
#define TOD_READ_SECONDARY_0_V520 0xcca0
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_SECONDARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_SECONDARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_SECONDARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_SECONDARY_CMD 0x000e
+#define TOD_READ_SECONDARY_CMD_V520 0x000f
+
#define TOD_READ_SECONDARY_1 0xcca0
#define TOD_READ_SECONDARY_1_V520 0xccb0
#define TOD_READ_SECONDARY_2 0xccb0
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index d6bac3976913..b064bc278f52 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -272,6 +272,8 @@ struct mlx5_cmd_stats {
u32 last_failed_errno;
/* last bad status returned by FW */
u8 last_failed_mbox_status;
+ /* last command failed syndrome returned by FW */
+ u32 last_failed_syndrome;
struct dentry *root;
/* protect command average calculations */
spinlock_t lock;
@@ -1051,9 +1053,14 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
+
+static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
+
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 7bab3e51c61e..78b3d3465dd7 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1359,7 +1359,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vhca_resource_manager[0x1];
u8 hca_cap_2[0x1];
- u8 reserved_at_21[0x1];
+ u8 create_lag_when_not_master_up[0x1];
u8 dtor[0x1];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
@@ -10816,7 +10816,8 @@ struct mlx5_ifc_dcbx_param_bits {
enum {
MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
- MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT = 1,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW = 2,
};
struct mlx5_ifc_lagc_bits {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 536321691c72..f615a66c89e9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -910,7 +910,7 @@ struct net_device_path_stack {
struct net_device_path_ctx {
const struct net_device *dev;
- const u8 *daddr;
+ u8 daddr[ETH_ALEN];
int num_vlans;
struct {
@@ -2119,6 +2119,8 @@ struct net_device {
/* Protocol-specific pointers */
+ struct in_device __rcu *ip_ptr;
+ struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
@@ -2131,16 +2133,18 @@ struct net_device {
#if IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
#endif
- struct in_device __rcu *ip_ptr;
#if IS_ENABLED(CONFIG_DECNET)
struct dn_dev __rcu *dn_ptr;
#endif
- struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
#endif
+#if IS_ENABLED(CONFIG_CFG80211)
struct wireless_dev *ieee80211_ptr;
+#endif
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
struct wpan_dev *ieee802154_ptr;
+#endif
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
struct mpls_dev __rcu *mpls_ptr;
#endif
@@ -2161,7 +2165,11 @@ struct net_device {
struct bpf_prog __rcu *xdp_prog;
unsigned long gro_flush_timeout;
int napi_defer_hard_irqs;
-#define GRO_MAX_SIZE 65536
+#define GRO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GRO_MAX_SIZE (8 * 65535u)
unsigned int gro_max_size;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
@@ -2272,12 +2280,17 @@ struct net_device {
const struct rtnl_link_ops *rtnl_link_ops;
/* for setting kernel sock attribute on TCP connection setup */
-#define GSO_MAX_SIZE 65536
+#define GSO_MAX_SEGS 65535u
+#define GSO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
+
unsigned int gso_max_size;
#define TSO_LEGACY_MAX_SIZE 65536
#define TSO_MAX_SIZE UINT_MAX
unsigned int tso_max_size;
-#define GSO_MAX_SEGS 65535
u16 gso_max_segs;
#define TSO_MAX_SEGS U16_MAX
u16 tso_max_segs;
@@ -3127,6 +3140,7 @@ struct softnet_data {
/* Another possibly contended cache line */
spinlock_t defer_lock ____cacheline_aligned_in_smp;
int defer_count;
+ int defer_ipi_scheduled;
struct sk_buff *defer_list;
call_single_data_t defer_csd;
};
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index 16752eca5cbd..90e3045b2dcb 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -76,7 +76,7 @@ void qed_fcoe_set_pf_params(struct qed_dev *cdev,
* @fill_dev_info: fills FCoE specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register FCoE operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -96,7 +96,7 @@ void qed_fcoe_set_pf_params(struct qed_dev *cdev,
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * return 0 on sucesss, otherwise error value.
+ * return 0 on success, otherwise error value.
* @release_conn: release a previously acquired fcoe connection
* @param cdev
* @param handle - the connection handle.
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 494cdc3cd840..fbf7973ae9ba 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -133,7 +133,7 @@ struct qed_iscsi_cb_ops {
* @fill_dev_info: fills iSCSI specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register iscsi operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -152,7 +152,7 @@ struct qed_iscsi_cb_ops {
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @release_conn: release a previously acquired iscsi connection
* @param cdev
* @param handle - the connection handle.
diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
index 1d51df347560..bbfbfba51f37 100644
--- a/include/linux/qed/qed_nvmetcp_if.h
+++ b/include/linux/qed/qed_nvmetcp_if.h
@@ -132,7 +132,7 @@ struct nvmetcp_task_params {
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @release_conn: release a previously acquired nvmetcp connection
* @param cdev
* @param handle - the connection handle.
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 60820ab511d2..bd023dd38ae6 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -277,6 +277,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
/*
* Helpers for iteration over hlist_head-s in seq_files
*/
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9d82a8b6c8f1..da96f0d3e753 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -510,7 +510,8 @@ enum skb_drop_reason {
(name = SKB_DROP_REASON_##reason)
#define SKB_DR_OR(name, reason) \
do { \
- if (name == SKB_DROP_REASON_NOT_SPECIFIED) \
+ if (name == SKB_DROP_REASON_NOT_SPECIFIED || \
+ name == SKB_NOT_DROPPED_YET) \
SKB_DR_SET(name, reason); \
} while (0)
@@ -1764,11 +1765,6 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
}
#endif
-static inline unsigned int skb_data_area_size(struct sk_buff *skb)
-{
- return skb_end_pointer(skb) - skb->data;
-}
-
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
struct ubuf_info *uarg);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index db5149567305..90501404fa49 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -160,7 +160,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
-#define RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL (1UL << 12)
+#define RPC_CLNT_CREATE_CONNECTED (1UL << 12)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 68713388b617..6d02e12e4702 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1183,6 +1183,9 @@ struct cfg80211_mbssid_elems {
* Token (measurement type 11)
* @lci_len: LCI data length
* @civicloc_len: Civic location data length
+ * @he_bss_color: BSS Color settings
+ * @he_bss_color_valid: indicates whether bss color
+ * attribute is present in beacon data or not.
*/
struct cfg80211_beacon_data {
const u8 *head, *tail;
@@ -1202,6 +1205,8 @@ struct cfg80211_beacon_data {
size_t probe_resp_len;
size_t lci_len;
size_t civicloc_len;
+ struct cfg80211_he_bss_color he_bss_color;
+ bool he_bss_color_valid;
};
struct mac_address {
@@ -1292,7 +1297,6 @@ struct cfg80211_unsol_bcast_probe_resp {
* @sae_h2e_required: stations must support direct H2E technique in SAE
* @flags: flags, as defined in enum cfg80211_ap_settings_flags
* @he_obss_pd: OBSS Packet Detection settings
- * @he_bss_color: BSS Color settings
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
@@ -1326,7 +1330,6 @@ struct cfg80211_ap_settings {
bool twt_responder;
u32 flags;
struct ieee80211_he_obss_pd he_obss_pd;
- struct cfg80211_he_bss_color he_bss_color;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
@@ -2735,6 +2738,7 @@ struct cfg80211_auth_request {
* userspace if this flag is set. Only applicable for cfg80211_connect()
* request (connect callback).
* @ASSOC_REQ_DISABLE_HE: Disable HE
+ * @ASSOC_REQ_DISABLE_EHT: Disable EHT
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
@@ -2742,6 +2746,7 @@ enum cfg80211_assoc_req_flags {
ASSOC_REQ_USE_RRM = BIT(2),
CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3),
ASSOC_REQ_DISABLE_HE = BIT(4),
+ ASSOC_REQ_DISABLE_EHT = BIT(5),
};
/**
@@ -5549,8 +5554,6 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
* @conn_owner_nlportid: (private) connection owner socket port ID
* @disconnect_wk: (private) auto-disconnect work
* @disconnect_bssid: (private) the BSSID to use for auto-disconnect
- * @ibss_fixed: (private) IBSS is using fixed BSSID
- * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
* @event_list: (private) list for internal event processing
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
@@ -5599,9 +5602,6 @@ struct wireless_dev {
struct cfg80211_chan_def preset_chandef;
struct cfg80211_chan_def chandef;
- bool ibss_fixed;
- bool ibss_dfs_possible;
-
bool ps;
int ps_timeout;
@@ -8006,7 +8006,9 @@ int cfg80211_register_netdevice(struct net_device *dev);
*/
static inline void cfg80211_unregister_netdevice(struct net_device *dev)
{
+#if IS_ENABLED(CONFIG_CFG80211)
cfg80211_unregister_wdev(dev->ieee80211_ptr);
+#endif
}
/**
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 85f9e8417688..d8d8719315fd 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -373,6 +373,7 @@ struct wpan_dev {
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
static inline int
wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
const struct ieee802154_addr *daddr,
@@ -383,6 +384,7 @@ wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
}
+#endif
struct wpan_phy *
wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 81b965953036..f259e1ae14ba 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -103,15 +103,25 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
const int dif);
int inet6_hash(struct sock *sk);
-#endif /* IS_ENABLED(CONFIG_IPV6) */
-#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
- ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
+static inline bool inet6_match(struct net *net, const struct sock *sk,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ const __portpair ports,
+ const int dif, const int sdif)
+{
+ int bound_dev_if;
+
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_family != AF_INET6 ||
+ sk->sk_portpair != ports ||
+ !ipv6_addr_equal(&sk->sk_v6_daddr, saddr) ||
+ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return false;
+
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ return bound_dev_if == dif || bound_dev_if == sdif;
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 85cd695e7fd1..077cd730ce2f 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -25,6 +25,7 @@
#undef INET_CSK_CLEAR_TIMERS
struct inet_bind_bucket;
+struct inet_bind2_bucket;
struct tcp_congestion_ops;
/*
@@ -57,6 +58,7 @@ struct inet_connection_sock_af_ops {
*
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
+ * @icsk_bind2_hash: Bind node in the bhash2 table
* @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
@@ -83,6 +85,7 @@ struct inet_connection_sock {
struct inet_sock icsk_inet;
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
+ struct inet_bind2_bucket *icsk_bind2_hash;
unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 1b8706719d4f..a0887b70967b 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -90,11 +90,32 @@ struct inet_bind_bucket {
struct hlist_head owners;
};
+struct inet_bind2_bucket {
+ possible_net_t ib_net;
+ int l3mdev;
+ unsigned short port;
+ union {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr v6_rcv_saddr;
+#endif
+ __be32 rcv_saddr;
+ };
+ /* Node in the inet2_bind_hashbucket chain */
+ struct hlist_node node;
+ /* List of sockets hashed to this bucket */
+ struct hlist_head owners;
+};
+
static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
return read_pnet(&ib->ib_net);
}
+static inline struct net *ib2_net(struct inet_bind2_bucket *ib)
+{
+ return read_pnet(&ib->ib_net);
+}
+
#define inet_bind_bucket_for_each(tb, head) \
hlist_for_each_entry(tb, head, node)
@@ -103,6 +124,15 @@ struct inet_bind_hashbucket {
struct hlist_head chain;
};
+/* This is synchronized using the inet_bind_hashbucket's spinlock.
+ * Instead of having separate spinlocks, the inet_bind2_hashbucket can share
+ * the inet_bind_hashbucket's given that in every case where the bhash2 table
+ * is useful, a lookup in the bhash table also occurs.
+ */
+struct inet_bind2_hashbucket {
+ struct hlist_head chain;
+};
+
/* Sockets can be hashed in established or listening table.
* We must use different 'nulls' end-of-chain value for all hash buckets :
* A socket might transition from ESTABLISH to LISTEN state without
@@ -134,6 +164,12 @@ struct inet_hashinfo {
*/
struct kmem_cache *bind_bucket_cachep;
struct inet_bind_hashbucket *bhash;
+ /* The 2nd binding table hashed by port and address.
+ * This is used primarily for expediting the resolution of bind
+ * conflicts.
+ */
+ struct kmem_cache *bind2_bucket_cachep;
+ struct inet_bind2_hashbucket *bhash2;
unsigned int bhash_size;
/* The 2nd listener table hashed by local port and address */
@@ -193,6 +229,36 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
+static inline bool check_bind_bucket_match(struct inet_bind_bucket *tb,
+ struct net *net,
+ const unsigned short port,
+ int l3mdev)
+{
+ return net_eq(ib_net(tb), net) && tb->port == port &&
+ tb->l3mdev == l3mdev;
+}
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind2_hashbucket *head,
+ const unsigned short port, int l3mdev,
+ const struct sock *sk);
+
+void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind2_bucket *tb);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net,
+ const unsigned short port, int l3mdev,
+ struct sock *sk,
+ struct inet_bind2_hashbucket **head);
+
+bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb,
+ struct net *net,
+ const unsigned short port,
+ int l3mdev,
+ const struct sock *sk);
+
static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
const u32 bhash_size)
{
@@ -200,7 +266,7 @@ static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
}
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum);
+ struct inet_bind2_bucket *tb2, const unsigned short snum);
/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);
@@ -255,7 +321,6 @@ static inline struct sock *inet_lookup_listener(struct net *net,
((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
-#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
const __addrpair __name = (__force __addrpair) ( \
@@ -267,24 +332,22 @@ static inline struct sock *inet_lookup_listener(struct net *net,
(((__force __u64)(__be32)(__daddr)) << 32) | \
((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_addrpair == (__cookie)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
-#else /* 32-bit arch */
-#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
- const int __name __deprecated __attribute__((unused))
-
-#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
- (((__sk)->sk_portpair == (__ports)) && \
- ((__sk)->sk_daddr == (__saddr)) && \
- ((__sk)->sk_rcv_saddr == (__daddr)) && \
- (((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
- net_eq(sock_net(__sk), (__net)))
-#endif /* 64-bit arch */
+
+static inline bool inet_match(struct net *net, const struct sock *sk,
+ const __addrpair cookie, const __portpair ports,
+ int dif, int sdif)
+{
+ int bound_dev_if;
+
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_portpair != ports ||
+ sk->sk_addrpair != cookie)
+ return false;
+
+ /* Paired with WRITE_ONCE() from sock_bindtoindex_locked() */
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ return bound_dev_if == dif || bound_dev_if == sdif;
+}
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 234d70ae5f4c..c1b5dcd6597c 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -116,14 +116,15 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
static inline int inet_request_bound_dev_if(const struct sock *sk,
struct sk_buff *skb)
{
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
#ifdef CONFIG_NET_L3_MASTER_DEV
struct net *net = sock_net(sk);
- if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept)
+ if (!bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept)
return l3mdev_master_ifindex_by_index(net, skb->skb_iif);
#endif
- return sk->sk_bound_dev_if;
+ return bound_dev_if;
}
static inline int inet_sk_bound_l3mdev(const struct sock *sk)
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 463ae5d33eb0..5b47545f22d3 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -71,7 +71,6 @@ struct inet_timewait_sock {
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
- u32 tw_bslot; /* bind bucket slot */
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
};
@@ -110,6 +109,8 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
+void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family);
+
static inline
struct net *twsk_net(const struct inet_timewait_sock *twsk)
{
diff --git a/include/net/ip.h b/include/net/ip.h
index 3984f2c39c4b..26fffda78cca 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -56,6 +56,7 @@ struct inet_skb_parm {
#define IPSKB_DOREDIRECT BIT(5)
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
+#define IPSKB_NOPOLICY BIT(8)
u16 frag_max_size;
};
@@ -93,7 +94,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
ipcm->sockc.mark = inet->sk.sk_mark;
ipcm->sockc.tsflags = inet->sk.sk_tsflags;
- ipcm->oif = inet->sk.sk_bound_dev_if;
+ ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 213612f1680c..5b38bf1a586b 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -151,6 +151,17 @@ struct frag_hdr {
__be32 identification;
};
+/*
+ * Jumbo payload option, as described in RFC 2675 2.
+ */
+struct hop_jumbo_hdr {
+ u8 nexthdr;
+ u8 hdrlen;
+ u8 tlv_type; /* IPV6_TLV_JUMBO, 0xC2 */
+ u8 tlv_len; /* 4 */
+ __be32 jumbo_payload_len;
+};
+
#define IP6_MF 0x0001
#define IP6_OFFSET 0xFFF8
@@ -456,6 +467,39 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
struct ipv6_txoptions *opt);
+/* This helper is specialized for BIG TCP needs.
+ * It assumes the hop_jumbo_hdr will immediately follow the IPV6 header.
+ * It assumes headers are already in skb->head.
+ * Returns 0, or IPPROTO_TCP if a BIG TCP packet is there.
+ */
+static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb)
+{
+ const struct hop_jumbo_hdr *jhdr;
+ const struct ipv6hdr *nhdr;
+
+ if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
+ return 0;
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ return 0;
+
+ if (skb_network_offset(skb) +
+ sizeof(struct ipv6hdr) +
+ sizeof(struct hop_jumbo_hdr) > skb_headlen(skb))
+ return 0;
+
+ nhdr = ipv6_hdr(skb);
+
+ if (nhdr->nexthdr != NEXTHDR_HOP)
+ return 0;
+
+ jhdr = (const struct hop_jumbo_hdr *) (nhdr + 1);
+ if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
+ jhdr->nexthdr != IPPROTO_TCP)
+ return 0;
+ return jhdr->nexthdr;
+}
+
static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
/* If forwarding is enabled, RA are not accepted unless the special
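
struct hop_jumbo_hdr above describes the RFC 2675 hop-by-hop jumbo payload option that BIG TCP places immediately behind the IPv6 header, and ipv6_has_hopopt_jumbo() checks for exactly that layout, returning IPPROTO_TCP only for packets larger than GRO_LEGACY_MAX_SIZE. A sketch of how a transmit path might fill the option for an over-64KiB payload; my_fill_jumbo is a hypothetical helper, not part of the patch:

#include <linux/types.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>

/* Illustrative only: build the 8-byte hop-by-hop jumbo option. */
static void my_fill_jumbo(struct hop_jumbo_hdr *h, u32 payload_len)
{
	h->nexthdr = IPPROTO_TCP;	/* BIG TCP carries TCP behind the option */
	h->hdrlen = 0;			/* whole hop-by-hop header is 8 bytes */
	h->tlv_type = IPV6_TLV_JUMBO;	/* 0xC2 */
	h->tlv_len = 4;			/* size of jumbo_payload_len */
	h->jumbo_payload_len = htonl(payload_len);
}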
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 75880fc70700..ebadb2103968 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -514,7 +514,6 @@ struct ieee80211_fils_discovery {
* to that BSS) that can change during the lifetime of the BSS.
*
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
- * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
* @uora_exists: is the UORA element advertised by AP
* @ack_enabled: indicates support to receive a multi-TID that solicits either
* ACK, BACK or both
@@ -1144,20 +1143,41 @@ ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
return info->tx_time_est << 2;
}
+/***
+ * struct ieee80211_rate_status - mrr stage for status path
+ *
+ * This struct is used in struct ieee80211_tx_status to provide drivers a
+ * dynamic way to report about used rates and power levels per packet.
+ *
+ * @rate_idx The actual used rate.
+ * @try_count How often the rate was tried.
+ * @tx_power_idx An idx into the ieee80211_hw->tx_power_levels list of the
+ * corresponding wifi hardware. The idx shall point to the power level
+ * that was used when sending the packet.
+ */
+struct ieee80211_rate_status {
+ struct rate_info rate_idx;
+ u8 try_count;
+ u8 tx_power_idx;
+};
+
/**
* struct ieee80211_tx_status - extended tx status info for rate control
*
* @sta: Station that the packet was transmitted for
* @info: Basic tx status information
* @skb: Packet skb (can be NULL if not provided by the driver)
- * @rate: The TX rate that was used when sending the packet
+ * @rates: Mrr stages that were used when sending the packet
+ * @n_rates: Number of mrr stages (count of instances for @rates)
* @free_list: list where processed skbs are stored to be free'd by the driver
*/
struct ieee80211_tx_status {
struct ieee80211_sta *sta;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- struct rate_info *rate;
+ struct ieee80211_rate_status *rates;
+ u8 n_rates;
+
struct list_head *free_list;
};
@@ -1701,7 +1721,7 @@ enum ieee80211_offload_flags {
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
* at runtime, mac80211 will never touch this field
- * @offloaad_flags: hardware offload capabilities/flags for this interface.
+ * @offload_flags: hardware offload capabilities/flags for this interface.
* These are initialized by mac80211 before calling .add_interface,
* .change_interface or .update_vif_offload and updated by the driver
* within these ops, based on supported features or runtime change
@@ -2658,6 +2678,12 @@ enum ieee80211_hw_flags {
* refilling deficit of each TXQ.
*
* @max_mtu: the max mtu could be set.
+ *
+ * @tx_power_levels: a list of power levels supported by the wifi hardware.
+ * The power levels can be specified either as integer or fractions.
+ * The power level at idx 0 shall be the maximum positive power level.
+ *
+ * @max_txpwr_levels_idx: the maximum valid idx of 'tx_power_levels' list.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2696,6 +2722,8 @@ struct ieee80211_hw {
u8 tx_sk_pacing_shift;
u8 weight_multiplier;
u32 max_mtu;
+ const s8 *tx_power_levels;
+ u8 max_txpwr_levels_idx;
};
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 69e6c6a218be..a32be8aa7ed2 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -45,7 +45,8 @@ union nf_conntrack_expect_proto {
struct nf_conntrack_net_ecache {
struct delayed_work dwork;
- struct netns_ct *ct_net;
+ spinlock_t dying_lock;
+ struct hlist_nulls_head dying_list;
};
struct nf_conntrack_net {
@@ -100,7 +101,6 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
- u16 cpu;
possible_net_t ct_net;
#if IS_ENABLED(CONFIG_NF_NAT)
@@ -236,13 +236,16 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
return nf_ct_delete(ct, 0, 0);
}
-/* Set all unconfirmed conntrack as dying */
-void nf_ct_unconfirmed_destroy(struct net *);
+struct nf_ct_iter_data {
+ struct net *net;
+ void *data;
+ u32 portid;
+ int report;
+};
/* Iterate over all conntracks: if iter returns true, it's deleted. */
-void nf_ct_iterate_cleanup_net(struct net *net,
- int (*iter)(struct nf_conn *i, void *data),
- void *data, u32 portid, int report);
+void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
+ const struct nf_ct_iter_data *iter_data);
/* also set unconfirmed conntracks as dying. Only use in module exit path. */
void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 13807ea94cd2..6406cfee34c2 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -60,7 +60,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
if (ct) {
if (!nf_ct_is_confirmed(ct))
ret = __nf_conntrack_confirm(skb);
- if (likely(ret == NF_ACCEPT))
+ if (ret == NF_ACCEPT && nf_ct_ecache_exist(ct))
nf_ct_deliver_cached_events(ct);
}
return ret;
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 9645b47fa7e4..e227d997fc71 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -10,6 +10,7 @@ struct nf_conncount_data;
struct nf_conncount_list {
spinlock_t list_lock;
+ u32 last_gc; /* jiffies at most recent gc */
struct list_head head; /* connections with the same filtering key */
unsigned int count; /* length of list */
};
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 6c4c490a3e34..0c1dac318e02 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -14,7 +14,6 @@
#include <net/netfilter/nf_conntrack_extend.h>
enum nf_ct_ecache_state {
- NFCT_ECACHE_UNKNOWN, /* destroy event not sent */
NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
};
@@ -23,7 +22,6 @@ struct nf_conntrack_ecache {
unsigned long cache; /* bitops want long */
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
- enum nf_ct_ecache_state state:8;/* ecache state */
u32 missed; /* missed events */
u32 portid; /* netlink portid of destroyer */
};
@@ -38,28 +36,12 @@ nf_ct_ecache_find(const struct nf_conn *ct)
#endif
}
-static inline struct nf_conntrack_ecache *
-nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+static inline bool nf_ct_ecache_exist(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct net *net = nf_ct_net(ct);
- struct nf_conntrack_ecache *e;
-
- if (!ctmask && !expmask && net->ct.sysctl_events) {
- ctmask = ~0;
- expmask = ~0;
- }
- if (!ctmask && !expmask)
- return NULL;
-
- e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
- if (e) {
- e->ctmask = ctmask;
- e->expmask = expmask;
- }
- return e;
+ return nf_ct_ext_exist(ct, NF_CT_EXT_ECACHE);
#else
- return NULL;
+ return false;
#endif
}
@@ -91,6 +73,7 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct);
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
u32 portid, int report);
+bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp);
#else
static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct)
@@ -105,6 +88,10 @@ static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
return 0;
}
+static inline bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+{
+ return false;
+}
#endif
static inline void
@@ -130,30 +117,20 @@ nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
u32 portid, int report)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- const struct net *net = nf_ct_net(ct);
-
- if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
- return 0;
-
- return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
-#else
- return 0;
+ if (nf_ct_ecache_exist(ct))
+ return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
#endif
+ return 0;
}
static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- const struct net *net = nf_ct_net(ct);
-
- if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
- return 0;
-
- return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
-#else
- return 0;
+ if (nf_ct_ecache_exist(ct))
+ return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
#endif
+ return 0;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -166,6 +143,8 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state);
void nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net);
+
static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net)
{
return net->ct.ecache_dwork_pending;
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 96635ad2acc7..0b247248b032 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -34,21 +34,11 @@ enum nf_ct_ext_id {
NF_CT_EXT_NUM,
};
-#define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
-#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
-#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
-#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct
-#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
-#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
-#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
-#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
-#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
-#define NF_CT_EXT_ACT_CT_TYPE struct nf_conn_act_ct_ext
-
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
u8 offset[NF_CT_EXT_NUM];
u8 len;
+ unsigned int gen_id;
char data[] __aligned(8);
};
@@ -62,17 +52,28 @@ static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
}
-static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
+void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id);
+
+static inline void *nf_ct_ext_find(const struct nf_conn *ct, u8 id)
{
- if (!nf_ct_ext_exist(ct, id))
+ struct nf_ct_ext *ext = ct->ext;
+
+ if (!ext || !__nf_ct_ext_exist(ext, id))
return NULL;
+ if (unlikely(ext->gen_id))
+ return __nf_ct_ext_find(ext, id);
+
return (void *)ct->ext + ct->ext->offset[id];
}
-#define nf_ct_ext_find(ext, id) \
- ((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
/* Add this type, returns pointer to data or NULL. */
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
+/* ext genid. if ext->gen_id != ext_genid, extensions cannot be used
+ * anymore unless conntrack has CONFIRMED bit set.
+ */
+extern atomic_t nf_conntrack_ext_genid;
+void nf_ct_ext_bump_genid(void);
+
#endif /* _NF_CONNTRACK_EXTEND_H */
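For illustration, a minimal userspace model of the generation-id check added to nf_ct_ext_find() above: cached offsets are trusted only while the extension block has no stale gen_id, otherwise a slower validating lookup runs. ext_find(), ext_find_slow() and ext_genid are stand-in names for this sketch, not kernel symbols.

#include <stdio.h>

/* Userspace model only: mirrors the shape of the gen_id check, not the
 * kernel data structures.
 */
static unsigned int ext_genid;			/* models nf_conntrack_ext_genid */

struct ext_block {
	unsigned char offset[4];
	unsigned int gen_id;			/* 0 means "current generation" */
	char data[64];
};

static void *ext_find_slow(struct ext_block *ext, int id)
{
	/* stand-in for the out-of-line __nf_ct_ext_find() fallback */
	if (ext->gen_id != ext_genid)
		return NULL;			/* stale extension: refuse to use it */
	return ext->data + ext->offset[id];
}

static void *ext_find(struct ext_block *ext, int id)
{
	if (!ext || !ext->offset[id])
		return NULL;
	if (ext->gen_id)			/* unlikely(): checked slow path */
		return ext_find_slow(ext, id);
	return ext->data + ext->offset[id];	/* fast path, offsets trusted */
}

int main(void)
{
	struct ext_block e = { .offset = { 0, 8 } };

	printf("fast path : %p\n", ext_find(&e, 1));
	e.gen_id = 1;				/* pretend a genid bump happened */
	ext_genid = 2;
	printf("stale ext : %p\n", ext_find(&e, 1));
	return 0;
}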
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 3c23298e68ca..66bab6c60d12 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -17,10 +17,18 @@ struct nf_conn_labels {
unsigned long bits[NF_CT_LABELS_MAX_SIZE / sizeof(long)];
};
+/* Can't use nf_ct_ext_find(): the flow dissector cannot use symbols
+ * exported by the nf_conntrack module.
+ */
static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
- return nf_ct_ext_find(ct, NF_CT_EXT_LABELS);
+ struct nf_ct_ext *ext = ct->ext;
+
+ if (!ext || !__nf_ct_ext_exist(ext, NF_CT_EXT_LABELS))
+ return NULL;
+
+ return (void *)ct->ext + ct->ext->offset[NF_CT_EXT_LABELS];
#else
return NULL;
#endif
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 3ea94f6f3844..fea258983d23 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -17,14 +17,6 @@ struct nf_ct_timeout {
char data[];
};
-struct ctnl_timeout {
- struct list_head head;
- struct rcu_head rcu_head;
- refcount_t refcnt;
- char name[CTNL_TIMEOUT_NAME_MAX];
- struct nf_ct_timeout timeout;
-};
-
struct nf_conn_timeout {
struct nf_ct_timeout __rcu *timeout;
};
diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h
index 9051c3a0c8e7..7c669792fb9c 100644
--- a/include/net/netfilter/nf_reject.h
+++ b/include/net/netfilter/nf_reject.h
@@ -5,12 +5,28 @@
#include <linux/types.h>
#include <uapi/linux/in.h>
-static inline bool nf_reject_verify_csum(__u8 proto)
+static inline bool nf_reject_verify_csum(struct sk_buff *skb, int dataoff,
+ __u8 proto)
{
/* Skip protocols that don't use 16-bit one's complement checksum
* of the entire payload.
*/
switch (proto) {
+ /* Protocols with optional checksums. */
+ case IPPROTO_UDP: {
+ const struct udphdr *udp_hdr;
+ struct udphdr _udp_hdr;
+
+ udp_hdr = skb_header_pointer(skb, dataoff,
+ sizeof(_udp_hdr),
+ &_udp_hdr);
+ if (!udp_hdr || udp_hdr->check)
+ return true;
+
+ return false;
+ }
+ case IPPROTO_GRE:
+
/* Protocols with other integrity checks. */
case IPPROTO_AH:
case IPPROTO_ESP:
@@ -19,9 +35,6 @@ static inline bool nf_reject_verify_csum(__u8 proto)
/* Protocols with partial checksums. */
case IPPROTO_UDPLITE:
case IPPROTO_DCCP:
-
- /* Protocols with optional checksums. */
- case IPPROTO_GRE:
return false;
}
return true;
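A small standalone sketch of the decision the reworked nf_reject_verify_csum() now makes: UDP is verified only when the datagram actually carries a checksum, while GRE, AH and the partial-checksum protocols stay exempt. The protocol defines and should_verify_csum() below are illustrative, not the kernel function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model of the reworked decision, not the kernel function. */
#define P_UDP		17
#define P_GRE		47
#define P_AH		51
#define P_UDPLITE	136

static bool should_verify_csum(uint8_t proto, uint16_t udp_check_field)
{
	switch (proto) {
	case P_UDP:
		/* checksum is optional: verify only if the sender filled it in */
		return udp_check_field != 0;
	case P_GRE:		/* optional checksum */
	case P_AH:		/* other integrity checks */
	case P_UDPLITE:		/* partial checksum */
		return false;
	}
	return true;		/* e.g. TCP: always verify */
}

int main(void)
{
	printf("UDP, csum set : %d\n", should_verify_csum(P_UDP, 0xbeef));
	printf("UDP, csum == 0: %d\n", should_verify_csum(P_UDP, 0));
	printf("GRE           : %d\n", should_verify_csum(P_GRE, 0));
	return 0;
}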
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 0294f3d473af..0677cd3de034 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -93,14 +93,9 @@ struct nf_ip_net {
#endif
};
-struct ct_pcpu {
- spinlock_t lock;
- struct hlist_nulls_head unconfirmed;
- struct hlist_nulls_head dying;
-};
-
struct netns_ct {
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ bool ctnetlink_has_listener;
bool ecache_dwork_pending;
#endif
u8 sysctl_log_invalid; /* Log invalid packets */
@@ -110,7 +105,6 @@ struct netns_ct {
u8 sysctl_tstamp;
u8 sysctl_checksum;
- struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
struct nf_ip_net nf_ct_proto;
diff --git a/include/net/sock.h b/include/net/sock.h
index 73063c88a249..c585ef6565d9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -161,9 +161,6 @@ typedef __u64 __bitwise __addrpair;
* for struct sock and struct inet_timewait_sock.
*/
struct sock_common {
- /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
- * address on 64bit arches : cf INET_MATCH()
- */
union {
__addrpair skc_addrpair;
struct {
@@ -351,6 +348,7 @@ struct sk_filter;
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
+ * @sk_bind2_node: bind node in the bhash2 table
*/
struct sock {
/*
@@ -540,6 +538,7 @@ struct sock {
#endif
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
+ struct hlist_node sk_bind2_node;
};
enum sk_pacing {
@@ -820,6 +819,16 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_add_head(&sk->sk_bind_node, list);
}
+static inline void __sk_del_bind2_node(struct sock *sk)
+{
+ __hlist_del(&sk->sk_bind2_node);
+}
+
+static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
+{
+ hlist_add_head(&sk->sk_bind2_node, list);
+}
+
#define sk_for_each(__sk, list) \
hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
@@ -837,6 +846,8 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
+#define sk_for_each_bound_bhash2(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_bind2_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -2878,13 +2889,14 @@ static inline void sk_pacing_shift_update(struct sock *sk, int val)
*/
static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
{
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
int mdif;
- if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
+ if (!bound_dev_if || bound_dev_if == dif)
return true;
mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
- if (mdif && mdif == sk->sk_bound_dev_if)
+ if (mdif && mdif == bound_dev_if)
return true;
return false;
diff --git a/include/net/tls.h b/include/net/tls.h
index b59f0a63292b..8017f1703447 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -238,6 +238,7 @@ struct tls_context {
u8 tx_conf:3;
u8 rx_conf:3;
+ u8 zerocopy_sendfile:1;
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 6fb899ff5afc..c39d910d4b45 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -126,13 +126,17 @@ struct xfrm_state_walk {
struct xfrm_address_filter *filter;
};
-struct xfrm_state_offload {
+enum {
+ XFRM_DEV_OFFLOAD_IN = 1,
+ XFRM_DEV_OFFLOAD_OUT,
+};
+
+struct xfrm_dev_offload {
struct net_device *dev;
netdevice_tracker dev_tracker;
struct net_device *real_dev;
unsigned long offload_handle;
- unsigned int num_exthdrs;
- u8 flags;
+ u8 dir : 2;
};
struct xfrm_mode {
@@ -247,7 +251,7 @@ struct xfrm_state {
struct xfrm_lifetime_cur curlft;
struct hrtimer mtimer;
- struct xfrm_state_offload xso;
+ struct xfrm_dev_offload xso;
/* used to fix curlft->add_time when changing date */
long saved_tmo;
@@ -1006,7 +1010,7 @@ struct xfrm_offload {
#define CRYPTO_FALLBACK 8
#define XFRM_GSO_SEGMENT 16
#define XFRM_GRO 32
-#define XFRM_ESP_NO_TRAILER 64
+/* 64 is free */
#define XFRM_DEV_RESUME 128
#define XFRM_XMIT 256
@@ -1093,6 +1097,18 @@ static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
return false;
}
+static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
+ int dir, unsigned short family)
+{
+ if (dir != XFRM_POLICY_OUT && family == AF_INET) {
+ /* same dst may be used for traffic originating from
+ * devices with different policy settings.
+ */
+ return IPCB(skb)->flags & IPSKB_NOPOLICY;
+ }
+ return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
+}
+
static inline int __xfrm_policy_check2(struct sock *sk, int dir,
struct sk_buff *skb,
unsigned int family, int reverse)
@@ -1104,7 +1120,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
return __xfrm_policy_check(sk, ndir, skb, family);
return __xfrm_check_nopolicy(net, skb, dir) ||
- (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+ __xfrm_check_dev_nopolicy(skb, dir, family) ||
__xfrm_policy_check(sk, ndir, skb, family);
}
@@ -1866,7 +1882,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
@@ -1892,7 +1908,7 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
static inline void xfrm_dev_state_delete(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
if (xso->dev)
xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
@@ -1900,7 +1916,7 @@ static inline void xfrm_dev_state_delete(struct xfrm_state *x)
static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 3b8c5a54fb00..5f88385a7748 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -654,6 +654,8 @@ struct ocelot_mirror {
int to;
};
+struct ocelot_port;
+
struct ocelot_port {
struct ocelot *ocelot;
@@ -662,6 +664,8 @@ struct ocelot_port {
struct net_device *bond;
struct net_device *bridge;
+ struct ocelot_port *dsa_8021q_cpu;
+
/* VLAN that untagged frames are classified to, on ingress */
const struct ocelot_bridge_vlan *pvid_vlan;
@@ -865,8 +869,9 @@ void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
void ocelot_deinit_port(struct ocelot *ocelot, int port);
-void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port);
-void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port);
+void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu);
+void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port);
+u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port);
/* DSA callbacks */
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
@@ -878,9 +883,7 @@ void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
struct netlink_ext_ack *extack);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
-u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot);
u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port);
-void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining);
int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags val);
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index de26c992f821..c601a4598b0d 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -11,7 +11,7 @@
/* Cookie definitions for private VCAP filters installed by the driver.
* Must be unique per VCAP block.
*/
-#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port))
#define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port)
#define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port)
#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port))
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index cddf5b6fbeb4..80d2588a090c 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -147,7 +147,7 @@ TRACE_EVENT(io_uring_queue_async_work,
TP_PROTO(void *ctx, void * req, unsigned long long user_data, u8 opcode,
unsigned int flags, struct io_wq_work *work, int rw),
- TP_ARGS(ctx, req, user_data, flags, opcode, work, rw),
+ TP_ARGS(ctx, req, user_data, opcode, flags, work, rw),
TP_STRUCT__entry (
__field( void *, ctx )
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 4a3ab0ed6e06..66915b872a44 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -14,215 +14,6 @@
#include <linux/errqueue.h>
/*
- * Define enums for tracing information.
- *
- * These should all be kept sorted, making it easier to match the string
- * mapping tables further on.
- */
-#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
-#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
-
-enum rxrpc_skb_trace {
- rxrpc_skb_cleaned,
- rxrpc_skb_freed,
- rxrpc_skb_got,
- rxrpc_skb_lost,
- rxrpc_skb_new,
- rxrpc_skb_purged,
- rxrpc_skb_received,
- rxrpc_skb_rotated,
- rxrpc_skb_seen,
- rxrpc_skb_unshared,
- rxrpc_skb_unshared_nomem,
-};
-
-enum rxrpc_local_trace {
- rxrpc_local_got,
- rxrpc_local_new,
- rxrpc_local_processing,
- rxrpc_local_put,
- rxrpc_local_queued,
-};
-
-enum rxrpc_peer_trace {
- rxrpc_peer_got,
- rxrpc_peer_new,
- rxrpc_peer_processing,
- rxrpc_peer_put,
-};
-
-enum rxrpc_conn_trace {
- rxrpc_conn_got,
- rxrpc_conn_new_client,
- rxrpc_conn_new_service,
- rxrpc_conn_put_client,
- rxrpc_conn_put_service,
- rxrpc_conn_queued,
- rxrpc_conn_reap_service,
- rxrpc_conn_seen,
-};
-
-enum rxrpc_client_trace {
- rxrpc_client_activate_chans,
- rxrpc_client_alloc,
- rxrpc_client_chan_activate,
- rxrpc_client_chan_disconnect,
- rxrpc_client_chan_pass,
- rxrpc_client_chan_wait_failed,
- rxrpc_client_cleanup,
- rxrpc_client_discard,
- rxrpc_client_duplicate,
- rxrpc_client_exposed,
- rxrpc_client_replace,
- rxrpc_client_to_active,
- rxrpc_client_to_idle,
-};
-
-enum rxrpc_call_trace {
- rxrpc_call_connected,
- rxrpc_call_error,
- rxrpc_call_got,
- rxrpc_call_got_kernel,
- rxrpc_call_got_timer,
- rxrpc_call_got_userid,
- rxrpc_call_new_client,
- rxrpc_call_new_service,
- rxrpc_call_put,
- rxrpc_call_put_kernel,
- rxrpc_call_put_noqueue,
- rxrpc_call_put_notimer,
- rxrpc_call_put_timer,
- rxrpc_call_put_userid,
- rxrpc_call_queued,
- rxrpc_call_queued_ref,
- rxrpc_call_release,
- rxrpc_call_seen,
-};
-
-enum rxrpc_transmit_trace {
- rxrpc_transmit_await_reply,
- rxrpc_transmit_end,
- rxrpc_transmit_queue,
- rxrpc_transmit_queue_last,
- rxrpc_transmit_rotate,
- rxrpc_transmit_rotate_last,
- rxrpc_transmit_wait,
-};
-
-enum rxrpc_receive_trace {
- rxrpc_receive_end,
- rxrpc_receive_front,
- rxrpc_receive_incoming,
- rxrpc_receive_queue,
- rxrpc_receive_queue_last,
- rxrpc_receive_rotate,
-};
-
-enum rxrpc_recvmsg_trace {
- rxrpc_recvmsg_cont,
- rxrpc_recvmsg_data_return,
- rxrpc_recvmsg_dequeue,
- rxrpc_recvmsg_enter,
- rxrpc_recvmsg_full,
- rxrpc_recvmsg_hole,
- rxrpc_recvmsg_next,
- rxrpc_recvmsg_requeue,
- rxrpc_recvmsg_return,
- rxrpc_recvmsg_terminal,
- rxrpc_recvmsg_to_be_accepted,
- rxrpc_recvmsg_wait,
-};
-
-enum rxrpc_rtt_tx_trace {
- rxrpc_rtt_tx_cancel,
- rxrpc_rtt_tx_data,
- rxrpc_rtt_tx_no_slot,
- rxrpc_rtt_tx_ping,
-};
-
-enum rxrpc_rtt_rx_trace {
- rxrpc_rtt_rx_cancel,
- rxrpc_rtt_rx_lost,
- rxrpc_rtt_rx_obsolete,
- rxrpc_rtt_rx_ping_response,
- rxrpc_rtt_rx_requested_ack,
-};
-
-enum rxrpc_timer_trace {
- rxrpc_timer_begin,
- rxrpc_timer_exp_ack,
- rxrpc_timer_exp_hard,
- rxrpc_timer_exp_idle,
- rxrpc_timer_exp_keepalive,
- rxrpc_timer_exp_lost_ack,
- rxrpc_timer_exp_normal,
- rxrpc_timer_exp_ping,
- rxrpc_timer_exp_resend,
- rxrpc_timer_expired,
- rxrpc_timer_init_for_reply,
- rxrpc_timer_init_for_send_reply,
- rxrpc_timer_restart,
- rxrpc_timer_set_for_ack,
- rxrpc_timer_set_for_hard,
- rxrpc_timer_set_for_idle,
- rxrpc_timer_set_for_keepalive,
- rxrpc_timer_set_for_lost_ack,
- rxrpc_timer_set_for_normal,
- rxrpc_timer_set_for_ping,
- rxrpc_timer_set_for_resend,
- rxrpc_timer_set_for_send,
-};
-
-enum rxrpc_propose_ack_trace {
- rxrpc_propose_ack_client_tx_end,
- rxrpc_propose_ack_input_data,
- rxrpc_propose_ack_ping_for_check_life,
- rxrpc_propose_ack_ping_for_keepalive,
- rxrpc_propose_ack_ping_for_lost_ack,
- rxrpc_propose_ack_ping_for_lost_reply,
- rxrpc_propose_ack_ping_for_params,
- rxrpc_propose_ack_processing_op,
- rxrpc_propose_ack_respond_to_ack,
- rxrpc_propose_ack_respond_to_ping,
- rxrpc_propose_ack_retry_tx,
- rxrpc_propose_ack_rotate_rx,
- rxrpc_propose_ack_terminal_ack,
-};
-
-enum rxrpc_propose_ack_outcome {
- rxrpc_propose_ack_subsume,
- rxrpc_propose_ack_update,
- rxrpc_propose_ack_use,
-};
-
-enum rxrpc_congest_change {
- rxrpc_cong_begin_retransmission,
- rxrpc_cong_cleared_nacks,
- rxrpc_cong_new_low_nack,
- rxrpc_cong_no_change,
- rxrpc_cong_progress,
- rxrpc_cong_retransmit_again,
- rxrpc_cong_rtt_window_end,
- rxrpc_cong_saw_nack,
-};
-
-enum rxrpc_tx_point {
- rxrpc_tx_point_call_abort,
- rxrpc_tx_point_call_ack,
- rxrpc_tx_point_call_data_frag,
- rxrpc_tx_point_call_data_nofrag,
- rxrpc_tx_point_call_final_resend,
- rxrpc_tx_point_conn_abort,
- rxrpc_tx_point_rxkad_challenge,
- rxrpc_tx_point_rxkad_response,
- rxrpc_tx_point_reject,
- rxrpc_tx_point_version_keepalive,
- rxrpc_tx_point_version_reply,
-};
-
-#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
-
-/*
* Declare tracing information enums and their string mappings for display.
*/
#define rxrpc_skb_traces \
@@ -452,6 +243,36 @@ enum rxrpc_tx_point {
E_(rxrpc_tx_point_version_reply, "VerReply")
/*
+ * Generate enums for tracing information.
+ */
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum rxrpc_call_trace { rxrpc_call_traces } __mode(byte);
+enum rxrpc_client_trace { rxrpc_client_traces } __mode(byte);
+enum rxrpc_congest_change { rxrpc_congest_changes } __mode(byte);
+enum rxrpc_conn_trace { rxrpc_conn_traces } __mode(byte);
+enum rxrpc_local_trace { rxrpc_local_traces } __mode(byte);
+enum rxrpc_peer_trace { rxrpc_peer_traces } __mode(byte);
+enum rxrpc_propose_ack_outcome { rxrpc_propose_ack_outcomes } __mode(byte);
+enum rxrpc_propose_ack_trace { rxrpc_propose_ack_traces } __mode(byte);
+enum rxrpc_receive_trace { rxrpc_receive_traces } __mode(byte);
+enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte);
+enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte);
+enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte);
+enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte);
+enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte);
+enum rxrpc_transmit_trace { rxrpc_transmit_traces } __mode(byte);
+enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte);
+
+#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
+/*
* Export enum symbols via userspace.
*/
#undef EM
@@ -459,21 +280,21 @@ enum rxrpc_tx_point {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
-rxrpc_skb_traces;
-rxrpc_local_traces;
-rxrpc_conn_traces;
-rxrpc_client_traces;
rxrpc_call_traces;
-rxrpc_transmit_traces;
+rxrpc_client_traces;
+rxrpc_congest_changes;
+rxrpc_congest_modes;
+rxrpc_conn_traces;
+rxrpc_local_traces;
+rxrpc_propose_ack_outcomes;
+rxrpc_propose_ack_traces;
rxrpc_receive_traces;
rxrpc_recvmsg_traces;
-rxrpc_rtt_tx_traces;
rxrpc_rtt_rx_traces;
+rxrpc_rtt_tx_traces;
+rxrpc_skb_traces;
rxrpc_timer_traces;
-rxrpc_propose_ack_traces;
-rxrpc_propose_ack_outcomes;
-rxrpc_congest_modes;
-rxrpc_congest_changes;
+rxrpc_transmit_traces;
rxrpc_tx_points;
/*
@@ -583,7 +404,7 @@ TRACE_EVENT(rxrpc_client,
TP_fast_assign(
__entry->conn = conn ? conn->debug_id : 0;
__entry->channel = channel;
- __entry->usage = conn ? atomic_read(&conn->usage) : -2;
+ __entry->usage = conn ? refcount_read(&conn->ref) : -2;
__entry->op = op;
__entry->cid = conn ? conn->proto.cid : 0;
),
@@ -1574,6 +1395,8 @@ TRACE_EVENT(rxrpc_rx_discard_ack,
__entry->call_ackr_prev)
);
+#undef EM
+#undef E_
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
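The rewrite above relies on the EM()/E_() X-macro pattern: one list is expanded once into enumerators and once into the matching string table, so the two cannot drift apart. A compact standalone model of that pattern, with demo_* names invented for illustration:

#include <stdio.h>

/* One list, expanded twice: once as enumerators, once as a string table. */
#define demo_skb_traces			\
	EM(demo_skb_new,	"NEW")	\
	EM(demo_skb_freed,	"FRE")	\
	E_(demo_skb_purged,	"PUR")

#undef EM
#undef E_
#define EM(a, b)	a,
#define E_(a, b)	a
enum demo_skb_trace { demo_skb_traces };

#undef EM
#undef E_
#define EM(a, b)	[a] = b,
#define E_(a, b)	[a] = b
static const char *demo_skb_strings[] = { demo_skb_traces };

int main(void)
{
	printf("%s\n", demo_skb_strings[demo_skb_freed]);	/* -> FRE */
	return 0;
}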
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 65e786756321..fbb99a61f714 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -222,11 +222,11 @@ static inline long __trace_sched_switch_state(bool preempt,
TRACE_EVENT(sched_switch,
TP_PROTO(bool preempt,
- unsigned int prev_state,
struct task_struct *prev,
- struct task_struct *next),
+ struct task_struct *next,
+ unsigned int prev_state),
- TP_ARGS(preempt, prev_state, prev, next),
+ TP_ARGS(preempt, prev, next, prev_state),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
index 590f8aea2b6d..439c982f7e81 100644
--- a/include/uapi/linux/can/isotp.h
+++ b/include/uapi/linux/can/isotp.h
@@ -124,18 +124,19 @@ struct can_isotp_ll_options {
/* flags for isotp behaviour */
-#define CAN_ISOTP_LISTEN_MODE 0x001 /* listen only (do not send FC) */
-#define CAN_ISOTP_EXTEND_ADDR 0x002 /* enable extended addressing */
-#define CAN_ISOTP_TX_PADDING 0x004 /* enable CAN frame padding tx path */
-#define CAN_ISOTP_RX_PADDING 0x008 /* enable CAN frame padding rx path */
-#define CAN_ISOTP_CHK_PAD_LEN 0x010 /* check received CAN frame padding */
-#define CAN_ISOTP_CHK_PAD_DATA 0x020 /* check received CAN frame padding */
-#define CAN_ISOTP_HALF_DUPLEX 0x040 /* half duplex error state handling */
-#define CAN_ISOTP_FORCE_TXSTMIN 0x080 /* ignore stmin from received FC */
-#define CAN_ISOTP_FORCE_RXSTMIN 0x100 /* ignore CFs depending on rx stmin */
-#define CAN_ISOTP_RX_EXT_ADDR 0x200 /* different rx extended addressing */
-#define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */
-#define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */
+#define CAN_ISOTP_LISTEN_MODE 0x0001 /* listen only (do not send FC) */
+#define CAN_ISOTP_EXTEND_ADDR 0x0002 /* enable extended addressing */
+#define CAN_ISOTP_TX_PADDING 0x0004 /* enable CAN frame padding tx path */
+#define CAN_ISOTP_RX_PADDING 0x0008 /* enable CAN frame padding rx path */
+#define CAN_ISOTP_CHK_PAD_LEN 0x0010 /* check received CAN frame padding */
+#define CAN_ISOTP_CHK_PAD_DATA 0x0020 /* check received CAN frame padding */
+#define CAN_ISOTP_HALF_DUPLEX 0x0040 /* half duplex error state handling */
+#define CAN_ISOTP_FORCE_TXSTMIN 0x0080 /* ignore stmin from received FC */
+#define CAN_ISOTP_FORCE_RXSTMIN 0x0100 /* ignore CFs depending on rx stmin */
+#define CAN_ISOTP_RX_EXT_ADDR 0x0200 /* different rx extended addressing */
+#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */
+#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */
+#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */
/* protocol machine default values */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index d1e600816b82..5f58dcfe2787 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -368,6 +368,8 @@ enum {
IFLA_PARENT_DEV_NAME,
IFLA_PARENT_DEV_BUS_NAME,
IFLA_GRO_MAX_SIZE,
+ IFLA_TSO_MAX_SIZE,
+ IFLA_TSO_MAX_SEGS,
__IFLA_MAX
};
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 0568a79097b8..d9490e3062a7 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3175,6 +3175,8 @@ enum nl80211_attrs {
NL80211_ATTR_EHT_CAPABILITY,
+ NL80211_ATTR_DISABLE_EHT,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index 5f38be0ec0f3..ac39328eabe7 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -39,6 +39,7 @@
/* TLS socket options */
#define TLS_TX 1 /* Set transmit parameters */
#define TLS_RX 2 /* Set receive parameters */
+#define TLS_TX_ZEROCOPY_SENDFILE 3 /* transmit zerocopy sendfile */
/* Supported versions */
#define TLS_VERSION_MINOR(ver) ((ver) & 0xFF)
@@ -160,6 +161,7 @@ enum {
TLS_INFO_CIPHER,
TLS_INFO_TXCONF,
TLS_INFO_RXCONF,
+ TLS_INFO_ZC_SENDFILE,
__TLS_INFO_MAX,
};
#define TLS_INFO_MAX (__TLS_INFO_MAX - 1)
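A hedged userspace sketch of how the new socket option might be used, assuming a socket that already has the "tls" ULP attached and TLS_TX crypto parameters installed; the fallback defines only mirror the values visible in this patch.

#include <linux/tls.h>
#include <sys/socket.h>
#include <stdio.h>

#ifndef SOL_TLS
#define SOL_TLS				282	/* from the kernel headers */
#endif
#ifndef TLS_TX_ZEROCOPY_SENDFILE
#define TLS_TX_ZEROCOPY_SENDFILE	3	/* value added by this patch */
#endif

/* Illustrative only: fd must already be a connected kTLS socket, and the
 * file later passed to sendfile() must not change while it is in flight.
 */
static int enable_tls_zc_sendfile(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_TLS, TLS_TX_ZEROCOPY_SENDFILE,
			  &one, sizeof(one));
}

int main(void)
{
	if (enable_tls_zc_sendfile(-1))		/* -1: placeholder fd */
		perror("TLS_TX_ZEROCOPY_SENDFILE");
	return 0;
}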
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index ea2ee1181921..f3a2abd6d1a1 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1959,6 +1959,12 @@ void __audit_uring_exit(int success, long code)
{
struct audit_context *ctx = audit_context();
+ if (ctx->dummy) {
+ if (ctx->context != AUDIT_CTX_URING)
+ return;
+ goto out;
+ }
+
if (ctx->context == AUDIT_CTX_SYSCALL) {
/*
* NOTE: See the note in __audit_uring_entry() about the case
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 0099b87dd853..d323b180b0f3 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -701,7 +701,6 @@ EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
*/
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
- WARN_ON_ONCE(!in_hardirq());
return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 51efaabac3e4..d58c0389eb23 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6382,7 +6382,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
migrate_disable_switch(rq, prev);
psi_sched_switch(prev, next, !task_on_rq_queued(prev));
- trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev_state, prev, next);
+ trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
/* Also unlocks the rq: */
rq = context_switch(rq, prev, next, &rf);
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 8f4fb328133a..a7e84c8543cb 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -404,9 +404,9 @@ free:
static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
- unsigned int prev_state,
struct task_struct *prev,
- struct task_struct *next)
+ struct task_struct *next,
+ unsigned int prev_state)
{
unsigned long long timestamp;
int index;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4f1d2f5e7263..af899b058c8d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -7420,9 +7420,9 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
static void
ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
- unsigned int prev_state,
struct task_struct *prev,
- struct task_struct *next)
+ struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array *tr = data;
struct trace_pid_list *pid_list;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e11e167b7809..f97de82d1342 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -773,9 +773,9 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
- unsigned int prev_state,
struct task_struct *prev,
- struct task_struct *next)
+ struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array *tr = data;
struct trace_pid_list *no_pid_list;
@@ -799,9 +799,9 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
- unsigned int prev_state,
struct task_struct *prev,
- struct task_struct *next)
+ struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array *tr = data;
struct trace_pid_list *no_pid_list;
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index e9ae1f33a7f0..afb92e2f0aea 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1168,9 +1168,9 @@ thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
*/
static void
trace_sched_switch_callback(void *data, bool preempt,
- unsigned int prev_state,
struct task_struct *p,
- struct task_struct *n)
+ struct task_struct *n,
+ unsigned int prev_state)
{
struct osnoise_variables *osn_var = this_cpu_osn_var();
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 45796d8bd4b2..c9ffdcfe622e 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -22,8 +22,8 @@ static DEFINE_MUTEX(sched_register_mutex);
static void
probe_sched_switch(void *ignore, bool preempt,
- unsigned int prev_state,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev, struct task_struct *next,
+ unsigned int prev_state)
{
int flags;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 46429f9a96fa..330aee1c1a49 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -426,8 +426,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
- unsigned int prev_state,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev, struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array_cpu *data;
u64 T0, T1, delta;
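A sketch of what an out-of-tree sched_switch probe looks like after the argument reorder (prev_state now comes last, matching TP_PROTO). This assumes the usual register_trace_sched_switch() helper generated for the tracepoint and is not part of the patch.

// SPDX-License-Identifier: GPL-2.0
/* Out-of-tree probe sketch only; not part of this patch. */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

static void my_switch_probe(void *data, bool preempt,
			    struct task_struct *prev,
			    struct task_struct *next,
			    unsigned int prev_state)	/* now the last argument */
{
	if (prev_state == TASK_RUNNING)
		pr_debug("%s preempted by %s\n", prev->comm, next->comm);
}

static int __init probe_init(void)
{
	return register_trace_sched_switch(my_switch_probe, NULL);
}

static void __exit probe_exit(void)
{
	unregister_trace_sched_switch(my_switch_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");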
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index af9302141bcf..e5c5315da274 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -76,6 +76,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
data = kzalloc(sizeof(*ref->data), gfp);
if (!data) {
free_percpu((void __percpu *)ref->percpu_count_ptr);
+ ref->percpu_count_ptr = 0;
return -ENOMEM;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c468fee595ff..910a138e9859 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2495,11 +2495,16 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
struct address_space *mapping = NULL;
int extra_pins, ret;
pgoff_t end;
+ bool is_hzp;
- VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
VM_BUG_ON_PAGE(!PageLocked(head), head);
VM_BUG_ON_PAGE(!PageCompound(head), head);
+ is_hzp = is_huge_zero_page(head);
+ VM_WARN_ON_ONCE_PAGE(is_hzp, head);
+ if (is_hzp)
+ return -EBUSY;
+
if (PageWriteback(head))
return -EBUSY;
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 9b2b5f56f4ae..11a954763be9 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -621,6 +621,16 @@ static bool __init kfence_init_pool_early(void)
* fails for the first page, and therefore expect addr==__kfence_pool in
* most failure cases.
*/
+ for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
+ struct slab *slab = virt_to_slab(p);
+
+ if (!slab)
+ continue;
+#ifdef CONFIG_MEMCG
+ slab->memcg_data = 0;
+#endif
+ __folio_clear_slab(slab_folio(slab));
+ }
memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
__kfence_pool = NULL;
return false;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 27760c19bad7..d4a4adcca01f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1274,7 +1274,7 @@ try_again:
}
out:
if (ret == -EIO)
- dump_page(p, "hwpoison: unhandlable page");
+ pr_err("Memory failure: %#lx: unhandlable page.\n", page_to_pfn(p));
return ret;
}
@@ -1861,19 +1861,6 @@ try_again:
if (PageTransHuge(hpage)) {
/*
- * Bail out before SetPageHasHWPoisoned() if hpage is
- * huge_zero_page, although PG_has_hwpoisoned is not
- * checked in set_huge_zero_page().
- *
- * TODO: Handle memory failure of huge_zero_page thoroughly.
- */
- if (is_huge_zero_page(hpage)) {
- action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
- res = -EBUSY;
- goto unlock_mutex;
- }
-
- /*
* The flag must be set after the refcount is bumped
* otherwise it may race with THP split.
* And the flag can't be set in get_hwpoison_page() since
diff --git a/mm/mremap.c b/mm/mremap.c
index 303d3290b938..0b93fac76851 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -947,7 +947,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
return -EINTR;
vma = vma_lookup(mm, addr);
if (!vma) {
- ret = EFAULT;
+ ret = -EFAULT;
goto out;
}
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index d2a244e1c260..b80fccbac62a 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -115,23 +115,13 @@ void ax25_dev_device_down(struct net_device *dev)
if ((s = ax25_dev_list) == ax25_dev) {
ax25_dev_list = s->next;
- spin_unlock_bh(&ax25_dev_lock);
- ax25_dev_put(ax25_dev);
- dev->ax25_ptr = NULL;
- dev_put_track(dev, &ax25_dev->dev_tracker);
- ax25_dev_put(ax25_dev);
- return;
+ goto unlock_put;
}
while (s != NULL && s->next != NULL) {
if (s->next == ax25_dev) {
s->next = ax25_dev->next;
- spin_unlock_bh(&ax25_dev_lock);
- ax25_dev_put(ax25_dev);
- dev->ax25_ptr = NULL;
- dev_put_track(dev, &ax25_dev->dev_tracker);
- ax25_dev_put(ax25_dev);
- return;
+ goto unlock_put;
}
s = s->next;
@@ -139,6 +129,14 @@ void ax25_dev_device_down(struct net_device *dev)
spin_unlock_bh(&ax25_dev_lock);
dev->ax25_ptr = NULL;
ax25_dev_put(ax25_dev);
+ return;
+
+unlock_put:
+ spin_unlock_bh(&ax25_dev_lock);
+ ax25_dev_put(ax25_dev);
+ dev->ax25_ptr = NULL;
+ dev_put_track(dev, &ax25_dev->dev_tracker);
+ ax25_dev_put(ax25_dev);
}
int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 83fb51b6e299..b8f8da7ee3de 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -307,9 +307,11 @@ static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
if (!net_device)
return false;
+#if IS_ENABLED(CONFIG_CFG80211)
/* cfg80211 drivers have to set ieee80211_ptr */
if (net_device->ieee80211_ptr)
return true;
+#endif
return false;
}
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 8d54fef9a568..9b5a1f630bb0 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1001,7 +1001,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
cb->pkt_len = skb->len;
} else {
if (__skb->wire_len < skb->len ||
- __skb->wire_len > GSO_MAX_SIZE)
+ __skb->wire_len > GSO_LEGACY_MAX_SIZE)
return -EINVAL;
cb->pkt_len = __skb->wire_len;
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 196417859c4a..68b3e850bcb9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -39,6 +39,13 @@ static int br_pass_frame_up(struct sk_buff *skb)
dev_sw_netstats_rx_add(brdev, skb->len);
vg = br_vlan_group_rcu(br);
+
+ /* Reset the offload_fwd_mark because there could be a stacked
+ * bridge above, and it should not think this bridge is doing
+ * that bridge's work forwarding out its ports.
+ */
+ br_switchdev_frame_unmark(skb);
+
/* Bridge is just like any other port. Make sure the
* packet is allowed except in promisc mode when someone
* may be running packet capture.
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 35a1ae61744c..43a27d19cdac 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -104,6 +104,7 @@ MODULE_ALIAS("can-proto-6");
#define FC_CONTENT_SZ 3 /* flow control content size in byte (FS/BS/STmin) */
#define ISOTP_CHECK_PADDING (CAN_ISOTP_CHK_PAD_LEN | CAN_ISOTP_CHK_PAD_DATA)
+#define ISOTP_ALL_BC_FLAGS (CAN_ISOTP_SF_BROADCAST | CAN_ISOTP_CF_BROADCAST)
/* Flow Status given in FC frame */
#define ISOTP_FC_CTS 0 /* clear to send */
@@ -159,6 +160,23 @@ static inline struct isotp_sock *isotp_sk(const struct sock *sk)
return (struct isotp_sock *)sk;
}
+static u32 isotp_bc_flags(struct isotp_sock *so)
+{
+ return so->opt.flags & ISOTP_ALL_BC_FLAGS;
+}
+
+static bool isotp_register_rxid(struct isotp_sock *so)
+{
+ /* no broadcast modes => register rx_id for FC frame reception */
+ return (isotp_bc_flags(so) == 0);
+}
+
+static bool isotp_register_txecho(struct isotp_sock *so)
+{
+ /* all modes but SF_BROADCAST register for tx echo skbs */
+ return (isotp_bc_flags(so) != CAN_ISOTP_SF_BROADCAST);
+}
+
static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer)
{
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
@@ -803,7 +821,6 @@ static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
cf->data[i] = so->tx.buf[so->tx.idx++];
so->tx.sn = 1;
- so->tx.state = ISOTP_WAIT_FIRST_FC;
}
static void isotp_rcv_echo(struct sk_buff *skb, void *data)
@@ -936,7 +953,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
/* does the given data fit into a single frame for SF_BROADCAST? */
- if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
+ if ((isotp_bc_flags(so) == CAN_ISOTP_SF_BROADCAST) &&
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
err = -EINVAL;
goto err_out_drop;
@@ -1000,12 +1017,41 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/* don't enable wait queue for a single frame transmission */
wait_tx_done = 0;
} else {
- /* send first frame and wait for FC */
+ /* send first frame */
isotp_create_fframe(cf, so, ae);
- /* start timeout for FC */
- hrtimer_sec = 1;
+ if (isotp_bc_flags(so) == CAN_ISOTP_CF_BROADCAST) {
+ /* set timer for FC-less operation (STmin = 0) */
+ if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
+ so->tx_gap = ktime_set(0, so->force_tx_stmin);
+ else
+ so->tx_gap = ktime_set(0, so->frame_txtime);
+
+ /* disable wait for FCs due to activated block size */
+ so->txfc.bs = 0;
+
+ /* cfecho should have been zero'ed by init */
+ if (so->cfecho)
+ pr_notice_once("can-isotp: no fc cfecho %08X\n",
+ so->cfecho);
+
+ /* set consecutive frame echo tag */
+ so->cfecho = *(u32 *)cf->data;
+
+ /* switch directly to ISOTP_SENDING state */
+ so->tx.state = ISOTP_SENDING;
+
+ /* start timeout for unlikely lost echo skb */
+ hrtimer_sec = 2;
+ } else {
+ /* standard flow control check */
+ so->tx.state = ISOTP_WAIT_FIRST_FC;
+
+ /* start timeout for FC */
+ hrtimer_sec = 1;
+ }
+
hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
HRTIMER_MODE_REL_SOFT);
}
@@ -1025,6 +1071,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (hrtimer_sec)
hrtimer_cancel(&so->txtimer);
+ /* reset consecutive frame echo tag */
+ so->cfecho = 0;
+
goto err_out_drop;
}
@@ -1120,15 +1169,17 @@ static int isotp_release(struct socket *sock)
lock_sock(sk);
/* remove current filters & unregister */
- if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
+ if (so->bound && isotp_register_txecho(so)) {
if (so->ifindex) {
struct net_device *dev;
dev = dev_get_by_index(net, so->ifindex);
if (dev) {
- can_rx_unregister(net, dev, so->rxid,
- SINGLE_MASK(so->rxid),
- isotp_rcv, sk);
+ if (isotp_register_rxid(so))
+ can_rx_unregister(net, dev, so->rxid,
+ SINGLE_MASK(so->rxid),
+ isotp_rcv, sk);
+
can_rx_unregister(net, dev, so->txid,
SINGLE_MASK(so->txid),
isotp_rcv_echo, sk);
@@ -1161,26 +1212,35 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
struct net *net = sock_net(sk);
int ifindex;
struct net_device *dev;
- canid_t tx_id, rx_id;
+ canid_t tx_id = addr->can_addr.tp.tx_id;
+ canid_t rx_id = addr->can_addr.tp.rx_id;
int err = 0;
int notify_enetdown = 0;
- int do_rx_reg = 1;
if (len < ISOTP_MIN_NAMELEN)
return -EINVAL;
- /* sanitize tx/rx CAN identifiers */
- tx_id = addr->can_addr.tp.tx_id;
+ /* sanitize tx CAN identifier */
if (tx_id & CAN_EFF_FLAG)
tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
else
tx_id &= CAN_SFF_MASK;
- rx_id = addr->can_addr.tp.rx_id;
- if (rx_id & CAN_EFF_FLAG)
- rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
- else
- rx_id &= CAN_SFF_MASK;
+ /* give feedback on wrong CAN-ID value */
+ if (tx_id != addr->can_addr.tp.tx_id)
+ return -EINVAL;
+
+ /* sanitize rx CAN identifier (if needed) */
+ if (isotp_register_rxid(so)) {
+ if (rx_id & CAN_EFF_FLAG)
+ rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
+ else
+ rx_id &= CAN_SFF_MASK;
+
+ /* give feedback on wrong CAN-ID value */
+ if (rx_id != addr->can_addr.tp.rx_id)
+ return -EINVAL;
+ }
if (!addr->can_ifindex)
return -ENODEV;
@@ -1192,12 +1252,8 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
goto out;
}
- /* do not register frame reception for functional addressing */
- if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
- do_rx_reg = 0;
-
- /* do not validate rx address for functional addressing */
- if (do_rx_reg && rx_id == tx_id) {
+ /* ensure different CAN IDs when the rx_id is to be registered */
+ if (isotp_register_rxid(so) && rx_id == tx_id) {
err = -EADDRNOTAVAIL;
goto out;
}
@@ -1222,10 +1278,11 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
ifindex = dev->ifindex;
- if (do_rx_reg) {
+ if (isotp_register_rxid(so))
can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id),
isotp_rcv, sk, "isotp", sk);
+ if (isotp_register_txecho(so)) {
/* no consecutive frame echo skb in flight */
so->cfecho = 0;
@@ -1294,6 +1351,15 @@ static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR))
so->opt.rx_ext_address = so->opt.ext_address;
+ /* these broadcast flags are not allowed together */
+ if (isotp_bc_flags(so) == ISOTP_ALL_BC_FLAGS) {
+ /* CAN_ISOTP_SF_BROADCAST is prioritized */
+ so->opt.flags &= ~CAN_ISOTP_CF_BROADCAST;
+
+ /* give user feedback on wrong config attempt */
+ ret = -EINVAL;
+ }
+
/* check for frame_txtime changes (0 => no changes) */
if (so->opt.frame_txtime) {
if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO)
@@ -1444,10 +1510,12 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg,
case NETDEV_UNREGISTER:
lock_sock(sk);
/* remove current filters & unregister */
- if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
- can_rx_unregister(dev_net(dev), dev, so->rxid,
- SINGLE_MASK(so->rxid),
- isotp_rcv, sk);
+ if (so->bound && isotp_register_txecho(so)) {
+ if (isotp_register_rxid(so))
+ can_rx_unregister(dev_net(dev), dev, so->rxid,
+ SINGLE_MASK(so->rxid),
+ isotp_rcv, sk);
+
can_rx_unregister(dev_net(dev), dev, so->txid,
SINGLE_MASK(so->txid),
isotp_rcv_echo, sk);
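A hedged userspace sketch of the new 1-to-N consecutive-frame mode: CAN_ISOTP_CF_BROADCAST is selected before bind(), only tx_id is supplied, and a multi-frame write goes out without waiting for flow control. "can0" and the CAN ID are placeholders.

#include <linux/can.h>
#include <linux/can/isotp.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct can_isotp_options opts = { .flags = CAN_ISOTP_CF_BROADCAST };
	struct sockaddr_can addr = { .can_family = AF_CAN };
	const char pdu[] = "a PDU clearly longer than a single frame payload";
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

	if (s < 0) {
		perror("socket");
		return 1;
	}

	/* must be set before bind(); rx_id is neither needed nor registered */
	setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts));

	addr.can_ifindex = if_nametoindex("can0");	/* placeholder ifname */
	addr.can_addr.tp.tx_id = 0x600;			/* placeholder CAN ID */
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");

	/* sent as FF + CFs back to back, no flow-control frames awaited */
	if (write(s, pdu, sizeof(pdu)) < 0)
		perror("write");

	close(s);
	return 0;
}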
diff --git a/net/can/raw.c b/net/can/raw.c
index b7dbb57557f3..d1bd9cc51ebe 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -772,6 +772,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
+ struct sockcm_cookie sockc;
struct sk_buff *skb;
struct net_device *dev;
int ifindex;
@@ -817,11 +818,18 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err < 0)
goto free_skb;
- skb_setup_tx_timestamp(skb, sk->sk_tsflags);
+ sockcm_init(&sockc, sk);
+ if (msg->msg_controllen) {
+ err = sock_cmsg_send(sk, msg, &sockc);
+ if (unlikely(err))
+ goto free_skb;
+ }
skb->dev = dev;
- skb->sk = sk;
skb->priority = sk->sk_priority;
+ skb->tstamp = sockc.transmit_time;
+
+ skb_setup_tx_timestamp(skb, sockc.tsflags);
err = can_send(skb, ro->loopback);
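With sock_cmsg_send() now honoured, a raw CAN socket can attach a transmit time the way other packet sockets do. A hedged sketch, assuming headers that expose SO_TXTIME/SCM_TXTIME and an ETF qdisc on the interface; "can0" and the 1 ms offset are placeholders.

#include <linux/can.h>
#include <linux/can/raw.h>
#include <linux/net_tstamp.h>
#include <net/if.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <time.h>

#ifndef SO_TXTIME
#define SO_TXTIME	61
#define SCM_TXTIME	SO_TXTIME
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI	11
#endif

int main(void)
{
	struct sock_txtime cfg = { .clockid = CLOCK_TAI };
	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
				   .data = { 0xde, 0xad } };
	struct sockaddr_can addr = { .can_family = AF_CAN,
				     .can_ifindex = if_nametoindex("can0") };
	struct iovec iov = { .iov_base = &frame, .iov_len = sizeof(frame) };
	char cbuf[CMSG_SPACE(sizeof(uint64_t))] = { 0 };
	struct msghdr msg = { .msg_name = &addr, .msg_namelen = sizeof(addr),
			      .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf,
			      .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
	struct timespec now;
	uint64_t txtime;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	setsockopt(s, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));

	clock_gettime(CLOCK_TAI, &now);
	txtime = now.tv_sec * 1000000000ULL + now.tv_nsec + 1000000; /* +1 ms */

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_TXTIME;
	cm->cmsg_len = CMSG_LEN(sizeof(txtime));
	memcpy(CMSG_DATA(cm), &txtime, sizeof(txtime));

	if (sendmsg(s, &msg, 0) < 0)
		perror("sendmsg");
	return 0;
}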
diff --git a/net/core/dev.c b/net/core/dev.c
index a601da3b4a7c..721ba9c26554 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -682,11 +682,11 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
const struct net_device *last_dev;
struct net_device_path_ctx ctx = {
.dev = dev,
- .daddr = daddr,
};
struct net_device_path *path;
int ret = 0;
+ memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
stack->num_paths = 0;
while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
last_dev = ctx.dev;
@@ -2998,11 +2998,12 @@ EXPORT_SYMBOL(netif_set_real_num_queues);
* @size: max skb->len of a TSO frame
*
* Set the limit on the size of TSO super-frames the device can handle.
- * Unless explicitly set the stack will assume the value of %GSO_MAX_SIZE.
+ * Unless explicitly set the stack will assume the value of
+ * %GSO_LEGACY_MAX_SIZE.
*/
void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
{
- dev->tso_max_size = size;
+ dev->tso_max_size = min(GSO_MAX_SIZE, size);
if (size < READ_ONCE(dev->gso_max_size))
netif_set_gso_max_size(dev, size);
}
@@ -4329,6 +4330,7 @@ int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);
int netdev_tstamp_prequeue __read_mostly = 1;
+unsigned int sysctl_skb_defer_max __read_mostly = 64;
int netdev_budget __read_mostly = 300;
/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
@@ -4581,9 +4583,12 @@ static void rps_trigger_softirq(void *data)
#endif /* CONFIG_RPS */
/* Called from hardirq (IPI) context */
-static void trigger_rx_softirq(void *data __always_unused)
+static void trigger_rx_softirq(void *data)
{
+ struct softnet_data *sd = data;
+
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ smp_store_release(&sd->defer_ipi_scheduled, 0);
}
/*
@@ -6629,7 +6634,7 @@ static void skb_defer_free_flush(struct softnet_data *sd)
while (skb != NULL) {
next = skb->next;
- __kfree_skb(skb);
+ napi_consume_skb(skb, 1);
skb = next;
}
}
@@ -6650,6 +6655,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
for (;;) {
struct napi_struct *n;
+ skb_defer_free_flush(sd);
+
if (list_empty(&list)) {
if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
goto end;
@@ -6679,8 +6686,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
net_rps_action_and_irq_enable(sd);
-end:
- skb_defer_free_flush(sd);
+end:;
}
struct netdev_adjacent {
@@ -10595,9 +10601,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
- dev->gso_max_size = GSO_MAX_SIZE;
+ dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
dev->gso_max_segs = GSO_MAX_SEGS;
- dev->gro_max_size = GRO_MAX_SIZE;
+ dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
dev->tso_max_segs = TSO_MAX_SEGS;
dev->upper_level = 1;
@@ -11381,7 +11387,7 @@ static int __init net_dev_init(void)
INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
sd->cpu = i;
#endif
- INIT_CSD(&sd->defer_csd, trigger_rx_softirq, NULL);
+ INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
spin_lock_init(&sd->defer_lock);
init_gro_hash(&sd->backlog);
diff --git a/net/core/dev.h b/net/core/dev.h
index 328b37af90ba..cbb8a925175a 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -39,7 +39,7 @@ void dev_addr_check(struct net_device *dev);
/* sysctls not referred to from outside net/core/ */
extern int netdev_budget;
extern unsigned int netdev_budget_usecs;
-
+extern unsigned int sysctl_skb_defer_max;
extern int netdev_tstamp_prequeue;
extern int netdev_unregister_timeout_secs;
extern int weight_p;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b89e3e95bffc..41cac0e4834e 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -517,7 +517,7 @@ static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
if (!nskb)
return;
- if ((unsigned int)reason >= SKB_DROP_REASON_MAX)
+ if (unlikely(reason >= SKB_DROP_REASON_MAX || reason <= 0))
reason = SKB_DROP_REASON_NOT_SPECIFIED;
cb = NET_DM_SKB_CB(nskb);
cb->reason = reason;
diff --git a/net/core/gro.c b/net/core/gro.c
index 78110edf5d4b..b4190eb08467 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -167,6 +167,14 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
return -E2BIG;
+ if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
+ if (p->protocol != htons(ETH_P_IPV6) ||
+ skb_headroom(p) < sizeof(struct hop_jumbo_hdr) ||
+ ipv6_hdr(p)->nexthdr != IPPROTO_TCP ||
+ p->encapsulation)
+ return -E2BIG;
+ }
+
lp = NAPI_GRO_CB(p)->last;
pinfo = skb_shinfo(lp);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4980c3a50475..e319e242dddf 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -746,7 +746,6 @@ static const struct attribute_group netstat_group = {
.attrs = netstat_attrs,
};
-#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
NULL
};
@@ -755,7 +754,19 @@ static const struct attribute_group wireless_group = {
.name = "wireless",
.attrs = wireless_attrs,
};
+
+static bool wireless_group_needed(struct net_device *ndev)
+{
+#if IS_ENABLED(CONFIG_CFG80211)
+ if (ndev->ieee80211_ptr)
+ return true;
#endif
+#if IS_ENABLED(CONFIG_WIRELESS_EXT)
+ if (ndev->wireless_handlers)
+ return true;
+#endif
+ return false;
+}
#else /* CONFIG_SYSFS */
#define net_class_groups NULL
@@ -1996,14 +2007,8 @@ int netdev_register_kobject(struct net_device *ndev)
*groups++ = &netstat_group;
-#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
- if (ndev->ieee80211_ptr)
- *groups++ = &wireless_group;
-#if IS_ENABLED(CONFIG_WIRELESS_EXT)
- else if (ndev->wireless_handlers)
+ if (wireless_group_needed(ndev))
*groups++ = &wireless_group;
-#endif
-#endif
#endif /* CONFIG_SYSFS */
error = device_add(dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bdc891326102..ac45328607f7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1064,6 +1064,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
+ nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
+ nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
+ + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
+ + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_CARRIER_CHANGES */
@@ -1769,6 +1771,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
+ nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
+ nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
@@ -1922,6 +1926,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
[IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
[IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
+ [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
+ [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -2354,14 +2360,6 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
}
}
- if (tb[IFLA_GRO_MAX_SIZE]) {
- u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
-
- if (gro_max_size > GRO_MAX_SIZE) {
- NL_SET_ERR_MSG(extack, "too big gro_max_size");
- return -EINVAL;
- }
- }
return 0;
}
@@ -2811,7 +2809,7 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_GSO_MAX_SIZE]) {
u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
- if (max_size > GSO_MAX_SIZE || max_size > dev->tso_max_size) {
+ if (max_size > dev->tso_max_size) {
err = -EINVAL;
goto errout;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bd16e158b366..5b3559cb1d82 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -80,6 +80,7 @@
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
+#include "dev.h"
#include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init;
@@ -771,6 +772,8 @@ void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
if (!skb_unref(skb))
return;
+ DEBUG_NET_WARN_ON_ONCE(reason <= 0 || reason >= SKB_DROP_REASON_MAX);
+
trace_kfree_skb(skb, __builtin_return_address(0), reason);
__kfree_skb(skb);
}
@@ -6494,37 +6497,35 @@ void skb_attempt_defer_free(struct sk_buff *skb)
int cpu = skb->alloc_cpu;
struct softnet_data *sd;
unsigned long flags;
+ unsigned int defer_max;
bool kick;
if (WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
!cpu_online(cpu) ||
cpu == raw_smp_processor_id()) {
- __kfree_skb(skb);
+nodefer: __kfree_skb(skb);
return;
}
sd = &per_cpu(softnet_data, cpu);
- /* We do not send an IPI or any signal.
- * Remote cpu will eventually call skb_defer_free_flush()
- */
+ defer_max = READ_ONCE(sysctl_skb_defer_max);
+ if (READ_ONCE(sd->defer_count) >= defer_max)
+ goto nodefer;
+
spin_lock_irqsave(&sd->defer_lock, flags);
+ /* Send an IPI every time queue reaches half capacity. */
+ kick = sd->defer_count == (defer_max >> 1);
+ /* Paired with the READ_ONCE() few lines above */
+ WRITE_ONCE(sd->defer_count, sd->defer_count + 1);
+
skb->next = sd->defer_list;
/* Paired with READ_ONCE() in skb_defer_free_flush() */
WRITE_ONCE(sd->defer_list, skb);
- sd->defer_count++;
-
- /* kick every time queue length reaches 128.
- * This should avoid blocking in smp_call_function_single_async().
- * This condition should hardly be bit under normal conditions,
- * unless cpu suddenly stopped to receive NIC interrupts.
- */
- kick = sd->defer_count == 128;
-
spin_unlock_irqrestore(&sd->defer_lock, flags);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
- if (unlikely(kick))
+ if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
smp_call_function_single_async(cpu, &sd->defer_csd);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 6b287eb5427b..2ff40dd0a7a6 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -635,7 +635,9 @@ static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
if (ifindex < 0)
goto out;
- sk->sk_bound_dev_if = ifindex;
+ /* Paired with all READ_ONCE() done locklessly. */
+ WRITE_ONCE(sk->sk_bound_dev_if, ifindex);
+
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
sk_dst_reset(sk);
@@ -713,10 +715,11 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
{
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
+ int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
struct net *net = sock_net(sk);
char devname[IFNAMSIZ];
- if (sk->sk_bound_dev_if == 0) {
+ if (bound_dev_if == 0) {
len = 0;
goto zero;
}
@@ -725,7 +728,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
if (len < IFNAMSIZ)
goto out;
- ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
+ ret = netdev_get_name(net, devname, bound_dev_if);
if (ret)
goto out;
@@ -1861,7 +1864,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_BINDTOIFINDEX:
- v.val = sk->sk_bound_dev_if;
+ v.val = READ_ONCE(sk->sk_bound_dev_if);
break;
case SO_NETNS_COOKIE:
@@ -2293,6 +2296,19 @@ void sk_free_unlock_clone(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
+static void sk_trim_gso_size(struct sock *sk)
+{
+ if (sk->sk_gso_max_size <= GSO_LEGACY_MAX_SIZE)
+ return;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6 &&
+ sk_is_tcp(sk) &&
+ !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ return;
+#endif
+ sk->sk_gso_max_size = GSO_LEGACY_MAX_SIZE;
+}
+
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
u32 max_segs = 1;
@@ -2312,6 +2328,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
/* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */
sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size);
+ sk_trim_gso_size(sk);
sk->sk_gso_max_size -= (MAX_TCP_HEADER + 1);
/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
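
The sock.c hunks above annotate the lockless users of sk->sk_bound_dev_if: writers switch to WRITE_ONCE(), readers that run without the socket lock use READ_ONCE(), and each function snapshots the value once instead of re-reading it. A minimal user-space sketch of that convention, with hypothetical names and C11 relaxed atomics in place of the kernel macros:

#include <stdatomic.h>
#include <stdbool.h>

struct fake_sock {
    _Atomic int bound_dev_if;    /* stands in for sk->sk_bound_dev_if */
};

/* Writer path (serialised by whatever lock the writers hold). */
static void bind_to_index(struct fake_sock *sk, int ifindex)
{
    atomic_store_explicit(&sk->bound_dev_if, ifindex, memory_order_relaxed);
}

/* Lockless reader: take one snapshot and use it consistently, so the
 * function cannot observe two different values of the field.
 */
static bool device_matches(const struct fake_sock *sk, int dif)
{
    int bound = atomic_load_explicit(&sk->bound_dev_if, memory_order_relaxed);

    if (!bound)                  /* not bound to a device: everything matches */
        return true;
    return bound == dif;
}
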
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 195ca5c28771..ca8d38325e1e 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -578,6 +578,14 @@ static struct ctl_table net_core_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = &int_3600,
},
+ {
+ .procname = "skb_defer_max",
+ .data = &sysctl_skb_defer_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
{ }
};
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 82696ab86f74..da6e3b20cd75 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -628,7 +628,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
ireq->ir_mark = inet_request_mark(sk, skb);
ireq->ireq_family = AF_INET;
- ireq->ir_iif = sk->sk_bound_dev_if;
+ ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
/*
* Step 3: Process LISTEN state
@@ -1029,9 +1029,15 @@ static void __net_exit dccp_v4_exit_net(struct net *net)
inet_ctl_sock_destroy(pn->v4_ctl_sk);
}
+static void __net_exit dccp_v4_exit_batch(struct list_head *net_exit_list)
+{
+ inet_twsk_purge(&dccp_hashinfo, AF_INET);
+}
+
static struct pernet_operations dccp_v4_ops = {
.init = dccp_v4_init_net,
.exit = dccp_v4_exit_net,
+ .exit_batch = dccp_v4_exit_batch,
.id = &dccp_v4_pernet_id,
.size = sizeof(struct dccp_v4_pernet),
};
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4d95b6400915..fd44638ec16b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -374,10 +374,10 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
refcount_inc(&skb->users);
ireq->pktopts = skb;
}
- ireq->ir_iif = sk->sk_bound_dev_if;
+ ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
/* So that link locals have meaning */
- if (!sk->sk_bound_dev_if &&
+ if (!ireq->ir_iif &&
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
ireq->ir_iif = inet6_iif(skb);
@@ -1115,9 +1115,15 @@ static void __net_exit dccp_v6_exit_net(struct net *net)
inet_ctl_sock_destroy(pn->v6_ctl_sk);
}
+static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
+{
+ inet_twsk_purge(&dccp_hashinfo, AF_INET6);
+}
+
static struct pernet_operations dccp_v6_ops = {
.init = dccp_v6_init_net,
.exit = dccp_v6_exit_net,
+ .exit_batch = dccp_v6_exit_batch,
.id = &dccp_v6_pernet_id,
.size = sizeof(struct dccp_v6_pernet),
};
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index eb8e128e43e8..2e78458900f2 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1120,6 +1120,12 @@ static int __init dccp_init(void)
SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
if (!dccp_hashinfo.bind_bucket_cachep)
goto out_free_hashinfo2;
+ dccp_hashinfo.bind2_bucket_cachep =
+ kmem_cache_create("dccp_bind2_bucket",
+ sizeof(struct inet_bind2_bucket), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
+ if (!dccp_hashinfo.bind2_bucket_cachep)
+ goto out_free_bind_bucket_cachep;
/*
* Size and allocate the main established and bind bucket
@@ -1150,7 +1156,7 @@ static int __init dccp_init(void)
if (!dccp_hashinfo.ehash) {
DCCP_CRIT("Failed to allocate DCCP established hash table");
- goto out_free_bind_bucket_cachep;
+ goto out_free_bind2_bucket_cachep;
}
for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
@@ -1176,14 +1182,23 @@ static int __init dccp_init(void)
goto out_free_dccp_locks;
}
+ dccp_hashinfo.bhash2 = (struct inet_bind2_hashbucket *)
+ __get_free_pages(GFP_ATOMIC | __GFP_NOWARN, bhash_order);
+
+ if (!dccp_hashinfo.bhash2) {
+ DCCP_CRIT("Failed to allocate DCCP bind2 hash table");
+ goto out_free_dccp_bhash;
+ }
+
for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
spin_lock_init(&dccp_hashinfo.bhash[i].lock);
INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
+ INIT_HLIST_HEAD(&dccp_hashinfo.bhash2[i].chain);
}
rc = dccp_mib_init();
if (rc)
- goto out_free_dccp_bhash;
+ goto out_free_dccp_bhash2;
rc = dccp_ackvec_init();
if (rc)
@@ -1207,30 +1222,38 @@ out_ackvec_exit:
dccp_ackvec_exit();
out_free_dccp_mib:
dccp_mib_exit();
+out_free_dccp_bhash2:
+ free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
out_free_dccp_bhash:
free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
+out_free_bind2_bucket_cachep:
+ kmem_cache_destroy(dccp_hashinfo.bind2_bucket_cachep);
out_free_bind_bucket_cachep:
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_hashinfo2:
inet_hashinfo2_free_mod(&dccp_hashinfo);
out_fail:
dccp_hashinfo.bhash = NULL;
+ dccp_hashinfo.bhash2 = NULL;
dccp_hashinfo.ehash = NULL;
dccp_hashinfo.bind_bucket_cachep = NULL;
+ dccp_hashinfo.bind2_bucket_cachep = NULL;
return rc;
}
static void __exit dccp_fini(void)
{
+ int bhash_order = get_order(dccp_hashinfo.bhash_size *
+ sizeof(struct inet_bind_hashbucket));
+
ccid_cleanup_builtins();
dccp_mib_exit();
- free_pages((unsigned long)dccp_hashinfo.bhash,
- get_order(dccp_hashinfo.bhash_size *
- sizeof(struct inet_bind_hashbucket)));
+ free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
+ free_pages((unsigned long)dccp_hashinfo.bhash2, bhash_order);
free_pages((unsigned long)dccp_hashinfo.ehash,
get_order((dccp_hashinfo.ehash_mask + 1) *
sizeof(struct inet_ehash_bucket)));
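
dccp_init() above now creates a second bucket cache and a second hash table, and threads two new labels into the existing unwind so resources are released in strict reverse order of allocation. A compact stand-alone sketch of that goto-unwind idiom, with made-up resource names:

#include <stdlib.h>

struct ctx { void *cache_a, *cache_b, *table_a, *table_b; };

static int setup(struct ctx *c)
{
    int rc = -1;

    c->cache_a = malloc(64);
    if (!c->cache_a)
        goto out;
    c->cache_b = malloc(64);            /* new, paired with cache_a */
    if (!c->cache_b)
        goto out_free_cache_a;
    c->table_a = malloc(4096);
    if (!c->table_a)
        goto out_free_cache_b;
    c->table_b = malloc(4096);          /* new, paired with table_a */
    if (!c->table_b)
        goto out_free_table_a;
    return 0;

    /* Unwind in strict reverse order of the allocations above. */
out_free_table_a:
    free(c->table_a);
out_free_cache_b:
    free(c->cache_b);
out_free_cache_a:
    free(c->cache_a);
out:
    return rc;
}
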
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index d1d78a463a06..552a53f1d5d0 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -159,7 +159,7 @@ static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how
struct neighbour *n = rt->n;
if (n && n->dev == dev) {
- n->dev = dev_net(dev)->loopback_dev;
+ n->dev = blackhole_netdev;
dev_hold(n->dev);
dev_put(dev);
}
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index d0a2452a1e24..cac48a741f27 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/devlink.h>
#include <net/sch_generic.h>
@@ -852,6 +853,7 @@ disconnect:
static int dsa_switch_setup(struct dsa_switch *ds)
{
struct dsa_devlink_priv *dl_priv;
+ struct device_node *dn;
struct dsa_port *dp;
int err;
@@ -907,7 +909,10 @@ static int dsa_switch_setup(struct dsa_switch *ds)
dsa_slave_mii_bus_init(ds);
- err = mdiobus_register(ds->slave_mii_bus);
+ dn = of_get_child_by_name(ds->dev->of_node, "mdio");
+
+ err = of_mdiobus_register(ds->slave_mii_bus, dn);
+ of_node_put(dn);
if (err < 0)
goto free_slave_mii_bus;
}
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d747166bb291..b21238df3301 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -705,7 +705,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
static inline int esp_remove_trailer(struct sk_buff *skb)
{
struct xfrm_state *x = xfrm_input_state(skb);
- struct xfrm_offload *xo = xfrm_offload(skb);
struct crypto_aead *aead = x->data;
int alen, hlen, elen;
int padlen, trimlen;
@@ -717,11 +716,6 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
elen = skb->len - hlen;
- if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
- ret = xo->proto;
- goto out;
- }
-
if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
BUG();
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1e5b53c2bb26..c0b7e6c21360 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -117,6 +117,32 @@ bool inet_rcv_saddr_any(const struct sock *sk)
return !sk->sk_rcv_saddr;
}
+static bool use_bhash2_on_bind(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ int addr_type;
+
+ if (sk->sk_family == AF_INET6) {
+ addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+ return addr_type != IPV6_ADDR_ANY &&
+ addr_type != IPV6_ADDR_MAPPED;
+ }
+#endif
+ return sk->sk_rcv_saddr != htonl(INADDR_ANY);
+}
+
+static u32 get_bhash2_nulladdr_hash(const struct sock *sk, struct net *net,
+ int port)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr nulladdr = {};
+
+ if (sk->sk_family == AF_INET6)
+ return ipv6_portaddr_hash(net, &nulladdr, port);
+#endif
+ return ipv4_portaddr_hash(net, 0, port);
+}
+
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
unsigned int seq;
@@ -130,16 +156,71 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
}
EXPORT_SYMBOL(inet_get_local_port_range);
-static int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb,
- bool relax, bool reuseport_ok)
+static bool bind_conflict_exist(const struct sock *sk, struct sock *sk2,
+ kuid_t sk_uid, bool relax,
+ bool reuseport_cb_ok, bool reuseport_ok)
+{
+ int bound_dev_if2;
+
+ if (sk == sk2)
+ return false;
+
+ bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
+
+ if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
+ sk->sk_bound_dev_if == bound_dev_if2) {
+ if (sk->sk_reuse && sk2->sk_reuse &&
+ sk2->sk_state != TCP_LISTEN) {
+ if (!relax || (!reuseport_ok && sk->sk_reuseport &&
+ sk2->sk_reuseport && reuseport_cb_ok &&
+ (sk2->sk_state == TCP_TIME_WAIT ||
+ uid_eq(sk_uid, sock_i_uid(sk2)))))
+ return true;
+ } else if (!reuseport_ok || !sk->sk_reuseport ||
+ !sk2->sk_reuseport || !reuseport_cb_ok ||
+ (sk2->sk_state != TCP_TIME_WAIT &&
+ !uid_eq(sk_uid, sock_i_uid(sk2)))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool check_bhash2_conflict(const struct sock *sk,
+ struct inet_bind2_bucket *tb2, kuid_t sk_uid,
+ bool relax, bool reuseport_cb_ok,
+ bool reuseport_ok)
{
struct sock *sk2;
- bool reuseport_cb_ok;
- bool reuse = sk->sk_reuse;
- bool reuseport = !!sk->sk_reuseport;
- struct sock_reuseport *reuseport_cb;
+
+ sk_for_each_bound_bhash2(sk2, &tb2->owners) {
+ if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
+ continue;
+
+ if (bind_conflict_exist(sk, sk2, sk_uid, relax,
+ reuseport_cb_ok, reuseport_ok))
+ return true;
+ }
+ return false;
+}
+
+/* This should be called only when the corresponding inet_bind_bucket spinlock
+ * is held
+ */
+static int inet_csk_bind_conflict(const struct sock *sk, int port,
+ struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2, /* may be null */
+ bool relax, bool reuseport_ok)
+{
+ struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
kuid_t uid = sock_i_uid((struct sock *)sk);
+ struct sock_reuseport *reuseport_cb;
+ struct inet_bind2_hashbucket *head2;
+ bool reuseport_cb_ok;
+ struct sock *sk2;
+ struct net *net;
+ int l3mdev;
+ u32 hash;
rcu_read_lock();
reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
@@ -150,36 +231,42 @@ static int inet_csk_bind_conflict(const struct sock *sk,
/*
* Unlike other sk lookup places we do not check
* for sk_net here, since _all_ the socks listed
- * in tb->owners list belong to the same net - the
- * one this bucket belongs to.
+ * in tb->owners and tb2->owners list belong
+ * to the same net
*/
- sk_for_each_bound(sk2, &tb->owners) {
- if (sk != sk2 &&
- (!sk->sk_bound_dev_if ||
- !sk2->sk_bound_dev_if ||
- sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- if (reuse && sk2->sk_reuse &&
- sk2->sk_state != TCP_LISTEN) {
- if ((!relax ||
- (!reuseport_ok &&
- reuseport && sk2->sk_reuseport &&
- reuseport_cb_ok &&
- (sk2->sk_state == TCP_TIME_WAIT ||
- uid_eq(uid, sock_i_uid(sk2))))) &&
- inet_rcv_saddr_equal(sk, sk2, true))
- break;
- } else if (!reuseport_ok ||
- !reuseport || !sk2->sk_reuseport ||
- !reuseport_cb_ok ||
- (sk2->sk_state != TCP_TIME_WAIT &&
- !uid_eq(uid, sock_i_uid(sk2)))) {
- if (inet_rcv_saddr_equal(sk, sk2, true))
- break;
- }
- }
+ if (!use_bhash2_on_bind(sk)) {
+ sk_for_each_bound(sk2, &tb->owners)
+ if (bind_conflict_exist(sk, sk2, uid, relax,
+ reuseport_cb_ok, reuseport_ok) &&
+ inet_rcv_saddr_equal(sk, sk2, true))
+ return true;
+
+ return false;
}
- return sk2 != NULL;
+
+ if (tb2 && check_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
+ reuseport_ok))
+ return true;
+
+ net = sock_net(sk);
+
+ /* check there's no conflict with an existing IPV6_ADDR_ANY (if ipv6) or
+ * INADDR_ANY (if ipv4) socket.
+ */
+ hash = get_bhash2_nulladdr_hash(sk, net, port);
+ head2 = &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
+
+ l3mdev = inet_sk_bound_l3mdev(sk);
+ inet_bind_bucket_for_each(tb2, &head2->chain)
+ if (check_bind2_bucket_match_nulladdr(tb2, net, port, l3mdev, sk))
+ break;
+
+ if (tb2 && check_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
+ reuseport_ok))
+ return true;
+
+ return false;
}
/*
@@ -187,16 +274,20 @@ static int inet_csk_bind_conflict(const struct sock *sk,
* inet_bind_hashbucket lock held.
*/
static struct inet_bind_hashbucket *
-inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
+inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret,
+ struct inet_bind2_bucket **tb2_ret,
+ struct inet_bind2_hashbucket **head2_ret, int *port_ret)
{
struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
- int port = 0;
+ struct inet_bind2_hashbucket *head2;
struct inet_bind_hashbucket *head;
struct net *net = sock_net(sk);
- bool relax = false;
int i, low, high, attempt_half;
+ struct inet_bind2_bucket *tb2;
struct inet_bind_bucket *tb;
u32 remaining, offset;
+ bool relax = false;
+ int port = 0;
int l3mdev;
l3mdev = inet_sk_bound_l3mdev(sk);
@@ -235,10 +326,12 @@ other_parity_scan:
head = &hinfo->bhash[inet_bhashfn(net, port,
hinfo->bhash_size)];
spin_lock_bh(&head->lock);
+ tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk,
+ &head2);
inet_bind_bucket_for_each(tb, &head->chain)
- if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
- tb->port == port) {
- if (!inet_csk_bind_conflict(sk, tb, relax, false))
+ if (check_bind_bucket_match(tb, net, port, l3mdev)) {
+ if (!inet_csk_bind_conflict(sk, port, tb, tb2,
+ relax, false))
goto success;
goto next_port;
}
@@ -268,6 +361,8 @@ next_port:
success:
*port_ret = port;
*tb_ret = tb;
+ *tb2_ret = tb2;
+ *head2_ret = head2;
return head;
}
@@ -363,54 +458,81 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
- int ret = 1, port = snum;
+ bool bhash_created = false, bhash2_created = false;
+ struct inet_bind2_bucket *tb2 = NULL;
+ struct inet_bind2_hashbucket *head2;
+ struct inet_bind_bucket *tb = NULL;
struct inet_bind_hashbucket *head;
struct net *net = sock_net(sk);
- struct inet_bind_bucket *tb = NULL;
+ int ret = 1, port = snum;
+ bool found_port = false;
int l3mdev;
l3mdev = inet_sk_bound_l3mdev(sk);
if (!port) {
- head = inet_csk_find_open_port(sk, &tb, &port);
+ head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
if (!head)
return ret;
+ if (tb && tb2)
+ goto success;
+ found_port = true;
+ } else {
+ head = &hinfo->bhash[inet_bhashfn(net, port,
+ hinfo->bhash_size)];
+ spin_lock_bh(&head->lock);
+ inet_bind_bucket_for_each(tb, &head->chain)
+ if (check_bind_bucket_match(tb, net, port, l3mdev))
+ break;
+
+ tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk,
+ &head2);
+ }
+
+ if (!tb) {
+ tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
+ head, port, l3mdev);
if (!tb)
- goto tb_not_found;
- goto success;
+ goto fail_unlock;
+ bhash_created = true;
}
- head = &hinfo->bhash[inet_bhashfn(net, port,
- hinfo->bhash_size)];
- spin_lock_bh(&head->lock);
- inet_bind_bucket_for_each(tb, &head->chain)
- if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
- tb->port == port)
- goto tb_found;
-tb_not_found:
- tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
- net, head, port, l3mdev);
- if (!tb)
- goto fail_unlock;
-tb_found:
- if (!hlist_empty(&tb->owners)) {
+
+ if (!tb2) {
+ tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
+ net, head2, port, l3mdev, sk);
+ if (!tb2)
+ goto fail_unlock;
+ bhash2_created = true;
+ }
+
+ /* If we had to find an open port, we already checked for conflicts */
+ if (!found_port && !hlist_empty(&tb->owners)) {
if (sk->sk_reuse == SK_FORCE_REUSE)
goto success;
if ((tb->fastreuse > 0 && reuse) ||
sk_reuseport_match(tb, sk))
goto success;
- if (inet_csk_bind_conflict(sk, tb, true, true))
+ if (inet_csk_bind_conflict(sk, port, tb, tb2, true, true))
goto fail_unlock;
}
success:
inet_csk_update_fastreuse(tb, sk);
if (!inet_csk(sk)->icsk_bind_hash)
- inet_bind_hash(sk, tb, port);
+ inet_bind_hash(sk, tb, tb2, port);
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
+ WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
ret = 0;
fail_unlock:
+ if (ret) {
+ if (bhash_created)
+ inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
+ if (bhash2_created)
+ inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
+ tb2);
+ }
spin_unlock_bh(&head->lock);
return ret;
}
@@ -957,6 +1079,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
inet_sk_set_state(newsk, TCP_SYN_RECV);
newicsk->icsk_bind_hash = NULL;
+ newicsk->icsk_bind2_hash = NULL;
inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
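
The rewritten inet_csk_bind_conflict() above checks two bhash2 buckets: the one keyed by the socket's own (address, port), and the one keyed by the wildcard address on the same port, since a bind to a specific address can still collide with a socket bound to ANY. A toy user-space model of that two-pass lookup, with hypothetical names and a stand-in hash:

#include <stdbool.h>
#include <stdint.h>

#define TABLE_SIZE 256u

struct owner { uint32_t addr; uint16_t port; struct owner *next; };

static struct owner *buckets[TABLE_SIZE];

static uint32_t portaddr_hash(uint32_t addr, uint16_t port)
{
    /* stand-in for ipv4_portaddr_hash(); any decent mix works here */
    uint32_t h = (addr * 2654435761u) ^ port;
    return h & (TABLE_SIZE - 1);
}

static bool conflict_in(uint32_t slot, uint32_t addr, uint16_t port)
{
    for (struct owner *o = buckets[slot]; o; o = o->next)
        if (o->port == port && o->addr == addr)
            return true;
    return false;
}

static bool bind_conflicts(uint32_t addr, uint16_t port)
{
    /* Pass 1: sockets bound to exactly this address and port. */
    if (conflict_in(portaddr_hash(addr, port), addr, port))
        return true;
    /* Pass 2: sockets bound to the wildcard address on the same port. */
    return conflict_in(portaddr_hash(0 /* INADDR_ANY */, port), 0, port);
}
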
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0b8235fbd440..e8de5e699b3f 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -81,6 +81,41 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
return tb;
}
+struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
+ struct net *net,
+ struct inet_bind2_hashbucket *head,
+ const unsigned short port,
+ int l3mdev,
+ const struct sock *sk)
+{
+ struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
+
+ if (tb) {
+ write_pnet(&tb->ib_net, net);
+ tb->l3mdev = l3mdev;
+ tb->port = port;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+ else
+#endif
+ tb->rcv_saddr = sk->sk_rcv_saddr;
+ INIT_HLIST_HEAD(&tb->owners);
+ hlist_add_head(&tb->node, &head->chain);
+ }
+ return tb;
+}
+
+static bool bind2_bucket_addr_match(struct inet_bind2_bucket *tb2, struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ return ipv6_addr_equal(&tb2->v6_rcv_saddr,
+ &sk->sk_v6_rcv_saddr);
+#endif
+ return tb2->rcv_saddr == sk->sk_rcv_saddr;
+}
+
/*
* Caller must hold hashbucket lock for this tb with local BH disabled
*/
@@ -92,12 +127,25 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
}
}
+/* Caller must hold the lock for the corresponding hashbucket in the bhash table
+ * with local BH disabled
+ */
+void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
+{
+ if (hlist_empty(&tb->owners)) {
+ __hlist_del(&tb->node);
+ kmem_cache_free(cachep, tb);
+ }
+}
+
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum)
+ struct inet_bind2_bucket *tb2, const unsigned short snum)
{
inet_sk(sk)->inet_num = snum;
sk_add_bind_node(sk, &tb->owners);
inet_csk(sk)->icsk_bind_hash = tb;
+ sk_add_bind2_node(sk, &tb2->owners);
+ inet_csk(sk)->icsk_bind2_hash = tb2;
}
/*
@@ -109,6 +157,7 @@ static void __inet_put_port(struct sock *sk)
const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
hashinfo->bhash_size);
struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
+ struct inet_bind2_bucket *tb2;
struct inet_bind_bucket *tb;
spin_lock(&head->lock);
@@ -117,6 +166,13 @@ static void __inet_put_port(struct sock *sk)
inet_csk(sk)->icsk_bind_hash = NULL;
inet_sk(sk)->inet_num = 0;
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+
+ if (inet_csk(sk)->icsk_bind2_hash) {
+ tb2 = inet_csk(sk)->icsk_bind2_hash;
+ __sk_del_bind2_node(sk);
+ inet_csk(sk)->icsk_bind2_hash = NULL;
+ inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
+ }
spin_unlock(&head->lock);
}
@@ -133,14 +189,19 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
unsigned short port = inet_sk(child)->inet_num;
const int bhash = inet_bhashfn(sock_net(sk), port,
- table->bhash_size);
+ table->bhash_size);
struct inet_bind_hashbucket *head = &table->bhash[bhash];
+ struct inet_bind2_hashbucket *head_bhash2;
+ bool created_inet_bind_bucket = false;
+ struct net *net = sock_net(sk);
+ struct inet_bind2_bucket *tb2;
struct inet_bind_bucket *tb;
int l3mdev;
spin_lock(&head->lock);
tb = inet_csk(sk)->icsk_bind_hash;
- if (unlikely(!tb)) {
+ tb2 = inet_csk(sk)->icsk_bind2_hash;
+ if (unlikely(!tb || !tb2)) {
spin_unlock(&head->lock);
return -ENOENT;
}
@@ -153,25 +214,45 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
* as that of the child socket. We have to look up or
* create a new bind bucket for the child here. */
inet_bind_bucket_for_each(tb, &head->chain) {
- if (net_eq(ib_net(tb), sock_net(sk)) &&
- tb->l3mdev == l3mdev && tb->port == port)
+ if (check_bind_bucket_match(tb, net, port, l3mdev))
break;
}
if (!tb) {
tb = inet_bind_bucket_create(table->bind_bucket_cachep,
- sock_net(sk), head, port,
- l3mdev);
+ net, head, port, l3mdev);
if (!tb) {
spin_unlock(&head->lock);
return -ENOMEM;
}
+ created_inet_bind_bucket = true;
}
inet_csk_update_fastreuse(tb, child);
+
+ goto bhash2_find;
+ } else if (!bind2_bucket_addr_match(tb2, child)) {
+ l3mdev = inet_sk_bound_l3mdev(sk);
+
+bhash2_find:
+ tb2 = inet_bind2_bucket_find(table, net, port, l3mdev, child,
+ &head_bhash2);
+ if (!tb2) {
+ tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
+ net, head_bhash2, port,
+ l3mdev, child);
+ if (!tb2)
+ goto error;
+ }
}
- inet_bind_hash(child, tb, port);
+ inet_bind_hash(child, tb, tb2, port);
spin_unlock(&head->lock);
return 0;
+
+error:
+ if (created_inet_bind_bucket)
+ inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
+ spin_unlock(&head->lock);
+ return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
@@ -373,13 +454,11 @@ begin:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_hash != hash)
continue;
- if (likely(INET_MATCH(sk, net, acookie,
- saddr, daddr, ports, dif, sdif))) {
+ if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
goto out;
- if (unlikely(!INET_MATCH(sk, net, acookie,
- saddr, daddr, ports,
- dif, sdif))) {
+ if (unlikely(!inet_match(net, sk, acookie,
+ ports, dif, sdif))) {
sock_gen_put(sk);
goto begin;
}
@@ -428,8 +507,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
if (sk2->sk_hash != hash)
continue;
- if (likely(INET_MATCH(sk2, net, acookie,
- saddr, daddr, ports, dif, sdif))) {
+ if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
if (sk2->sk_state == TCP_TIME_WAIT) {
tw = inet_twsk(sk2);
if (twsk_unique(sk, sk2, twp))
@@ -495,16 +573,14 @@ static bool inet_ehash_lookup_by_sk(struct sock *sk,
if (esk->sk_hash != sk->sk_hash)
continue;
if (sk->sk_family == AF_INET) {
- if (unlikely(INET_MATCH(esk, net, acookie,
- sk->sk_daddr,
- sk->sk_rcv_saddr,
+ if (unlikely(inet_match(net, esk, acookie,
ports, dif, sdif))) {
return true;
}
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
- if (unlikely(INET6_MATCH(esk, net,
+ if (unlikely(inet6_match(net, esk,
&sk->sk_v6_daddr,
&sk->sk_v6_rcv_saddr,
ports, dif, sdif))) {
@@ -680,6 +756,76 @@ void inet_unhash(struct sock *sk)
}
EXPORT_SYMBOL_GPL(inet_unhash);
+static bool check_bind2_bucket_match(struct inet_bind2_bucket *tb,
+ struct net *net, unsigned short port,
+ int l3mdev, struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ return net_eq(ib2_net(tb), net) && tb->port == port &&
+ tb->l3mdev == l3mdev &&
+ ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
+ else
+#endif
+ return net_eq(ib2_net(tb), net) && tb->port == port &&
+ tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
+}
+
+bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb,
+ struct net *net, const unsigned short port,
+ int l3mdev, const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr nulladdr = {};
+
+ if (sk->sk_family == AF_INET6)
+ return net_eq(ib2_net(tb), net) && tb->port == port &&
+ tb->l3mdev == l3mdev &&
+ ipv6_addr_equal(&tb->v6_rcv_saddr, &nulladdr);
+ else
+#endif
+ return net_eq(ib2_net(tb), net) && tb->port == port &&
+ tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
+}
+
+static struct inet_bind2_hashbucket *
+inet_bhashfn_portaddr(struct inet_hashinfo *hinfo, const struct sock *sk,
+ const struct net *net, unsigned short port)
+{
+ u32 hash;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
+ else
+#endif
+ hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
+ return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
+}
+
+/* This should only be called when the spinlock for the socket's corresponding
+ * bind_hashbucket is held
+ */
+struct inet_bind2_bucket *
+inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net,
+ const unsigned short port, int l3mdev, struct sock *sk,
+ struct inet_bind2_hashbucket **head)
+{
+ struct inet_bind2_bucket *bhash2 = NULL;
+ struct inet_bind2_hashbucket *h;
+
+ h = inet_bhashfn_portaddr(hinfo, sk, net, port);
+ inet_bind_bucket_for_each(bhash2, &h->chain) {
+ if (check_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
+ break;
+ }
+
+ if (head)
+ *head = h;
+
+ return bhash2;
+}
+
/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
* Note that we use 32bit integers (vs RFC 'short integers')
* because 2^16 is not a multiple of num_ephemeral and this
@@ -700,10 +846,13 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
struct inet_timewait_sock *tw = NULL;
+ struct inet_bind2_hashbucket *head2;
struct inet_bind_hashbucket *head;
int port = inet_sk(sk)->inet_num;
struct net *net = sock_net(sk);
+ struct inet_bind2_bucket *tb2;
struct inet_bind_bucket *tb;
+ bool tb_created = false;
u32 remaining, offset;
int ret, i, low, high;
int l3mdev;
@@ -760,8 +909,7 @@ other_parity_scan:
* the established check is already unique enough.
*/
inet_bind_bucket_for_each(tb, &head->chain) {
- if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
- tb->port == port) {
+ if (check_bind_bucket_match(tb, net, port, l3mdev)) {
if (tb->fastreuse >= 0 ||
tb->fastreuseport >= 0)
goto next_port;
@@ -779,6 +927,7 @@ other_parity_scan:
spin_unlock_bh(&head->lock);
return -ENOMEM;
}
+ tb_created = true;
tb->fastreuse = -1;
tb->fastreuseport = -1;
goto ok;
@@ -794,6 +943,17 @@ next_port:
return -EADDRNOTAVAIL;
ok:
+ /* Find the corresponding tb2 bucket since we need to
+ * add the socket to the bhash2 table as well
+ */
+ tb2 = inet_bind2_bucket_find(hinfo, net, port, l3mdev, sk, &head2);
+ if (!tb2) {
+ tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
+ head2, port, l3mdev, sk);
+ if (!tb2)
+ goto error;
+ }
+
/* Here we want to add a little bit of randomness to the next source
* port that will be chosen. We use a max() with a random here so that
* on low contention the randomness is maximal and on high contention
@@ -803,7 +963,7 @@ ok:
WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
- inet_bind_hash(sk, tb, port);
+ inet_bind_hash(sk, tb, tb2, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
@@ -815,6 +975,12 @@ ok:
inet_twsk_deschedule_put(tw);
local_bh_enable();
return 0;
+
+error:
+ if (tb_created)
+ inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
+ spin_unlock_bh(&head->lock);
+ return -ENOMEM;
}
/*
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 9e0bbd026560..0ec501845cb3 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -52,7 +52,8 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
spin_unlock(lock);
/* Disassociate with bind bucket. */
- bhead = &hashinfo->bhash[tw->tw_bslot];
+ bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
+ hashinfo->bhash_size)];
spin_lock(&bhead->lock);
inet_twsk_bind_unhash(tw, hashinfo);
@@ -111,12 +112,8 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
Note, that any socket with inet->num != 0 MUST be bound in
binding cache, even if it is closed.
*/
- /* Cache inet_bhashfn(), because 'struct net' might be no longer
- * available later in inet_twsk_kill().
- */
- tw->tw_bslot = inet_bhashfn(twsk_net(tw), inet->inet_num,
- hashinfo->bhash_size);
- bhead = &hashinfo->bhash[tw->tw_bslot];
+ bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
+ hashinfo->bhash_size)];
spin_lock(&bhead->lock);
tw->tw_tb = icsk->icsk_bind_hash;
WARN_ON(!icsk->icsk_bind_hash);
@@ -257,3 +254,50 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
}
}
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
+
+void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
+{
+ struct inet_timewait_sock *tw;
+ struct sock *sk;
+ struct hlist_nulls_node *node;
+ unsigned int slot;
+
+ for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+ struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+restart_rcu:
+ cond_resched();
+ rcu_read_lock();
+restart:
+ sk_nulls_for_each_rcu(sk, node, &head->chain) {
+ if (sk->sk_state != TCP_TIME_WAIT)
+ continue;
+ tw = inet_twsk(sk);
+ if ((tw->tw_family != family) ||
+ refcount_read(&twsk_net(tw)->ns.count))
+ continue;
+
+ if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
+ continue;
+
+ if (unlikely((tw->tw_family != family) ||
+ refcount_read(&twsk_net(tw)->ns.count))) {
+ inet_twsk_put(tw);
+ goto restart;
+ }
+
+ rcu_read_unlock();
+ local_bh_disable();
+ inet_twsk_deschedule_put(tw);
+ local_bh_enable();
+ goto restart_rcu;
+ }
+ /* If the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(node) != slot)
+ goto restart;
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL_GPL(inet_twsk_purge);
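
inet_twsk_purge() above walks the established hash under RCU, pins candidate timewait sockets with refcount_inc_not_zero(), re-checks the family and netns tests after pinning, and restarts if anything raced. The pin-and-recheck half of that pattern, as a user-space sketch using C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

struct node {
    atomic_int refcnt;
    int family;
    struct node *next;
};

/* Try to pin a node that may be freed concurrently: only succeed if the
 * reference count is still non-zero, then re-check the predicate, since
 * the node may have been recycled between the test and the increment.
 */
static bool pin_if_matching(struct node *n, int family)
{
    int old = atomic_load(&n->refcnt);

    do {
        if (old == 0)
            return false;               /* already being torn down */
    } while (!atomic_compare_exchange_weak(&n->refcnt, &old, old + 1));

    if (n->family != family) {          /* recheck after pinning */
        atomic_fetch_sub(&n->refcnt, 1);
        return false;
    }
    return true;
}
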
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index aff707988e23..bd135165482a 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -45,8 +45,7 @@ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, un
fl4.saddr = saddr;
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
- if (!fl4.flowi4_oif)
- fl4.flowi4_oif = l3mdev_master_ifindex(dev);
+ fl4.flowi4_l3mdev = l3mdev_master_ifindex(dev);
fl4.flowi4_mark = skb->mark;
fl4.flowi4_flags = flags;
fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 4eed5afca392..918c61fda0f3 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -80,6 +80,7 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
struct iphdr *niph;
struct icmphdr *icmph;
unsigned int len;
+ int dataoff;
__wsum csum;
u8 proto;
@@ -99,10 +100,11 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
return NULL;
+ dataoff = ip_hdrlen(oldskb);
proto = ip_hdr(oldskb)->protocol;
if (!skb_csum_unnecessary(oldskb) &&
- nf_reject_verify_csum(proto) &&
+ nf_reject_verify_csum(oldskb, dataoff, proto) &&
nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
return NULL;
@@ -311,6 +313,7 @@ EXPORT_SYMBOL_GPL(nf_send_reset);
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
{
struct iphdr *iph = ip_hdr(skb_in);
+ int dataoff = ip_hdrlen(skb_in);
u8 proto = iph->protocol;
if (iph->frag_off & htons(IP_OFFSET))
@@ -320,12 +323,13 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
nf_reject_fill_skb_dst(skb_in) < 0)
return;
- if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
+ if (skb_csum_unnecessary(skb_in) ||
+ !nf_reject_verify_csum(skb_in, dataoff, proto)) {
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
return;
}
- if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
+ if (nf_ip_checksum(skb_in, hook, dataoff, proto) == 0)
icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
EXPORT_SYMBOL_GPL(nf_send_unreach);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 444d4a2a422d..356f535f3443 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1727,6 +1727,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct in_device *in_dev = __in_dev_get_rcu(dev);
unsigned int flags = RTCF_MULTICAST;
struct rtable *rth;
+ bool no_policy;
u32 itag = 0;
int err;
@@ -1737,8 +1738,12 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (our)
flags |= RTCF_LOCAL;
+ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
+ if (no_policy)
+ IPCB(skb)->flags |= IPSKB_NOPOLICY;
+
rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
- IN_DEV_ORCONF(in_dev, NOPOLICY), false);
+ no_policy, false);
if (!rth)
return -ENOBUFS;
@@ -1797,7 +1802,7 @@ static int __mkroute_input(struct sk_buff *skb,
struct rtable *rth;
int err;
struct in_device *out_dev;
- bool do_cache;
+ bool do_cache, no_policy;
u32 itag = 0;
/* get a working reference to the output device */
@@ -1842,6 +1847,10 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
+ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
+ if (no_policy)
+ IPCB(skb)->flags |= IPSKB_NOPOLICY;
+
fnhe = find_exception(nhc, daddr);
if (do_cache) {
if (fnhe)
@@ -1854,8 +1863,7 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
- rth = rt_dst_alloc(out_dev->dev, 0, res->type,
- IN_DEV_ORCONF(in_dev, NOPOLICY),
+ rth = rt_dst_alloc(out_dev->dev, 0, res->type, no_policy,
IN_DEV_ORCONF(out_dev, NOXFRM));
if (!rth) {
err = -ENOBUFS;
@@ -2230,6 +2238,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct rtable *rth;
struct flowi4 fl4;
bool do_cache = true;
+ bool no_policy;
/* IP on this device is disabled. */
@@ -2348,6 +2357,10 @@ brd_input:
RT_CACHE_STAT_INC(in_brd);
local_input:
+ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
+ if (no_policy)
+ IPCB(skb)->flags |= IPSKB_NOPOLICY;
+
do_cache &= res->fi && !itag;
if (do_cache) {
struct fib_nh_common *nhc = FIB_RES_NHC(*res);
@@ -2362,7 +2375,7 @@ local_input:
rth = rt_dst_alloc(ip_rt_get_dev(net, res),
flags | RTCF_LOCAL, res->type,
- IN_DEV_ORCONF(in_dev, NOPOLICY), false);
+ no_policy, false);
if (!rth)
goto e_nobufs;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 028513d3e2a2..9984d23a7f3e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -4604,6 +4604,12 @@ void __init tcp_init(void)
SLAB_HWCACHE_ALIGN | SLAB_PANIC |
SLAB_ACCOUNT,
NULL);
+ tcp_hashinfo.bind2_bucket_cachep =
+ kmem_cache_create("tcp_bind2_bucket",
+ sizeof(struct inet_bind2_bucket), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+ SLAB_ACCOUNT,
+ NULL);
/* Size and allocate the main established and bind bucket
* hash tables.
@@ -4626,8 +4632,9 @@ void __init tcp_init(void)
if (inet_ehash_locks_alloc(&tcp_hashinfo))
panic("TCP: failed to alloc ehash_locks");
tcp_hashinfo.bhash =
- alloc_large_system_hash("TCP bind",
- sizeof(struct inet_bind_hashbucket),
+ alloc_large_system_hash("TCP bind bhash tables",
+ sizeof(struct inet_bind_hashbucket) +
+ sizeof(struct inet_bind2_hashbucket),
tcp_hashinfo.ehash_mask + 1,
17, /* one slot per 128 KB of memory */
0,
@@ -4636,9 +4643,12 @@ void __init tcp_init(void)
0,
64 * 1024);
tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
+ tcp_hashinfo.bhash2 =
+ (struct inet_bind2_hashbucket *)(tcp_hashinfo.bhash + tcp_hashinfo.bhash_size);
for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
spin_lock_init(&tcp_hashinfo.bhash[i].lock);
INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
+ INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
}
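
tcp_init() above sizes one boot-time allocation for both bind tables (one inet_bind_hashbucket plus one inet_bind2_hashbucket per slot) and derives bhash2 by pointing just past the last bhash slot. A small sketch of that carve-out with generic types:

#include <stdlib.h>

struct bucket  { void *chain; };
struct bucket2 { void *chain; };

struct tables {
    struct bucket  *bhash;
    struct bucket2 *bhash2;
};

static int alloc_tables(struct tables *t, size_t nslots)
{
    /* One allocation holds nslots of each table, back to back. */
    t->bhash = malloc(nslots * (sizeof(struct bucket) + sizeof(struct bucket2)));
    if (!t->bhash)
        return -1;
    t->bhash2 = (struct bucket2 *)(t->bhash + nslots);
    return 0;
}
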
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index c7d30a3bbd81..075e744bfb48 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -310,7 +310,7 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
*/
bytes = min_t(unsigned long,
sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
- GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
+ GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER);
segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
return min(segs, 0x7FU);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index b0918839bee7..68178e7280ce 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -372,7 +372,7 @@ static void cubictcp_state(struct sock *sk, u8 new_state)
* We apply another 100% factor because @rate is doubled at this point.
* We cap the cushion to 1ms.
*/
-static u32 hystart_ack_delay(struct sock *sk)
+static u32 hystart_ack_delay(const struct sock *sk)
{
unsigned long rate;
@@ -380,7 +380,7 @@ static u32 hystart_ack_delay(struct sock *sk)
if (!rate)
return 0;
return min_t(u64, USEC_PER_MSEC,
- div64_ul((u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
+ div64_ul((u64)sk->sk_gso_max_size * 4 * USEC_PER_SEC, rate));
}
static void hystart_update(struct sock *sk, u32 delay)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 97cfcd85f84e..3231af73e430 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2620,12 +2620,12 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost,
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
tp->prior_cwnd - 1;
sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
- } else if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost) {
- sndcnt = min_t(int, delta,
- max_t(int, tp->prr_delivered - tp->prr_out,
- newly_acked_sacked) + 1);
} else {
- sndcnt = min(delta, newly_acked_sacked);
+ sndcnt = max_t(int, tp->prr_delivered - tp->prr_out,
+ newly_acked_sacked);
+ if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost)
+ sndcnt++;
+ sndcnt = min(delta, sndcnt);
}
/* Force a fast retransmit upon entering fast recovery */
sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
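
The tcp_cwnd_reduction() change above merges the two non-proportional branches: the send quota now always starts from max(prr_delivered - prr_out, newly_acked_sacked), gains one extra segment only when SND.UNA advanced without new losses, and is finally clamped to delta. A tiny stand-alone version of that rule with illustrative numbers:

#include <stdio.h>

static int prr_sndcnt(int delta, int prr_delivered, int prr_out,
                      int newly_acked_sacked, int una_advanced, int newly_lost)
{
    int sndcnt = prr_delivered - prr_out;

    if (sndcnt < newly_acked_sacked)
        sndcnt = newly_acked_sacked;
    if (una_advanced && !newly_lost)
        sndcnt++;                       /* allow one extra segment */
    return sndcnt < delta ? sndcnt : delta;
}

int main(void)
{
    /* e.g. 3 segments of headroom, 5 delivered vs 4 sent, 1 newly ACKed */
    printf("%d\n", prr_sndcnt(3, 5, 4, 1, 1, 0));   /* prints 2 */
    return 0;
}
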
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 218ad871c0e4..dac2650f3863 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2101,6 +2101,7 @@ bad_packet:
}
discard_it:
+ SKB_DR_OR(drop_reason, NOT_SPECIFIED);
/* Discard frame. */
kfree_skb_reason(skb, drop_reason);
return 0;
@@ -3168,6 +3169,8 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
struct net *net;
+ inet_twsk_purge(&tcp_hashinfo, AF_INET);
+
list_for_each_entry(net, net_exit_list, exit_list)
tcp_fastopen_ctx_destroy(net);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b092228e4342..b4b2284ed4a2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1553,7 +1553,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
* SO_SNDBUF values.
* Also allow first and last skb in retransmit queue to be split.
*/
- limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+ limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
skb != tcp_rtx_queue_head(sk) &&
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9d5071c79c95..aa9f2ec3dc46 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2563,8 +2563,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
struct sock *sk;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- if (INET_MATCH(sk, net, acookie, rmt_addr,
- loc_addr, ports, dif, sdif))
+ if (inet_match(net, sk, acookie, ports, dif, sdif))
return sk;
/* Only check first socket in chain */
break;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 39b2327edc4e..df665d4e8f0f 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -218,11 +218,11 @@ ipv4_connected:
err = -EINVAL;
goto out;
}
- sk->sk_bound_dev_if = usin->sin6_scope_id;
+ WRITE_ONCE(sk->sk_bound_dev_if, usin->sin6_scope_id);
}
if (!sk->sk_bound_dev_if && (addr_type & IPV6_ADDR_MULTICAST))
- sk->sk_bound_dev_if = np->mcast_oif;
+ WRITE_ONCE(sk->sk_bound_dev_if, np->mcast_oif);
/* Connect to link-local address requires an interface */
if (!sk->sk_bound_dev_if) {
@@ -798,7 +798,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
if (src_idx) {
if (fl6->flowi6_oif &&
src_idx != fl6->flowi6_oif &&
- (sk->sk_bound_dev_if != fl6->flowi6_oif ||
+ (READ_ONCE(sk->sk_bound_dev_if) != fl6->flowi6_oif ||
!sk_dev_equal_l3scope(sk, src_idx)))
return -EINVAL;
fl6->flowi6_oif = src_idx;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index f2120e92caf1..36e1d0f8dd06 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -741,7 +741,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
static inline int esp_remove_trailer(struct sk_buff *skb)
{
struct xfrm_state *x = xfrm_input_state(skb);
- struct xfrm_offload *xo = xfrm_offload(skb);
struct crypto_aead *aead = x->data;
int alen, hlen, elen;
int padlen, trimlen;
@@ -753,11 +752,6 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
elen = skb->len - hlen;
- if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
- ret = xo->proto;
- goto out;
- }
-
ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
BUG_ON(ret);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index a758f2ab7b51..7d53d62783b1 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -71,12 +71,12 @@ begin:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_hash != hash)
continue;
- if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif))
+ if (!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
continue;
if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
goto out;
- if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif))) {
+ if (unlikely(!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))) {
sock_gen_put(sk);
goto begin;
}
@@ -268,7 +268,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
if (sk2->sk_hash != hash)
continue;
- if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports,
+ if (likely(inet6_match(net, sk2, saddr, daddr, ports,
dif, sdif))) {
if (sk2->sk_state == TCP_TIME_WAIT) {
tw = inet_twsk(sk2);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index c4fc03c1ac99..d12dba2dd535 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -77,7 +77,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
const struct net_offload *ops;
- int proto;
+ int proto, nexthdr;
struct frag_hdr *fptr;
unsigned int payload_len;
u8 *prevhdr;
@@ -87,6 +87,28 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
bool gso_partial;
skb_reset_network_header(skb);
+ nexthdr = ipv6_has_hopopt_jumbo(skb);
+ if (nexthdr) {
+ const int hophdr_len = sizeof(struct hop_jumbo_hdr);
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ memmove(skb_mac_header(skb) + hophdr_len,
+ skb_mac_header(skb),
+ ETH_HLEN + sizeof(struct ipv6hdr));
+ skb->data += hophdr_len;
+ skb->len -= hophdr_len;
+ skb->network_header += hophdr_len;
+ skb->mac_header += hophdr_len;
+ ipv6h = (struct ipv6hdr *)skb->data;
+ ipv6h->nexthdr = nexthdr;
+ }
nhoff = skb_network_header(skb) - skb_mac_header(skb);
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
@@ -320,15 +342,43 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct net_offload *ops;
- struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
+ struct ipv6hdr *iph;
int err = -ENOSYS;
+ u32 payload_len;
if (skb->encapsulation) {
skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
skb_set_inner_network_header(skb, nhoff);
}
- iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
+ payload_len = skb->len - nhoff - sizeof(*iph);
+ if (unlikely(payload_len > IPV6_MAXPLEN)) {
+ struct hop_jumbo_hdr *hop_jumbo;
+ int hoplen = sizeof(*hop_jumbo);
+
+ /* Move network header left */
+ memmove(skb_mac_header(skb) - hoplen, skb_mac_header(skb),
+ skb->transport_header - skb->mac_header);
+ skb->data -= hoplen;
+ skb->len += hoplen;
+ skb->mac_header -= hoplen;
+ skb->network_header -= hoplen;
+ iph = (struct ipv6hdr *)(skb->data + nhoff);
+ hop_jumbo = (struct hop_jumbo_hdr *)(iph + 1);
+
+ /* Build hop-by-hop options */
+ hop_jumbo->nexthdr = iph->nexthdr;
+ hop_jumbo->hdrlen = 0;
+ hop_jumbo->tlv_type = IPV6_TLV_JUMBO;
+ hop_jumbo->tlv_len = 4;
+ hop_jumbo->jumbo_payload_len = htonl(payload_len + hoplen);
+
+ iph->nexthdr = NEXTHDR_HOP;
+ iph->payload_len = 0;
+ } else {
+ iph = (struct ipv6hdr *)(skb->data + nhoff);
+ iph->payload_len = htons(payload_len);
+ }
nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
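
The GRO completion path above inserts a hop-by-hop "jumbo payload" option whenever the merged payload exceeds IPV6_MAXPLEN, while the GSO path earlier in the file strips it again before segmenting. A user-space sketch of the 8-byte option being built, mirroring the field assignments in the hunk (the struct definition here is a stand-in for the kernel's hop_jumbo_hdr):

#include <stdint.h>
#include <arpa/inet.h>

struct jumbo_hbh {
    uint8_t  nexthdr;            /* protocol that follows, e.g. TCP = 6 */
    uint8_t  hdrlen;             /* extension header length in 8-byte units, minus 1 */
    uint8_t  tlv_type;           /* IPV6_TLV_JUMBO == 0xc2 */
    uint8_t  tlv_len;            /* 4: the option carries a 32-bit length */
    uint32_t jumbo_payload_len;  /* big endian, includes this header */
};

static void fill_jumbo_hbh(struct jumbo_hbh *h, uint8_t proto, uint32_t payload_len)
{
    h->nexthdr = proto;
    h->hdrlen = 0;                       /* the whole option fits in 8 bytes */
    h->tlv_type = 0xc2;                  /* IPV6_TLV_JUMBO */
    h->tlv_len = 4;
    h->jumbo_payload_len = htonl(payload_len + sizeof(*h));
}
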
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index afa5bd4ad167..4081b12a01ff 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -182,7 +182,9 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
#endif
mtu = ip6_skb_dst_mtu(skb);
- if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
+ if (skb_is_gso(skb) &&
+ !(IP6CB(skb)->flags & IP6SKB_FAKEJUMBO) &&
+ !skb_gso_validate_network_len(skb, mtu))
return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
if ((skb->len > mtu && !skb_is_gso(skb)) ||
@@ -252,6 +254,8 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
struct inet6_dev *idev = ip6_dst_idev(dst);
+ struct hop_jumbo_hdr *hop_jumbo;
+ int hoplen = sizeof(*hop_jumbo);
unsigned int head_room;
struct ipv6hdr *hdr;
u8 proto = fl6->flowi6_proto;
@@ -259,7 +263,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
int hlimit = -1;
u32 mtu;
- head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dev);
+ head_room = sizeof(struct ipv6hdr) + hoplen + LL_RESERVED_SPACE(dev);
if (opt)
head_room += opt->opt_nflen + opt->opt_flen;
@@ -282,6 +286,20 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
&fl6->saddr);
}
+ if (unlikely(seg_len > IPV6_MAXPLEN)) {
+ hop_jumbo = skb_push(skb, hoplen);
+
+ hop_jumbo->nexthdr = proto;
+ hop_jumbo->hdrlen = 0;
+ hop_jumbo->tlv_type = IPV6_TLV_JUMBO;
+ hop_jumbo->tlv_len = 4;
+ hop_jumbo->jumbo_payload_len = htonl(seg_len + hoplen);
+
+ proto = IPPROTO_HOPOPTS;
+ seg_len = 0;
+ IP6CB(skb)->flags |= IP6SKB_FAKEJUMBO;
+ }
+
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 8ce60ab89015..857713d7a38a 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -31,6 +31,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
int strict = (ipv6_addr_type(&iph->daddr) &
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
struct flowi6 fl6 = {
+ .flowi6_l3mdev = l3mdev_master_ifindex(dev),
.flowi6_mark = skb->mark,
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,
@@ -42,8 +43,6 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
fl6.flowi6_oif = sk->sk_bound_dev_if;
else if (strict)
fl6.flowi6_oif = dev->ifindex;
- else
- fl6.flowi6_oif = l3mdev_master_ifindex(dev);
fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
dst = ip6_route_output(net, sk, &fl6);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index dffeaaaadcde..f61d4f18e1cf 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -31,7 +31,7 @@ static bool nf_reject_v6_csum_ok(struct sk_buff *skb, int hook)
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
- if (!nf_reject_verify_csum(proto))
+ if (!nf_reject_verify_csum(skb, thoff, proto))
return true;
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
@@ -388,7 +388,7 @@ static bool reject6_csum_ok(struct sk_buff *skb, int hook)
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
- if (!nf_reject_verify_csum(proto))
+ if (!nf_reject_verify_csum(skb, thoff, proto))
return true;
return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 60bdec257ba7..f37dd4aa91c6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1762,6 +1762,7 @@ bad_packet:
}
discard_it:
+ SKB_DR_OR(drop_reason, NOT_SPECIFIED);
kfree_skb_reason(skb, drop_reason);
return 0;
@@ -2206,9 +2207,15 @@ static void __net_exit tcpv6_net_exit(struct net *net)
inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
+static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
+{
+ inet_twsk_purge(&tcp_hashinfo, AF_INET6);
+}
+
static struct pernet_operations tcpv6_net_ops = {
.init = tcpv6_net_init,
.exit = tcpv6_net_exit,
+ .exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3fc97d4621ac..55afd7f39c04 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -105,7 +105,7 @@ static int compute_score(struct sock *sk, struct net *net,
const struct in6_addr *daddr, unsigned short hnum,
int dif, int sdif)
{
- int score;
+ int bound_dev_if, score;
struct inet_sock *inet;
bool dev_match;
@@ -132,10 +132,11 @@ static int compute_score(struct sock *sk, struct net *net,
score++;
}
- dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
if (!dev_match)
return -1;
- if (sk->sk_bound_dev_if)
+ if (bound_dev_if)
score++;
if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
@@ -789,7 +790,7 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
(inet->inet_dport && inet->inet_dport != rmt_port) ||
(!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
- !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
+ !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
(!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
return false;
@@ -1043,7 +1044,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
if (sk->sk_state == TCP_ESTABLISHED &&
- INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
+ inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
return sk;
/* Only check first socket in chain */
break;
@@ -1433,7 +1434,7 @@ do_udp_sendmsg:
}
if (!fl6->flowi6_oif)
- fl6->flowi6_oif = sk->sk_bound_dev_if;
+ fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
if (!fl6->flowi6_oif)
fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 175a162eec58..11e1a3a3e442 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2826,8 +2826,10 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX];
int err;
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
- BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+ err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+ BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+ if (err)
+ return err;
memset(ext_hdrs, 0, sizeof(ext_hdrs));
err = parse_exthdrs(skb, hdr, ext_hdrs);
@@ -2898,7 +2900,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
break;
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg))
+ if (aalg_tmpl_set(t, aalg) && aalg->available)
sz += sizeof(struct sadb_comb);
}
return sz + sizeof(struct sadb_prop);
@@ -2916,7 +2918,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!ealg->pfkey_supported)
continue;
- if (!(ealg_tmpl_set(t, ealg)))
+ if (!(ealg_tmpl_set(t, ealg) && ealg->available))
continue;
for (k = 1; ; k++) {
@@ -2927,7 +2929,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
if (!aalg->pfkey_supported)
continue;
- if (aalg_tmpl_set(t, aalg))
+ if (aalg_tmpl_set(t, aalg) && aalg->available)
sz += sizeof(struct sadb_comb);
}
}
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 6af09e188e52..4db5a554bdbd 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -50,11 +50,13 @@ static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
sk_for_each_bound(sk, &l2tp_ip_bind_table) {
const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
+ int bound_dev_if;
if (!net_eq(sock_net(sk), net))
continue;
- if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && dif && bound_dev_if != dif)
continue;
if (inet->inet_rcv_saddr && laddr &&
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 217c7192691e..c6ff8bf9b55f 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -62,11 +62,13 @@ static struct sock *__l2tp_ip6_bind_lookup(const struct net *net,
const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
+ int bound_dev_if;
if (!net_eq(sock_net(sk), net))
continue;
- if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && dif && bound_dev_if != dif)
continue;
if (sk_laddr && !ipv6_addr_any(sk_laddr) &&
@@ -445,7 +447,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
lsa->l2tp_conn_id = lsk->conn_id;
}
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
- lsa->l2tp_scope_id = sk->sk_bound_dev_if;
+ lsa->l2tp_scope_id = READ_ONCE(sk->sk_bound_dev_if);
return sizeof(*lsa);
}
@@ -560,7 +562,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
if (fl6.flowi6_oif == 0)
- fl6.flowi6_oif = sk->sk_bound_dev_if;
+ fl6.flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
if (msg->msg_controllen) {
opt = &opt_space;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f1d211e61e49..f7896f257e1b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1174,7 +1174,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
changed |= BSS_CHANGED_HE_OBSS_PD;
- if (params->he_bss_color.enabled)
+ if (params->beacon.he_bss_color.enabled)
changed |= BSS_CHANGED_HE_BSS_COLOR;
}
@@ -1231,7 +1231,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p;
sdata->vif.bss_conf.twt_responder = params->twt_responder;
sdata->vif.bss_conf.he_obss_pd = params->he_obss_pd;
- sdata->vif.bss_conf.he_bss_color = params->he_bss_color;
+ sdata->vif.bss_conf.he_bss_color = params->beacon.he_bss_color;
sdata->vif.bss_conf.s1g = params->chandef.chan->band ==
NL80211_BAND_S1GHZ;
@@ -1316,6 +1316,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_beacon_data *params)
{
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_bss_conf *bss_conf;
struct beacon_data *old;
int err;
@@ -1335,10 +1336,28 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
err = ieee80211_assign_beacon(sdata, params, NULL, NULL);
if (err < 0)
return err;
+
+ bss_conf = &sdata->vif.bss_conf;
+ if (params->he_bss_color_valid &&
+ params->he_bss_color.enabled != bss_conf->he_bss_color.enabled) {
+ bss_conf->he_bss_color.enabled = params->he_bss_color.enabled;
+ err |= BSS_CHANGED_HE_BSS_COLOR;
+ }
+
ieee80211_bss_info_change_notify(sdata, err);
return 0;
}
+static void ieee80211_free_next_beacon(struct ieee80211_sub_if_data *sdata)
+{
+ if (!sdata->u.ap.next_beacon)
+ return;
+
+ kfree(sdata->u.ap.next_beacon->mbssid_ies);
+ kfree(sdata->u.ap.next_beacon);
+ sdata->u.ap.next_beacon = NULL;
+}
+
static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1373,11 +1392,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
mutex_unlock(&local->mtx);
- if (sdata->u.ap.next_beacon) {
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
- }
+ ieee80211_free_next_beacon(sdata);
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
@@ -2928,7 +2943,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
return 0;
- ap = sdata->u.mgd.associated->bssid;
+ ap = sdata->u.mgd.bssid;
rcu_read_lock();
list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
@@ -3312,9 +3327,7 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
NULL, NULL);
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
+ ieee80211_free_next_beacon(sdata);
if (err < 0)
return err;
@@ -3470,9 +3483,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
(params->n_counter_offsets_presp >
IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) {
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
+ ieee80211_free_next_beacon(sdata);
return -EINVAL;
}
@@ -3484,9 +3495,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
err = ieee80211_assign_beacon(sdata, &params->beacon_csa, &csa, NULL);
if (err < 0) {
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
+ ieee80211_free_next_beacon(sdata);
return err;
}
*changed |= err;
@@ -3576,11 +3585,8 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
static void ieee80211_color_change_abort(struct ieee80211_sub_if_data *sdata)
{
sdata->vif.color_change_active = false;
- if (sdata->u.ap.next_beacon) {
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
- }
+
+ ieee80211_free_next_beacon(sdata);
cfg80211_color_change_aborted_notify(sdata->dev);
}
@@ -4321,9 +4327,7 @@ ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata,
ret = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
NULL, NULL);
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
+ ieee80211_free_next_beacon(sdata);
if (ret < 0)
return ret;
@@ -4366,11 +4370,7 @@ ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata,
err = ieee80211_assign_beacon(sdata, &params->beacon_color_change,
NULL, &color_change);
if (err < 0) {
- if (sdata->u.ap.next_beacon) {
- kfree(sdata->u.ap.next_beacon->mbssid_ies);
- kfree(sdata->u.ap.next_beacon);
- sdata->u.ap.next_beacon = NULL;
- }
+ ieee80211_free_next_beacon(sdata);
return err;
}
*changed |= err;
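
[Editor's note] The cfg.c hunks above collapse five copies of the same teardown into ieee80211_free_next_beacon(). The idiom is free-and-clear: NULLing the pointer makes the helper safe to call on any error or teardown path, even twice. A hedged userspace sketch with made-up structure names:

#include <stdlib.h>

struct beacon {
	void *mbssid_ies;
};

struct ap_data {
	struct beacon *next_beacon;
};

/* Idempotent cleanup: callers need not check the pointer first. */
static void free_next_beacon(struct ap_data *ap)
{
	if (!ap->next_beacon)
		return;

	free(ap->next_beacon->mbssid_ies);
	free(ap->next_beacon);
	ap->next_beacon = NULL;
}
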
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index e490c3da3aca..cf71484658c6 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -337,7 +337,7 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
dev_kfree_skb(skb);
return -ENOTCONN;
}
- memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN);
+ memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, addr, ETH_ALEN);
sdata_unlock(sdata);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d4a7ba4a8202..86ef0a46a68c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -453,9 +453,10 @@ struct ieee80211_if_managed {
bool nullfunc_failed;
u8 connection_loss:1,
driver_disconnect:1,
- reconnect:1;
+ reconnect:1,
+ associated:1;
- struct cfg80211_bss *associated;
+ struct cfg80211_bss *assoc_bss;
struct ieee80211_mgd_auth_data *auth_data;
struct ieee80211_mgd_assoc_data *assoc_data;
@@ -1148,6 +1149,9 @@ struct tpt_led_trigger {
* a scan complete for an aborted scan.
* @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
* cancelled.
+ * @SCAN_BEACON_WAIT: Set whenever we're passive scanning because of radar/no-IR
+ * and could send a probe request after receiving a beacon.
+ * @SCAN_BEACON_DONE: Beacon received, we can now send a probe request
*/
enum {
SCAN_SW_SCANNING,
@@ -1156,6 +1160,8 @@ enum {
SCAN_COMPLETED,
SCAN_ABORTED,
SCAN_HW_CANCELLED,
+ SCAN_BEACON_WAIT,
+ SCAN_BEACON_DONE,
};
/**
@@ -1854,7 +1860,7 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
- u8 *bssid, u8 reason, bool tx);
+ u8 reason, bool tx);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index a48a32f87897..5a385d4146b9 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -287,8 +287,8 @@ static void ieee80211_restart_work(struct work_struct *work)
if (sdata->vif.csa_active) {
sdata_lock(sdata);
ieee80211_sta_connection_lost(sdata,
- sdata->u.mgd.associated->bssid,
- WLAN_REASON_UNSPECIFIED, false);
+ WLAN_REASON_UNSPECIFIED,
+ false);
sdata_unlock(sdata);
}
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 07a96f7c5dc3..58d48dcae030 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1376,7 +1376,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct cfg80211_bss *cbss = ifmgd->associated;
+ struct cfg80211_bss *cbss = ifmgd->assoc_bss;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
enum nl80211_band current_band;
@@ -1398,7 +1398,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
bss->vht_cap_info,
ifmgd->flags,
- ifmgd->associated->bssid, &csa_ie);
+ ifmgd->bssid, &csa_ie);
if (!res) {
ch_switch.timestamp = timestamp;
@@ -1427,7 +1427,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
csa_ie.chandef.chan->band) {
sdata_info(sdata,
"AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
- ifmgd->associated->bssid,
+ ifmgd->bssid,
csa_ie.chandef.chan->center_freq,
csa_ie.chandef.width, csa_ie.chandef.center_freq1,
csa_ie.chandef.center_freq2);
@@ -1440,7 +1440,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
"AP %pM switches to unsupported channel "
"(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), "
"disconnecting\n",
- ifmgd->associated->bssid,
+ ifmgd->bssid,
csa_ie.chandef.chan->center_freq,
csa_ie.chandef.chan->freq_offset,
csa_ie.chandef.width, csa_ie.chandef.center_freq1,
@@ -1456,7 +1456,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
return;
sdata_info(sdata,
"AP %pM tries to chanswitch to same channel, ignore\n",
- ifmgd->associated->bssid);
+ ifmgd->bssid);
ifmgd->csa_ignored_same_chan = true;
return;
}
@@ -2266,7 +2266,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
beacon_loss_count * bss_conf->beacon_int));
- sdata->u.mgd.associated = cbss;
+ sdata->u.mgd.associated = true;
+ sdata->u.mgd.assoc_bss = cbss;
memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
ieee80211_check_rate_mask(sdata);
@@ -2361,7 +2362,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_stop_poll(sdata);
- ifmgd->associated = NULL;
+ ifmgd->associated = false;
+ ifmgd->assoc_bss = NULL;
netif_carrier_off(sdata->dev);
/*
@@ -2608,8 +2610,7 @@ static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- const struct element *ssid;
- u8 *dst = ifmgd->associated->bssid;
+ u8 *dst = ifmgd->bssid;
u8 unicast_limit = max(1, max_probe_tries - 3);
struct sta_info *sta;
@@ -2642,19 +2643,10 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ifmgd->nullfunc_failed = false;
ieee80211_send_nullfunc(sdata->local, sdata, false);
} else {
- int ssid_len;
-
- rcu_read_lock();
- ssid = ieee80211_bss_get_elem(ifmgd->associated, WLAN_EID_SSID);
- if (WARN_ON_ONCE(ssid == NULL))
- ssid_len = 0;
- else
- ssid_len = ssid->datalen;
-
ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
- ssid->data, ssid_len,
- ifmgd->associated->channel);
- rcu_read_unlock();
+ sdata->vif.bss_conf.ssid,
+ sdata->vif.bss_conf.ssid_len,
+ ifmgd->assoc_bss->channel);
}
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -2744,7 +2736,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
sdata_assert_lock(sdata);
if (ifmgd->associated)
- cbss = ifmgd->associated;
+ cbss = ifmgd->assoc_bss;
else if (ifmgd->auth_data)
cbss = ifmgd->auth_data->bss;
else if (ifmgd->assoc_data)
@@ -2809,7 +2801,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
* AP is probably out of range (or not reachable for another
* reason) so remove the bss struct for that AP.
*/
- cfg80211_unlink_bss(local->hw.wiphy, ifmgd->associated);
+ cfg80211_unlink_bss(local->hw.wiphy, ifmgd->assoc_bss);
}
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
@@ -3219,8 +3211,8 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
}
if (ifmgd->associated &&
- ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
- const u8 *bssid = ifmgd->associated->bssid;
+ ether_addr_equal(mgmt->bssid, ifmgd->bssid)) {
+ const u8 *bssid = ifmgd->bssid;
sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n",
bssid, reason_code,
@@ -3262,7 +3254,7 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
return;
if (!ifmgd->associated ||
- !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
+ !ether_addr_equal(mgmt->bssid, ifmgd->bssid))
return;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -3972,7 +3964,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status);
if (ifmgd->associated &&
- ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
+ ether_addr_equal(mgmt->bssid, ifmgd->bssid))
ieee80211_reset_ap_probe(sdata);
}
@@ -4201,9 +4193,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (!ifmgd->associated ||
- !ieee80211_rx_our_beacon(bssid, ifmgd->associated))
+ !ieee80211_rx_our_beacon(bssid, ifmgd->assoc_bss))
return;
- bssid = ifmgd->associated->bssid;
+ bssid = ifmgd->bssid;
if (!(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL))
ieee80211_handle_beacon_sig(sdata, ifmgd, bss_conf,
@@ -4519,7 +4511,7 @@ static void ieee80211_sta_timer(struct timer_list *t)
}
void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
- u8 *bssid, u8 reason, bool tx)
+ u8 reason, bool tx)
{
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -4750,11 +4742,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL &&
ifmgd->associated) {
- u8 bssid[ETH_ALEN];
+ u8 *bssid = ifmgd->bssid;
int max_tries;
- memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
max_tries = max_nullfunc_tries;
else
@@ -4774,7 +4764,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
mlme_dbg(sdata,
"No ack for nullfunc frame to AP %pM, disconnecting.\n",
bssid);
- ieee80211_sta_connection_lost(sdata, bssid,
+ ieee80211_sta_connection_lost(sdata,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
false);
}
@@ -4784,7 +4774,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
mlme_dbg(sdata,
"Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
bssid, probe_wait_ms);
- ieee80211_sta_connection_lost(sdata, bssid,
+ ieee80211_sta_connection_lost(sdata,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
} else if (ifmgd->probe_send_count < max_tries) {
mlme_dbg(sdata,
@@ -4801,7 +4791,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
"No probe response from AP %pM after %dms, disconnecting.\n",
bssid, probe_wait_ms);
- ieee80211_sta_connection_lost(sdata, bssid,
+ ieee80211_sta_connection_lost(sdata,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
}
}
@@ -4934,7 +4924,7 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
.bssid = bssid,
};
- memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+ memcpy(bssid, ifmgd->bssid, ETH_ALEN);
ieee80211_mgd_deauth(sdata, &req);
}
@@ -4956,7 +4946,6 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
mlme_dbg(sdata, "driver requested disconnect after resume\n");
ieee80211_sta_connection_lost(sdata,
- ifmgd->associated->bssid,
WLAN_REASON_UNSPECIFIED,
true);
sdata_unlock(sdata);
@@ -4967,7 +4956,6 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_HW_RESTART;
mlme_dbg(sdata, "driver requested disconnect after hardware restart\n");
ieee80211_sta_connection_lost(sdata,
- ifmgd->associated->bssid,
WLAN_REASON_UNSPECIFIED,
true);
sdata_unlock(sdata);
@@ -5842,7 +5830,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"disconnect from AP %pM for new auth to %pM\n",
- ifmgd->associated->bssid, req->bss->bssid);
+ ifmgd->bssid, req->bss->bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
@@ -5918,7 +5906,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata,
"disconnect from AP %pM for new assoc to %pM\n",
- ifmgd->associated->bssid, req->bss->bssid);
+ ifmgd->bssid, req->bss->bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_UNSPECIFIED,
false, frame_buf);
@@ -6132,6 +6120,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
}
+ if (req->flags & ASSOC_REQ_DISABLE_EHT)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_EHT;
+
err = ieee80211_prep_connection(sdata, req->bss, true, override);
if (err)
goto err_clear;
@@ -6273,7 +6264,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
}
if (ifmgd->associated &&
- ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
+ ether_addr_equal(ifmgd->bssid, req->bssid)) {
sdata_info(sdata,
"deauthenticating from %pM by local choice (Reason: %u=%s)\n",
req->bssid, req->reason_code,
@@ -6304,7 +6295,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
* to cfg80211 while that's in a locked section already
* trying to tell us that the user wants to disconnect.
*/
- if (ifmgd->associated != req->bss)
+ if (ifmgd->assoc_bss != req->bss)
return -ENOLINK;
sdata_info(sdata,
@@ -6382,3 +6373,43 @@ void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp)
cfg80211_cqm_beacon_loss_notify(sdata->dev, gfp);
}
EXPORT_SYMBOL(ieee80211_cqm_beacon_loss_notify);
+
+static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata,
+ int rssi_min_thold,
+ int rssi_max_thold)
+{
+ trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold);
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
+ return;
+
+ /*
+ * Scale up threshold values before storing it, as the RSSI averaging
+ * algorithm uses a scaled up value as well. Change this scaling
+ * factor if the RSSI averaging algorithm changes.
+ */
+ sdata->u.mgd.rssi_min_thold = rssi_min_thold*16;
+ sdata->u.mgd.rssi_max_thold = rssi_max_thold*16;
+}
+
+void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
+ int rssi_min_thold,
+ int rssi_max_thold)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ WARN_ON(rssi_min_thold == rssi_max_thold ||
+ rssi_min_thold > rssi_max_thold);
+
+ _ieee80211_enable_rssi_reports(sdata, rssi_min_thold,
+ rssi_max_thold);
+}
+EXPORT_SYMBOL(ieee80211_enable_rssi_reports);
+
+void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ _ieee80211_enable_rssi_reports(sdata, 0, 0);
+}
+EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
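
[Editor's note] Most of the mlme.c hunks above replace the ifmgd->associated pointer with a boolean flag plus a separate assoc_bss pointer, and read the BSSID from the cached ifmgd->bssid instead of dereferencing the cfg80211_bss entry. A rough sketch of that "copy what you need at association time" pattern; the types and field names below are illustrative, not the mac80211 structures.

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

struct bss_entry {
	unsigned char bssid[ETH_ALEN];
};

struct managed_iface {
	bool associated;		/* replaces "associated != NULL" tests */
	struct bss_entry *assoc_bss;	/* only for paths that need the BSS */
	unsigned char bssid[ETH_ALEN];	/* cached copy for the common case */
};

static void set_associated(struct managed_iface *m, struct bss_entry *bss)
{
	m->associated = true;
	m->assoc_bss = bss;
	memcpy(m->bssid, bss->bssid, ETH_ALEN);
}

static void set_disassoc(struct managed_iface *m)
{
	m->associated = false;
	m->assoc_bss = NULL;
	/* m->bssid may keep a stale address; callers test m->associated */
}
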
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 853c9a369d72..c5d2ab9df1e7 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -819,7 +819,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!sdata->u.mgd.associated ||
(params->offchan && params->wait &&
local->ops->remain_on_channel &&
- memcmp(sdata->u.mgd.associated->bssid,
+ memcmp(sdata->u.mgd.bssid,
mgmt->bssid, ETH_ALEN)))
need_offchan = true;
sdata_unlock(sdata);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 7b1f5c045e06..5f27e6746762 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -333,6 +333,17 @@ minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
+/*
+ * Look up an MCS group index based on new cfg80211 rate_info.
+ */
+static int
+minstrel_ht_ri_get_group_idx(struct rate_info *rate)
+{
+ return GROUP_IDX((rate->mcs / 8) + 1,
+ !!(rate->flags & RATE_INFO_FLAGS_SHORT_GI),
+ !!(rate->bw & RATE_INFO_BW_40));
+}
+
static int
minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate)
{
@@ -342,6 +353,18 @@ minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate)
2*!!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH));
}
+/*
+ * Look up an MCS group index based on new cfg80211 rate_info.
+ */
+static int
+minstrel_vht_ri_get_group_idx(struct rate_info *rate)
+{
+ return VHT_GROUP_IDX(rate->nss,
+ !!(rate->flags & RATE_INFO_FLAGS_SHORT_GI),
+ !!(rate->bw & RATE_INFO_BW_40) +
+ 2*!!(rate->bw & RATE_INFO_BW_80));
+}
+
static struct minstrel_rate_stats *
minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_tx_rate *rate)
@@ -385,6 +408,50 @@ out:
return &mi->groups[group].rates[idx];
}
+/*
+ * Get the minstrel rate statistics for specified STA and rate info.
+ */
+static struct minstrel_rate_stats *
+minstrel_ht_ri_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+ struct ieee80211_rate_status *rate_status)
+{
+ int group, idx;
+ struct rate_info *rate = &rate_status->rate_idx;
+
+ if (rate->flags & RATE_INFO_FLAGS_MCS) {
+ group = minstrel_ht_ri_get_group_idx(rate);
+ idx = rate->mcs % 8;
+ goto out;
+ }
+
+ if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) {
+ group = minstrel_vht_ri_get_group_idx(rate);
+ idx = rate->mcs;
+ goto out;
+ }
+
+ group = MINSTREL_CCK_GROUP;
+ for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) {
+ if (rate->legacy != minstrel_cck_bitrates[ mp->cck_rates[idx] ])
+ continue;
+
+ /* short preamble */
+ if ((mi->supported[group] & BIT(idx + 4)) &&
+ mi->use_short_preamble)
+ idx += 4;
+ goto out;
+ }
+
+ group = MINSTREL_OFDM_GROUP;
+ for (idx = 0; idx < ARRAY_SIZE(mp->ofdm_rates[0]); idx++)
+ if (rate->legacy == minstrel_ofdm_bitrates[ mp->ofdm_rates[mi->band][idx] ])
+ goto out;
+
+ idx = 0;
+out:
+ return &mi->groups[group].rates[idx];
+}
+
static inline struct minstrel_rate_stats *
minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
{
@@ -1152,6 +1219,40 @@ minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
return false;
}
+/*
+ * Check whether rate_status contains valid information.
+ */
+static bool
+minstrel_ht_ri_txstat_valid(struct minstrel_priv *mp,
+ struct minstrel_ht_sta *mi,
+ struct ieee80211_rate_status *rate_status)
+{
+ int i;
+
+ if (!rate_status)
+ return false;
+ if (!rate_status->try_count)
+ return false;
+
+ if (rate_status->rate_idx.flags & RATE_INFO_FLAGS_MCS ||
+ rate_status->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(mp->cck_rates); i++) {
+ if (rate_status->rate_idx.legacy ==
+ minstrel_cck_bitrates[ mp->cck_rates[i] ])
+ return true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates); i++) {
+ if (rate_status->rate_idx.legacy ==
+ minstrel_ofdm_bitrates[ mp->ofdm_rates[mi->band][i] ])
+ return true;
+ }
+
+ return false;
+}
+
static void
minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
{
@@ -1217,16 +1318,34 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
mi->ampdu_packets++;
mi->ampdu_len += info->status.ampdu_len;
- last = !minstrel_ht_txstat_valid(mp, mi, &ar[0]);
- for (i = 0; !last; i++) {
- last = (i == IEEE80211_TX_MAX_RATES - 1) ||
- !minstrel_ht_txstat_valid(mp, mi, &ar[i + 1]);
+ if (st->rates && st->n_rates) {
+ last = !minstrel_ht_ri_txstat_valid(mp, mi, &(st->rates[0]));
+ for (i = 0; !last; i++) {
+ last = (i == st->n_rates - 1) ||
+ !minstrel_ht_ri_txstat_valid(mp, mi,
+ &(st->rates[i + 1]));
+
+ rate = minstrel_ht_ri_get_stats(mp, mi,
+ &(st->rates[i]));
- rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
- if (last)
- rate->success += info->status.ampdu_ack_len;
+ if (last)
+ rate->success += info->status.ampdu_ack_len;
- rate->attempts += ar[i].count * info->status.ampdu_len;
+ rate->attempts += st->rates[i].try_count *
+ info->status.ampdu_len;
+ }
+ } else {
+ last = !minstrel_ht_txstat_valid(mp, mi, &ar[0]);
+ for (i = 0; !last; i++) {
+ last = (i == IEEE80211_TX_MAX_RATES - 1) ||
+ !minstrel_ht_txstat_valid(mp, mi, &ar[i + 1]);
+
+ rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
+ if (last)
+ rate->success += info->status.ampdu_ack_len;
+
+ rate->attempts += ar[i].count * info->status.ampdu_len;
+ }
}
if (mp->hw->max_rates > 1) {
@@ -1439,17 +1558,17 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
/* Start with max_tp_rate[0] */
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
- if (mp->hw->max_rates >= 3) {
- /* At least 3 tx rates supported, use max_tp_rate[1] next */
- minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]);
- }
+ /* Fill up remaining, keep one entry for max_probe_rate */
+ for (; i < (mp->hw->max_rates - 1); i++)
+ minstrel_ht_set_rate(mp, mi, rates, i, mi->max_tp_rate[i]);
- if (mp->hw->max_rates >= 2) {
+ if (i < mp->hw->max_rates)
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
- }
+
+ if (i < IEEE80211_TX_RATE_TABLE_SIZE)
+ rates->rate[i].idx = -1;
mi->sta->max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi);
- rates->rate[i].idx = -1;
rate_control_set_rates(mp->hw, mi->sta, rates);
}
@@ -1583,6 +1702,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
u16 ht_cap = sta->deflink.ht_cap.cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
const struct ieee80211_rate *ctl_rate;
+ struct sta_info *sta_info;
bool ldpc, erp;
int use_vht;
int n_supported = 0;
@@ -1701,6 +1821,10 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
n_supported++;
}
+ sta_info = container_of(sta, struct sta_info, sta);
+ mi->use_short_preamble = test_sta_flag(sta_info, WLAN_STA_SHORT_PREAMBLE) &&
+ sta_info->sdata->vif.bss_conf.use_short_preamble;
+
minstrel_ht_update_cck(mp, mi, sband, sta);
minstrel_ht_update_ofdm(mp, mi, sband, sta);
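
[Editor's note] The minstrel_ht hunks above add lookups that map a cfg80211 rate_info (MCS index, short-GI flag, bandwidth) onto minstrel's flat group/rate indexing. The sketch below only illustrates the idea of folding those dimensions into an index; the bit packing is simplified and is not the kernel's GROUP_IDX() macro.

#include <stdbool.h>
#include <stdio.h>

struct ri {
	int mcs;	/* 0..31 for HT */
	bool sgi;
	bool bw40;
};

static int ht_group_idx(const struct ri *r)
{
	int streams = r->mcs / 8 + 1;	/* HT MCS 0-7 = 1 stream, 8-15 = 2, ... */

	return ((streams - 1) << 2) | (r->sgi << 1) | r->bw40;
}

int main(void)
{
	struct ri r = { .mcs = 12, .sgi = true, .bw40 = true };

	/* MCS 12 => 2 streams; the rate index within the group is mcs % 8 = 4 */
	printf("group %d, index %d\n", ht_group_idx(&r), r.mcs % 8);
	return 0;
}
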
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 06e7126727ad..1766ff0c78d3 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -180,7 +180,7 @@ struct minstrel_ht_sta {
/* tx flags to add for frames for this sta */
u32 tx_flags;
-
+ bool use_short_preamble;
u8 band;
u8 sample_seq;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5e6b275afc9e..b698756887eb 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -281,6 +281,16 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
if (likely(!sdata1 && !sdata2))
return;
+ if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) {
+ /*
+ * we were passive scanning because of radar/no-IR, but
+ * the beacon/proberesp rx gives us an opportunity to upgrade
+ * to active scan
+ */
+ set_bit(SCAN_BEACON_DONE, &local->scanning);
+ ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+ }
+
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
struct cfg80211_scan_request *scan_req;
struct cfg80211_sched_scan_request *sched_scan_req;
@@ -787,6 +797,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
IEEE80211_CHAN_RADAR)) ||
!req->n_ssids) {
next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+ if (req->n_ssids)
+ set_bit(SCAN_BEACON_WAIT, &local->scanning);
} else {
ieee80211_scan_state_send_probe(local, &next_delay);
next_delay = IEEE80211_CHANNEL_TIME;
@@ -998,6 +1010,8 @@ set_channel:
!scan_req->n_ssids) {
*next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
+ if (scan_req->n_ssids)
+ set_bit(SCAN_BEACON_WAIT, &local->scanning);
return;
}
@@ -1090,6 +1104,8 @@ void ieee80211_scan_work(struct work_struct *work)
goto out;
}
+ clear_bit(SCAN_BEACON_WAIT, &local->scanning);
+
/*
* as long as no delay is required advance immediately
* without scheduling a new work
@@ -1100,6 +1116,10 @@ void ieee80211_scan_work(struct work_struct *work)
goto out_complete;
}
+ if (test_and_clear_bit(SCAN_BEACON_DONE, &local->scanning) &&
+ local->next_scan_state == SCAN_DECISION)
+ local->next_scan_state = SCAN_SEND_PROBE;
+
switch (local->next_scan_state) {
case SCAN_DECISION:
/* if no more bands/channels left, complete scan */
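
[Editor's note] The scan.c hunks above let a passive scan on a radar/no-IR channel upgrade itself: SCAN_BEACON_WAIT is set while waiting, and as soon as any beacon or probe response is heard, SCAN_BEACON_DONE steers the work function straight to sending a probe request. A compressed state-machine sketch follows; the flag names reuse the kernel's, everything else is illustrative.

#include <stdbool.h>

enum { SCAN_BEACON_WAIT, SCAN_BEACON_DONE, NR_SCAN_FLAGS };
enum next_state { SCAN_DECISION, SCAN_SEND_PROBE };

struct scan_ctx {
	bool flags[NR_SCAN_FLAGS];
	enum next_state next;
};

/* rx path: a beacon on the scanned channel allows going active */
static void scan_rx_beacon(struct scan_ctx *s)
{
	if (s->flags[SCAN_BEACON_WAIT]) {
		s->flags[SCAN_BEACON_WAIT] = false;
		s->flags[SCAN_BEACON_DONE] = true;
	}
}

/* scan work: honour the upgrade before deciding what to do next */
static void scan_work(struct scan_ctx *s)
{
	if (s->flags[SCAN_BEACON_DONE] && s->next == SCAN_DECISION) {
		s->flags[SCAN_BEACON_DONE] = false;
		s->next = SCAN_SEND_PROBE;
	}
}
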
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index c563fa718d84..e69272139437 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -247,15 +247,19 @@ static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info,
struct ieee80211_tx_status *status)
{
+ struct ieee80211_rate_status *status_rate = NULL;
int len = sizeof(struct ieee80211_radiotap_header);
+ if (status && status->n_rates)
+ status_rate = &status->rates[status->n_rates - 1];
+
/* IEEE80211_RADIOTAP_RATE rate */
- if (status && status->rate && !(status->rate->flags &
- (RATE_INFO_FLAGS_MCS |
- RATE_INFO_FLAGS_DMG |
- RATE_INFO_FLAGS_EDMG |
- RATE_INFO_FLAGS_VHT_MCS |
- RATE_INFO_FLAGS_HE_MCS)))
+ if (status_rate && !(status_rate->rate_idx.flags &
+ (RATE_INFO_FLAGS_MCS |
+ RATE_INFO_FLAGS_DMG |
+ RATE_INFO_FLAGS_EDMG |
+ RATE_INFO_FLAGS_VHT_MCS |
+ RATE_INFO_FLAGS_HE_MCS)))
len += 2;
else if (info->status.rates[0].idx >= 0 &&
!(info->status.rates[0].flags &
@@ -270,12 +274,12 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info,
/* IEEE80211_RADIOTAP_MCS
* IEEE80211_RADIOTAP_VHT */
- if (status && status->rate) {
- if (status->rate->flags & RATE_INFO_FLAGS_MCS)
+ if (status_rate) {
+ if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_MCS)
len += 3;
- else if (status->rate->flags & RATE_INFO_FLAGS_VHT_MCS)
+ else if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS)
len = ALIGN(len, 2) + 12;
- else if (status->rate->flags & RATE_INFO_FLAGS_HE_MCS)
+ else if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_HE_MCS)
len = ALIGN(len, 2) + 12;
} else if (info->status.rates[0].idx >= 0) {
if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
@@ -297,10 +301,14 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_radiotap_header *rthdr;
+ struct ieee80211_rate_status *status_rate = NULL;
unsigned char *pos;
u16 legacy_rate = 0;
u16 txflags;
+ if (status && status->n_rates)
+ status_rate = &status->rates[status->n_rates - 1];
+
rthdr = skb_push(skb, rtap_len);
memset(rthdr, 0, rtap_len);
@@ -318,13 +326,14 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_RATE */
- if (status && status->rate) {
- if (!(status->rate->flags & (RATE_INFO_FLAGS_MCS |
- RATE_INFO_FLAGS_DMG |
- RATE_INFO_FLAGS_EDMG |
- RATE_INFO_FLAGS_VHT_MCS |
- RATE_INFO_FLAGS_HE_MCS)))
- legacy_rate = status->rate->legacy;
+ if (status_rate) {
+ if (!(status_rate->rate_idx.flags &
+ (RATE_INFO_FLAGS_MCS |
+ RATE_INFO_FLAGS_DMG |
+ RATE_INFO_FLAGS_EDMG |
+ RATE_INFO_FLAGS_VHT_MCS |
+ RATE_INFO_FLAGS_HE_MCS)))
+ legacy_rate = status_rate->rate_idx.legacy;
} else if (info->status.rates[0].idx >= 0 &&
!(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
IEEE80211_TX_RC_VHT_MCS)))
@@ -357,20 +366,21 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
*pos = retry_count;
pos++;
- if (status && status->rate &&
- (status->rate->flags & RATE_INFO_FLAGS_MCS)) {
+ if (status_rate && (status_rate->rate_idx.flags & RATE_INFO_FLAGS_MCS))
+ {
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS));
pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
IEEE80211_RADIOTAP_MCS_HAVE_BW;
- if (status->rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+ if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_SHORT_GI)
pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
- if (status->rate->bw == RATE_INFO_BW_40)
+ if (status_rate->rate_idx.bw == RATE_INFO_BW_40)
pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
- pos[2] = status->rate->mcs;
+ pos[2] = status_rate->rate_idx.mcs;
pos += 3;
- } else if (status && status->rate &&
- (status->rate->flags & RATE_INFO_FLAGS_VHT_MCS)) {
+ } else if (status_rate && (status_rate->rate_idx.flags &
+ RATE_INFO_FLAGS_VHT_MCS))
+ {
u16 known = local->hw.radiotap_vht_details &
(IEEE80211_RADIOTAP_VHT_KNOWN_GI |
IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);
@@ -385,12 +395,12 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
pos += 2;
/* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
- if (status->rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+ if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
pos++;
/* u8 bandwidth */
- switch (status->rate->bw) {
+ switch (status_rate->rate_idx.bw) {
case RATE_INFO_BW_160:
*pos = 11;
break;
@@ -407,7 +417,8 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
pos++;
/* u8 mcs_nss[4] */
- *pos = (status->rate->mcs << 4) | status->rate->nss;
+ *pos = (status_rate->rate_idx.mcs << 4) |
+ status_rate->rate_idx.nss;
pos += 4;
/* u8 coding */
@@ -416,8 +427,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
pos++;
/* u16 partial_aid */
pos += 2;
- } else if (status && status->rate &&
- (status->rate->flags & RATE_INFO_FLAGS_HE_MCS)) {
+ } else if (status_rate && (status_rate->rate_idx.flags &
+ RATE_INFO_FLAGS_HE_MCS))
+ {
struct ieee80211_radiotap_he *he;
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE));
@@ -435,7 +447,7 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
#define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
- he->data6 |= HE_PREP(DATA6_NSTS, status->rate->nss);
+ he->data6 |= HE_PREP(DATA6_NSTS, status_rate->rate_idx.nss);
#define CHECK_GI(s) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
@@ -445,12 +457,12 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
CHECK_GI(1_6);
CHECK_GI(3_2);
- he->data3 |= HE_PREP(DATA3_DATA_MCS, status->rate->mcs);
- he->data3 |= HE_PREP(DATA3_DATA_DCM, status->rate->he_dcm);
+ he->data3 |= HE_PREP(DATA3_DATA_MCS, status_rate->rate_idx.mcs);
+ he->data3 |= HE_PREP(DATA3_DATA_DCM, status_rate->rate_idx.he_dcm);
- he->data5 |= HE_PREP(DATA5_GI, status->rate->he_gi);
+ he->data5 |= HE_PREP(DATA5_GI, status_rate->rate_idx.he_gi);
- switch (status->rate->bw) {
+ switch (status_rate->rate_idx.bw) {
case RATE_INFO_BW_20:
he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
@@ -481,16 +493,16 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
CHECK_RU_ALLOC(2x996);
he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
- status->rate->he_ru_alloc + 4);
+ status_rate->rate_idx.he_ru_alloc + 4);
break;
default:
- WARN_ONCE(1, "Invalid SU BW %d\n", status->rate->bw);
+ WARN_ONCE(1, "Invalid SU BW %d\n", status_rate->rate_idx.bw);
}
pos += sizeof(struct ieee80211_radiotap_he);
}
- if ((status && status->rate) || info->status.rates[0].idx < 0)
+ if (status_rate || info->status.rates[0].idx < 0)
return;
/* IEEE80211_RADIOTAP_MCS
@@ -1111,8 +1123,9 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
if (pubsta) {
sta = container_of(pubsta, struct sta_info, sta);
- if (status->rate)
- sta->deflink.tx_stats.last_rate_info = *status->rate;
+ if (status->n_rates)
+ sta->deflink.tx_stats.last_rate_info =
+ status->rates[status->n_rates - 1].rate_idx;
}
if (skb && (tx_time_est =
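
[Editor's note] The status.c hunks above switch from a single status->rate pointer to the rates[]/n_rates array and describe the radiotap header using the last entry, i.e. the rate of the final transmission attempt. A hedged sketch of that selection with simplified types:

#include <stddef.h>

struct rate_status {
	unsigned int flags;
	unsigned char try_count;
};

struct tx_status {
	struct rate_status *rates;
	unsigned int n_rates;
};

static const struct rate_status *final_rate(const struct tx_status *st)
{
	if (!st || !st->n_rates)
		return NULL;	/* caller falls back to the legacy info->status.rates[] */

	return &st->rates[st->n_rates - 1];
}
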
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 13253eb39d09..0e4efc08c762 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3150,8 +3150,6 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
/* if the kmemdup fails, continue w/o fast_tx */
- if (!fast_tx)
- goto out;
out:
/* we might have raced against another call to this function */
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 682a164f795a..1e26b5235add 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2854,46 +2854,6 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
return pos;
}
-static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata,
- int rssi_min_thold,
- int rssi_max_thold)
-{
- trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold);
-
- if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
- return;
-
- /*
- * Scale up threshold values before storing it, as the RSSI averaging
- * algorithm uses a scaled up value as well. Change this scaling
- * factor if the RSSI averaging algorithm changes.
- */
- sdata->u.mgd.rssi_min_thold = rssi_min_thold*16;
- sdata->u.mgd.rssi_max_thold = rssi_max_thold*16;
-}
-
-void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
- int rssi_min_thold,
- int rssi_max_thold)
-{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-
- WARN_ON(rssi_min_thold == rssi_max_thold ||
- rssi_min_thold > rssi_max_thold);
-
- _ieee80211_enable_rssi_reports(sdata, rssi_min_thold,
- rssi_max_thold);
-}
-EXPORT_SYMBOL(ieee80211_enable_rssi_reports);
-
-void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
-{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-
- _ieee80211_enable_rssi_reports(sdata, 0, 0);
-}
-EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
-
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 cap)
{
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 7ed0d268aff2..5fd8a3e8b5b4 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -311,19 +311,21 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
}
-
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
+/*
+ * Calculate AAD for CCMP/GCMP, returning qos_tid since we
+ * need that in CCMP also for b_0.
+ */
+static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 mask_fc;
int a4_included, mgmt;
u8 qos_tid;
- u16 len_a;
- unsigned int hdrlen;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u16 len_a = 22;
/*
* Mask FC: zero subtype b4 b5 b6 (if not mgmt)
- * Retry, PwrMgt, MoreData; set Protected
+ * Retry, PwrMgt, MoreData, Order (if Qos Data); set Protected
*/
mgmt = ieee80211_is_mgmt(hdr->frame_control);
mask_fc = hdr->frame_control;
@@ -333,30 +335,17 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
mask_fc &= ~cpu_to_le16(0x0070);
mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- len_a = hdrlen - 2;
a4_included = ieee80211_has_a4(hdr->frame_control);
+ if (a4_included)
+ len_a += 6;
- if (ieee80211_is_data_qos(hdr->frame_control))
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
qos_tid = ieee80211_get_tid(hdr);
- else
+ mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
+ len_a += 2;
+ } else {
qos_tid = 0;
-
- /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
- * mode authentication are not allowed to collide, yet both are derived
- * from this vector b_0. We only set L := 1 here to indicate that the
- * data size can be represented in (L+1) bytes. The CCM layer will take
- * care of storing the data length in the top (L+1) bytes and setting
- * and clearing the other bits as is required to derive the two IVs.
- */
- b_0[0] = 0x1;
-
- /* Nonce: Nonce Flags | A2 | PN
- * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7)
- */
- b_0[1] = qos_tid | (mgmt << 4);
- memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
- memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN);
+ }
/* AAD (extra authenticate-only data) / masked 802.11 header
* FC | A1 | A2 | A3 | SC | [A4] | [QC] */
@@ -376,8 +365,31 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
aad[24] = qos_tid;
}
+
+ return qos_tid;
}
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u8 qos_tid = ccmp_gcmp_aad(skb, aad);
+
+ /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
+ * mode authentication are not allowed to collide, yet both are derived
+ * from this vector b_0. We only set L := 1 here to indicate that the
+ * data size can be represented in (L+1) bytes. The CCM layer will take
+ * care of storing the data length in the top (L+1) bytes and setting
+ * and clearing the other bits as is required to derive the two IVs.
+ */
+ b_0[0] = 0x1;
+
+ /* Nonce: Nonce Flags | A2 | PN
+ * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7)
+ */
+ b_0[1] = qos_tid | (ieee80211_is_mgmt(hdr->frame_control) << 4);
+ memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
+ memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN);
+}
static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id)
{
@@ -571,9 +583,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
{
- __le16 mask_fc;
- u8 qos_tid;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
memcpy(j_0, hdr->addr2, ETH_ALEN);
memcpy(&j_0[ETH_ALEN], pn, IEEE80211_GCMP_PN_LEN);
@@ -581,40 +591,7 @@ static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
j_0[14] = 0;
j_0[AES_BLOCK_SIZE - 1] = 0x01;
- /* AAD (extra authenticate-only data) / masked 802.11 header
- * FC | A1 | A2 | A3 | SC | [A4] | [QC]
- */
- put_unaligned_be16(ieee80211_hdrlen(hdr->frame_control) - 2, &aad[0]);
- /* Mask FC: zero subtype b4 b5 b6 (if not mgmt)
- * Retry, PwrMgt, MoreData; set Protected
- */
- mask_fc = hdr->frame_control;
- mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
- IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
- if (!ieee80211_is_mgmt(hdr->frame_control))
- mask_fc &= ~cpu_to_le16(0x0070);
- mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-
- put_unaligned(mask_fc, (__le16 *)&aad[2]);
- memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);
-
- /* Mask Seq#, leave Frag# */
- aad[22] = *((u8 *)&hdr->seq_ctrl) & 0x0f;
- aad[23] = 0;
-
- if (ieee80211_is_data_qos(hdr->frame_control))
- qos_tid = ieee80211_get_tid(hdr);
- else
- qos_tid = 0;
-
- if (ieee80211_has_a4(hdr->frame_control)) {
- memcpy(&aad[24], hdr->addr4, ETH_ALEN);
- aad[30] = qos_tid;
- aad[31] = 0;
- } else {
- memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
- aad[24] = qos_tid;
- }
+ ccmp_gcmp_aad(skb, aad);
}
static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
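
[Editor's note] The wpa.c hunks above share the AAD construction between CCMP and GCMP via ccmp_gcmp_aad(); CCMP additionally still builds the CCM b_0 block (flags byte, nonce flags, transmitter address, packet number). The standalone helper below only illustrates that b_0 layout as described in the patch's comments; it is not the kernel function and the parameter names are made up.

#include <string.h>
#include <stdint.h>

#define ETH_ALEN	6
#define CCMP_PN_LEN	6

static void build_ccm_b0(uint8_t b_0[16], uint8_t qos_tid, int is_mgmt,
			 const uint8_t a2[ETH_ALEN],
			 const uint8_t pn[CCMP_PN_LEN])
{
	memset(b_0, 0, 16);
	b_0[0] = 0x1;				/* L = 1: length fits in 2 bytes */
	b_0[1] = qos_tid | (is_mgmt << 4);	/* nonce flags: priority | mgmt */
	memcpy(&b_0[2], a2, ETH_ALEN);		/* transmitter address */
	memcpy(&b_0[8], pn, CCMP_PN_LEN);	/* packet number */
	/* bytes 14..15 carry the data length, filled in by the CCM layer */
}
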
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index ac3b7b8a02f6..be3b918a6d15 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -107,7 +107,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
ptr += 2;
}
if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) {
- mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
+ mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
ptr += 2;
}
@@ -221,7 +221,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) {
mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
- mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
+ mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
ptr += 2;
}
@@ -1282,7 +1282,7 @@ raise_win:
}
}
-u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
+__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
{
struct csum_pseudo_header header;
__wsum csum;
@@ -1298,15 +1298,25 @@ u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
header.csum = 0;
csum = csum_partial(&header, sizeof(header), sum);
- return (__force u16)csum_fold(csum);
+ return csum_fold(csum);
}
-static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
+static __sum16 mptcp_make_csum(const struct mptcp_ext *mpext)
{
return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
~csum_unfold(mpext->csum));
}
+static void put_len_csum(u16 len, __sum16 csum, void *data)
+{
+ __sum16 *sumptr = data + 2;
+ __be16 *ptr = data;
+
+ put_unaligned_be16(len, ptr);
+
+ put_unaligned(csum, sumptr);
+}
+
void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
struct mptcp_out_options *opts)
{
@@ -1385,9 +1395,9 @@ void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
/* data_len == 0 is reserved for the infinite mapping,
* the checksum will also be set to 0.
*/
- put_unaligned_be32(mpext->data_len << 16 |
- (mpext->data_len ? mptcp_make_csum(mpext) : 0),
- ptr);
+ put_len_csum(mpext->data_len,
+ (mpext->data_len ? mptcp_make_csum(mpext) : 0),
+ ptr);
} else {
put_unaligned_be32(mpext->data_len << 16 |
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
@@ -1438,11 +1448,12 @@ void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
goto mp_capable_done;
if (opts->csum_reqd) {
- put_unaligned_be32(opts->data_len << 16 |
- __mptcp_make_csum(opts->data_seq,
- opts->subflow_seq,
- opts->data_len,
- ~csum_unfold(opts->csum)), ptr);
+ put_len_csum(opts->data_len,
+ __mptcp_make_csum(opts->data_seq,
+ opts->subflow_seq,
+ opts->data_len,
+ ~csum_unfold(opts->csum)),
+ ptr);
} else {
put_unaligned_be32(opts->data_len << 16 |
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
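
[Editor's note] The options.c hunks above stop packing the DSS checksum into a put_unaligned_be32(): a folded checksum (__sum16) is already an on-wire 16-bit value, so only the length field may be byte-swapped, while the checksum bytes must be copied untouched. A minimal standalone illustration of put_len_csum()'s layout, using plain integer types instead of the kernel's __be16/__sum16:

#include <stdint.h>
#include <string.h>

static void put_len_csum(uint16_t len, uint16_t csum_net_order, void *data)
{
	uint8_t *p = data;

	/* length: host order -> big endian */
	p[0] = len >> 8;
	p[1] = len & 0xff;
	/* checksum: already in network byte order, copy as-is */
	memcpy(p + 2, &csum_net_order, sizeof(csum_net_order));
}
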
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index cdc2d79071f8..59a85220edc9 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -181,15 +181,14 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
struct mptcp_pm_data *pm = &msk->pm;
bool update_subflows;
- update_subflows = (ssk->sk_state == TCP_CLOSE) &&
- (subflow->request_join || subflow->mp_join) &&
+ update_subflows = (subflow->request_join || subflow->mp_join) &&
mptcp_pm_is_kernel(msk);
if (!READ_ONCE(pm->work_pending) && !update_subflows)
return;
spin_lock_bh(&pm->lock);
if (update_subflows)
- pm->subflows--;
+ __mptcp_pm_close_subflow(msk);
/* Even if this subflow is not really established, tell the PM to try
* to pick the next ones, if possible.
@@ -304,7 +303,7 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
pr_debug("fail_seq=%llu", fail_seq);
- if (mptcp_has_another_subflow(sk) || !READ_ONCE(msk->allow_infinite_fallback))
+ if (!READ_ONCE(msk->allow_infinite_fallback))
return;
if (!READ_ONCE(subflow->mp_fail_response_expect)) {
@@ -313,13 +312,10 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
subflow->send_mp_fail = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
subflow->send_infinite_map = 1;
- } else if (s && inet_sk_state_load(s) != TCP_CLOSE) {
+ } else if (!sock_flag(sk, SOCK_DEAD)) {
pr_debug("MP_FAIL response received");
- mptcp_data_lock(s);
- if (inet_sk_state_load(s) != TCP_CLOSE)
- sk_stop_timer(s, &s->sk_timer);
- mptcp_data_unlock(s);
+ sk_stop_timer(s, &s->sk_timer);
}
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 9e46cc89a8f7..17e13396024a 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1613,10 +1613,8 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
out:
/* ensure the rtx timer is running */
- mptcp_data_lock(sk);
if (!mptcp_timer_pending(sk))
mptcp_reset_timer(sk);
- mptcp_data_unlock(sk);
if (copied)
__mptcp_check_send_data_fin(sk);
}
@@ -2192,23 +2190,10 @@ mp_fail_response_expect_subflow(struct mptcp_sock *msk)
return ret;
}
-static void mptcp_check_mp_fail_response(struct mptcp_sock *msk)
-{
- struct mptcp_subflow_context *subflow;
- struct sock *sk = (struct sock *)msk;
-
- bh_lock_sock(sk);
- subflow = mp_fail_response_expect_subflow(msk);
- if (subflow)
- __set_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags);
- bh_unlock_sock(sk);
-}
-
static void mptcp_timeout_timer(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
- mptcp_check_mp_fail_response(mptcp_sk(sk));
mptcp_schedule_work(sk);
sock_put(sk);
}
@@ -2529,10 +2514,8 @@ static void __mptcp_retrans(struct sock *sk)
reset_timer:
mptcp_check_and_set_pending(sk);
- mptcp_data_lock(sk);
if (!mptcp_timer_pending(sk))
mptcp_reset_timer(sk);
- mptcp_data_unlock(sk);
}
static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
@@ -2592,8 +2575,7 @@ static void mptcp_worker(struct work_struct *work)
if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
__mptcp_retrans(sk);
- if (test_and_clear_bit(MPTCP_FAIL_NO_RESPONSE, &msk->flags))
- mptcp_mp_fail_no_response(msk);
+ mptcp_mp_fail_no_response(msk);
unlock:
release_sock(sk);
@@ -2711,10 +2693,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
} else {
pr_debug("Sending DATA_FIN on subflow %p", ssk);
tcp_send_ack(ssk);
- mptcp_data_lock(sk);
if (!mptcp_timer_pending(sk))
mptcp_reset_timer(sk);
- mptcp_data_unlock(sk);
}
break;
}
@@ -2815,10 +2795,8 @@ static void __mptcp_destroy_sock(struct sock *sk)
/* join list will be eventually flushed (with rst) at sock lock release time*/
list_splice_init(&msk->conn_list, &conn_list);
- mptcp_data_lock(sk);
mptcp_stop_timer(sk);
sk_stop_timer(sk, &sk->sk_timer);
- mptcp_data_unlock(sk);
msk->pm.status = 0;
/* clears msk->subflow, allowing the following loop to close
@@ -2880,9 +2858,7 @@ cleanup:
__mptcp_destroy_sock(sk);
do_cancel_work = true;
} else {
- mptcp_data_lock(sk);
sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN);
- mptcp_data_unlock(sk);
}
release_sock(sk);
if (do_cancel_work)
@@ -2927,10 +2903,8 @@ static int mptcp_disconnect(struct sock *sk, int flags)
__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE);
}
- mptcp_data_lock(sk);
mptcp_stop_timer(sk);
sk_stop_timer(sk, &sk->sk_timer);
- mptcp_data_unlock(sk);
if (mptcp_sk(sk)->token)
mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 4672901d0dfe..200f89f6d62f 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -117,7 +117,6 @@
#define MPTCP_WORK_EOF 3
#define MPTCP_FALLBACK_DONE 4
#define MPTCP_WORK_CLOSE_SUBFLOW 5
-#define MPTCP_FAIL_NO_RESPONSE 6
/* MPTCP socket release cb flags */
#define MPTCP_PUSH_PENDING 1
@@ -466,7 +465,8 @@ struct mptcp_subflow_context {
can_ack : 1, /* only after processing the remote a key */
disposable : 1, /* ctx can be free at ulp release time */
stale : 1, /* unable to snd/rcv data, do not use for xmit */
- local_id_valid : 1; /* local_id is correctly initialized */
+ local_id_valid : 1, /* local_id is correctly initialized */
+ valid_csum_seen : 1; /* at least one csum validated */
enum mptcp_data_avail data_avail;
bool mp_fail_response_expect;
u32 remote_nonce;
@@ -649,19 +649,6 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
}
-static inline bool mptcp_has_another_subflow(struct sock *ssk)
-{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk), *tmp;
- struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-
- mptcp_for_each_subflow(msk, tmp) {
- if (tmp != subflow)
- return true;
- }
-
- return false;
-}
-
void __init mptcp_proto_init(void);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int __init mptcp_proto_v6_init(void);
@@ -751,7 +738,7 @@ void mptcp_token_destroy(struct mptcp_sock *msk);
void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);
void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
-u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
+__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
void __init mptcp_pm_init(void);
void mptcp_pm_data_init(struct mptcp_sock *msk);
@@ -893,6 +880,20 @@ unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk);
+/* called under PM lock */
+static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
+{
+ if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
+ WRITE_ONCE(msk->pm.accept_subflow, true);
+}
+
+static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
+{
+ spin_lock_bh(&msk->pm.lock);
+ __mptcp_pm_close_subflow(msk);
+ spin_unlock_bh(&msk->pm.lock);
+}
+
void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
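
[Editor's note] The protocol.h hunks above centralise subflow accounting in __mptcp_pm_close_subflow(): when the count drops back below the configured limit, the path manager may accept new join subflows again. A hedged sketch of that bookkeeping with the locking and real mptcp structures elided:

#include <stdbool.h>

struct pm_data {
	unsigned int subflows;
	unsigned int subflows_max;
	bool accept_subflow;
};

/* called with the PM lock held in the real code */
static void pm_close_subflow(struct pm_data *pm)
{
	if (--pm->subflows < pm->subflows_max)
		pm->accept_subflow = true;
}
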
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 826b0c1dae98..423d3826ca1e 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -756,6 +756,18 @@ static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname,
return -EOPNOTSUPP;
}
+static int mptcp_setsockopt_sol_tcp_defer(struct mptcp_sock *msk, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct socket *listener;
+
+ listener = __mptcp_nmpc_socket(msk);
+ if (!listener)
+ return 0; /* TCP_DEFER_ACCEPT does not fail */
+
+ return tcp_setsockopt(listener->sk, SOL_TCP, TCP_DEFER_ACCEPT, optval, optlen);
+}
+
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -782,6 +794,8 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen);
case TCP_NODELAY:
return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
+ case TCP_DEFER_ACCEPT:
+ return mptcp_setsockopt_sol_tcp_defer(msk, optval, optlen);
}
return -EOPNOTSUPP;
@@ -1142,6 +1156,7 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
case TCP_CONGESTION:
case TCP_INFO:
case TCP_CC_INFO:
+ case TCP_DEFER_ACCEPT:
return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname,
optval, optlen);
case TCP_INQ:
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6d59336a8e1e..8841e8cd9ad8 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -891,7 +891,7 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
u32 offset, seq, delta;
- u16 csum;
+ __sum16 csum;
int len;
if (!csum_reqd)
@@ -958,11 +958,14 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *
subflow->map_data_csum);
if (unlikely(csum)) {
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
- subflow->send_mp_fail = 1;
- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
+ if (subflow->mp_join || subflow->valid_csum_seen) {
+ subflow->send_mp_fail = 1;
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
+ }
return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
}
+ subflow->valid_csum_seen = 1;
return MAPPING_OK;
}
@@ -1013,12 +1016,9 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
pr_debug("infinite mapping received");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
subflow->map_data_len = 0;
- if (sk && inet_sk_state_load(sk) != TCP_CLOSE) {
- mptcp_data_lock(sk);
- if (inet_sk_state_load(sk) != TCP_CLOSE)
- sk_stop_timer(sk, &sk->sk_timer);
- mptcp_data_unlock(sk);
- }
+ if (!sock_flag(ssk, SOCK_DEAD))
+ sk_stop_timer(sk, &sk->sk_timer);
+
return MAPPING_INVALID;
}
@@ -1153,6 +1153,18 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
}
}
+static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+{
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
+ if (subflow->mp_join)
+ return false;
+ else if (READ_ONCE(msk->csum_enabled))
+ return !subflow->valid_csum_seen;
+ else
+ return !subflow->fully_established;
+}
+
static bool subflow_check_data_avail(struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -1218,8 +1230,7 @@ fallback:
if (!__mptcp_check_fallback(msk)) {
/* RFC 8684 section 3.7. */
if (subflow->send_mp_fail) {
- if (mptcp_has_another_subflow(ssk) ||
- !READ_ONCE(msk->allow_infinite_fallback)) {
+ if (!READ_ONCE(msk->allow_infinite_fallback)) {
ssk->sk_err = EBADMSG;
tcp_set_state(ssk, TCP_CLOSE);
subflow->reset_transient = 0;
@@ -1227,9 +1238,8 @@ fallback:
tcp_send_active_reset(ssk, GFP_ATOMIC);
while ((skb = skb_peek(&ssk->sk_receive_queue)))
sk_eat_skb(ssk, skb);
- } else {
+ } else if (!sock_flag(ssk, SOCK_DEAD)) {
WRITE_ONCE(subflow->mp_fail_response_expect, true);
- /* The data lock is acquired in __mptcp_move_skbs() */
sk_reset_timer((struct sock *)msk,
&((struct sock *)msk)->sk_timer,
jiffies + TCP_RTO_MAX);
@@ -1238,7 +1248,7 @@ fallback:
return true;
}
- if ((subflow->mp_join || subflow->fully_established) && subflow->map_data_len) {
+ if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
/* fatal protocol error, close the socket.
* subflow_error_report() will introduce the appropriate barriers
*/
@@ -1444,20 +1454,20 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
struct sockaddr_storage addr;
int remote_id = remote->id;
int local_id = loc->id;
+ int err = -ENOTCONN;
struct socket *sf;
struct sock *ssk;
u32 remote_token;
int addrlen;
int ifindex;
u8 flags;
- int err;
if (!mptcp_is_fully_established(sk))
- return -ENOTCONN;
+ goto err_out;
err = mptcp_subflow_create_socket(sk, &sf);
if (err)
- return err;
+ goto err_out;
ssk = sf->sk;
subflow = mptcp_subflow_ctx(ssk);
@@ -1515,6 +1525,12 @@ failed_unlink:
failed:
subflow->disposable = 1;
sock_release(sf);
+
+err_out:
+ /* we account subflows before the creation, and this failures will not
+ * be caught by sk_state_change()
+ */
+ mptcp_pm_close_subflow(msk);
return err;
}
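
[Editor's note] The subflow.c hunks above gate infinite-mapping fallback on the new valid_csum_seen bit: a join subflow never falls back; with DSS checksums enabled, fallback stays possible only while no checksum has ever validated; otherwise it is tied to the subflow not yet being fully established. A small decision sketch mirroring subflow_can_fallback(), with simplified field types:

#include <stdbool.h>

struct sf {
	bool mp_join;
	bool csum_enabled;
	bool valid_csum_seen;
	bool fully_established;
};

static bool subflow_can_fallback(const struct sf *s)
{
	if (s->mp_join)
		return false;
	if (s->csum_enabled)
		return !s->valid_csum_seen;
	return !s->fully_established;
}
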
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 82f36beb2e76..5d8ed6c90b7e 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -132,6 +132,9 @@ static int __nf_conncount_add(struct net *net,
struct nf_conn *found_ct;
unsigned int collect = 0;
+ if (time_is_after_eq_jiffies((unsigned long)list->last_gc))
+ goto add_new_node;
+
/* check the saved connections */
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
if (collect > CONNCOUNT_GC_MAX_NODES)
@@ -177,6 +180,7 @@ static int __nf_conncount_add(struct net *net,
nf_ct_put(found_ct);
}
+add_new_node:
if (WARN_ON_ONCE(list->count > INT_MAX))
return -EOVERFLOW;
@@ -190,6 +194,7 @@ static int __nf_conncount_add(struct net *net,
conn->jiffies32 = (u32)jiffies;
list_add_tail(&conn->node, &list->head);
list->count++;
+ list->last_gc = (u32)jiffies;
return 0;
}
@@ -214,6 +219,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
spin_lock_init(&list->list_lock);
INIT_LIST_HEAD(&list->head);
list->count = 0;
+ list->last_gc = (u32)jiffies;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -227,6 +233,10 @@ bool nf_conncount_gc_list(struct net *net,
unsigned int collected = 0;
bool ret = false;
+ /* don't bother if we just did GC */
+ if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc)))
+ return false;
+
/* don't bother if other cpu is already doing GC */
if (!spin_trylock(&list->list_lock))
return false;
@@ -258,6 +268,7 @@ bool nf_conncount_gc_list(struct net *net,
if (!list->count)
ret = true;
+ list->last_gc = (u32)jiffies;
spin_unlock(&list->list_lock);
return ret;
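
[Editor's note] The nf_conncount hunks above rate-limit list walks with a last_gc timestamp: if the stored jiffies value is still "now or newer", both the add path and nf_conncount_gc_list() skip the expensive scan. The sketch below uses a plain 32-bit counter in place of jiffies and time_is_after_eq_jiffies(); the wraparound-safe comparison is the same idea.

#include <stdbool.h>
#include <stdint.h>

struct count_list {
	uint32_t last_gc;	/* time of the last GC pass */
};

static bool gc_recently_done(const struct count_list *list, uint32_t now)
{
	/* signed difference handles wraparound, like the jiffies macros */
	return (int32_t)(list->last_gc - now) >= 0;
}

static void gc_list(struct count_list *list, uint32_t now)
{
	if (gc_recently_done(list, now))
		return;		/* don't bother, we just did GC */

	/* ... walk and expire stale entries ... */
	list->last_gc = now;
}
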
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0164e5f522e8..082a2fd8d85b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -525,50 +525,6 @@ clean_from_lists(struct nf_conn *ct)
nf_ct_remove_expectations(ct);
}
-/* must be called with local_bh_disable */
-static void nf_ct_add_to_dying_list(struct nf_conn *ct)
-{
- struct ct_pcpu *pcpu;
-
- /* add this conntrack to the (per cpu) dying list */
- ct->cpu = smp_processor_id();
- pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
- spin_lock(&pcpu->lock);
- hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
- &pcpu->dying);
- spin_unlock(&pcpu->lock);
-}
-
-/* must be called with local_bh_disable */
-static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
-{
- struct ct_pcpu *pcpu;
-
- /* add this conntrack to the (per cpu) unconfirmed list */
- ct->cpu = smp_processor_id();
- pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
- spin_lock(&pcpu->lock);
- hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
- &pcpu->unconfirmed);
- spin_unlock(&pcpu->lock);
-}
-
-/* must be called with local_bh_disable */
-static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
-{
- struct ct_pcpu *pcpu;
-
- /* We overload first tuple to link into unconfirmed or dying list.*/
- pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
- spin_lock(&pcpu->lock);
- BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
- hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
- spin_unlock(&pcpu->lock);
-}
-
#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
/* Released via nf_ct_destroy() */
@@ -640,7 +596,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
destroy_gre_conntrack(ct);
- local_bh_disable();
/* Expectations will have been removed in clean_from_lists,
* except TFTP can create an expectation on the first packet,
* before connection is in the list, so we need to clean here,
@@ -648,10 +603,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
*/
nf_ct_remove_expectations(ct);
- nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
- local_bh_enable();
-
if (ct->master)
nf_ct_put(ct->master);
@@ -660,15 +611,12 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
}
EXPORT_SYMBOL(nf_ct_destroy);
-static void nf_ct_delete_from_lists(struct nf_conn *ct)
+static void __nf_ct_delete_from_lists(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
unsigned int sequence;
- nf_ct_helper_destroy(ct);
-
- local_bh_disable();
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
@@ -681,12 +629,30 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
clean_from_lists(ct);
nf_conntrack_double_unlock(hash, reply_hash);
+}
+
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
+{
+ nf_ct_helper_destroy(ct);
+ local_bh_disable();
- nf_ct_add_to_dying_list(ct);
+ __nf_ct_delete_from_lists(ct);
local_bh_enable();
}
+static void nf_ct_add_to_ecache_list(struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ struct nf_conntrack_net *cnet = nf_ct_pernet(nf_ct_net(ct));
+
+ spin_lock(&cnet->ecache.dying_lock);
+ hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+ &cnet->ecache.dying_list);
+ spin_unlock(&cnet->ecache.dying_lock);
+#endif
+}
+
bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
struct nf_conn_tstamp *tstamp;
@@ -709,7 +675,12 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
/* destroy event was not delivered. nf_ct_put will
* be done by event cache worker on redelivery.
*/
- nf_ct_delete_from_lists(ct);
+ nf_ct_helper_destroy(ct);
+ local_bh_disable();
+ __nf_ct_delete_from_lists(ct);
+ nf_ct_add_to_ecache_list(ct);
+ local_bh_enable();
+
nf_conntrack_ecache_work(nf_ct_net(ct), NFCT_ECACHE_DESTROY_FAIL);
return false;
}
@@ -870,6 +841,33 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
&nf_conntrack_hash[reply_hash]);
}
+static bool nf_ct_ext_valid_pre(const struct nf_ct_ext *ext)
+{
+ /* if ext->gen_id is not equal to nf_conntrack_ext_genid, some extensions
+ * may contain stale pointers to e.g. a helper that has been removed.
+ *
+ * The helper can't clear this because the nf_conn object isn't in
+ * any hash and synchronize_rcu() isn't enough because associated skb
+ * might sit in a queue.
+ */
+ return !ext || ext->gen_id == atomic_read(&nf_conntrack_ext_genid);
+}
+
+static bool nf_ct_ext_valid_post(struct nf_ct_ext *ext)
+{
+ if (!ext)
+ return true;
+
+ if (ext->gen_id != atomic_read(&nf_conntrack_ext_genid))
+ return false;
+
+ /* inserted into conntrack table, nf_ct_iterate_cleanup()
+ * will find it. Disable nf_ct_ext_find() id check.
+ */
+ WRITE_ONCE(ext->gen_id, 0);
+ return true;
+}
+
int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
@@ -885,6 +883,11 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
zone = nf_ct_zone(ct);
+ if (!nf_ct_ext_valid_pre(ct->ext)) {
+ NF_CT_STAT_INC(net, insert_failed);
+ return -ETIMEDOUT;
+ }
+
local_bh_disable();
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
@@ -925,6 +928,13 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
nf_conntrack_double_unlock(hash, reply_hash);
NF_CT_STAT_INC(net, insert);
local_bh_enable();
+
+ if (!nf_ct_ext_valid_post(ct->ext)) {
+ nf_ct_kill(ct);
+ NF_CT_STAT_INC(net, drop);
+ return -ETIMEDOUT;
+ }
+
return 0;
chaintoolong:
NF_CT_STAT_INC(net, chaintoolong);
@@ -972,7 +982,6 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
struct nf_conn_tstamp *tstamp;
refcount_inc(&ct->ct_general.use);
- ct->status |= IPS_CONFIRMED;
/* set conntrack timestamp, if enabled. */
tstamp = nf_conn_tstamp_find(ct);
@@ -1001,7 +1010,6 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
nf_conntrack_get(&ct->ct_general);
nf_ct_acct_merge(ct, ctinfo, loser_ct);
- nf_ct_add_to_dying_list(loser_ct);
nf_ct_put(loser_ct);
nf_ct_set(skb, ct, ctinfo);
@@ -1134,7 +1142,6 @@ nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
return ret;
drop:
- nf_ct_add_to_dying_list(loser_ct);
NF_CT_STAT_INC(net, drop);
NF_CT_STAT_INC(net, insert_failed);
return NF_DROP;
@@ -1195,16 +1202,20 @@ __nf_conntrack_confirm(struct sk_buff *skb)
return NF_DROP;
}
+ if (!nf_ct_ext_valid_pre(ct->ext)) {
+ NF_CT_STAT_INC(net, insert_failed);
+ goto dying;
+ }
+
pr_debug("Confirming conntrack %p\n", ct);
/* We have to check the DYING flag after unlink to prevent
* a race against nf_ct_get_next_corpse() possibly called from
* user context, else we insert an already 'dead' hash, blocking
* further use of that particular connection -JM.
*/
- nf_ct_del_from_dying_or_unconfirmed_list(ct);
+ ct->status |= IPS_CONFIRMED;
if (unlikely(nf_ct_is_dying(ct))) {
- nf_ct_add_to_dying_list(ct);
NF_CT_STAT_INC(net, insert_failed);
goto dying;
}
@@ -1228,7 +1239,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
goto out;
if (chainlen++ > max_chainlen) {
chaintoolong:
- nf_ct_add_to_dying_list(ct);
NF_CT_STAT_INC(net, chaintoolong);
NF_CT_STAT_INC(net, insert_failed);
ret = NF_DROP;
@@ -1252,6 +1262,16 @@ chaintoolong:
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
+ /* ext area is still valid (rcu read lock is held), but it will
+ * go out of scope soon, so we need to remove this conntrack
+ * again.
+ */
+ if (!nf_ct_ext_valid_post(ct->ext)) {
+ nf_ct_kill(ct);
+ NF_CT_STAT_INC(net, drop);
+ return NF_DROP;
+ }
+
help = nfct_help(ct);
if (help && help->helper)
nf_conntrack_event_cache(IPCT_HELPER, ct);
@@ -1678,7 +1698,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conn *ct;
struct nf_conn_help *help;
struct nf_conntrack_tuple repl_tuple;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_ecache *ecache;
+#endif
struct nf_conntrack_expect *exp = NULL;
const struct nf_conntrack_zone *zone;
struct nf_conn_timeout *timeout_ext;
@@ -1711,15 +1733,21 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
- nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
- ecache ? ecache->expmask : 0,
- GFP_ATOMIC);
- local_bh_disable();
+ if ((ecache || net->ct.sysctl_events) &&
+ !nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
+ ecache ? ecache->expmask : 0,
+ GFP_ATOMIC)) {
+ nf_conntrack_free(ct);
+ return ERR_PTR(-ENOMEM);
+ }
+#endif
+
cnet = nf_ct_pernet(net);
if (cnet->expect_count) {
- spin_lock(&nf_conntrack_expect_lock);
+ spin_lock_bh(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple);
if (exp) {
pr_debug("expectation arrives ct=%p exp=%p\n",
@@ -1742,16 +1770,13 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
#endif
NF_CT_STAT_INC(net, expect_new);
}
- spin_unlock(&nf_conntrack_expect_lock);
+ spin_unlock_bh(&nf_conntrack_expect_lock);
}
if (!exp)
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
- /* Now it is inserted into the unconfirmed list, set refcount to 1. */
+ /* Now it is going to be associated with an sk_buff, set refcount to 1. */
refcount_set(&ct->ct_general.use, 1);
- nf_ct_add_to_unconfirmed_list(ct);
-
- local_bh_enable();
if (exp) {
if (exp->expectfn)
@@ -2319,7 +2344,7 @@ static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
- void *data, unsigned int *bucket)
+ const struct nf_ct_iter_data *iter_data, unsigned int *bucket)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
@@ -2350,7 +2375,12 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
* tuple while iterating.
*/
ct = nf_ct_tuplehash_to_ctrack(h);
- if (iter(ct, data))
+
+ if (iter_data->net &&
+ !net_eq(iter_data->net, nf_ct_net(ct)))
+ continue;
+
+ if (iter(ct, iter_data->data))
goto found;
}
spin_unlock(lockp);
@@ -2367,7 +2397,7 @@ found:
}
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
- void *data, u32 portid, int report)
+ const struct nf_ct_iter_data *iter_data)
{
unsigned int bucket = 0;
struct nf_conn *ct;
@@ -2375,91 +2405,28 @@ static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
might_sleep();
mutex_lock(&nf_conntrack_mutex);
- while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+ while ((ct = get_next_corpse(iter, iter_data, &bucket)) != NULL) {
/* Time to push up daises... */
- nf_ct_delete(ct, portid, report);
+ nf_ct_delete(ct, iter_data->portid, iter_data->report);
nf_ct_put(ct);
cond_resched();
}
mutex_unlock(&nf_conntrack_mutex);
}
-struct iter_data {
- int (*iter)(struct nf_conn *i, void *data);
- void *data;
- struct net *net;
-};
-
-static int iter_net_only(struct nf_conn *i, void *data)
-{
- struct iter_data *d = data;
-
- if (!net_eq(d->net, nf_ct_net(i)))
- return 0;
-
- return d->iter(i, d->data);
-}
-
-static void
-__nf_ct_unconfirmed_destroy(struct net *net)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct nf_conntrack_tuple_hash *h;
- struct hlist_nulls_node *n;
- struct ct_pcpu *pcpu;
-
- pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
- spin_lock_bh(&pcpu->lock);
- hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
- struct nf_conn *ct;
-
- ct = nf_ct_tuplehash_to_ctrack(h);
-
- /* we cannot call iter() on unconfirmed list, the
- * owning cpu can reallocate ct->ext at any time.
- */
- set_bit(IPS_DYING_BIT, &ct->status);
- }
- spin_unlock_bh(&pcpu->lock);
- cond_resched();
- }
-}
-
-void nf_ct_unconfirmed_destroy(struct net *net)
+void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
+ const struct nf_ct_iter_data *iter_data)
{
+ struct net *net = iter_data->net;
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
might_sleep();
- if (atomic_read(&cnet->count) > 0) {
- __nf_ct_unconfirmed_destroy(net);
- nf_queue_nf_hook_drop(net);
- synchronize_net();
- }
-}
-EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
-
-void nf_ct_iterate_cleanup_net(struct net *net,
- int (*iter)(struct nf_conn *i, void *data),
- void *data, u32 portid, int report)
-{
- struct nf_conntrack_net *cnet = nf_ct_pernet(net);
- struct iter_data d;
-
- might_sleep();
-
if (atomic_read(&cnet->count) == 0)
return;
- d.iter = iter;
- d.data = data;
- d.net = net;
-
- nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
+ nf_ct_iterate_cleanup(iter, iter_data);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
@@ -2477,6 +2444,7 @@ EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
void
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{
+ struct nf_ct_iter_data iter_data = {};
struct net *net;
down_read(&net_rwsem);
@@ -2485,31 +2453,41 @@ nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
if (atomic_read(&cnet->count) == 0)
continue;
- __nf_ct_unconfirmed_destroy(net);
nf_queue_nf_hook_drop(net);
}
up_read(&net_rwsem);
/* Need to wait for netns cleanup worker to finish, if its
* running -- it might have deleted a net namespace from
- * the global list, so our __nf_ct_unconfirmed_destroy() might
- * not have affected all namespaces.
+ * the global list, so hook drop above might not have
+ * affected all namespaces.
*/
net_ns_barrier();
- /* a conntrack could have been unlinked from unconfirmed list
- * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
+ /* a skb w. unconfirmed conntrack could have been reinjected just
+ * before we called nf_queue_nf_hook_drop().
+ *
+ * This makes sure it's inserted into the conntrack table.
*/
synchronize_net();
- nf_ct_iterate_cleanup(iter, data, 0, 0);
+ nf_ct_ext_bump_genid();
+ iter_data.data = data;
+ nf_ct_iterate_cleanup(iter, &iter_data);
+
+ /* Another cpu might be in a rcu read section with
+ * rcu protected pointer cleared in iter callback
+ * or hidden via nf_ct_ext_bump_genid() above.
+ *
+ * Wait until those are done.
+ */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
static int kill_all(struct nf_conn *i, void *data)
{
- return net_eq(nf_ct_net(i), data);
+ return 1;
}
void nf_conntrack_cleanup_start(void)
@@ -2544,8 +2522,9 @@ void nf_conntrack_cleanup_net(struct net *net)
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
- int busy;
+ struct nf_ct_iter_data iter_data = {};
struct net *net;
+ int busy;
/*
* This makes sure all current packets have passed through
@@ -2558,7 +2537,8 @@ i_see_dead_people:
list_for_each_entry(net, net_exit_list, exit_list) {
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
- nf_ct_iterate_cleanup(kill_all, net, 0, 0);
+ iter_data.net = net;
+ nf_ct_iterate_cleanup_net(kill_all, &iter_data);
if (atomic_read(&cnet->count) != 0)
busy = 1;
}
@@ -2571,7 +2551,6 @@ i_see_dead_people:
nf_conntrack_ecache_pernet_fini(net);
nf_conntrack_expect_pernet_fini(net);
free_percpu(net->ct.stat);
- free_percpu(net->ct.pcpu_lists);
}
}
@@ -2777,33 +2756,19 @@ void nf_conntrack_init_end(void)
* We need to use special "null" values, not used in hash table
*/
#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
-#define DYING_NULLS_VAL ((1<<30)+1)
int nf_conntrack_init_net(struct net *net)
{
struct nf_conntrack_net *cnet = nf_ct_pernet(net);
int ret = -ENOMEM;
- int cpu;
BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
atomic_set(&cnet->count, 0);
- net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
- if (!net->ct.pcpu_lists)
- goto err_stat;
-
- for_each_possible_cpu(cpu) {
- struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
- spin_lock_init(&pcpu->lock);
- INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
- INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
- }
-
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
if (!net->ct.stat)
- goto err_pcpu_lists;
+ return ret;
ret = nf_conntrack_expect_pernet_init(net);
if (ret < 0)
@@ -2819,8 +2784,5 @@ int nf_conntrack_init_net(struct net *net)
err_expect:
free_percpu(net->ct.stat);
-err_pcpu_lists:
- free_percpu(net->ct.pcpu_lists);
-err_stat:
return ret;
}
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 0cb2da0a759a..8698b3424646 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -16,7 +16,6 @@
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
-#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
@@ -29,8 +28,9 @@
static DEFINE_MUTEX(nf_ct_ecache_mutex);
-#define ECACHE_RETRY_WAIT (HZ/10)
-#define ECACHE_STACK_ALLOC (256 / sizeof(void *))
+#define DYING_NULLS_VAL ((1 << 30) + 1)
+#define ECACHE_MAX_JIFFIES msecs_to_jiffies(10)
+#define ECACHE_RETRY_JIFFIES msecs_to_jiffies(10)
enum retry_state {
STATE_CONGESTED,
@@ -38,58 +38,67 @@ enum retry_state {
STATE_DONE,
};
-static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net)
{
- struct nf_conn *refs[ECACHE_STACK_ALLOC];
+ struct nf_conntrack_net *cnet = nf_ct_pernet(net);
+
+ return &cnet->ecache;
+}
+#if IS_MODULE(CONFIG_NF_CT_NETLINK)
+EXPORT_SYMBOL_GPL(nf_conn_pernet_ecache);
+#endif
+
+static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
+{
+ unsigned long stop = jiffies + ECACHE_MAX_JIFFIES;
+ struct hlist_nulls_head evicted_list;
enum retry_state ret = STATE_DONE;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- unsigned int evicted = 0;
+ unsigned int sent;
- spin_lock(&pcpu->lock);
+ INIT_HLIST_NULLS_HEAD(&evicted_list, DYING_NULLS_VAL);
- hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
+next:
+ sent = 0;
+ spin_lock_bh(&cnet->ecache.dying_lock);
+
+ hlist_nulls_for_each_entry_safe(h, n, &cnet->ecache.dying_list, hnnode) {
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
- struct nf_conntrack_ecache *e;
-
- if (!nf_ct_is_confirmed(ct))
- continue;
-
- /* This ecache access is safe because the ct is on the
- * pcpu dying list and we hold the spinlock -- the entry
- * cannot be free'd until after the lock is released.
- *
- * This is true even if ct has a refcount of 0: the
- * cpu that is about to free the entry must remove it
- * from the dying list and needs the lock to do so.
- */
- e = nf_ct_ecache_find(ct);
- if (!e || e->state != NFCT_ECACHE_DESTROY_FAIL)
- continue;
- /* ct is in NFCT_ECACHE_DESTROY_FAIL state, this means
- * the worker owns this entry: the ct will remain valid
- * until the worker puts its ct reference.
+ /* The worker owns all entries, ct remains valid until nf_ct_put
+ * in the loop below.
*/
if (nf_conntrack_event(IPCT_DESTROY, ct)) {
ret = STATE_CONGESTED;
break;
}
- e->state = NFCT_ECACHE_DESTROY_SENT;
- refs[evicted] = ct;
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+ hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &evicted_list);
- if (++evicted >= ARRAY_SIZE(refs)) {
+ if (time_after(stop, jiffies)) {
ret = STATE_RESTART;
break;
}
+
+ if (sent++ > 16) {
+ spin_unlock_bh(&cnet->ecache.dying_lock);
+ cond_resched();
+ goto next;
+ }
}
- spin_unlock(&pcpu->lock);
+ spin_unlock_bh(&cnet->ecache.dying_lock);
- /* can't _put while holding lock */
- while (evicted)
- nf_ct_put(refs[--evicted]);
+ hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) {
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+ hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
+ nf_ct_put(ct);
+
+ cond_resched();
+ }
return ret;
}
@@ -97,35 +106,20 @@ static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
static void ecache_work(struct work_struct *work)
{
struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net, ecache.dwork.work);
- struct netns_ct *ctnet = cnet->ecache.ct_net;
- int cpu, delay = -1;
- struct ct_pcpu *pcpu;
-
- local_bh_disable();
-
- for_each_possible_cpu(cpu) {
- enum retry_state ret;
-
- pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
-
- ret = ecache_work_evict_list(pcpu);
-
- switch (ret) {
- case STATE_CONGESTED:
- delay = ECACHE_RETRY_WAIT;
- goto out;
- case STATE_RESTART:
- delay = 0;
- break;
- case STATE_DONE:
- break;
- }
+ int ret, delay = -1;
+
+ ret = ecache_work_evict_list(cnet);
+ switch (ret) {
+ case STATE_CONGESTED:
+ delay = ECACHE_RETRY_JIFFIES;
+ break;
+ case STATE_RESTART:
+ delay = 0;
+ break;
+ case STATE_DONE:
+ break;
}
- out:
- local_bh_enable();
-
- ctnet->ecache_dwork_pending = delay > 0;
if (delay >= 0)
schedule_delayed_work(&cnet->ecache.dwork, delay);
}
@@ -199,7 +193,6 @@ int nf_conntrack_eventmask_report(unsigned int events, struct nf_conn *ct,
*/
if (e->portid == 0 && portid != 0)
e->portid = portid;
- e->state = NFCT_ECACHE_DESTROY_FAIL;
}
return ret;
@@ -297,12 +290,51 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
schedule_delayed_work(&cnet->ecache.dwork, HZ);
net->ct.ecache_dwork_pending = true;
} else if (state == NFCT_ECACHE_DESTROY_SENT) {
- net->ct.ecache_dwork_pending = false;
- mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
+ if (!hlist_nulls_empty(&cnet->ecache.dying_list))
+ mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
+ else
+ net->ct.ecache_dwork_pending = false;
}
}
-#define NF_CT_EVENTS_DEFAULT 1
+bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+{
+ struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *e;
+
+ switch (net->ct.sysctl_events) {
+ case 0:
+ /* assignment via template / ruleset? ignore sysctl. */
+ if (ctmask || expmask)
+ break;
+ return true;
+ case 2: /* autodetect: no event listener, don't allocate extension. */
+ if (!READ_ONCE(net->ct.ctnetlink_has_listener))
+ return true;
+ fallthrough;
+ case 1:
+ /* always allocate an extension. */
+ if (!ctmask && !expmask) {
+ ctmask = ~0;
+ expmask = ~0;
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return true;
+ }
+
+ e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
+ if (e) {
+ e->ctmask = ctmask;
+ e->expmask = expmask;
+ }
+
+ return e != NULL;
+}
+EXPORT_SYMBOL_GPL(nf_ct_ecache_ext_add);
+
+#define NF_CT_EVENTS_DEFAULT 2
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
void nf_conntrack_ecache_pernet_init(struct net *net)
@@ -311,8 +343,9 @@ void nf_conntrack_ecache_pernet_init(struct net *net)
net->ct.sysctl_events = nf_ct_events;
- cnet->ecache.ct_net = &net->ct;
INIT_DELAYED_WORK(&cnet->ecache.dwork, ecache_work);
+ INIT_HLIST_NULLS_HEAD(&cnet->ecache.dying_list, DYING_NULLS_VAL);
+ spin_lock_init(&cnet->ecache.dying_lock);
BUILD_BUG_ON(__IPCT_MAX >= 16); /* e->ctmask is u16 */
}
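The ecache rework above replaces the per-cpu dying lists with a single per-netns list that the worker drains in batches: entries are unhooked onto a private evicted_list under the spinlock, and the final nf_ct_put() happens with the lock dropped. A compact userspace sketch of that batch-drain pattern (pthread mutex and a plain singly linked list as stand-ins; names and the batch size are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int id;
};

static pthread_mutex_t dying_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *dying_list;	/* shared, lock-protected */

/* Move a small batch off the shared list while holding the lock, then do
 * the expensive per-entry work (event delivery, final put) on the private
 * batch with the lock dropped.
 */
static void evict_batch(int batch)
{
	struct entry *evicted = NULL, *e;

	pthread_mutex_lock(&dying_lock);
	while (dying_list && batch-- > 0) {
		e = dying_list;
		dying_list = e->next;
		e->next = evicted;
		evicted = e;
	}
	pthread_mutex_unlock(&dying_lock);

	while ((e = evicted)) {
		evicted = e->next;
		printf("delivered destroy event for %d\n", e->id);
		free(e);			/* stands in for nf_ct_put() */
	}
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->id = i;
		e->next = dying_list;
		dying_list = e;
	}
	evict_batch(3);
	evict_batch(3);
	return 0;
}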
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 1296fda54ac6..0b513f7bf9f3 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -27,6 +27,8 @@
#define NF_CT_EXT_PREALLOC 128u /* conntrack events are on by default */
+atomic_t nf_conntrack_ext_genid __read_mostly = ATOMIC_INIT(1);
+
static const u8 nf_ct_ext_type_len[NF_CT_EXT_NUM] = {
[NF_CT_EXT_HELPER] = sizeof(struct nf_conn_help),
#if IS_ENABLED(CONFIG_NF_NAT)
@@ -116,8 +118,10 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
if (!new)
return NULL;
- if (!ct->ext)
+ if (!ct->ext) {
memset(new->offset, 0, sizeof(new->offset));
+ new->gen_id = atomic_read(&nf_conntrack_ext_genid);
+ }
new->offset[id] = newoff;
new->len = newlen;
@@ -127,3 +131,29 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
return (void *)new + newoff;
}
EXPORT_SYMBOL(nf_ct_ext_add);
+
+/* Use nf_ct_ext_find wrapper. This is only useful for unconfirmed entries. */
+void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id)
+{
+ unsigned int gen_id = atomic_read(&nf_conntrack_ext_genid);
+ unsigned int this_id = READ_ONCE(ext->gen_id);
+
+ if (!__nf_ct_ext_exist(ext, id))
+ return NULL;
+
+ if (this_id == 0 || ext->gen_id == gen_id)
+ return (void *)ext + ext->offset[id];
+
+ return NULL;
+}
+EXPORT_SYMBOL(__nf_ct_ext_find);
+
+void nf_ct_ext_bump_genid(void)
+{
+ unsigned int value = atomic_inc_return(&nf_conntrack_ext_genid);
+
+ if (value == UINT_MAX)
+ atomic_set(&nf_conntrack_ext_genid, 1);
+
+ msleep(HZ);
+}
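The extension changes above tag every nf_ct_ext allocation with the current nf_conntrack_ext_genid; lookups on unconfirmed entries treat a mismatched generation as stale, and nf_ct_ext_valid_post() pins gen_id to 0 once the entry is safely in the table. A small userspace model of that generation-id scheme (C11 atomics, illustrative field names, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdbool.h>

static atomic_uint genid = 1;	/* bumped whenever cached pointers may go stale */

struct ext {
	unsigned int gen_id;	/* generation this extension was allocated under */
	const char *helper;	/* stands in for a cached helper pointer */
};

/* Valid if committed (gen_id pinned to 0) or still in the current generation. */
static bool ext_valid(const struct ext *e)
{
	unsigned int this_id = e->gen_id;

	return this_id == 0 || this_id == atomic_load(&genid);
}

static void commit(struct ext *e)
{
	/* object reached the table; cleanup walks will find it, so drop the check */
	e->gen_id = 0;
}

int main(void)
{
	struct ext a = { .gen_id = atomic_load(&genid), .helper = "ftp" };
	struct ext b = { .gen_id = atomic_load(&genid), .helper = "ftp" };

	commit(&a);
	atomic_fetch_add(&genid, 1);	/* e.g. a helper module was removed */

	printf("a valid=%d b valid=%d\n", ext_valid(&a), ext_valid(&b));
	return 0;
}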
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 8dec42ec603e..c12a87ebc3ee 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -468,11 +468,6 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
nf_ct_iterate_destroy(unhelp, me);
-
- /* Maybe someone has gotten the helper already when unhelp above.
- * So need to wait it.
- */
- synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 924d766e6c53..722af5e309ba 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1559,6 +1559,11 @@ static int ctnetlink_flush_conntrack(struct net *net,
u32 portid, int report, u8 family)
{
struct ctnetlink_filter *filter = NULL;
+ struct nf_ct_iter_data iter = {
+ .net = net,
+ .portid = portid,
+ .report = report,
+ };
if (ctnetlink_needs_filter(family, cda)) {
if (cda[CTA_FILTER])
@@ -1567,10 +1572,11 @@ static int ctnetlink_flush_conntrack(struct net *net,
filter = ctnetlink_alloc_filter(cda, family);
if (IS_ERR(filter))
return PTR_ERR(filter);
+
+ iter.data = filter;
}
- nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
- portid, report);
+ nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter);
kfree(filter);
return 0;
@@ -1708,6 +1714,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
return 0;
}
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_dump_one_entry(struct sk_buff *skb,
struct netlink_callback *cb,
struct nf_conn *ct,
@@ -1748,63 +1755,62 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb,
return res;
}
+#endif
static int
-ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
+ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return 0;
+}
+
+static int
+ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
- struct nf_conn *ct, *last;
+ struct nf_conn *last = ctx->last;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ const struct net *net = sock_net(skb->sk);
+ struct nf_conntrack_net_ecache *ecache_net;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- struct hlist_nulls_head *list;
- struct net *net = sock_net(skb->sk);
- int res, cpu;
+#endif
if (ctx->done)
return 0;
- last = ctx->last;
+ ctx->last = NULL;
- for (cpu = ctx->cpu; cpu < nr_cpu_ids; cpu++) {
- struct ct_pcpu *pcpu;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ ecache_net = nf_conn_pernet_ecache(net);
+ spin_lock_bh(&ecache_net->dying_lock);
- if (!cpu_possible(cpu))
- continue;
+ hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
+ struct nf_conn *ct;
+ int res;
- pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
- spin_lock_bh(&pcpu->lock);
- list = dying ? &pcpu->dying : &pcpu->unconfirmed;
-restart:
- hlist_nulls_for_each_entry(h, n, list, hnnode) {
- ct = nf_ct_tuplehash_to_ctrack(h);
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (last && last != ct)
+ continue;
- res = ctnetlink_dump_one_entry(skb, cb, ct, dying);
- if (res < 0) {
- ctx->cpu = cpu;
- spin_unlock_bh(&pcpu->lock);
- goto out;
- }
- }
- if (ctx->last) {
- ctx->last = NULL;
- goto restart;
+ res = ctnetlink_dump_one_entry(skb, cb, ct, true);
+ if (res < 0) {
+ spin_unlock_bh(&ecache_net->dying_lock);
+ nf_ct_put(last);
+ return skb->len;
}
- spin_unlock_bh(&pcpu->lock);
+
+ nf_ct_put(last);
+ last = NULL;
}
+
+ spin_unlock_bh(&ecache_net->dying_lock);
+#endif
ctx->done = true;
-out:
- if (last)
- nf_ct_put(last);
+ nf_ct_put(last);
return skb->len;
}
-static int
-ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
-{
- return ctnetlink_dump_list(skb, cb, true);
-}
-
static int ctnetlink_get_ct_dying(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
@@ -1820,12 +1826,6 @@ static int ctnetlink_get_ct_dying(struct sk_buff *skb,
return -EOPNOTSUPP;
}
-static int
-ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
-{
- return ctnetlink_dump_list(skb, cb, false);
-}
-
static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index d1f2d3c8d2b1..895b09cbd7cf 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -538,9 +538,13 @@ retry:
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
- if (fixup_needed)
- nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,
- (void *)(unsigned long)nfproto, 0, 0);
+ if (fixup_needed) {
+ struct nf_ct_iter_data iter_data = {
+ .net = net,
+ .data = (void *)(unsigned long)nfproto,
+ };
+ nf_ct_iterate_cleanup_net(nf_ct_tcp_fixup, &iter_data);
+ }
return err;
}
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 204a5cdff5b1..a63b51dceaf2 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -485,7 +485,6 @@ static bool tcp_in_window(struct nf_conn *ct,
struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
- const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
u16 win_raw;
s32 receiver_offset;
@@ -508,18 +507,6 @@ static bool tcp_in_window(struct nf_conn *ct,
ack -= receiver_offset;
sack -= receiver_offset;
- pr_debug("tcp_in_window: START\n");
- pr_debug("tcp_in_window: ");
- nf_ct_dump_tuple(tuple);
- pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
- seq, ack, receiver_offset, sack, receiver_offset, win, end);
- pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
- "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
- sender->td_end, sender->td_maxend, sender->td_maxwin,
- sender->td_scale,
- receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
- receiver->td_scale);
-
if (sender->td_maxwin == 0) {
/*
* Initialize sender data.
@@ -597,27 +584,10 @@ static bool tcp_in_window(struct nf_conn *ct,
*/
seq = end = sender->td_end;
- pr_debug("tcp_in_window: ");
- nf_ct_dump_tuple(tuple);
- pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
- seq, ack, receiver_offset, sack, receiver_offset, win, end);
- pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
- "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
- sender->td_end, sender->td_maxend, sender->td_maxwin,
- sender->td_scale,
- receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
- receiver->td_scale);
-
/* Is the ending sequence in the receive window (if available)? */
in_recv_win = !receiver->td_maxwin ||
after(end, sender->td_end - receiver->td_maxwin - 1);
- pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
- before(seq, sender->td_maxend + 1),
- (in_recv_win ? 1 : 0),
- before(sack, receiver->td_end + 1),
- after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
-
if (before(seq, sender->td_maxend + 1) &&
in_recv_win &&
before(sack, receiver->td_end + 1) &&
@@ -698,11 +668,6 @@ static bool tcp_in_window(struct nf_conn *ct,
}
}
- pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
- "receiver end=%u maxend=%u maxwin=%u\n",
- res, sender->td_end, sender->td_maxend, sender->td_maxwin,
- receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
-
return res;
}
@@ -772,8 +737,6 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
enum tcp_conntrack new_state;
struct net *net = nf_ct_net(ct);
const struct nf_tcp_net *tn = nf_tcp_pernet(net);
- const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
- const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
/* Don't need lock here: this conntrack not in circulation yet */
new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
@@ -826,14 +789,6 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
/* tcp_packet will set them */
ct->proto.tcp.last_index = TCP_NONE_SET;
-
- pr_debug("%s: sender end=%u maxend=%u maxwin=%u scale=%i "
- "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
- __func__,
- sender->td_end, sender->td_maxend, sender->td_maxwin,
- sender->td_scale,
- receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
- receiver->td_scale);
return true;
}
@@ -1032,10 +987,11 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
}
/* Invalid packet */
- pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
- dir, get_conntrack_index(th), old_state);
spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, state, "invalid state");
+ nf_ct_l4proto_log_invalid(skb, ct, state,
+ "packet (index %d) in dir %d invalid, state %s",
+ index, dir,
+ tcp_conntrack_names[old_state]);
return -NF_ACCEPT;
case TCP_CONNTRACK_TIME_WAIT:
/* RFC5961 compliance cause stack to send "challenge-ACK"
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 55aa55b252b2..6ad7bbc90d38 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -693,7 +693,7 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .extra2 = SYSCTL_TWO,
},
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
index cec166ecba77..0f828d05ea60 100644
--- a/net/netfilter/nf_conntrack_timeout.c
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -38,7 +38,12 @@ static int untimeout(struct nf_conn *ct, void *timeout)
void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout)
{
- nf_ct_iterate_cleanup_net(net, untimeout, timeout, 0, 0);
+ struct nf_ct_iter_data iter_data = {
+ .net = net,
+ .data = timeout,
+ };
+
+ nf_ct_iterate_cleanup_net(untimeout, &iter_data);
}
EXPORT_SYMBOL_GPL(nf_ct_untimeout);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 3db256da919b..f2def06d1070 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -179,12 +179,11 @@ EXPORT_SYMBOL_GPL(flow_offload_route_init);
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
- tcp->state = TCP_CONNTRACK_ESTABLISHED;
tcp->seen[0].td_maxwin = 0;
tcp->seen[1].td_maxwin = 0;
}
-static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+static void flow_offload_fixup_ct(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
int l4num = nf_ct_protonum(ct);
@@ -193,7 +192,9 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
if (l4num == IPPROTO_TCP) {
struct nf_tcp_net *tn = nf_tcp_pernet(net);
- timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+ flow_offload_fixup_tcp(&ct->proto.tcp);
+
+ timeout = tn->timeouts[ct->proto.tcp.state];
timeout -= tn->offload_timeout;
} else if (l4num == IPPROTO_UDP) {
struct nf_udp_net *tn = nf_udp_pernet(net);
@@ -211,18 +212,6 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}
-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
-{
- if (nf_ct_protonum(ct) == IPPROTO_TCP)
- flow_offload_fixup_tcp(&ct->proto.tcp);
-}
-
-static void flow_offload_fixup_ct(struct nf_conn *ct)
-{
- flow_offload_fixup_ct_state(ct);
- flow_offload_fixup_ct_timeout(ct);
-}
-
static void flow_offload_route_release(struct flow_offload *flow)
{
nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
@@ -335,8 +324,10 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
u32 timeout;
timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
- if (READ_ONCE(flow->timeout) != timeout)
+ if (timeout - READ_ONCE(flow->timeout) > HZ)
WRITE_ONCE(flow->timeout, timeout);
+ else
+ return;
if (likely(!nf_flowtable_hw_offload(flow_table)))
return;
@@ -359,22 +350,14 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
nf_flow_offload_rhash_params);
-
- clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
-
- if (nf_flow_has_expired(flow))
- flow_offload_fixup_ct(flow->ct);
- else
- flow_offload_fixup_ct_timeout(flow->ct);
-
flow_offload_free(flow);
}
void flow_offload_teardown(struct flow_offload *flow)
{
+ clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
set_bit(NF_FLOW_TEARDOWN, &flow->flags);
-
- flow_offload_fixup_ct_state(flow->ct);
+ flow_offload_fixup_ct(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
@@ -438,33 +421,12 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err;
}
-static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
-{
- struct dst_entry *dst;
-
- if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
- tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
- dst = tuple->dst_cache;
- if (!dst_check(dst, tuple->dst_cookie))
- return true;
- }
-
- return false;
-}
-
-static bool nf_flow_has_stale_dst(struct flow_offload *flow)
-{
- return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
- flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
-}
-
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
if (nf_flow_has_expired(flow) ||
- nf_ct_is_dying(flow->ct) ||
- nf_flow_has_stale_dst(flow))
- set_bit(NF_FLOW_TEARDOWN, &flow->flags);
+ nf_ct_is_dying(flow->ct))
+ flow_offload_teardown(flow);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
if (test_bit(NF_FLOW_HW, &flow->flags)) {
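The flowtable hunks above stop rewriting flow->timeout on every packet: the store is skipped unless the deadline would advance by more than HZ, and teardown now clears IPS_OFFLOAD and fixes up the conntrack state in one place. A tiny sketch of the write-throttled refresh (userspace counters standing in for nf_flowtable_time_stamp and the shared field; values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define HZ 100U			/* pretend one second worth of ticks */

static uint32_t now;		/* stands in for nf_flowtable_time_stamp */
static uint32_t flow_timeout;	/* shared field, written on the packet path */

/* Refresh the deadline, but skip the store unless it moved by more than HZ:
 * per-packet writes to a shared cache line are the thing being avoided.
 */
static bool refresh(uint32_t lifetime)
{
	uint32_t timeout = now + lifetime;

	if (timeout - flow_timeout <= HZ)
		return false;		/* close enough, don't dirty the line */

	flow_timeout = timeout;
	return true;
}

int main(void)
{
	now = 0;
	printf("wrote=%d timeout=%u\n", refresh(30 * HZ), (unsigned)flow_timeout);
	now = HZ / 2;			/* half a second later: skipped */
	printf("wrote=%d timeout=%u\n", refresh(30 * HZ), (unsigned)flow_timeout);
	now = 2 * HZ;			/* two seconds later: written */
	printf("wrote=%d timeout=%u\n", refresh(30 * HZ), (unsigned)flow_timeout);
	return 0;
}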
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 32c0eb1b4821..b350fe9d00b0 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -248,6 +248,15 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
return true;
}
+static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
+{
+ if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
+ tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
+ return true;
+
+ return dst_check(tuple->dst_cache, tuple->dst_cookie);
+}
+
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
const struct nf_hook_state *state,
struct dst_entry *dst)
@@ -367,6 +376,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
return NF_ACCEPT;
+ if (!nf_flow_dst_check(&tuplehash->tuple)) {
+ flow_offload_teardown(flow);
+ return NF_ACCEPT;
+ }
+
if (skb_try_make_writable(skb, thoff + hdrsize))
return NF_DROP;
@@ -624,6 +638,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
return NF_ACCEPT;
+ if (!nf_flow_dst_check(&tuplehash->tuple)) {
+ flow_offload_teardown(flow);
+ return NF_ACCEPT;
+ }
+
if (skb_try_make_writable(skb, thoff + hdrsize))
return NF_DROP;
diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
index e32fac374608..1a506b0c6511 100644
--- a/net/netfilter/nf_nat_masquerade.c
+++ b/net/netfilter/nf_nat_masquerade.c
@@ -77,11 +77,14 @@ EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
static void iterate_cleanup_work(struct work_struct *work)
{
+ struct nf_ct_iter_data iter_data = {};
struct masq_dev_work *w;
w = container_of(work, struct masq_dev_work, work);
- nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
+ iter_data.net = w->net;
+ iter_data.data = (void *)w;
+ nf_ct_iterate_cleanup_net(w->iter, &iter_data);
put_net_track(w->net, &w->ns_tracker);
kfree(w);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index f3ad02a399f8..12fc9cda4a2c 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -8342,16 +8342,7 @@ EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);
static bool nft_expr_reduce(struct nft_regs_track *track,
const struct nft_expr *expr)
{
- if (!expr->ops->reduce) {
- pr_warn_once("missing reduce for expression %s ",
- expr->ops->type->name);
- return false;
- }
-
- if (nft_reduce_is_readonly(expr))
- return false;
-
- return expr->ops->reduce(track, expr);
+ return false;
}
static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain)
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 7e2c8dd01408..ad3bbe34ca88 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -45,6 +45,7 @@ MODULE_DESCRIPTION("Netfilter messages via netlink socket");
static unsigned int nfnetlink_pernet_id __read_mostly;
struct nfnl_net {
+ unsigned int ctnetlink_listeners;
struct sock *nfnl;
};
@@ -654,7 +655,6 @@ static void nfnetlink_rcv(struct sk_buff *skb)
netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}
-#ifdef CONFIG_MODULES
static int nfnetlink_bind(struct net *net, int group)
{
const struct nfnetlink_subsystem *ss;
@@ -670,9 +670,44 @@ static int nfnetlink_bind(struct net *net, int group)
rcu_read_unlock();
if (!ss)
request_module_nowait("nfnetlink-subsys-%d", type);
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ if (type == NFNL_SUBSYS_CTNETLINK) {
+ struct nfnl_net *nfnlnet = nfnl_pernet(net);
+
+ nfnl_lock(NFNL_SUBSYS_CTNETLINK);
+
+ if (WARN_ON_ONCE(nfnlnet->ctnetlink_listeners == UINT_MAX)) {
+ nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+ return -EOVERFLOW;
+ }
+
+ nfnlnet->ctnetlink_listeners++;
+ if (nfnlnet->ctnetlink_listeners == 1)
+ WRITE_ONCE(net->ct.ctnetlink_has_listener, true);
+ nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+ }
+#endif
return 0;
}
+
+static void nfnetlink_unbind(struct net *net, int group)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ int type = nfnl_group2type[group];
+
+ if (type == NFNL_SUBSYS_CTNETLINK) {
+ struct nfnl_net *nfnlnet = nfnl_pernet(net);
+
+ nfnl_lock(NFNL_SUBSYS_CTNETLINK);
+ WARN_ON_ONCE(nfnlnet->ctnetlink_listeners == 0);
+ nfnlnet->ctnetlink_listeners--;
+ if (nfnlnet->ctnetlink_listeners == 0)
+ WRITE_ONCE(net->ct.ctnetlink_has_listener, false);
+ nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+ }
#endif
+}
static int __net_init nfnetlink_net_init(struct net *net)
{
@@ -680,9 +715,8 @@ static int __net_init nfnetlink_net_init(struct net *net)
struct netlink_kernel_cfg cfg = {
.groups = NFNLGRP_MAX,
.input = nfnetlink_rcv,
-#ifdef CONFIG_MODULES
.bind = nfnetlink_bind,
-#endif
+ .unbind = nfnetlink_unbind,
};
nfnlnet->nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
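The nfnetlink hunks above count binds to the ctnetlink group per netns and flip net->ct.ctnetlink_has_listener on the 0<->1 transitions; that flag is what the new sysctl_events==2 autodetect mode reads before allocating an event extension. A userspace sketch of the counted-listener flag (mutex plus an atomic flag as stand-ins for the nfnl lock and WRITE_ONCE/READ_ONCE):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bind_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int listeners;		/* protected by bind_lock */
static atomic_bool has_listener;	/* read lock-free on the packet path */

static void group_bind(void)
{
	pthread_mutex_lock(&bind_lock);
	if (++listeners == 1)
		atomic_store(&has_listener, true);
	pthread_mutex_unlock(&bind_lock);
}

static void group_unbind(void)
{
	pthread_mutex_lock(&bind_lock);
	if (--listeners == 0)
		atomic_store(&has_listener, false);
	pthread_mutex_unlock(&bind_lock);
}

/* Fast path: only pay for event bookkeeping when someone is listening. */
static bool want_events(void)
{
	return atomic_load(&has_listener);
}

int main(void)
{
	printf("before bind:  %d\n", want_events());
	group_bind();
	printf("after bind:   %d\n", want_events());
	group_unbind();
	printf("after unbind: %d\n", want_events());
	return 0;
}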
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index eea486f32971..f069c24c6146 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -33,8 +33,19 @@
static unsigned int nfct_timeout_id __read_mostly;
+struct ctnl_timeout {
+ struct list_head head;
+ struct rcu_head rcu_head;
+ refcount_t refcnt;
+ char name[CTNL_TIMEOUT_NAME_MAX];
+ struct nf_ct_timeout timeout;
+
+ struct list_head free_head;
+};
+
struct nfct_timeout_pernet {
struct list_head nfct_timeout_list;
+ struct list_head nfct_timeout_freelist;
};
MODULE_LICENSE("GPL");
@@ -574,20 +585,36 @@ static int __net_init cttimeout_net_init(struct net *net)
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
INIT_LIST_HEAD(&pernet->nfct_timeout_list);
+ INIT_LIST_HEAD(&pernet->nfct_timeout_freelist);
return 0;
}
+static void __net_exit cttimeout_net_pre_exit(struct net *net)
+{
+ struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
+ struct ctnl_timeout *cur, *tmp;
+
+ list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_list, head) {
+ list_del_rcu(&cur->head);
+ list_add(&cur->free_head, &pernet->nfct_timeout_freelist);
+ }
+
+ /* core calls synchronize_rcu() after this */
+}
+
static void __net_exit cttimeout_net_exit(struct net *net)
{
struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
struct ctnl_timeout *cur, *tmp;
- nf_ct_unconfirmed_destroy(net);
+ if (list_empty(&pernet->nfct_timeout_freelist))
+ return;
+
nf_ct_untimeout(net, NULL);
- list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_list, head) {
- list_del_rcu(&cur->head);
+ list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, head) {
+ list_del(&cur->free_head);
if (refcount_dec_and_test(&cur->refcnt))
kfree_rcu(cur, rcu_head);
@@ -596,6 +623,7 @@ static void __net_exit cttimeout_net_exit(struct net *net)
static struct pernet_operations cttimeout_ops = {
.init = cttimeout_net_init,
+ .pre_exit = cttimeout_net_pre_exit,
.exit = cttimeout_net_exit,
.id = &nfct_timeout_id,
.size = sizeof(struct nfct_timeout_pernet),
@@ -628,13 +656,24 @@ err_out:
return ret;
}
+static int untimeout(struct nf_conn *ct, void *timeout)
+{
+ struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
+
+ if (timeout_ext)
+ RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+
+ return 0;
+}
+
static void __exit cttimeout_exit(void)
{
nfnetlink_subsys_unregister(&cttimeout_subsys);
unregister_pernet_subsys(&cttimeout_ops);
RCU_INIT_POINTER(nf_ct_timeout_hook, NULL);
- synchronize_rcu();
+
+ nf_ct_iterate_destroy(untimeout, NULL);
}
module_init(cttimeout_init);
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 900d48c810a1..a16cf47199b7 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -36,6 +36,15 @@ static void nft_default_forward_path(struct nf_flow_route *route,
route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
}
+static bool nft_is_valid_ether_device(const struct net_device *dev)
+{
+ if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+ dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
+ return false;
+
+ return true;
+}
+
static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
const struct dst_entry *dst_cache,
const struct nf_conn *ct,
@@ -47,6 +56,9 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
struct neighbour *n;
u8 nud_state;
+ if (!nft_is_valid_ether_device(dev))
+ goto out;
+
n = dst_neigh_lookup(dst_cache, daddr);
if (!n)
return -1;
@@ -60,6 +72,7 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
if (!(nud_state & NUD_VALID))
return -1;
+out:
return dev_fill_forward_path(dev, ha, stack);
}
@@ -78,15 +91,6 @@ struct nft_forward_info {
enum flow_offload_xmit_type xmit_type;
};
-static bool nft_is_valid_ether_device(const struct net_device *dev)
-{
- if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
- dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
- return false;
-
- return true;
-}
-
static void nft_dev_path_info(const struct net_device_path_stack *stack,
struct nft_forward_info *info,
unsigned char *ha, struct nf_flowtable *flowtable)
@@ -119,7 +123,8 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
info->indev = NULL;
break;
}
- info->outdev = path->dev;
+ if (!info->outdev)
+ info->outdev = path->dev;
info->encap[info->num_encaps].id = path->encap.id;
info->encap[info->num_encaps].proto = path->encap.proto;
info->num_encaps++;
@@ -227,11 +232,19 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ fl.u.ip4.saddr = ct->tuplehash[dir].tuple.dst.u3.ip;
fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
+ fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
+ fl.u.ip4.flowi4_tos = RT_TOS(ip_hdr(pkt->skb)->tos);
+ fl.u.ip4.flowi4_mark = pkt->skb->mark;
break;
case NFPROTO_IPV6:
fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ fl.u.ip6.saddr = ct->tuplehash[dir].tuple.dst.u3.in6;
fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
+ fl.u.ip6.flowi6_iif = this_dst->dev->ifindex;
+ fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
+ fl.u.ip6.flowi6_mark = pkt->skb->mark;
break;
}
@@ -293,7 +306,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
case IPPROTO_TCP:
tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
sizeof(_tcph), &_tcph);
- if (unlikely(!tcph || tcph->fin || tcph->rst))
+ if (unlikely(!tcph || tcph->fin || tcph->rst ||
+ !nf_conntrack_tcp_established(ct)))
goto out;
break;
case IPPROTO_UDP:
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index 6055dc9a82aa..aa5e712adf07 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -118,7 +118,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
skb_frag = nci_skb_alloc(ndev,
(NCI_DATA_HDR_SIZE + frag_len),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (skb_frag == NULL) {
rc = -ENOMEM;
goto free_exit;
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index 19703a649b5a..78c4b6addf15 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -153,7 +153,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
i = 0;
skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
- NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
@@ -184,7 +184,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
if (i < data_len) {
skb = nci_skb_alloc(ndev,
conn_info->max_pkt_payload_len +
- NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 2b5f89713e36..ceba28e9dce6 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -351,7 +351,7 @@ static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
*/
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
- _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
+ _enter("%d{%d}", call->debug_id, refcount_read(&call->ref));
mutex_lock(&call->user_mutex);
rxrpc_release_call(rxrpc_sk(sock->sk), call);
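The rxrpc hunks in this part of the series convert the various atomic_t usage counters to refcount_t and use __refcount_inc_not_zero() so the pre-increment value is still available for the trace points. A stripped-down userspace sketch of an "increment only if still alive" reference grab (C11 compare-and-swap; no saturation or overflow warning, which the kernel's refcount_t adds on top):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object has not already dropped to zero,
 * reporting the old value so a tracepoint can log it.
 */
static bool ref_inc_not_zero(atomic_int *ref, int *oldp)
{
	int old = atomic_load(ref);

	do {
		if (old == 0)
			return false;	/* object already being freed */
	} while (!atomic_compare_exchange_weak(ref, &old, old + 1));

	*oldp = old;
	return true;
}

int main(void)
{
	atomic_int live = 1, dead = 0;
	int old;

	if (ref_inc_not_zero(&live, &old))
		printf("got ref, old=%d new=%d\n", old, atomic_load(&live));
	if (!ref_inc_not_zero(&dead, &old))
		printf("refused ref on dead object\n");
	return 0;
}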
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 969e532f77a9..dcc0ec0bf3de 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -15,14 +15,6 @@
#include <keys/rxrpc-type.h>
#include "protocol.h"
-#if 0
-#define CHECK_SLAB_OKAY(X) \
- BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
- (POISON_FREE << 8 | POISON_FREE))
-#else
-#define CHECK_SLAB_OKAY(X) do {} while (0)
-#endif
-
#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
union {
@@ -68,7 +60,7 @@ struct rxrpc_net {
struct proc_dir_entry *proc_net; /* Subdir in /proc/net */
u32 epoch; /* Local epoch for detecting local-end reset */
struct list_head calls; /* List of calls active in this namespace */
- rwlock_t call_lock; /* Lock for ->calls */
+ spinlock_t call_lock; /* Lock for ->calls */
atomic_t nr_calls; /* Count of allocated calls */
atomic_t nr_conns;
@@ -88,7 +80,7 @@ struct rxrpc_net {
struct work_struct client_conn_reaper;
struct timer_list client_conn_reap_timer;
- struct list_head local_endpoints;
+ struct hlist_head local_endpoints;
struct mutex local_mutex; /* Lock for ->local_endpoints */
DECLARE_HASHTABLE (peer_hash, 10);
@@ -279,9 +271,9 @@ struct rxrpc_security {
struct rxrpc_local {
struct rcu_head rcu;
atomic_t active_users; /* Number of users of the local endpoint */
- atomic_t usage; /* Number of references to the structure */
+ refcount_t ref; /* Number of references to the structure */
struct rxrpc_net *rxnet; /* The network ns in which this resides */
- struct list_head link;
+ struct hlist_node link;
struct socket *socket; /* my UDP socket */
struct work_struct processor;
struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
@@ -304,7 +296,7 @@ struct rxrpc_local {
*/
struct rxrpc_peer {
struct rcu_head rcu; /* This must be first */
- atomic_t usage;
+ refcount_t ref;
unsigned long hash_key;
struct hlist_node hash_link;
struct rxrpc_local *local;
@@ -406,7 +398,7 @@ enum rxrpc_conn_proto_state {
*/
struct rxrpc_bundle {
struct rxrpc_conn_parameters params;
- atomic_t usage;
+ refcount_t ref;
unsigned int debug_id;
bool try_upgrade; /* True if the bundle is attempting upgrade */
bool alloc_conn; /* True if someone's getting a conn */
@@ -427,7 +419,7 @@ struct rxrpc_connection {
struct rxrpc_conn_proto proto;
struct rxrpc_conn_parameters params;
- atomic_t usage;
+ refcount_t ref;
struct rcu_head rcu;
struct list_head cache_link;
@@ -609,7 +601,7 @@ struct rxrpc_call {
int error; /* Local error incurred */
enum rxrpc_call_state state; /* current state of call */
enum rxrpc_call_completion completion; /* Call completion condition */
- atomic_t usage;
+ refcount_t ref;
u16 service_id; /* service ID */
u8 security_ix; /* Security type */
enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
@@ -1014,6 +1006,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *);
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
extern const struct seq_operations rxrpc_peer_seq_ops;
+extern const struct seq_operations rxrpc_local_seq_ops;
/*
* recvmsg.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 1ae90fb97936..99e10eea3732 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -91,7 +91,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
(head + 1) & (size - 1));
trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
- atomic_read(&conn->usage), here);
+ refcount_read(&conn->ref), here);
}
/* Now it gets complicated, because calls get registered with the
@@ -104,7 +104,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
call->state = RXRPC_CALL_SERVER_PREALLOC;
trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
- atomic_read(&call->usage),
+ refcount_read(&call->ref),
here, (const void *)user_call_ID);
write_lock(&rx->call_lock);
@@ -140,9 +140,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
write_unlock(&rx->call_lock);
rxnet = call->rxnet;
- write_lock(&rxnet->call_lock);
- list_add_tail(&call->link, &rxnet->calls);
- write_unlock(&rxnet->call_lock);
+ spin_lock_bh(&rxnet->call_lock);
+ list_add_tail_rcu(&call->link, &rxnet->calls);
+ spin_unlock_bh(&rxnet->call_lock);
b->call_backlog[call_head] = call;
smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 22e05de5d1ca..e426f6831aab 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -377,9 +377,9 @@ recheck_state:
if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
(int)call->conn->hi_serial - (int)call->rx_serial > 0) {
trace_rxrpc_call_reset(call);
- rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ECONNRESET);
+ rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
} else {
- rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
+ rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
}
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
goto recheck_state;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 043508fd8d8a..84d0a4109645 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -112,7 +112,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
found_extant_call:
rxrpc_get_call(call, rxrpc_call_got);
read_unlock(&rx->call_lock);
- _leave(" = %p [%d]", call, atomic_read(&call->usage));
+ _leave(" = %p [%d]", call, refcount_read(&call->ref));
return call;
}
@@ -160,7 +160,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
spin_lock_init(&call->notify_lock);
spin_lock_init(&call->input_lock);
rwlock_init(&call->state_lock);
- atomic_set(&call->usage, 1);
+ refcount_set(&call->ref, 1);
call->debug_id = debug_id;
call->tx_total_len = -1;
call->next_rx_timo = 20 * HZ;
@@ -299,7 +299,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
call->interruptibility = p->interruptibility;
call->tx_total_len = p->tx_total_len;
trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
- atomic_read(&call->usage),
+ refcount_read(&call->ref),
here, (const void *)p->user_call_ID);
if (p->kernel)
__set_bit(RXRPC_CALL_KERNEL, &call->flags);
@@ -337,9 +337,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
write_unlock(&rx->call_lock);
rxnet = call->rxnet;
- write_lock(&rxnet->call_lock);
- list_add_tail(&call->link, &rxnet->calls);
- write_unlock(&rxnet->call_lock);
+ spin_lock_bh(&rxnet->call_lock);
+ list_add_tail_rcu(&call->link, &rxnet->calls);
+ spin_unlock_bh(&rxnet->call_lock);
/* From this point on, the call is protected by its own lock. */
release_sock(&rx->sk);
@@ -352,7 +352,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
goto error_attached_to_socket;
trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
- atomic_read(&call->usage), here, NULL);
+ refcount_read(&call->ref), here, NULL);
rxrpc_start_call_timer(call);
@@ -372,7 +372,7 @@ error_dup_user_ID:
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_CALL_DEAD, -EEXIST);
trace_rxrpc_call(call->debug_id, rxrpc_call_error,
- atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
+ refcount_read(&call->ref), here, ERR_PTR(-EEXIST));
rxrpc_release_call(rx, call);
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
@@ -386,7 +386,7 @@ error_dup_user_ID:
*/
error_attached_to_socket:
trace_rxrpc_call(call->debug_id, rxrpc_call_error,
- atomic_read(&call->usage), here, ERR_PTR(ret));
+ refcount_read(&call->ref), here, ERR_PTR(ret));
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_CALL_DEAD, ret);
@@ -442,8 +442,9 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
bool rxrpc_queue_call(struct rxrpc_call *call)
{
const void *here = __builtin_return_address(0);
- int n = atomic_fetch_add_unless(&call->usage, 1, 0);
- if (n == 0)
+ int n;
+
+ if (!__refcount_inc_not_zero(&call->ref, &n))
return false;
if (rxrpc_queue_work(&call->processor))
trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
@@ -459,7 +460,7 @@ bool rxrpc_queue_call(struct rxrpc_call *call)
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
const void *here = __builtin_return_address(0);
- int n = atomic_read(&call->usage);
+ int n = refcount_read(&call->ref);
ASSERTCMP(n, >=, 1);
if (rxrpc_queue_work(&call->processor))
trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
@@ -476,7 +477,7 @@ void rxrpc_see_call(struct rxrpc_call *call)
{
const void *here = __builtin_return_address(0);
if (call) {
- int n = atomic_read(&call->usage);
+ int n = refcount_read(&call->ref);
trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
here, NULL);
@@ -486,11 +487,11 @@ void rxrpc_see_call(struct rxrpc_call *call)
bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
const void *here = __builtin_return_address(0);
- int n = atomic_fetch_add_unless(&call->usage, 1, 0);
+ int n;
- if (n == 0)
+ if (!__refcount_inc_not_zero(&call->ref, &n))
return false;
- trace_rxrpc_call(call->debug_id, op, n, here, NULL);
+ trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
return true;
}
@@ -500,9 +501,10 @@ bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
const void *here = __builtin_return_address(0);
- int n = atomic_inc_return(&call->usage);
+ int n;
- trace_rxrpc_call(call->debug_id, op, n, here, NULL);
+ __refcount_inc(&call->ref, &n);
+ trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
}
/*
@@ -527,10 +529,10 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
struct rxrpc_connection *conn = call->conn;
bool put = false;
- _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
+ _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));
trace_rxrpc_call(call->debug_id, rxrpc_call_release,
- atomic_read(&call->usage),
+ refcount_read(&call->ref),
here, (const void *)call->flags);
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
@@ -619,21 +621,21 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
struct rxrpc_net *rxnet = call->rxnet;
const void *here = __builtin_return_address(0);
unsigned int debug_id = call->debug_id;
+ bool dead;
int n;
ASSERT(call != NULL);
- n = atomic_dec_return(&call->usage);
+ dead = __refcount_dec_and_test(&call->ref, &n);
trace_rxrpc_call(debug_id, op, n, here, NULL);
- ASSERTCMP(n, >=, 0);
- if (n == 0) {
+ if (dead) {
_debug("call %d dead", call->debug_id);
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
if (!list_empty(&call->link)) {
- write_lock(&rxnet->call_lock);
+ spin_lock_bh(&rxnet->call_lock);
list_del_init(&call->link);
- write_unlock(&rxnet->call_lock);
+ spin_unlock_bh(&rxnet->call_lock);
}
rxrpc_cleanup_call(call);
@@ -705,7 +707,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
_enter("");
if (!list_empty(&rxnet->calls)) {
- write_lock(&rxnet->call_lock);
+ spin_lock_bh(&rxnet->call_lock);
while (!list_empty(&rxnet->calls)) {
call = list_entry(rxnet->calls.next,
@@ -716,16 +718,16 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
list_del_init(&call->link);
pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
- call, atomic_read(&call->usage),
+ call, refcount_read(&call->ref),
rxrpc_call_states[call->state],
call->flags, call->events);
- write_unlock(&rxnet->call_lock);
+ spin_unlock_bh(&rxnet->call_lock);
cond_resched();
- write_lock(&rxnet->call_lock);
+ spin_lock_bh(&rxnet->call_lock);
}
- write_unlock(&rxnet->call_lock);
+ spin_unlock_bh(&rxnet->call_lock);
}
atomic_dec(&rxnet->nr_calls);
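The call_object.c hunks above convert rxrpc's call reference counting from open-coded atomic_t arithmetic to the refcount_t API (refcount_set(), __refcount_inc_not_zero(), __refcount_dec_and_test()), which saturates instead of wrapping and hands back the pre-operation count for the tracepoints. As a rough illustration of that pattern only, here is a minimal userspace model using C11 atomics; it is not the kernel implementation, and the names obj_get_maybe()/obj_put() are invented for the sketch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int ref;
};

/* Models __refcount_inc_not_zero(): take a reference only if one is held. */
static bool obj_get_maybe(struct obj *o, int *oldp)
{
	int old = atomic_load(&o->ref);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&o->ref, &old, old + 1));

	*oldp = old;
	return true;
}

/* Models __refcount_dec_and_test(): report when the last reference drops. */
static bool obj_put(struct obj *o, int *oldp)
{
	*oldp = atomic_fetch_sub(&o->ref, 1);
	return *oldp == 1;
}

int main(void)
{
	struct obj call = { .ref = 1 };		/* like refcount_set(&call->ref, 1) */
	int old;

	if (obj_get_maybe(&call, &old))
		printf("got ref, previous count %d\n", old);	/* trace logs old + 1 */

	obj_put(&call, &old);			/* 2 -> 1, object still live */
	if (obj_put(&call, &old))		/* 1 -> 0, last reference */
		printf("last ref dropped, object would be freed\n");

	return 0;
}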
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 8120138dac01..3c9eeb5b750c 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -102,7 +102,7 @@ void rxrpc_destroy_client_conn_ids(void)
if (!idr_is_empty(&rxrpc_client_conn_ids)) {
idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
- conn, atomic_read(&conn->usage));
+ conn, refcount_read(&conn->ref));
}
BUG();
}
@@ -122,7 +122,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
if (bundle) {
bundle->params = *cp;
rxrpc_get_peer(bundle->params.peer);
- atomic_set(&bundle->usage, 1);
+ refcount_set(&bundle->ref, 1);
spin_lock_init(&bundle->channel_lock);
INIT_LIST_HEAD(&bundle->waiting_calls);
}
@@ -131,7 +131,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
{
- atomic_inc(&bundle->usage);
+ refcount_inc(&bundle->ref);
return bundle;
}
@@ -144,10 +144,13 @@ static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
unsigned int d = bundle->debug_id;
- unsigned int u = atomic_dec_return(&bundle->usage);
+ bool dead;
+ int r;
- _debug("PUT B=%x %u", d, u);
- if (u == 0)
+ dead = __refcount_dec_and_test(&bundle->ref, &r);
+
+ _debug("PUT B=%x %d", d, r);
+ if (dead)
rxrpc_free_bundle(bundle);
}
@@ -169,7 +172,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
return ERR_PTR(-ENOMEM);
}
- atomic_set(&conn->usage, 1);
+ refcount_set(&conn->ref, 1);
conn->bundle = bundle;
conn->params = bundle->params;
conn->out_clientflag = RXRPC_CLIENT_INITIATED;
@@ -195,7 +198,7 @@ rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
key_get(conn->params.key);
trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
- atomic_read(&conn->usage),
+ refcount_read(&conn->ref),
__builtin_return_address(0));
atomic_inc(&rxnet->nr_client_conns);
@@ -966,14 +969,13 @@ void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
unsigned int debug_id = conn->debug_id;
- int n;
+ bool dead;
+ int r;
- n = atomic_dec_return(&conn->usage);
- trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
- if (n <= 0) {
- ASSERTCMP(n, >=, 0);
+ dead = __refcount_dec_and_test(&conn->ref, &r);
+ trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, r - 1, here);
+ if (dead)
rxrpc_kill_client_conn(conn);
- }
}
/*
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index b2159dbf5412..22089e37e97f 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -104,7 +104,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
goto not_found;
*_peer = peer;
conn = rxrpc_find_service_conn_rcu(peer, skb);
- if (!conn || atomic_read(&conn->usage) == 0)
+ if (!conn || refcount_read(&conn->ref) == 0)
goto not_found;
_leave(" = %p", conn);
return conn;
@@ -114,7 +114,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
*/
conn = idr_find(&rxrpc_client_conn_ids,
sp->hdr.cid >> RXRPC_CIDSHIFT);
- if (!conn || atomic_read(&conn->usage) == 0) {
+ if (!conn || refcount_read(&conn->ref) == 0) {
_debug("no conn");
goto not_found;
}
@@ -183,7 +183,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
break;
default:
- chan->last_abort = RX_USER_ABORT;
+ chan->last_abort = RX_CALL_DEAD;
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
break;
}
@@ -263,11 +263,12 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
- int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
- if (n == 0)
+ int r;
+
+ if (!__refcount_inc_not_zero(&conn->ref, &r))
return false;
if (rxrpc_queue_work(&conn->processor))
- trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, r + 1, here);
else
rxrpc_put_connection(conn);
return true;
@@ -280,7 +281,7 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
if (conn) {
- int n = atomic_read(&conn->usage);
+ int n = refcount_read(&conn->ref);
trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
}
@@ -292,9 +293,10 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
- int n = atomic_inc_return(&conn->usage);
+ int r;
- trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
+ __refcount_inc(&conn->ref, &r);
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r, here);
return conn;
}
@@ -305,11 +307,11 @@ struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
+ int r;
if (conn) {
- int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
- if (n > 0)
- trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here);
+ if (__refcount_inc_not_zero(&conn->ref, &r))
+ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r + 1, here);
else
conn = NULL;
}
@@ -333,12 +335,11 @@ void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
unsigned int debug_id = conn->debug_id;
- int n;
+ int r;
- n = atomic_dec_return(&conn->usage);
- trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
- ASSERTCMP(n, >=, 0);
- if (n == 1)
+ __refcount_dec(&conn->ref, &r);
+ trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, r - 1, here);
+ if (r - 1 == 1)
rxrpc_set_service_reap_timer(conn->params.local->rxnet,
jiffies + rxrpc_connection_expiry);
}
@@ -351,9 +352,9 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
struct rxrpc_connection *conn =
container_of(rcu, struct rxrpc_connection, rcu);
- _enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));
+ _enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));
- ASSERTCMP(atomic_read(&conn->usage), ==, 0);
+ ASSERTCMP(refcount_read(&conn->ref), ==, 0);
_net("DESTROY CONN %d", conn->debug_id);
@@ -392,8 +393,8 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
write_lock(&rxnet->conn_lock);
list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
- ASSERTCMP(atomic_read(&conn->usage), >, 0);
- if (likely(atomic_read(&conn->usage) > 1))
+ ASSERTCMP(refcount_read(&conn->ref), >, 0);
+ if (likely(refcount_read(&conn->ref) > 1))
continue;
if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
continue;
@@ -405,7 +406,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
_debug("reap CONN %d { u=%d,t=%ld }",
- conn->debug_id, atomic_read(&conn->usage),
+ conn->debug_id, refcount_read(&conn->ref),
(long)expire_at - (long)now);
if (time_before(now, expire_at)) {
@@ -418,7 +419,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
/* The usage count sits at 1 whilst the object is unused on the
* list; we reduce that to 0 to make the object unavailable.
*/
- if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
+ if (!refcount_dec_if_one(&conn->ref))
continue;
trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
@@ -442,7 +443,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
link);
list_del_init(&conn->link);
- ASSERTCMP(atomic_read(&conn->usage), ==, 0);
+ ASSERTCMP(refcount_read(&conn->ref), ==, 0);
rxrpc_kill_connection(conn);
}
@@ -470,7 +471,7 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
write_lock(&rxnet->conn_lock);
list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
- conn, atomic_read(&conn->usage));
+ conn, refcount_read(&conn->ref));
leak = true;
}
write_unlock(&rxnet->conn_lock);
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index e1966dfc9152..6e6aa02c6f9e 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -9,7 +9,7 @@
#include "ar-internal.h"
static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
- .usage = ATOMIC_INIT(1),
+ .ref = REFCOUNT_INIT(1),
.debug_id = UINT_MAX,
.channel_lock = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
};
@@ -99,7 +99,7 @@ conn_published:
return;
found_extant_conn:
- if (atomic_read(&cursor->usage) == 0)
+ if (refcount_read(&cursor->ref) == 0)
goto replace_old_connection;
write_sequnlock_bh(&peer->service_conn_lock);
/* We should not be able to get here. rxrpc_incoming_connection() is
@@ -132,7 +132,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
* the rxrpc_connections list.
*/
conn->state = RXRPC_CONN_SERVICE_PREALLOC;
- atomic_set(&conn->usage, 2);
+ refcount_set(&conn->ref, 2);
conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);
atomic_inc(&rxnet->nr_conns);
@@ -142,7 +142,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
write_unlock(&rxnet->conn_lock);
trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
- atomic_read(&conn->usage),
+ refcount_read(&conn->ref),
__builtin_return_address(0));
}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index dc201363f2c4..16c0af41c202 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -903,6 +903,33 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_propose_ack_respond_to_ack);
}
+ /* If we get an EXCEEDS_WINDOW ACK from the server, it probably
+ * indicates that the client address changed due to NAT. The server
+ * lost the call because it switched to a different peer.
+ */
+ if (unlikely(buf.ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
+ first_soft_ack == 1 &&
+ prev_pkt == 0 &&
+ rxrpc_is_client_call(call)) {
+ rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ 0, -ENETRESET);
+ return;
+ }
+
+ /* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
+ * indicate a change of address. However, we can retransmit the call
+ * if we still have it buffered to the beginning.
+ */
+ if (unlikely(buf.ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
+ first_soft_ack == 1 &&
+ prev_pkt == 0 &&
+ call->tx_hard_ack == 0 &&
+ rxrpc_is_client_call(call)) {
+ rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ 0, -ENETRESET);
+ return;
+ }
+
/* Discard any out-of-order or duplicate ACKs (outside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
@@ -1154,8 +1181,6 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
*/
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
- CHECK_SLAB_OKAY(&local->usage);
-
if (rxrpc_get_local_maybe(local)) {
skb_queue_tail(&local->reject_queue, skb);
rxrpc_queue_local(local);
@@ -1413,7 +1438,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
}
}
- if (!call || atomic_read(&call->usage) == 0) {
+ if (!call || refcount_read(&call->ref) == 0) {
if (rxrpc_to_client(sp) ||
sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
goto bad_message;
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 6a1611b0e303..96ecb7356c0f 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -79,10 +79,10 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
if (local) {
- atomic_set(&local->usage, 1);
+ refcount_set(&local->ref, 1);
atomic_set(&local->active_users, 1);
local->rxnet = rxnet;
- INIT_LIST_HEAD(&local->link);
+ INIT_HLIST_NODE(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
init_rwsem(&local->defrag_sem);
skb_queue_head_init(&local->reject_queue);
@@ -180,7 +180,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
{
struct rxrpc_local *local;
struct rxrpc_net *rxnet = rxrpc_net(net);
- struct list_head *cursor;
+ struct hlist_node *cursor;
const char *age;
long diff;
int ret;
@@ -190,16 +190,12 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
mutex_lock(&rxnet->local_mutex);
- for (cursor = rxnet->local_endpoints.next;
- cursor != &rxnet->local_endpoints;
- cursor = cursor->next) {
- local = list_entry(cursor, struct rxrpc_local, link);
+ hlist_for_each(cursor, &rxnet->local_endpoints) {
+ local = hlist_entry(cursor, struct rxrpc_local, link);
diff = rxrpc_local_cmp_key(local, srx);
- if (diff < 0)
+ if (diff != 0)
continue;
- if (diff > 0)
- break;
/* Services aren't allowed to share transport sockets, so
* reject that here. It is possible that the object is dying -
@@ -211,9 +207,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
goto addr_in_use;
}
- /* Found a match. We replace a dying object. Attempting to
- * bind the transport socket may still fail if we're attempting
- * to use a local address that the dying object is still using.
+ /* Found a match. We want to replace a dying object.
+ * Attempting to bind the transport socket may still fail if
+ * we're attempting to use a local address that the dying
+ * object is still using.
*/
if (!rxrpc_use_local(local))
break;
@@ -230,10 +227,12 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
if (ret < 0)
goto sock_error;
- if (cursor != &rxnet->local_endpoints)
- list_replace_init(cursor, &local->link);
- else
- list_add_tail(&local->link, cursor);
+ if (cursor) {
+ hlist_replace_rcu(cursor, &local->link);
+ cursor->pprev = NULL;
+ } else {
+ hlist_add_head_rcu(&local->link, &rxnet->local_endpoints);
+ }
age = "new";
found:
@@ -266,10 +265,10 @@ addr_in_use:
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
- int n;
+ int r;
- n = atomic_inc_return(&local->usage);
- trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
+ __refcount_inc(&local->ref, &r);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_got, r + 1, here);
return local;
}
@@ -279,12 +278,12 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
+ int r;
if (local) {
- int n = atomic_fetch_add_unless(&local->usage, 1, 0);
- if (n > 0)
+ if (__refcount_inc_not_zero(&local->ref, &r))
trace_rxrpc_local(local->debug_id, rxrpc_local_got,
- n + 1, here);
+ r + 1, here);
else
local = NULL;
}
@@ -298,10 +297,10 @@ void rxrpc_queue_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
unsigned int debug_id = local->debug_id;
- int n = atomic_read(&local->usage);
+ int r = refcount_read(&local->ref);
if (rxrpc_queue_work(&local->processor))
- trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+ trace_rxrpc_local(debug_id, rxrpc_local_queued, r + 1, here);
else
rxrpc_put_local(local);
}
@@ -313,15 +312,16 @@ void rxrpc_put_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
unsigned int debug_id;
- int n;
+ bool dead;
+ int r;
if (local) {
debug_id = local->debug_id;
- n = atomic_dec_return(&local->usage);
- trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);
+ dead = __refcount_dec_and_test(&local->ref, &r);
+ trace_rxrpc_local(debug_id, rxrpc_local_put, r, here);
- if (n == 0)
+ if (dead)
call_rcu(&local->rcu, rxrpc_local_rcu);
}
}
@@ -374,7 +374,7 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
local->dead = true;
mutex_lock(&rxnet->local_mutex);
- list_del_init(&local->link);
+ hlist_del_init_rcu(&local->link);
mutex_unlock(&rxnet->local_mutex);
rxrpc_clean_up_local_conns(local);
@@ -406,7 +406,7 @@ static void rxrpc_local_processor(struct work_struct *work)
bool again;
trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
- atomic_read(&local->usage), NULL);
+ refcount_read(&local->ref), NULL);
do {
again = false;
@@ -458,11 +458,11 @@ void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
flush_workqueue(rxrpc_workqueue);
- if (!list_empty(&rxnet->local_endpoints)) {
+ if (!hlist_empty(&rxnet->local_endpoints)) {
mutex_lock(&rxnet->local_mutex);
- list_for_each_entry(local, &rxnet->local_endpoints, link) {
+ hlist_for_each_entry(local, &rxnet->local_endpoints, link) {
pr_err("AF_RXRPC: Leaked local %p {%d}\n",
- local, atomic_read(&local->usage));
+ local, refcount_read(&local->ref));
}
mutex_unlock(&rxnet->local_mutex);
BUG();
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index cc7e30733feb..bb4c25d6df64 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -50,7 +50,7 @@ static __net_init int rxrpc_init_net(struct net *net)
rxnet->epoch |= RXRPC_RANDOM_EPOCH;
INIT_LIST_HEAD(&rxnet->calls);
- rwlock_init(&rxnet->call_lock);
+ spin_lock_init(&rxnet->call_lock);
atomic_set(&rxnet->nr_calls, 1);
atomic_set(&rxnet->nr_conns, 1);
@@ -72,7 +72,7 @@ static __net_init int rxrpc_init_net(struct net *net)
timer_setup(&rxnet->client_conn_reap_timer,
rxrpc_client_conn_reap_timeout, 0);
- INIT_LIST_HEAD(&rxnet->local_endpoints);
+ INIT_HLIST_HEAD(&rxnet->local_endpoints);
mutex_init(&rxnet->local_mutex);
hash_init(rxnet->peer_hash);
@@ -98,6 +98,9 @@ static __net_init int rxrpc_init_net(struct net *net)
proc_create_net("peers", 0444, rxnet->proc_net,
&rxrpc_peer_seq_ops,
sizeof(struct seq_net_private));
+ proc_create_net("locals", 0444, rxnet->proc_net,
+ &rxrpc_local_seq_ops,
+ sizeof(struct seq_net_private));
return 0;
err_proc:
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 0298fe2ad6d3..26d2ae9baaf2 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -121,7 +121,7 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
- atomic_read(&peer->usage) > 0)
+ refcount_read(&peer->ref) > 0)
return peer;
}
@@ -140,7 +140,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer) {
_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
- _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
+ _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
}
return peer;
}
@@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
if (peer) {
- atomic_set(&peer->usage, 1);
+ refcount_set(&peer->ref, 1);
peer->local = rxrpc_get_local(local);
INIT_HLIST_HEAD(&peer->error_targets);
peer->service_conns = RB_ROOT;
@@ -378,7 +378,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
- _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
+ _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref));
return peer;
}
@@ -388,10 +388,10 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
- int n;
+ int r;
- n = atomic_inc_return(&peer->usage);
- trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
+ __refcount_inc(&peer->ref, &r);
+ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here);
return peer;
}
@@ -401,11 +401,11 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
+ int r;
if (peer) {
- int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
- if (n > 0)
- trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
+ if (__refcount_inc_not_zero(&peer->ref, &r))
+ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, r + 1, here);
else
peer = NULL;
}
@@ -436,13 +436,14 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
unsigned int debug_id;
- int n;
+ bool dead;
+ int r;
if (peer) {
debug_id = peer->debug_id;
- n = atomic_dec_return(&peer->usage);
- trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
- if (n == 0)
+ dead = __refcount_dec_and_test(&peer->ref, &r);
+ trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here);
+ if (dead)
__rxrpc_put_peer(peer);
}
}
@@ -455,11 +456,12 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
{
const void *here = __builtin_return_address(0);
unsigned int debug_id = peer->debug_id;
- int n;
+ bool dead;
+ int r;
- n = atomic_dec_return(&peer->usage);
- trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
- if (n == 0) {
+ dead = __refcount_dec_and_test(&peer->ref, &r);
+ trace_rxrpc_peer(debug_id, rxrpc_peer_put, r - 1, here);
+ if (dead) {
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
rxrpc_free_peer(peer);
@@ -481,7 +483,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
pr_err("Leaked peer %u {%u} %pISp\n",
peer->debug_id,
- atomic_read(&peer->usage),
+ refcount_read(&peer->ref),
&peer->srx.transport);
}
}
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index e2f990754f88..245418943e01 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -26,29 +26,23 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
*/
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
__acquires(rcu)
- __acquires(rxnet->call_lock)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
rcu_read_lock();
- read_lock(&rxnet->call_lock);
- return seq_list_start_head(&rxnet->calls, *_pos);
+ return seq_list_start_head_rcu(&rxnet->calls, *_pos);
}
static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
- return seq_list_next(v, &rxnet->calls, pos);
+ return seq_list_next_rcu(v, &rxnet->calls, pos);
}
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
- __releases(rxnet->call_lock)
__releases(rcu)
{
- struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
-
- read_unlock(&rxnet->call_lock);
rcu_read_unlock();
}
@@ -107,7 +101,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call->cid,
call->call_id,
rxrpc_is_service_call(call) ? "Svc" : "Clt",
- atomic_read(&call->usage),
+ refcount_read(&call->ref),
rxrpc_call_states[call->state],
call->abort_code,
call->debug_id,
@@ -189,7 +183,7 @@ print:
conn->service_id,
conn->proto.cid,
rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
- atomic_read(&conn->usage),
+ refcount_read(&conn->ref),
rxrpc_conn_states[conn->state],
key_serial(conn->params.key),
atomic_read(&conn->serial),
@@ -239,7 +233,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
" %3u %5u %6llus %8u %8u\n",
lbuff,
rbuff,
- atomic_read(&peer->usage),
+ refcount_read(&peer->ref),
peer->cong_cwnd,
peer->mtu,
now - peer->last_tx_at,
@@ -334,3 +328,72 @@ const struct seq_operations rxrpc_peer_seq_ops = {
.stop = rxrpc_peer_seq_stop,
.show = rxrpc_peer_seq_show,
};
+
+/*
+ * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
+ */
+static int rxrpc_local_seq_show(struct seq_file *seq, void *v)
+{
+ struct rxrpc_local *local;
+ char lbuff[50];
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq,
+ "Proto Local "
+ " Use Act\n");
+ return 0;
+ }
+
+ local = hlist_entry(v, struct rxrpc_local, link);
+
+ sprintf(lbuff, "%pISpc", &local->srx.transport);
+
+ seq_printf(seq,
+ "UDP %-47.47s %3u %3u\n",
+ lbuff,
+ refcount_read(&local->ref),
+ atomic_read(&local->active_users));
+
+ return 0;
+}
+
+static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos)
+ __acquires(rcu)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+ unsigned int n;
+
+ rcu_read_lock();
+
+ if (*_pos >= UINT_MAX)
+ return NULL;
+
+ n = *_pos;
+ if (n == 0)
+ return SEQ_START_TOKEN;
+
+ return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1);
+}
+
+static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ if (*_pos >= UINT_MAX)
+ return NULL;
+
+ return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos);
+}
+
+static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
+{
+ rcu_read_unlock();
+}
+
+const struct seq_operations rxrpc_local_seq_ops = {
+ .start = rxrpc_local_seq_start,
+ .next = rxrpc_local_seq_next,
+ .stop = rxrpc_local_seq_stop,
+ .show = rxrpc_local_seq_show,
+};
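The proc.c hunk above registers a new seq_file at /proc/net/rxrpc/locals listing the extant local endpoints with their reference and active-user counts. A minimal sketch of reading it from userspace follows; it assumes a kernel carrying this patch with AF_RXRPC enabled, and otherwise just reports the fopen() failure.

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/rxrpc/locals", "r");

	if (!f) {
		perror("fopen /proc/net/rxrpc/locals");
		return 1;
	}

	/* Columns, per rxrpc_local_seq_show(): Proto, Local address, Use, Act. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}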
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index af8ad6c30b9f..1d38e279e2ef 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -444,6 +444,12 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
success:
ret = copied;
+ if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
+ read_lock_bh(&call->state_lock);
+ if (call->error < 0)
+ ret = call->error;
+ read_unlock_bh(&call->state_lock);
+ }
out:
call->tx_pending = skb;
_leave(" = %d", ret);
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 0348d2bf6f7d..580a5acffee7 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -71,7 +71,6 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
const void *here = __builtin_return_address(0);
if (skb) {
int n;
- CHECK_SLAB_OKAY(&skb->users);
n = atomic_dec_return(select_skb_count(skb));
trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
rxrpc_skb(skb)->rx_flags, here);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index d1221daa0952..823ee643371c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -232,6 +232,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
for (i = 0; i < p->tcfp_nkeys; ++i) {
u32 cur = p->tcfp_keys[i].off;
+ /* sanitize the shift value for any later use */
+ p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
+ p->tcfp_keys[i].shift);
+
/* The AT option can read a single byte, we can bound the actual
* value with uchar max.
*/
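The act_pedit.c hunk clamps the netlink-supplied shift to BITS_PER_TYPE(int) - 1 before it is ever used, because shifting a 32-bit value by 32 or more is undefined behaviour in C. The following is a hedged userspace illustration of that rationale only; sanitize_shift() is an invented helper mirroring the min_t() clamp, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the clamp applied with min_t() above. */
static uint8_t sanitize_shift(uint8_t shift)
{
	const uint8_t max_shift = sizeof(int) * 8 - 1;	/* BITS_PER_TYPE(int) - 1 */

	return shift < max_shift ? shift : max_shift;
}

int main(void)
{
	uint8_t from_user[] = { 0, 7, 31, 32, 200 };	/* untrusted shift values */

	for (size_t i = 0; i < sizeof(from_user); i++) {
		uint8_t s = sanitize_shift(from_user[i]);

		/* With the clamp, the shift below is always well defined. */
		printf("requested %3u -> using %2u, 1u << s = %u\n",
		       from_user[i], s, 1u << s);
	}
	return 0;
}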
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 0a04468b7314..49bae3d5006b 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -311,12 +311,15 @@ META_COLLECTOR(int_sk_bound_if)
META_COLLECTOR(var_sk_bound_if)
{
+ int bound_dev_if;
+
if (skip_nonlocal(skb)) {
*err = -1;
return;
}
- if (skb->sk->sk_bound_dev_if == 0) {
+ bound_dev_if = READ_ONCE(skb->sk->sk_bound_dev_if);
+ if (bound_dev_if == 0) {
dst->value = (unsigned long) "any";
dst->len = 3;
} else {
@@ -324,7 +327,7 @@ META_COLLECTOR(var_sk_bound_if)
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(skb->sk),
- skb->sk->sk_bound_dev_if);
+ bound_dev_if);
*err = var_dev(dev, dst);
rcu_read_unlock();
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 90e12bafdd48..4f43afa8678f 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -92,6 +92,7 @@ int sctp_rcv(struct sk_buff *skb)
struct sctp_chunk *chunk;
union sctp_addr src;
union sctp_addr dest;
+ int bound_dev_if;
int family;
struct sctp_af *af;
struct net *net = dev_net(skb->dev);
@@ -169,7 +170,8 @@ int sctp_rcv(struct sk_buff *skb)
* If a frame arrives on an interface and the receiving socket is
* bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
*/
- if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
+ bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
+ if (bound_dev_if && (bound_dev_if != af->skb_iif(skb))) {
if (transport) {
sctp_transport_put(transport);
asoc = NULL;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 72fe6669c50d..a63df055ac57 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -134,7 +134,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
dst_hold(tp->dst);
sk_setup_caps(sk, tp->dst);
}
- packet->max_size = sk_can_gso(sk) ? READ_ONCE(tp->dst->dev->gso_max_size)
+ packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size),
+ GSO_LEGACY_MAX_SIZE)
: asoc->pathmtu;
rcu_read_unlock();
}
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index 99e5f69fbb74..518b1b9bf89d 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -146,14 +146,11 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
/* Give the next scheduler a clean slate. */
for (i = 0; i < asoc->stream.outcnt; i++) {
- void *p = SCTP_SO(&asoc->stream, i)->ext;
+ struct sctp_stream_out_ext *ext = SCTP_SO(&asoc->stream, i)->ext;
- if (!p)
+ if (!ext)
continue;
-
- p += offsetofend(struct sctp_stream_out_ext, outq);
- memset(p, 0, sizeof(struct sctp_stream_out_ext) -
- offsetofend(struct sctp_stream_out_ext, outq));
+ memset_after(ext, 0, outq);
}
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index fce16b9d6e1a..5f70642a8044 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1544,9 +1544,29 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
goto out_err;
lock_sock(sk);
+ switch (sock->state) {
+ default:
+ rc = -EINVAL;
+ goto out;
+ case SS_CONNECTED:
+ rc = sk->sk_state == SMC_ACTIVE ? -EISCONN : -EINVAL;
+ goto out;
+ case SS_CONNECTING:
+ if (sk->sk_state == SMC_ACTIVE)
+ goto connected;
+ break;
+ case SS_UNCONNECTED:
+ sock->state = SS_CONNECTING;
+ break;
+ }
+
switch (sk->sk_state) {
default:
goto out;
+ case SMC_CLOSED:
+ rc = sock_error(sk) ? : -ECONNABORTED;
+ sock->state = SS_UNCONNECTED;
+ goto out;
case SMC_ACTIVE:
rc = -EISCONN;
goto out;
@@ -1565,20 +1585,24 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
goto out;
sock_hold(&smc->sk); /* sock put in passive closing */
- if (smc->use_fallback)
+ if (smc->use_fallback) {
+ sock->state = rc ? SS_CONNECTING : SS_CONNECTED;
goto out;
+ }
if (flags & O_NONBLOCK) {
if (queue_work(smc_hs_wq, &smc->connect_work))
smc->connect_nonblock = 1;
rc = -EINPROGRESS;
+ goto out;
} else {
rc = __smc_connect(smc);
if (rc < 0)
goto out;
- else
- rc = 0; /* success cases including fallback */
}
+connected:
+ rc = 0;
+ sock->state = SS_CONNECTED;
out:
release_sock(sk);
out_err:
@@ -1693,6 +1717,7 @@ struct sock *smc_accept_dequeue(struct sock *parent,
}
if (new_sock) {
sock_graft(new_sk, new_sock);
+ new_sock->state = SS_CONNECTED;
if (isk->use_fallback) {
smc_sk(new_sk)->clcsock->file = new_sock->file;
isk->clcsock->file->private_data = isk->clcsock;
@@ -2424,7 +2449,7 @@ static int smc_listen(struct socket *sock, int backlog)
rc = -EINVAL;
if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
- smc->connect_nonblock)
+ smc->connect_nonblock || sock->state != SS_UNCONNECTED)
goto out;
rc = 0;
@@ -2716,6 +2741,17 @@ static int smc_shutdown(struct socket *sock, int how)
lock_sock(sk);
+ if (sock->state == SS_CONNECTING) {
+ if (sk->sk_state == SMC_ACTIVE)
+ sock->state = SS_CONNECTED;
+ else if (sk->sk_state == SMC_PEERCLOSEWAIT1 ||
+ sk->sk_state == SMC_PEERCLOSEWAIT2 ||
+ sk->sk_state == SMC_APPCLOSEWAIT1 ||
+ sk->sk_state == SMC_APPCLOSEWAIT2 ||
+ sk->sk_state == SMC_APPFINCLOSEWAIT)
+ sock->state = SS_DISCONNECTING;
+ }
+
rc = -ENOTCONN;
if ((sk->sk_state != SMC_ACTIVE) &&
(sk->sk_state != SMC_PEERCLOSEWAIT1) &&
@@ -2729,6 +2765,7 @@ static int smc_shutdown(struct socket *sock, int how)
sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
if (sk->sk_shutdown == SHUTDOWN_MASK) {
sk->sk_state = SMC_CLOSED;
+ sk->sk_socket->state = SS_UNCONNECTED;
sock_put(sk);
}
goto out;
@@ -2754,6 +2791,10 @@ static int smc_shutdown(struct socket *sock, int how)
/* map sock_shutdown_cmd constants to sk_shutdown value range */
sk->sk_shutdown |= how + 1;
+ if (sk->sk_state == SMC_CLOSED)
+ sock->state = SS_UNCONNECTED;
+ else
+ sock->state = SS_DISCONNECTING;
out:
release_sock(sk);
return rc ? rc : rc1;
@@ -3139,6 +3180,7 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
rc = -ENOBUFS;
sock->ops = &smc_sock_ops;
+ sock->state = SS_UNCONNECTED;
sk = smc_sock_alloc(net, sock, protocol);
if (!sk)
goto out;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index a3e2d3b89568..dcda4165d107 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -671,6 +671,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk)
.max_recv_wr = SMC_WR_BUF_CNT * 3,
.max_send_sge = SMC_IB_MAX_SEND_SGE,
.max_recv_sge = sges_per_buf,
+ .max_inline_data = 0,
},
.sq_sig_type = IB_SIGNAL_REQ_WR,
.qp_type = IB_QPT_RC,
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 98ca9229fe87..805a546e8c04 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -391,12 +391,20 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
int rc;
for (dstchunk = 0; dstchunk < 2; dstchunk++) {
- struct ib_sge *sge =
- wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;
+ struct ib_rdma_wr *wr = &wr_rdma_buf->wr_tx_rdma[dstchunk];
+ struct ib_sge *sge = wr->wr.sg_list;
+ u64 base_addr = dma_addr;
+
+ if (dst_len < link->qp_attr.cap.max_inline_data) {
+ base_addr = (uintptr_t)conn->sndbuf_desc->cpu_addr;
+ wr->wr.send_flags |= IB_SEND_INLINE;
+ } else {
+ wr->wr.send_flags &= ~IB_SEND_INLINE;
+ }
num_sges = 0;
for (srcchunk = 0; srcchunk < 2; srcchunk++) {
- sge[srcchunk].addr = dma_addr + src_off;
+ sge[srcchunk].addr = base_addr + src_off;
sge[srcchunk].length = src_len;
num_sges++;
@@ -410,8 +418,7 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
src_len = dst_len - src_len; /* remainder */
src_len_sum += src_len;
}
- rc = smc_tx_rdma_write(conn, dst_off, num_sges,
- &wr_rdma_buf->wr_tx_rdma[dstchunk]);
+ rc = smc_tx_rdma_write(conn, dst_off, num_sges, wr);
if (rc)
return rc;
if (dst_len_sum == len)
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 24be1d03fef9..26f8f240d9e8 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -554,10 +554,11 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk)
static void smc_wr_init_sge(struct smc_link *lnk)
{
int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
+ bool send_inline = (lnk->qp_attr.cap.max_inline_data > SMC_WR_TX_SIZE);
u32 i;
for (i = 0; i < lnk->wr_tx_cnt; i++) {
- lnk->wr_tx_sges[i].addr =
+ lnk->wr_tx_sges[i].addr = send_inline ? (uintptr_t)(&lnk->wr_tx_bufs[i]) :
lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
@@ -575,6 +576,8 @@ static void smc_wr_init_sge(struct smc_link *lnk)
lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
lnk->wr_tx_ibs[i].send_flags =
IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ if (send_inline)
+ lnk->wr_tx_ibs[i].send_flags |= IB_SEND_INLINE;
lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index 8ca1d809b78d..f549e4c05def 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -97,7 +97,8 @@ static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt)
* timeout, which would result in reconnections being
* done without the correct namespace:
*/
- .flags = RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL |
+ .flags = RPC_CLNT_CREATE_NOPING |
+ RPC_CLNT_CREATE_CONNECTED |
RPC_CLNT_CREATE_NO_IDLE_TIMEOUT
};
struct rpc_clnt *clnt;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 22c28cf43eba..e2c6eca0271b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -76,6 +76,7 @@ static int rpc_encode_header(struct rpc_task *task,
static int rpc_decode_header(struct rpc_task *task,
struct xdr_stream *xdr);
static int rpc_ping(struct rpc_clnt *clnt);
+static int rpc_ping_noreply(struct rpc_clnt *clnt);
static void rpc_check_timeout(struct rpc_task *task);
static void rpc_register_client(struct rpc_clnt *clnt)
@@ -479,9 +480,12 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
int err = rpc_ping(clnt);
- if ((args->flags & RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL) &&
- err == -EOPNOTSUPP)
- err = 0;
+ if (err != 0) {
+ rpc_shutdown_client(clnt);
+ return ERR_PTR(err);
+ }
+ } else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
+ int err = rpc_ping_noreply(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
return ERR_PTR(err);
@@ -2712,6 +2716,10 @@ static const struct rpc_procinfo rpcproc_null = {
.p_decode = rpcproc_decode_null,
};
+static const struct rpc_procinfo rpcproc_null_noreply = {
+ .p_encode = rpcproc_encode_null,
+};
+
static void
rpc_null_call_prepare(struct rpc_task *task, void *data)
{
@@ -2765,6 +2773,28 @@ static int rpc_ping(struct rpc_clnt *clnt)
return status;
}
+static int rpc_ping_noreply(struct rpc_clnt *clnt)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &rpcproc_null_noreply,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clnt,
+ .rpc_message = &msg,
+ .callback_ops = &rpc_null_ops,
+ .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
+ };
+ struct rpc_task *task;
+ int status;
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = task->tk_status;
+ rpc_put_task(task);
+ return status;
+}
+
struct rpc_cb_add_xprt_calldata {
struct rpc_xprt_switch *xps;
struct rpc_xprt *xprt;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index bca00521ebc1..ec6f4b699a2b 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -411,10 +411,16 @@ static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
return 0;
}
+union tls_iter_offset {
+ struct iov_iter *msg_iter;
+ int offset;
+};
+
static int tls_push_data(struct sock *sk,
- struct iov_iter *msg_iter,
+ union tls_iter_offset iter_offset,
size_t size, int flags,
- unsigned char record_type)
+ unsigned char record_type,
+ struct page *zc_page)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
@@ -480,12 +486,21 @@ handle_error:
}
record = ctx->open_record;
- copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
- copy = min_t(size_t, copy, (max_open_record_len - record->len));
- if (copy) {
+ copy = min_t(size_t, size, max_open_record_len - record->len);
+ if (copy && zc_page) {
+ struct page_frag zc_pfrag;
+
+ zc_pfrag.page = zc_page;
+ zc_pfrag.offset = iter_offset.offset;
+ zc_pfrag.size = copy;
+ tls_append_frag(record, &zc_pfrag, copy);
+ } else if (copy) {
+ copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
+
rc = tls_device_copy_data(page_address(pfrag->page) +
- pfrag->offset, copy, msg_iter);
+ pfrag->offset, copy,
+ iter_offset.msg_iter);
if (rc)
goto handle_error;
tls_append_frag(record, pfrag, copy);
@@ -540,6 +555,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
unsigned char record_type = TLS_RECORD_TYPE_DATA;
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ union tls_iter_offset iter;
int rc;
mutex_lock(&tls_ctx->tx_lock);
@@ -551,8 +567,8 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
goto out;
}
- rc = tls_push_data(sk, &msg->msg_iter, size,
- msg->msg_flags, record_type);
+ iter.msg_iter = &msg->msg_iter;
+ rc = tls_push_data(sk, iter, size, msg->msg_flags, record_type, NULL);
out:
release_sock(sk);
@@ -564,7 +580,8 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct iov_iter msg_iter;
+ union tls_iter_offset iter_offset;
+ struct iov_iter msg_iter;
char *kaddr;
struct kvec iov;
int rc;
@@ -580,12 +597,20 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
goto out;
}
+ if (tls_ctx->zerocopy_sendfile) {
+ iter_offset.offset = offset;
+ rc = tls_push_data(sk, iter_offset, size,
+ flags, TLS_RECORD_TYPE_DATA, page);
+ goto out;
+ }
+
kaddr = kmap(page);
iov.iov_base = kaddr + offset;
iov.iov_len = size;
iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
- rc = tls_push_data(sk, &msg_iter, size,
- flags, TLS_RECORD_TYPE_DATA);
+ iter_offset.msg_iter = &msg_iter;
+ rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA,
+ NULL);
kunmap(page);
out:
@@ -656,10 +681,12 @@ EXPORT_SYMBOL(tls_get_record);
static int tls_device_push_pending_record(struct sock *sk, int flags)
{
- struct iov_iter msg_iter;
+ union tls_iter_offset iter;
+ struct iov_iter msg_iter;
iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
- return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
+ iter.msg_iter = &msg_iter;
+ return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL);
}
void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 7b2b0e7ffee4..b91ddc110786 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -513,6 +513,26 @@ out:
return rc;
}
+static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval,
+ int __user *optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ unsigned int value;
+ int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ if (len != sizeof(value))
+ return -EINVAL;
+
+ value = ctx->zerocopy_sendfile;
+ if (copy_to_user(optval, &value, sizeof(value)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int do_tls_getsockopt(struct sock *sk, int optname,
char __user *optval, int __user *optlen)
{
@@ -524,6 +544,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
rc = do_tls_getsockopt_conf(sk, optval, optlen,
optname == TLS_TX);
break;
+ case TLS_TX_ZEROCOPY_SENDFILE:
+ rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
+ break;
default:
rc = -ENOPROTOOPT;
break;
@@ -675,6 +698,26 @@ err_crypto_info:
return rc;
}
+static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ unsigned int value;
+
+ if (sockptr_is_null(optval) || optlen != sizeof(value))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&value, optval, sizeof(value)))
+ return -EFAULT;
+
+ if (value > 1)
+ return -EINVAL;
+
+ ctx->zerocopy_sendfile = value;
+
+ return 0;
+}
+
static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen)
{
@@ -688,6 +731,11 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
optname == TLS_TX);
release_sock(sk);
break;
+ case TLS_TX_ZEROCOPY_SENDFILE:
+ lock_sock(sk);
+ rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
+ release_sock(sk);
+ break;
default:
rc = -ENOPROTOOPT;
break;
@@ -921,6 +969,12 @@ static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
if (err)
goto nla_failure;
+ if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
+ err = nla_put_flag(skb, TLS_INFO_ZC_SENDFILE);
+ if (err)
+ goto nla_failure;
+ }
+
rcu_read_unlock();
nla_nest_end(skb, start);
return 0;
@@ -940,6 +994,7 @@ static size_t tls_get_info_size(const struct sock *sk)
nla_total_size(sizeof(u16)) + /* TLS_INFO_CIPHER */
nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */
nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */
+ nla_total_size(0) + /* TLS_INFO_ZC_SENDFILE */
0;
return size;
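The tls_main.c hunks above wire up a TLS_TX_ZEROCOPY_SENDFILE socket option that toggles ctx->zerocopy_sendfile, which tls_device_sendpage() then uses to map the sendfile page directly instead of copying it into the record. Below is a hedged userspace sketch of the option plumbing only: it assumes the TLS_TX_ZEROCOPY_SENDFILE constant from this series' uapi header (the fallback value 3 is an assumption if your headers predate it), and it assumes the socket already has the "tls" ULP attached with TLS_TX configured for device offload; on any other socket the calls simply fail.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif
#ifndef TLS_TX_ZEROCOPY_SENDFILE
#define TLS_TX_ZEROCOPY_SENDFILE 3	/* assumption: value from this series */
#endif

static int tls_set_zc_sendfile(int fd, unsigned int on)
{
	if (setsockopt(fd, SOL_TLS, TLS_TX_ZEROCOPY_SENDFILE, &on, sizeof(on)))
		return -errno;
	return 0;
}

static int tls_get_zc_sendfile(int fd, unsigned int *on)
{
	socklen_t len = sizeof(*on);

	if (getsockopt(fd, SOL_TLS, TLS_TX_ZEROCOPY_SENDFILE, on, &len))
		return -errno;
	return 0;
}

int main(void)
{
	/* Placeholder: a real program would connect the TCP socket and set up
	 * kTLS TX (setsockopt TCP_ULP "tls" + TLS_TX crypto_info) first; this
	 * only demonstrates the new option calls.
	 */
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	unsigned int on = 1, cur = 0;

	if (fd < 0 || tls_set_zc_sendfile(fd, on) || tls_get_zc_sendfile(fd, &cur))
		fprintf(stderr, "TLS_TX_ZEROCOPY_SENDFILE not available: %s\n",
			strerror(errno));
	else
		printf("zerocopy_sendfile = %u\n", cur);

	return 0;
}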
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 939d1673f508..0513f82b8537 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1837,15 +1837,17 @@ leave_on_list:
bool partially_consumed = chunk > len;
if (bpf_strp_enabled) {
+ /* BPF may try to queue the skb */
+ __skb_unlink(skb, &ctx->rx_list);
err = sk_psock_tls_strp_read(psock, skb);
if (err != __SK_PASS) {
rxm->offset = rxm->offset + rxm->full_len;
rxm->full_len = 0;
- __skb_unlink(skb, &ctx->rx_list);
if (err == __SK_DROP)
consume_skb(skb);
continue;
}
+ __skb_queue_tail(&ctx->rx_list, skb);
}
if (partially_consumed)
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 8b7fb4a9e07b..f74f176e0d9d 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,7 +6,7 @@
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2018-2021 Intel Corporation
+ * Copyright 2018-2022 Intel Corporation
*/
#include <linux/export.h>
@@ -1344,97 +1344,6 @@ int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
return rdev_set_monitor_channel(rdev, chandef);
}
-void
-cfg80211_get_chan_state(struct wireless_dev *wdev,
- struct ieee80211_channel **chan,
- enum cfg80211_chan_mode *chanmode,
- u8 *radar_detect)
-{
- int ret;
-
- *chan = NULL;
- *chanmode = CHAN_MODE_UNDEFINED;
-
- ASSERT_WDEV_LOCK(wdev);
-
- if (wdev->netdev && !netif_running(wdev->netdev))
- return;
-
- switch (wdev->iftype) {
- case NL80211_IFTYPE_ADHOC:
- if (wdev->current_bss) {
- *chan = wdev->current_bss->pub.channel;
- *chanmode = (wdev->ibss_fixed &&
- !wdev->ibss_dfs_possible)
- ? CHAN_MODE_SHARED
- : CHAN_MODE_EXCLUSIVE;
-
- /* consider worst-case - IBSS can try to return to the
- * original user-specified channel as creator */
- if (wdev->ibss_dfs_possible)
- *radar_detect |= BIT(wdev->chandef.width);
- return;
- }
- break;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_P2P_CLIENT:
- if (wdev->current_bss) {
- *chan = wdev->current_bss->pub.channel;
- *chanmode = CHAN_MODE_SHARED;
- return;
- }
- break;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_P2P_GO:
- if (wdev->cac_started) {
- *chan = wdev->chandef.chan;
- *chanmode = CHAN_MODE_SHARED;
- *radar_detect |= BIT(wdev->chandef.width);
- } else if (wdev->beacon_interval) {
- *chan = wdev->chandef.chan;
- *chanmode = CHAN_MODE_SHARED;
-
- ret = cfg80211_chandef_dfs_required(wdev->wiphy,
- &wdev->chandef,
- wdev->iftype);
- WARN_ON(ret < 0);
- if (ret > 0)
- *radar_detect |= BIT(wdev->chandef.width);
- }
- return;
- case NL80211_IFTYPE_MESH_POINT:
- if (wdev->mesh_id_len) {
- *chan = wdev->chandef.chan;
- *chanmode = CHAN_MODE_SHARED;
-
- ret = cfg80211_chandef_dfs_required(wdev->wiphy,
- &wdev->chandef,
- wdev->iftype);
- WARN_ON(ret < 0);
- if (ret > 0)
- *radar_detect |= BIT(wdev->chandef.width);
- }
- return;
- case NL80211_IFTYPE_OCB:
- if (wdev->chandef.chan) {
- *chan = wdev->chandef.chan;
- *chanmode = CHAN_MODE_SHARED;
- return;
- }
- break;
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_P2P_DEVICE:
- case NL80211_IFTYPE_NAN:
- /* these interface types don't really have a channel */
- return;
- case NL80211_IFTYPE_UNSPECIFIED:
- case NL80211_IFTYPE_WDS:
- case NUM_NL80211_IFTYPES:
- WARN_ON(1);
- }
-}
-
bool cfg80211_any_usable_channels(struct wiphy *wiphy,
unsigned long sband_mask,
u32 prohibited_flags)
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3a7dbd63d8c6..5436ada91b1a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,7 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -281,12 +281,6 @@ struct cfg80211_cached_keys {
int def;
};
-enum cfg80211_chan_mode {
- CHAN_MODE_UNDEFINED,
- CHAN_MODE_SHARED,
- CHAN_MODE_EXCLUSIVE,
-};
-
struct cfg80211_beacon_registration {
struct list_head list;
u32 nlportid;
@@ -525,12 +519,6 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
}
-void
-cfg80211_get_chan_state(struct wireless_dev *wdev,
- struct ieee80211_channel **chan,
- enum cfg80211_chan_mode *chanmode,
- u8 *radar_detect);
-
int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef);
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 8f98e546becf..5d89eec2869a 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -3,7 +3,7 @@
* Some IBSS support code for cfg80211.
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -131,8 +131,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
kfree_sensitive(wdev->connect_keys);
wdev->connect_keys = connkeys;
- wdev->ibss_fixed = params->channel_fixed;
- wdev->ibss_dfs_possible = params->userspace_handles_dfs;
wdev->chandef = params->chandef;
if (connkeys) {
params->wep_keys = connkeys->params;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 02a29052e41d..740b29481bc6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -791,6 +791,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
NLA_POLICY_RANGE(NLA_BINARY,
NL80211_EHT_MIN_CAPABILITY_LEN,
NL80211_EHT_MAX_CAPABILITY_LEN),
+ [NL80211_ATTR_DISABLE_EHT] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -5181,6 +5182,30 @@ nl80211_parse_mbssid_elems(struct wiphy *wiphy, struct nlattr *attrs)
return elems;
}
+static int nl80211_parse_he_bss_color(struct nlattr *attrs,
+ struct cfg80211_he_bss_color *he_bss_color)
+{
+ struct nlattr *tb[NL80211_HE_BSS_COLOR_ATTR_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NL80211_HE_BSS_COLOR_ATTR_MAX, attrs,
+ he_bss_color_policy, NULL);
+ if (err)
+ return err;
+
+ if (!tb[NL80211_HE_BSS_COLOR_ATTR_COLOR])
+ return -EINVAL;
+
+ he_bss_color->color =
+ nla_get_u8(tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]);
+ he_bss_color->enabled =
+ !nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]);
+ he_bss_color->partial =
+ nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]);
+
+ return 0;
+}
+
static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev,
struct nlattr *attrs[],
struct cfg80211_beacon_data *bcn)
@@ -5261,6 +5286,14 @@ static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev,
bcn->ftm_responder = -1;
}
+ if (attrs[NL80211_ATTR_HE_BSS_COLOR]) {
+ err = nl80211_parse_he_bss_color(attrs[NL80211_ATTR_HE_BSS_COLOR],
+ &bcn->he_bss_color);
+ if (err)
+ return err;
+ bcn->he_bss_color_valid = true;
+ }
+
if (attrs[NL80211_ATTR_MBSSID_ELEMS]) {
struct cfg80211_mbssid_elems *mbssid =
nl80211_parse_mbssid_elems(&rdev->wiphy,
@@ -5319,30 +5352,6 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
return 0;
}
-static int nl80211_parse_he_bss_color(struct nlattr *attrs,
- struct cfg80211_he_bss_color *he_bss_color)
-{
- struct nlattr *tb[NL80211_HE_BSS_COLOR_ATTR_MAX + 1];
- int err;
-
- err = nla_parse_nested(tb, NL80211_HE_BSS_COLOR_ATTR_MAX, attrs,
- he_bss_color_policy, NULL);
- if (err)
- return err;
-
- if (!tb[NL80211_HE_BSS_COLOR_ATTR_COLOR])
- return -EINVAL;
-
- he_bss_color->color =
- nla_get_u8(tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]);
- he_bss_color->enabled =
- !nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]);
- he_bss_color->partial =
- nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]);
-
- return 0;
-}
-
static int nl80211_parse_fils_discovery(struct cfg80211_registered_device *rdev,
struct nlattr *attrs,
struct cfg80211_ap_settings *params)
@@ -5734,14 +5743,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) {
- err = nl80211_parse_he_bss_color(
- info->attrs[NL80211_ATTR_HE_BSS_COLOR],
- &params->he_bss_color);
- if (err)
- goto out;
- }
-
if (info->attrs[NL80211_ATTR_FILS_DISCOVERY]) {
err = nl80211_parse_fils_discovery(rdev,
info->attrs[NL80211_ATTR_FILS_DISCOVERY],
@@ -10387,6 +10388,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HE]))
req.flags |= ASSOC_REQ_DISABLE_HE;
+ if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_EHT]))
+ req.flags |= ASSOC_REQ_DISABLE_EHT;
+
if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK])
memcpy(&req.vht_capa_mask,
nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]),
@@ -11175,6 +11179,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HE]))
connect.flags |= ASSOC_REQ_DISABLE_HE;
+ if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_EHT]))
+ connect.flags |= ASSOC_REQ_DISABLE_EHT;
+
if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK])
memcpy(&connect.vht_capa_mask,
nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]),
@@ -15301,23 +15308,79 @@ static int nl80211_set_fils_aad(struct sk_buff *skb,
#define NL80211_FLAG_CLEAR_SKB 0x20
#define NL80211_FLAG_NO_WIPHY_MTX 0x40
+#define INTERNAL_FLAG_SELECTORS(__sel) \
+ SELECTOR(__sel, NONE, 0) /* must be first */ \
+ SELECTOR(__sel, WIPHY, \
+ NL80211_FLAG_NEED_WIPHY) \
+ SELECTOR(__sel, WDEV, \
+ NL80211_FLAG_NEED_WDEV) \
+ SELECTOR(__sel, NETDEV, \
+ NL80211_FLAG_NEED_NETDEV) \
+ SELECTOR(__sel, WIPHY_RTNL, \
+ NL80211_FLAG_NEED_WIPHY | \
+ NL80211_FLAG_NEED_RTNL) \
+ SELECTOR(__sel, WIPHY_RTNL_NOMTX, \
+ NL80211_FLAG_NEED_WIPHY | \
+ NL80211_FLAG_NEED_RTNL | \
+ NL80211_FLAG_NO_WIPHY_MTX) \
+ SELECTOR(__sel, WDEV_RTNL, \
+ NL80211_FLAG_NEED_WDEV | \
+ NL80211_FLAG_NEED_RTNL) \
+ SELECTOR(__sel, NETDEV_RTNL, \
+ NL80211_FLAG_NEED_NETDEV | \
+ NL80211_FLAG_NEED_RTNL) \
+ SELECTOR(__sel, NETDEV_UP, \
+ NL80211_FLAG_NEED_NETDEV_UP) \
+ SELECTOR(__sel, NETDEV_UP_NOTMX, \
+ NL80211_FLAG_NEED_NETDEV_UP | \
+ NL80211_FLAG_NO_WIPHY_MTX) \
+ SELECTOR(__sel, NETDEV_UP_CLEAR, \
+ NL80211_FLAG_NEED_NETDEV_UP | \
+ NL80211_FLAG_CLEAR_SKB) \
+ SELECTOR(__sel, WDEV_UP, \
+ NL80211_FLAG_NEED_WDEV_UP) \
+ SELECTOR(__sel, WDEV_UP_RTNL, \
+ NL80211_FLAG_NEED_WDEV_UP | \
+ NL80211_FLAG_NEED_RTNL) \
+ SELECTOR(__sel, WIPHY_CLEAR, \
+ NL80211_FLAG_NEED_WIPHY | \
+ NL80211_FLAG_CLEAR_SKB)
+
+enum nl80211_internal_flags_selector {
+#define SELECTOR(_, name, value) NL80211_IFL_SEL_##name,
+ INTERNAL_FLAG_SELECTORS(_)
+#undef SELECTOR
+};
+
+static u32 nl80211_internal_flags[] = {
+#define SELECTOR(_, name, value) [NL80211_IFL_SEL_##name] = value,
+ INTERNAL_FLAG_SELECTORS(_)
+#undef SELECTOR
+};
+
static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = NULL;
struct wireless_dev *wdev;
struct net_device *dev;
+ u32 internal_flags;
+
+ if (WARN_ON(ops->internal_flags >= ARRAY_SIZE(nl80211_internal_flags)))
+ return -EINVAL;
+
+ internal_flags = nl80211_internal_flags[ops->internal_flags];
rtnl_lock();
- if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) {
+ if (internal_flags & NL80211_FLAG_NEED_WIPHY) {
rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
if (IS_ERR(rdev)) {
rtnl_unlock();
return PTR_ERR(rdev);
}
info->user_ptr[0] = rdev;
- } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV ||
- ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
+ } else if (internal_flags & NL80211_FLAG_NEED_NETDEV ||
+ internal_flags & NL80211_FLAG_NEED_WDEV) {
wdev = __cfg80211_wdev_from_attrs(NULL, genl_info_net(info),
info->attrs);
if (IS_ERR(wdev)) {
@@ -15328,7 +15391,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
dev = wdev->netdev;
rdev = wiphy_to_rdev(wdev->wiphy);
- if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
+ if (internal_flags & NL80211_FLAG_NEED_NETDEV) {
if (!dev) {
rtnl_unlock();
return -EINVAL;
@@ -15339,7 +15402,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
info->user_ptr[1] = wdev;
}
- if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
+ if (internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
!wdev_running(wdev)) {
rtnl_unlock();
return -ENETDOWN;
@@ -15349,12 +15412,12 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
info->user_ptr[0] = rdev;
}
- if (rdev && !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
+ if (rdev && !(internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
wiphy_lock(&rdev->wiphy);
/* we keep the mutex locked until post_doit */
__release(&rdev->wiphy.mtx);
}
- if (!(ops->internal_flags & NL80211_FLAG_NEED_RTNL))
+ if (!(internal_flags & NL80211_FLAG_NEED_RTNL))
rtnl_unlock();
return 0;
@@ -15363,8 +15426,10 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
+ u32 internal_flags = nl80211_internal_flags[ops->internal_flags];
+
if (info->user_ptr[1]) {
- if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
+ if (internal_flags & NL80211_FLAG_NEED_WDEV) {
struct wireless_dev *wdev = info->user_ptr[1];
dev_put(wdev->netdev);
@@ -15374,7 +15439,7 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
}
if (info->user_ptr[0] &&
- !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
+ !(internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
struct cfg80211_registered_device *rdev = info->user_ptr[0];
/* we kept the mutex locked since pre_doit */
@@ -15382,7 +15447,7 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
wiphy_unlock(&rdev->wiphy);
}
- if (ops->internal_flags & NL80211_FLAG_NEED_RTNL)
+ if (internal_flags & NL80211_FLAG_NEED_RTNL)
rtnl_unlock();
/* If needed, clear the netlink message payload from the SKB
@@ -15390,7 +15455,7 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
* the heap after the SKB is freed. The netlink message header
* is still needed for further processing, so leave it intact.
*/
- if (ops->internal_flags & NL80211_FLAG_CLEAR_SKB) {
+ if (internal_flags & NL80211_FLAG_CLEAR_SKB) {
struct nlmsghdr *nlh = nlmsg_hdr(skb);
memset(nlmsg_data(nlh), 0, nlmsg_len(nlh));
@@ -15500,6 +15565,11 @@ error:
return err;
}
+#define SELECTOR(__sel, name, value) \
+ ((__sel) == (value)) ? NL80211_IFL_SEL_##name :
+int __missing_selector(void);
+#define IFLAGS(__val) INTERNAL_FLAG_SELECTORS(__val) __missing_selector()
+
static const struct genl_ops nl80211_ops[] = {
{
.cmd = NL80211_CMD_GET_WIPHY,
@@ -15508,7 +15578,7 @@ static const struct genl_ops nl80211_ops[] = {
.dumpit = nl80211_dump_wiphy,
.done = nl80211_dump_wiphy_done,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_WIPHY,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY),
},
};
@@ -15525,112 +15595,113 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.doit = nl80211_get_interface,
.dumpit = nl80211_dump_interface,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_WDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV),
},
{
.cmd = NL80211_CMD_SET_INTERFACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_interface,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
- NL80211_FLAG_NEED_RTNL,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL),
},
{
.cmd = NL80211_CMD_NEW_INTERFACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_new_interface,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY |
- NL80211_FLAG_NEED_RTNL |
- /* we take the wiphy mutex later ourselves */
- NL80211_FLAG_NO_WIPHY_MTX,
+ .internal_flags =
+ IFLAGS(NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL |
+ /* we take the wiphy mutex later ourselves */
+ NL80211_FLAG_NO_WIPHY_MTX),
},
{
.cmd = NL80211_CMD_DEL_INTERFACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_interface,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV |
- NL80211_FLAG_NEED_RTNL,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV |
+ NL80211_FLAG_NEED_RTNL),
},
{
.cmd = NL80211_CMD_GET_KEY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_key,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_KEY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_key,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_NEW_KEY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_new_key,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_DEL_KEY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_key,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_BEACON,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = nl80211_set_beacon,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_START_AP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = nl80211_start_ap,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_STOP_AP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = nl80211_stop_ap,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_GET_STATION,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_station,
.dumpit = nl80211_dump_station,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_SET_STATION,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_station,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_NEW_STATION,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_new_station,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_DEL_STATION,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_station,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_GET_MPATH,
@@ -15638,7 +15709,7 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.doit = nl80211_get_mpath,
.dumpit = nl80211_dump_mpath,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_GET_MPP,
@@ -15646,42 +15717,41 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.doit = nl80211_get_mpp,
.dumpit = nl80211_dump_mpp,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_MPATH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_mpath,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_NEW_MPATH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_new_mpath,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_DEL_MPATH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_mpath,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_BSS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_bss,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_GET_REG,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_reg_do,
.dumpit = nl80211_get_reg_dump,
- .internal_flags = 0,
/* can be retrieved by unprivileged users */
},
#ifdef CONFIG_CFG80211_CRDA_SUPPORT
@@ -15690,7 +15760,6 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_reg,
.flags = GENL_ADMIN_PERM,
- .internal_flags = 0,
},
#endif
{
@@ -15710,28 +15779,28 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_mesh_config,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_MESH_CONFIG,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_update_mesh_config,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_TRIGGER_SCAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_trigger_scan,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_ABORT_SCAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_abort_scan,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_GET_SCAN,
@@ -15743,60 +15812,58 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_start_sched_scan,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_STOP_SCHED_SCAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_stop_sched_scan,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_AUTHENTICATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_authenticate,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- 0 |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_ASSOCIATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_associate,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- 0 |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_DEAUTHENTICATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_deauthenticate,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_DISASSOCIATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_disassociate,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_JOIN_IBSS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_join_ibss,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_LEAVE_IBSS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_leave_ibss,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
#ifdef CONFIG_NL80211_TESTMODE
{
@@ -15805,7 +15872,7 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.doit = nl80211_testmode_do,
.dumpit = nl80211_testmode_dump,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY),
},
#endif
{
@@ -15813,34 +15880,32 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_connect,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- 0 |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_update_connect_params,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- 0 |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_DISCONNECT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_disconnect,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_WIPHY_NETNS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_wiphy_netns,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY |
- NL80211_FLAG_NEED_RTNL |
- NL80211_FLAG_NO_WIPHY_MTX,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_NO_WIPHY_MTX),
},
{
.cmd = NL80211_CMD_GET_SURVEY,
@@ -15852,121 +15917,120 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_setdel_pmksa,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- 0 |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_DEL_PMKSA,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_setdel_pmksa,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_FLUSH_PMKSA,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_flush_pmksa,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_remain_on_channel,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_cancel_remain_on_channel,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_tx_bitrate_mask,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_REGISTER_FRAME,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_register_mgmt,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV),
},
{
.cmd = NL80211_CMD_FRAME,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_tx_mgmt,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_FRAME_WAIT_CANCEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_tx_mgmt_cancel_wait,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_SET_POWER_SAVE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_power_save,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_GET_POWER_SAVE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_power_save,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_SET_CQM,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_cqm,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_SET_CHANNEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_channel,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_JOIN_MESH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_join_mesh,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_LEAVE_MESH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_leave_mesh,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_JOIN_OCB,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_join_ocb,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_LEAVE_OCB,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_leave_ocb,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
#ifdef CONFIG_PM
{
@@ -15974,14 +16038,14 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_wowlan,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_WIPHY,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY),
},
{
.cmd = NL80211_CMD_SET_WOWLAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_wowlan,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY),
},
#endif
{
@@ -15989,126 +16053,125 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_rekey_data,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- 0 |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_TDLS_MGMT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_tdls_mgmt,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_TDLS_OPER,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_tdls_oper,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_UNEXPECTED_FRAME,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_register_unexpected_frame,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_PROBE_CLIENT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_probe_client,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_REGISTER_BEACONS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_register_beacons,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY),
},
{
.cmd = NL80211_CMD_SET_NOACK_MAP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_noack_map,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_START_P2P_DEVICE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_start_p2p_device,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV |
- NL80211_FLAG_NEED_RTNL,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV |
+ NL80211_FLAG_NEED_RTNL),
},
{
.cmd = NL80211_CMD_STOP_P2P_DEVICE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_stop_p2p_device,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL),
},
{
.cmd = NL80211_CMD_START_NAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_start_nan,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV |
- NL80211_FLAG_NEED_RTNL,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV |
+ NL80211_FLAG_NEED_RTNL),
},
{
.cmd = NL80211_CMD_STOP_NAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_stop_nan,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL),
},
{
.cmd = NL80211_CMD_ADD_NAN_FUNCTION,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_nan_add_func,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_DEL_NAN_FUNCTION,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_nan_del_func,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_CHANGE_NAN_CONFIG,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_nan_change_config,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_SET_MCAST_RATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_mcast_rate,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_SET_MAC_ACL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_mac_acl,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_RADAR_DETECT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_start_radar_detection,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NO_WIPHY_MTX,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NO_WIPHY_MTX),
},
{
.cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
@@ -16120,41 +16183,41 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_update_ft_ies,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_CRIT_PROTOCOL_START,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_crit_protocol_start,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_CRIT_PROTOCOL_STOP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_crit_protocol_stop,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_GET_COALESCE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_coalesce,
- .internal_flags = NL80211_FLAG_NEED_WIPHY,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY),
},
{
.cmd = NL80211_CMD_SET_COALESCE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_coalesce,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY),
},
{
.cmd = NL80211_CMD_CHANNEL_SWITCH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_channel_switch,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_VENDOR,
@@ -16162,140 +16225,137 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.doit = nl80211_vendor_cmd,
.dumpit = nl80211_vendor_cmd_dump,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY |
- 0 |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_SET_QOS_MAP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_qos_map,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_ADD_TX_TS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_add_tx_ts,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_DEL_TX_TS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_tx_ts,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_tdls_channel_switch,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_tdls_cancel_channel_switch,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_MULTICAST_TO_UNICAST,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_multicast_to_unicast,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_SET_PMK,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_pmk,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- 0 |
- NL80211_FLAG_CLEAR_SKB,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_CLEAR_SKB),
},
{
.cmd = NL80211_CMD_DEL_PMK,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_pmk,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_EXTERNAL_AUTH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_external_auth,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_CONTROL_PORT_FRAME,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_tx_control_port,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_GET_FTM_RESPONDER_STATS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_get_ftm_responder_stats,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_PEER_MEASUREMENT_START,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_pmsr_start,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP),
},
{
.cmd = NL80211_CMD_NOTIFY_RADAR,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_notify_radar_detection,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_UPDATE_OWE_INFO,
.doit = nl80211_update_owe_info,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_PROBE_MESH_LINK,
.doit = nl80211_probe_mesh_link,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_TID_CONFIG,
.doit = nl80211_set_tid_config,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV),
},
{
.cmd = NL80211_CMD_SET_SAR_SPECS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_sar_specs,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_WIPHY |
- NL80211_FLAG_NEED_RTNL,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL),
},
{
.cmd = NL80211_CMD_COLOR_CHANGE_REQUEST,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_color_change,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
.cmd = NL80211_CMD_SET_FILS_AAD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_fils_aad,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
};
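
The nl80211 hunks above replace raw flag combinations in .internal_flags with small selector indices built by an X-macro, plus a lookup table that recovers the full flag mask in nl80211_pre_doit()/nl80211_post_doit(). Below is a minimal, self-contained userspace sketch of that selector pattern; the FLAG_* names and values are hypothetical and not taken from the patch.

#include <stdio.h>

#define FLAG_A 0x1
#define FLAG_B 0x2
#define FLAG_C 0x4

/* One SELECTOR() line per flag combination actually used; order = index. */
#define FLAG_SELECTORS(__sel)				\
	SELECTOR(__sel, NONE, 0)			\
	SELECTOR(__sel, A,    FLAG_A)			\
	SELECTOR(__sel, A_B,  FLAG_A | FLAG_B)		\
	SELECTOR(__sel, ALL,  FLAG_A | FLAG_B | FLAG_C)

enum flag_selector {
#define SELECTOR(_, name, value) SEL_##name,
	FLAG_SELECTORS(_)
#undef SELECTOR
};

static const unsigned int selector_to_flags[] = {
#define SELECTOR(_, name, value) [SEL_##name] = (value),
	FLAG_SELECTORS(_)
#undef SELECTOR
};

/*
 * Map a flag combination back to its selector with chained ternaries.
 * The kernel variant leaves the fallback undefined so that an unlisted
 * combination fails at link time; this sketch simply returns -1.
 */
static int missing_selector(void) { return -1; }
#define SELECTOR(__sel, name, value) ((__sel) == (value)) ? SEL_##name :
#define FLAGS_TO_SEL(__val) (FLAG_SELECTORS(__val) missing_selector())

int main(void)
{
	unsigned char sel = FLAGS_TO_SEL(FLAG_A | FLAG_B);	/* small index */

	printf("selector %u -> flags 0x%x\n", sel, selector_to_flags[sel]);
	return 0;
}
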
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index c76cd973f06e..58e83ce642ad 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -807,6 +807,8 @@ static int __init load_builtin_regdb_keys(void)
return 0;
}
+MODULE_FIRMWARE("regulatory.db.p7s");
+
static bool regdb_has_valid_signature(const u8 *data, unsigned int size)
{
const struct firmware *sig;
@@ -1078,6 +1080,8 @@ static void regdb_fw_cb(const struct firmware *fw, void *context)
release_firmware(fw);
}
+MODULE_FIRMWARE("regulatory.db");
+
static int query_regdb_file(const char *alpha2)
{
ASSERT_RTNL();
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 36aa01d92b65..35c7e89b2e7d 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -117,7 +117,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
sp = skb_sec_path(skb);
x = sp->xvec[sp->len - 1];
- if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
return skb;
/* This skb was already validated on the upper/virtual dev */
@@ -212,7 +212,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
int err;
struct dst_entry *dst;
struct net_device *dev;
- struct xfrm_state_offload *xso = &x->xso;
+ struct xfrm_dev_offload *xso = &x->xso;
xfrm_address_t *saddr;
xfrm_address_t *daddr;
@@ -264,15 +264,16 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
xso->dev = dev;
netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
xso->real_dev = dev;
- xso->num_exthdrs = 1;
- /* Don't forward bit that is not implemented */
- xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6;
+
+ if (xuo->flags & XFRM_OFFLOAD_INBOUND)
+ xso->dir = XFRM_DEV_OFFLOAD_IN;
+ else
+ xso->dir = XFRM_DEV_OFFLOAD_OUT;
err = dev->xfrmdev_ops->xdo_dev_state_add(x);
if (err) {
- xso->num_exthdrs = 0;
- xso->flags = 0;
xso->dev = NULL;
+ xso->dir = 0;
xso->real_dev = NULL;
dev_put_track(dev, &xso->dev_tracker);
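
The xfrm_device.c hunk above drops the generic xso->flags bits in favour of an explicit xso->dir direction, translated from the user-supplied XFRM_OFFLOAD_INBOUND bit at configuration time. A small hedged sketch of that flag-bit-to-enum translation follows; all names and the flag value here are stand-ins, not the real uapi definitions.

#include <stdio.h>

#define UAPI_OFFLOAD_INBOUND 0x2	/* stand-in for the uapi flag bit */

enum dev_offload_dir {
	DEV_OFFLOAD_UNSET = 0,
	DEV_OFFLOAD_IN,
	DEV_OFFLOAD_OUT,
};

/* add path: the user-supplied flag bit becomes an explicit direction */
static enum dev_offload_dir dir_from_flags(unsigned int user_flags)
{
	return (user_flags & UAPI_OFFLOAD_INBOUND) ? DEV_OFFLOAD_IN
						   : DEV_OFFLOAD_OUT;
}

/* dump path: only the direction is translated back into a flag bit */
static unsigned int flags_from_dir(enum dev_offload_dir dir)
{
	return dir == DEV_OFFLOAD_IN ? UAPI_OFFLOAD_INBOUND : 0;
}

int main(void)
{
	enum dev_offload_dir dir = dir_from_flags(UAPI_OFFLOAD_INBOUND);

	printf("dir=%d flags=%#x\n", dir, flags_from_dir(dir));
	return 0;
}
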
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 00bd0ecff5a1..f1876ea61fdc 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3744,7 +3744,7 @@ static int stale_bundle(struct dst_entry *dst)
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
- dst->dev = dev_net(dev)->loopback_dev;
+ dst->dev = blackhole_netdev;
dev_hold(dst->dev);
dev_put(dev);
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b749935152ba..08564e0eef20 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -751,7 +751,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct xfrm_state *x;
- struct xfrm_state_offload *xso;
+ struct xfrm_dev_offload *xso;
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
xso = &x->xso;
@@ -835,7 +835,7 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct xfrm_state *x;
- struct xfrm_state_offload *xso;
+ struct xfrm_dev_offload *xso;
restart:
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
xso = &x->xso;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 64fa8fdd6bbd..6a58fec6a1fb 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -840,7 +840,7 @@ static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
return 0;
}
-static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb)
+static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb)
{
struct xfrm_user_offload *xuo;
struct nlattr *attr;
@@ -852,7 +852,8 @@ static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb
xuo = nla_data(attr);
memset(xuo, 0, sizeof(*xuo));
xuo->ifindex = xso->dev->ifindex;
- xuo->flags = xso->flags;
+ if (xso->dir == XFRM_DEV_OFFLOAD_IN)
+ xuo->flags = XFRM_OFFLOAD_INBOUND;
return 0;
}
diff --git a/samples/trace_events/trace_custom_sched.h b/samples/trace_events/trace_custom_sched.h
index 9fdd8e7c2a45..951388334a3f 100644
--- a/samples/trace_events/trace_custom_sched.h
+++ b/samples/trace_events/trace_custom_sched.h
@@ -25,11 +25,11 @@ TRACE_CUSTOM_EVENT(sched_switch,
* that the custom event is using.
*/
TP_PROTO(bool preempt,
- unsigned int prev_state,
struct task_struct *prev,
- struct task_struct *next),
+ struct task_struct *next,
+ unsigned int prev_state),
- TP_ARGS(preempt, prev_state, prev, next),
+ TP_ARGS(preempt, prev, next, prev_state),
/*
* The next fields are where the customization happens.
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 0ae4e4e57a40..3fb8f9026e9b 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -179,7 +179,8 @@ int hashtab_duplicate(struct hashtab *new, struct hashtab *orig,
kmem_cache_free(hashtab_node_cachep, cur);
}
}
- kmem_cache_free(hashtab_node_cachep, new);
+ kfree(new->htable);
+ memset(new, 0, sizeof(*new));
return -ENOMEM;
}
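
The hashtab_duplicate() fix above changes the error path to release only what the function itself allocated (the internal htable) and to zero the caller-owned destination, instead of handing the destination to a slab cache it never came from. A standalone userspace sketch of that error-path discipline, with hypothetical types:

#include <stdlib.h>
#include <string.h>

struct table {
	int **slots;
	size_t size;
};

/* Duplicate orig into the caller-owned *new; on failure, free only what
 * this function allocated and hand back a zeroed destination. */
static int table_duplicate(struct table *new, const struct table *orig)
{
	size_t i;

	new->size = orig->size;
	new->slots = calloc(new->size, sizeof(*new->slots));
	if (!new->slots)
		goto error;

	for (i = 0; i < orig->size; i++) {
		if (!orig->slots[i])
			continue;
		new->slots[i] = malloc(sizeof(int));
		if (!new->slots[i])
			goto error;
		*new->slots[i] = *orig->slots[i];
	}
	return 0;

error:
	if (new->slots) {
		for (i = 0; i < new->size; i++)
			free(new->slots[i]);
		free(new->slots);
	}
	memset(new, 0, sizeof(*new));	/* destination stays caller-owned */
	return -1;
}

int main(void)
{
	int v = 7, *slot = &v;
	struct table orig = { .slots = &slot, .size = 1 };
	struct table copy;

	/* cleanup of the successful copy omitted for brevity */
	return table_duplicate(&copy, &orig) ? 1 : 0;
}
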
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 69cbc79fbb71..2aaaa6807174 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -1094,7 +1094,8 @@ wavefront_send_sample (snd_wavefront_t *dev,
if (dataptr < data_end) {
- __get_user (sample_short, dataptr);
+ if (get_user(sample_short, dataptr))
+ return -EFAULT;
dataptr += skip;
if (data_is_unsigned) { /* GUS ? */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index cf531c1efa13..ad292df7d805 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -937,6 +937,9 @@ static int alc_init(struct hda_codec *codec)
return 0;
}
+#define alc_free snd_hda_gen_free
+
+#ifdef CONFIG_PM
static inline void alc_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -950,9 +953,6 @@ static inline void alc_shutup(struct hda_codec *codec)
alc_shutup_pins(codec);
}
-#define alc_free snd_hda_gen_free
-
-#ifdef CONFIG_PM
static void alc_power_eapd(struct hda_codec *codec)
{
alc_auto_setup_eapd(codec, false);
@@ -966,9 +966,7 @@ static int alc_suspend(struct hda_codec *codec)
spec->power_hook(codec);
return 0;
}
-#endif
-#ifdef CONFIG_PM
static int alc_resume(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -6780,6 +6778,41 @@ static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec,
}
}
+static void alc_fixup_dell4_mic_no_presence_quiet(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+ struct hda_input_mux *imux = &spec->gen.input_mux;
+ int i;
+
+ alc269_fixup_limit_int_mic_boost(codec, fix, action);
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ /**
+ * Set the vref of pin 0x19 (Headset Mic) and pin 0x1b (Headphone Mic)
+ * to Hi-Z to avoid pop noises at startup and when plugging and
+ * unplugging headphones.
+ */
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+ snd_hda_codec_set_pin_target(codec, 0x1b, PIN_VREFHIZ);
+ break;
+ case HDA_FIXUP_ACT_PROBE:
+ /**
+ * Make the internal mic (0x12) the default input source to
+ * prevent pop noises on cold boot.
+ */
+ for (i = 0; i < imux->num_items; i++) {
+ if (spec->gen.imux_pins[i] == 0x12) {
+ spec->gen.cur_mux[0] = i;
+ break;
+ }
+ }
+ break;
+ }
+}
+
enum {
ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
@@ -6821,6 +6854,7 @@ enum {
ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET,
ALC269_FIXUP_HEADSET_MODE,
ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
ALC269_FIXUP_ASPIRE_HEADSET_MIC,
@@ -7012,6 +7046,7 @@ enum {
ALC245_FIXUP_CS35L41_SPI_4,
ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED,
ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED,
+ ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8808,6 +8843,21 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC285_FIXUP_HP_MUTE_LED,
},
+ [ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_dell4_mic_no_presence_quiet,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ },
+ [ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x02a1112c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8898,6 +8948,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0a38, "Dell Latitude 7520", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE_QUIET),
SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
@@ -9040,6 +9091,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ac, "HP EliteBook 640 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ae, "HP EliteBook 650 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
@@ -9290,6 +9342,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
+ SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x1100, "TongFang GKxNRxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x1111, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x1119, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x1129, "TongFang GMxZGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x1147, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -9297,6 +9357,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
#if 0
/* Below is a quirk table taken from the old code.
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 0ea39565e623..40a5e3eb4ef2 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3235,6 +3235,15 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+/* Rane SL-1 */
+{
+ USB_DEVICE(0x13e5, 0x0001),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ }
+},
+
/* disabled due to regression for other devices;
* see https://bugzilla.kernel.org/show_bug.cgi?id=199905
*/
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index ab9f3da49941..fbbe59054c3f 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1822,6 +1822,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x0711, 0x5800, /* MCT Trigger 5 USB-to-HDMI */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x074d, 0x3553, /* Outlaw RR2150 (Micronas UAC3553B) */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x08bb, 0x2702, /* LineX FM Transmitter */
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index e1ba2d51b717..b339bf2196ca 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -348,6 +348,8 @@ enum {
IFLA_PARENT_DEV_NAME,
IFLA_PARENT_DEV_BUS_NAME,
IFLA_GRO_MAX_SIZE,
+ IFLA_TSO_MAX_SIZE,
+ IFLA_TSO_MAX_SEGS,
__IFLA_MAX
};
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 91a6fe4e02c0..6a184d260c7f 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -445,7 +445,13 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_RESET 2
#define KVM_SYSTEM_EVENT_CRASH 3
__u32 type;
- __u64 flags;
+ __u32 ndata;
+ union {
+#ifndef __KERNEL__
+ __u64 flags;
+#endif
+ __u64 data[16];
+ };
} system_event;
/* KVM_EXIT_S390_STSI */
struct {
@@ -1144,6 +1150,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_MEM_OP_EXTENSION 211
#define KVM_CAP_PMU_CAPABILITY 212
#define KVM_CAP_DISABLE_QUIRKS2 213
+/* #define KVM_CAP_VM_TSC_CONTROL 214 */
+#define KVM_CAP_SYSTEM_EVENT_DATA 215
#ifdef KVM_CAP_IRQ_ROUTING
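
The kvm_run hunk above extends the system_event payload by turning the old __u64 flags field into an anonymous union with a data[] array, keeping the legacy name visible to userspace (guarded by #ifndef __KERNEL__) while new code writes through data[]. A small standalone sketch of that layout trick, using a hypothetical struct rather than the real uapi header:

#include <stdio.h>
#include <stdint.h>

struct sample_event {
	uint32_t type;
	uint32_t ndata;
	union {
		uint64_t flags;		/* legacy name, aliases data[0] */
		uint64_t data[16];
	};
};

int main(void)
{
	struct sample_event ev = { .type = 1, .ndata = 2 };

	/* new-style producer fills the array... */
	ev.data[0] = 0xabcd;
	ev.data[1] = 42;

	/* ...while an old consumer still reads the first word as "flags" */
	printf("flags=%#llx ndata=%u\n",
	       (unsigned long long)ev.flags, ev.ndata);
	return 0;
}
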
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 44e1f8a44087..d5289fa58a4f 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -311,6 +311,7 @@ err_out:
/* BUG_ON due to failure in allocation of orig_mask/mask */
BUG_ON(-1);
+ return NULL;
}
static cpu_set_t *bind_to_node(int target_node)
@@ -364,6 +365,7 @@ err_out:
/* BUG_ON due to failure in allocation of orig_mask/mask */
BUG_ON(-1);
+ return NULL;
}
static void bind_to_cpumask(cpu_set_t *mask)
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 6de53b7ef5ff..e4cb4f1806ff 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -29,7 +29,6 @@ cleanup_files()
rm -f ${file}
rm -f "${perfdata}.old"
trap - exit term int
- kill -2 $$
exit $glb_err
}
diff --git a/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh b/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh
index fe1898402987..cba5ac08426b 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh
@@ -319,11 +319,11 @@ counter_test()
((pkts < 10))
check_err $? "$type stats show >= 10 packets after first enablement"
- sleep 2
+ sleep 2.5
local pkts=$(get_hwstat dummy1 l3 rx.packets)
((pkts >= 20))
- check_err $? "$type stats show < 20 packets after 2s passed"
+ check_err $? "$type stats show < 20 packets after 2.5s passed"
$IP stats set dev dummy1 ${type}_stats off
diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
index 4401a654c2c0..9c79bbcce5a8 100755
--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
@@ -4,35 +4,17 @@
WAIT_TIME=1
NUM_NETIFS=4
+STABLE_MAC_ADDRS=yes
lib_dir=$(dirname $0)/../../../net/forwarding
source $lib_dir/tc_common.sh
source $lib_dir/lib.sh
require_command tcpdump
-#
-# +---------------------------------------------+
-# | DUT ports Generator ports |
-# | +--------+ +--------+ +--------+ +--------+ |
-# | | | | | | | | | |
-# | | eth0 | | eth1 | | eth2 | | eth3 | |
-# | | | | | | | | | |
-# +-+--------+-+--------+-+--------+-+--------+-+
-# | | | |
-# | | | |
-# | +-----------+ |
-# | |
-# +--------------------------------+
-
-eth0=${NETIFS[p1]}
-eth1=${NETIFS[p2]}
-eth2=${NETIFS[p3]}
-eth3=${NETIFS[p4]}
-
-eth0_mac="de:ad:be:ef:00:00"
-eth1_mac="de:ad:be:ef:00:01"
-eth2_mac="de:ad:be:ef:00:02"
-eth3_mac="de:ad:be:ef:00:03"
+h1=${NETIFS[p1]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p3]}
+h2=${NETIFS[p4]}
# Helpers to map a VCAP IS1 and VCAP IS2 lookup and policy to a chain number
# used by the kernel driver. The numbers are:
@@ -156,39 +138,39 @@ create_tcam_skeleton()
setup_prepare()
{
- ip link set $eth0 up
- ip link set $eth1 up
- ip link set $eth2 up
- ip link set $eth3 up
+ ip link set $swp1 up
+ ip link set $swp2 up
+ ip link set $h2 up
+ ip link set $h1 up
- create_tcam_skeleton $eth0
+ create_tcam_skeleton $swp1
ip link add br0 type bridge
- ip link set $eth0 master br0
- ip link set $eth1 master br0
+ ip link set $swp1 master br0
+ ip link set $swp2 master br0
ip link set br0 up
- ip link add link $eth3 name $eth3.100 type vlan id 100
- ip link set $eth3.100 up
+ ip link add link $h1 name $h1.100 type vlan id 100
+ ip link set $h1.100 up
- ip link add link $eth3 name $eth3.200 type vlan id 200
- ip link set $eth3.200 up
+ ip link add link $h1 name $h1.200 type vlan id 200
+ ip link set $h1.200 up
- tc filter add dev $eth0 ingress chain $(IS1 1) pref 1 \
+ tc filter add dev $swp1 ingress chain $(IS1 1) pref 1 \
protocol 802.1Q flower skip_sw vlan_id 100 \
action vlan pop \
action goto chain $(IS1 2)
- tc filter add dev $eth0 egress chain $(ES0) pref 1 \
- flower skip_sw indev $eth1 \
+ tc filter add dev $swp1 egress chain $(ES0) pref 1 \
+ flower skip_sw indev $swp2 \
action vlan push protocol 802.1Q id 100
- tc filter add dev $eth0 ingress chain $(IS1 0) pref 2 \
+ tc filter add dev $swp1 ingress chain $(IS1 0) pref 2 \
protocol ipv4 flower skip_sw src_ip 10.1.1.2 \
action skbedit priority 7 \
action goto chain $(IS1 1)
- tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \
+ tc filter add dev $swp1 ingress chain $(IS2 0 0) pref 1 \
protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \
action police rate 50mbit burst 64k conform-exceed drop/pipe \
action goto chain $(IS2 1 0)
@@ -196,150 +178,160 @@ setup_prepare()
cleanup()
{
- ip link del $eth3.200
- ip link del $eth3.100
- tc qdisc del dev $eth0 clsact
+ ip link del $h1.200
+ ip link del $h1.100
+ tc qdisc del dev $swp1 clsact
ip link del br0
}
test_vlan_pop()
{
- printf "Testing VLAN pop.. "
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ RET=0
- tcpdump_start $eth2
+ tcpdump_start $h2
# Work around Mausezahn VLAN builder bug
# (https://github.com/netsniff-ng/netsniff-ng/issues/225) by using
# an 8021q upper
- $MZ $eth3.100 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+ $MZ $h1.100 -q -c 1 -p 64 -a $h1_mac -b $h2_mac -t ip
sleep 1
- tcpdump_stop $eth2
+ tcpdump_stop $h2
- if tcpdump_show $eth2 | grep -q "$eth3_mac > $eth2_mac, ethertype IPv4"; then
- echo "OK"
- else
- echo "FAIL"
- fi
+ tcpdump_show $h2 | grep -q "$h1_mac > $h2_mac, ethertype IPv4"
+ check_err "$?" "untagged reception"
+
+ tcpdump_cleanup $h2
- tcpdump_cleanup $eth2
+ log_test "VLAN pop"
}
test_vlan_push()
{
- printf "Testing VLAN push.. "
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
- tcpdump_start $eth3.100
+ RET=0
- $MZ $eth2 -q -c 1 -p 64 -a $eth2_mac -b $eth3_mac -t ip
+ tcpdump_start $h1.100
+
+ $MZ $h2 -q -c 1 -p 64 -a $h2_mac -b $h1_mac -t ip
sleep 1
- tcpdump_stop $eth3.100
+ tcpdump_stop $h1.100
- if tcpdump_show $eth3.100 | grep -q "$eth2_mac > $eth3_mac"; then
- echo "OK"
- else
- echo "FAIL"
- fi
+ tcpdump_show $h1.100 | grep -q "$h2_mac > $h1_mac"
+ check_err "$?" "tagged reception"
- tcpdump_cleanup $eth3.100
+ tcpdump_cleanup $h1.100
+
+ log_test "VLAN push"
}
test_vlan_ingress_modify()
{
- printf "Testing ingress VLAN modification.. "
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ RET=0
ip link set br0 type bridge vlan_filtering 1
- bridge vlan add dev $eth0 vid 200
- bridge vlan add dev $eth0 vid 300
- bridge vlan add dev $eth1 vid 300
+ bridge vlan add dev $swp1 vid 200
+ bridge vlan add dev $swp1 vid 300
+ bridge vlan add dev $swp2 vid 300
- tc filter add dev $eth0 ingress chain $(IS1 2) pref 3 \
+ tc filter add dev $swp1 ingress chain $(IS1 2) pref 3 \
protocol 802.1Q flower skip_sw vlan_id 200 \
action vlan modify id 300 \
action goto chain $(IS2 0 0)
- tcpdump_start $eth2
+ tcpdump_start $h2
- $MZ $eth3.200 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+ $MZ $h1.200 -q -c 1 -p 64 -a $h1_mac -b $h2_mac -t ip
sleep 1
- tcpdump_stop $eth2
+ tcpdump_stop $h2
- if tcpdump_show $eth2 | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
- echo "OK"
- else
- echo "FAIL"
- fi
+ tcpdump_show $h2 | grep -q "$h1_mac > $h2_mac, .* vlan 300"
+ check_err "$?" "tagged reception"
- tcpdump_cleanup $eth2
+ tcpdump_cleanup $h2
- tc filter del dev $eth0 ingress chain $(IS1 2) pref 3
+ tc filter del dev $swp1 ingress chain $(IS1 2) pref 3
- bridge vlan del dev $eth0 vid 200
- bridge vlan del dev $eth0 vid 300
- bridge vlan del dev $eth1 vid 300
+ bridge vlan del dev $swp1 vid 200
+ bridge vlan del dev $swp1 vid 300
+ bridge vlan del dev $swp2 vid 300
ip link set br0 type bridge vlan_filtering 0
+
+ log_test "Ingress VLAN modification"
}
test_vlan_egress_modify()
{
- printf "Testing egress VLAN modification.. "
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
- tc qdisc add dev $eth1 clsact
+ RET=0
+
+ tc qdisc add dev $swp2 clsact
ip link set br0 type bridge vlan_filtering 1
- bridge vlan add dev $eth0 vid 200
- bridge vlan add dev $eth1 vid 200
+ bridge vlan add dev $swp1 vid 200
+ bridge vlan add dev $swp2 vid 200
- tc filter add dev $eth1 egress chain $(ES0) pref 3 \
+ tc filter add dev $swp2 egress chain $(ES0) pref 3 \
protocol 802.1Q flower skip_sw vlan_id 200 vlan_prio 0 \
action vlan modify id 300 priority 7
- tcpdump_start $eth2
+ tcpdump_start $h2
- $MZ $eth3.200 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+ $MZ $h1.200 -q -c 1 -p 64 -a $h1_mac -b $h2_mac -t ip
sleep 1
- tcpdump_stop $eth2
+ tcpdump_stop $h2
- if tcpdump_show $eth2 | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
- echo "OK"
- else
- echo "FAIL"
- fi
+ tcpdump_show $h2 | grep -q "$h1_mac > $h2_mac, .* vlan 300"
+ check_err "$?" "tagged reception"
- tcpdump_cleanup $eth2
+ tcpdump_cleanup $h2
- tc filter del dev $eth1 egress chain $(ES0) pref 3
- tc qdisc del dev $eth1 clsact
+ tc filter del dev $swp2 egress chain $(ES0) pref 3
+ tc qdisc del dev $swp2 clsact
- bridge vlan del dev $eth0 vid 200
- bridge vlan del dev $eth1 vid 200
+ bridge vlan del dev $swp1 vid 200
+ bridge vlan del dev $swp2 vid 200
ip link set br0 type bridge vlan_filtering 0
+
+ log_test "Egress VLAN modification"
}
test_skbedit_priority()
{
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
local num_pkts=100
- printf "Testing frame prioritization.. "
+ before=$(ethtool_stats_get $swp1 'rx_green_prio_7')
- before=$(ethtool_stats_get $eth0 'rx_green_prio_7')
+ $MZ $h1 -q -c $num_pkts -p 64 -a $h1_mac -b $h2_mac -t ip -A 10.1.1.2
- $MZ $eth3 -q -c $num_pkts -p 64 -a $eth3_mac -b $eth2_mac -t ip -A 10.1.1.2
-
- after=$(ethtool_stats_get $eth0 'rx_green_prio_7')
+ after=$(ethtool_stats_get $swp1 'rx_green_prio_7')
if [ $((after - before)) = $num_pkts ]; then
- echo "OK"
+ RET=0
else
- echo "FAIL"
+ RET=1
fi
+
+ log_test "Frame prioritization"
}
trap cleanup EXIT
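The converted checks above use the pass/fail helpers from the forwarding library instead of hand-rolled OK/FAIL printfs. A minimal sketch of that reporting flow, assuming net/forwarding/lib.sh is sourced and with purely illustrative command and test names:

	RET=0                                     # reset the per-test verdict
	some_check_command | grep -q "expected"   # hypothetical check
	check_err "$?" "expected output missing"  # on failure, latch RET and the message
	log_test "Example check"                  # report PASS/FAIL for this test based on RET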
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 21a411b04890..b984f8c8d523 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -5,6 +5,7 @@ socket
psock_fanout
psock_snd
psock_tpacket
+stress_reuseport_listen
reuseport_addr_any
reuseport_bpf
reuseport_bpf_cpu
@@ -36,3 +37,4 @@ gro
ioam6_parser
toeplitz
cmsg_sender
+bind_bhash_test
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 7ea54af55490..464df13831f2 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -59,6 +59,7 @@ TEST_GEN_FILES += toeplitz
TEST_GEN_FILES += cmsg_sender
TEST_GEN_FILES += stress_reuseport_listen
TEST_PROGS += test_vxlan_vnifiltering.sh
+TEST_GEN_FILES += bind_bhash_test
TEST_FILES := settings
@@ -69,4 +70,5 @@ include bpf/Makefile
$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
$(OUTPUT)/tcp_mmap: LDLIBS += -lpthread
+$(OUTPUT)/bind_bhash_test: LDLIBS += -lpthread
$(OUTPUT)/tcp_inq: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/net/bind_bhash_test.c b/tools/testing/selftests/net/bind_bhash_test.c
new file mode 100644
index 000000000000..252e73754e76
--- /dev/null
+++ b/tools/testing/selftests/net/bind_bhash_test.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This times how long it takes to bind to a port when the port already
+ * has multiple sockets in its bhash table.
+ *
+ * In the setup(), we populate the port's bhash table with
+ * MAX_THREADS * MAX_CONNECTIONS number of entries.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <netdb.h>
+#include <pthread.h>
+
+#define MAX_THREADS 600
+#define MAX_CONNECTIONS 40
+
+static const char *bind_addr = "::1";
+static const char *port;
+
+static int fd_array[MAX_THREADS][MAX_CONNECTIONS];
+
+static int bind_socket(int opt, const char *addr)
+{
+ struct addrinfo *res, hint = {};
+ int sock_fd, reuse = 1, err;
+
+ sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (sock_fd < 0) {
+ perror("socket fd err");
+ return -1;
+ }
+
+ hint.ai_family = AF_INET6;
+ hint.ai_socktype = SOCK_STREAM;
+
+ err = getaddrinfo(addr, port, &hint, &res);
+ if (err) {
+ perror("getaddrinfo failed");
+ return -1;
+ }
+
+ if (opt) {
+ err = setsockopt(sock_fd, SOL_SOCKET, opt, &reuse, sizeof(reuse));
+ if (err) {
+ perror("setsockopt failed");
+ return -1;
+ }
+ }
+
+ err = bind(sock_fd, res->ai_addr, res->ai_addrlen);
+ if (err) {
+ perror("failed to bind to port");
+ return -1;
+ }
+
+ return sock_fd;
+}
+
+static void *setup(void *arg)
+{
+ int sock_fd, i;
+ int *array = (int *)arg;
+
+ for (i = 0; i < MAX_CONNECTIONS; i++) {
+ sock_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, bind_addr);
+ if (sock_fd < 0)
+ return NULL;
+ array[i] = sock_fd;
+ }
+
+ return NULL;
+}
+
+int main(int argc, const char *argv[])
+{
+ int listener_fd, sock_fd, i, j;
+ pthread_t tid[MAX_THREADS];
+ clock_t begin, end;
+
+ if (argc != 2) {
+ printf("Usage: listener <port>\n");
+ return -1;
+ }
+
+ port = argv[1];
+
+ listener_fd = bind_socket(SO_REUSEADDR | SO_REUSEPORT, bind_addr);
+ if (listen(listener_fd, 100) < 0) {
+ perror("listen failed");
+ return -1;
+ }
+
+ /* Set up threads to populate the bhash table entry for the port */
+ for (i = 0; i < MAX_THREADS; i++)
+ pthread_create(&tid[i], NULL, setup, fd_array[i]);
+
+ for (i = 0; i < MAX_THREADS; i++)
+ pthread_join(tid[i], NULL);
+
+ begin = clock();
+
+ /* Bind to the same port on a different address */
+ sock_fd = bind_socket(0, "2001:0db8:0:f101::1");
+
+ end = clock();
+
+ printf("time spent = %f\n", (double)(end - begin) / CLOCKS_PER_SEC);
+
+ /* clean up */
+ close(sock_fd);
+ close(listener_fd);
+ for (i = 0; i < MAX_THREADS; i++) {
+ for (j = 0; j < MAX_CONNECTIONS; j++)
+ close(fd_array[i][j]);
+ }
+
+ return 0;
+}
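Assuming an in-tree kselftest build, the new binary takes a single port argument and prints the time spent in the final bind(); a rough example invocation (port number chosen arbitrarily here):

	make -C tools/testing/selftests TARGETS=net
	./tools/testing/selftests/net/bind_bhash_test 12345
	# prints "time spent = <seconds>" once the bhash table has been populated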
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index a99ee3fb2e13..d5a0dd548989 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -56,6 +56,7 @@ TESTS="${ALL_TESTS}"
VERBOSE=0
PAUSE_ON_FAIL=no
PAUSE=no
+PING_TIMEOUT=5
nsid=100
@@ -882,13 +883,13 @@ ipv6_fcnal_runtime()
log_test $? 0 "Route delete"
run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Ping with nexthop"
run_cmd "$IP nexthop add id 82 via 2001:db8:92::2 dev veth3"
run_cmd "$IP nexthop add id 122 group 81/82"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 122"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Ping - multipath"
#
@@ -896,26 +897,26 @@ ipv6_fcnal_runtime()
#
run_cmd "$IP -6 nexthop add id 83 blackhole"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 83"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 2 "Ping - blackhole"
run_cmd "$IP nexthop replace id 83 via 2001:db8:91::2 dev veth1"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Ping - blackhole replaced with gateway"
run_cmd "$IP -6 nexthop replace id 83 blackhole"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 2 "Ping - gateway replaced by blackhole"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 122"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
if [ $? -eq 0 ]; then
run_cmd "$IP nexthop replace id 122 group 83"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 2 "Ping - group with blackhole"
run_cmd "$IP nexthop replace id 122 group 81/82"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Ping - group blackhole replaced with gateways"
else
log_test 2 0 "Ping - multipath failed"
@@ -1003,10 +1004,10 @@ ipv6_fcnal_runtime()
run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3"
run_cmd "$IP nexthop add id 93 group 91/92"
run_cmd "$IP -6 ro add default nhid 91"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Nexthop with default route and rpfilter"
run_cmd "$IP -6 ro replace default nhid 93"
- run_cmd "ip netns exec me ping -c1 -w5 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Nexthop with multipath default route and rpfilter"
# TO-DO:
@@ -1460,13 +1461,13 @@ ipv4_fcnal_runtime()
#
run_cmd "$IP nexthop replace id 21 via 172.16.1.2 dev veth1"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 21"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Basic ping"
run_cmd "$IP nexthop replace id 22 via 172.16.2.2 dev veth3"
run_cmd "$IP nexthop add id 122 group 21/22"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 122"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - multipath"
run_cmd "$IP ro delete 172.16.101.1/32 nhid 122"
@@ -1477,7 +1478,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP nexthop add id 501 via 172.16.1.2 dev veth1"
run_cmd "$IP ro add default nhid 501"
run_cmd "$IP ro add default via 172.16.1.3 dev veth1 metric 20"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - multiple default routes, nh first"
# flip the order
@@ -1486,7 +1487,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP ro add default via 172.16.1.2 dev veth1 metric 20"
run_cmd "$IP nexthop replace id 501 via 172.16.1.3 dev veth1"
run_cmd "$IP ro add default nhid 501 metric 20"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - multiple default routes, nh second"
run_cmd "$IP nexthop delete nhid 501"
@@ -1497,26 +1498,26 @@ ipv4_fcnal_runtime()
#
run_cmd "$IP nexthop add id 23 blackhole"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 23"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 2 "Ping - blackhole"
run_cmd "$IP nexthop replace id 23 via 172.16.1.2 dev veth1"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - blackhole replaced with gateway"
run_cmd "$IP nexthop replace id 23 blackhole"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 2 "Ping - gateway replaced by blackhole"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 122"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
if [ $? -eq 0 ]; then
run_cmd "$IP nexthop replace id 122 group 23"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 2 "Ping - group with blackhole"
run_cmd "$IP nexthop replace id 122 group 21/22"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - group blackhole replaced with gateways"
else
log_test 2 0 "Ping - multipath failed"
@@ -1543,7 +1544,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP nexthop add id 24 via ${lladdr} dev veth1"
set +e
run_cmd "$IP ro replace 172.16.101.1/32 nhid 24"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "IPv6 nexthop with IPv4 route"
$IP neigh sh | grep -q "${lladdr} dev veth1"
@@ -1567,11 +1568,11 @@ ipv4_fcnal_runtime()
check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "IPv6 nexthop with IPv4 route"
run_cmd "$IP ro replace 172.16.101.1/32 via inet6 ${lladdr} dev veth1"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "IPv4 route with IPv6 gateway"
$IP neigh sh | grep -q "${lladdr} dev veth1"
@@ -1588,7 +1589,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP ro del 172.16.101.1/32 via inet6 ${lladdr} dev veth1"
run_cmd "$IP -4 ro add default via inet6 ${lladdr} dev veth1"
- run_cmd "ip netns exec me ping -c1 -w5 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "IPv4 default route with IPv6 gateway"
#
@@ -2253,6 +2254,7 @@ usage: ${0##*/} OPTS
-p Pause on fail
-P Pause after each test before cleanup
-v verbose mode (show commands and output)
+ -w Timeout for ping
Runtime test
-n num Number of nexthops to target
@@ -2265,7 +2267,7 @@ EOF
################################################################################
# main
-while getopts :t:pP46hv o
+while getopts :t:pP46hvw: o
do
case $o in
t) TESTS=$OPTARG;;
@@ -2274,6 +2276,7 @@ do
p) PAUSE_ON_FAIL=yes;;
P) PAUSE=yes;;
v) VERBOSE=$(($VERBOSE + 1));;
+ w) PING_TIMEOUT=$OPTARG;;
h) usage; exit 0;;
*) usage; exit 1;;
esac
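With -w wired into getopts, the ping timeout used by the runtime tests can be overridden on the command line; a hedged example (the test selection is only illustrative):

	# run the IPv4 runtime tests with a 10 second ping timeout instead of the default 5
	./fib_nexthops.sh -t ipv4_fcnal_runtime -w 10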
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index b5181b5a8e29..8f481218a492 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -88,7 +88,7 @@ TEST_PROGS = bridge_igmp.sh \
vxlan_bridge_1d_port_8472.sh \
vxlan_bridge_1d.sh \
vxlan_bridge_1q_ipv6.sh \
- vxlan_bridge_1q_port_8472_ipv6.sh
+ vxlan_bridge_1q_port_8472_ipv6.sh \
vxlan_bridge_1q_port_8472.sh \
vxlan_bridge_1q.sh \
vxlan_symmetric_ipv6.sh \
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index d1de1e7702fb..a4406b7a8064 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -1571,6 +1571,33 @@ chk_prio_nr()
[ "${dump_stats}" = 1 ] && dump_stats
}
+chk_subflow_nr()
+{
+ local need_title="$1"
+ local msg="$2"
+ local subflow_nr=$3
+ local cnt1
+ local cnt2
+
+ if [ -n "${need_title}" ]; then
+ printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${msg}"
+ else
+ printf "%-${nr_blank}s %s" " " "${msg}"
+ fi
+
+ cnt1=$(ss -N $ns1 -tOni | grep -c token)
+ cnt2=$(ss -N $ns2 -tOni | grep -c token)
+ if [ "$cnt1" != "$subflow_nr" -o "$cnt2" != "$subflow_nr" ]; then
+ echo "[fail] got $cnt1:$cnt2 subflows expected $subflow_nr"
+ fail_test
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+
+ [ "${dump_stats}" = 1 ] && ( ss -N $ns1 -tOni ; ss -N $ns1 -tOni | grep token; ip -n $ns1 mptcp endpoint )
+}
+
chk_link_usage()
{
local ns=$1
@@ -2693,6 +2720,7 @@ fastclose_tests()
pedit_action_pkts()
{
tc -n $ns2 -j -s action show action pedit index 100 | \
+ grep "packets" | \
sed 's/.*"packets":\([0-9]\+\),.*/\1/'
}
@@ -2704,6 +2732,16 @@ fail_tests()
chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
chk_fail_nr 1 -1 invert
fi
+
+ # multiple subflows
+ if reset_with_fail "MP_FAIL MP_RST" 2; then
+ tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 0 1
+ pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 1024
+ chk_join_nr 1 1 1 1 0 1 1 0 "$(pedit_action_pkts)"
+ fi
}
userspace_tests()
@@ -2774,7 +2812,7 @@ userspace_tests()
fi
}
-implicit_tests()
+endpoint_tests()
{
# userspace pm type prevents add_addr
if reset "implicit EP"; then
@@ -2796,6 +2834,23 @@ implicit_tests()
$ns2 10.0.2.2 id 1 flags signal
wait
fi
+
+ if reset "delete and re-add"; then
+ pm_nl_set_limits $ns1 1 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 4 0 0 slow &
+
+ wait_mpj $ns2
+ pm_nl_del_endpoint $ns2 2 10.0.2.2
+ sleep 0.5
+ chk_subflow_nr needtitle "after delete" 1
+
+ pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+ wait_mpj $ns2
+ chk_subflow_nr "" "after re-add" 2
+ wait
+ fi
}
# [$1: error message]
@@ -2844,7 +2899,7 @@ all_tests_sorted=(
z@fastclose_tests
F@fail_tests
u@userspace_tests
- I@implicit_tests
+ I@endpoint_tests
)
all_tests_args=""
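chk_subflow_nr() counts subflows by counting sockets whose ss output carries an MPTCP token; the same probe can be run by hand to inspect a namespace while a test is paused (namespace name is illustrative):

	# count established MPTCP subflows in a namespace, as chk_subflow_nr does
	ss -N "$ns2" -tOni | grep -c token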
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 04a49e876a46..5b1ecd00695b 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -57,9 +57,9 @@ CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_prog
CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c)
CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie)
-TARGETS := protection_keys
-BINARIES_32 := $(TARGETS:%=%_32)
-BINARIES_64 := $(TARGETS:%=%_64)
+VMTARGETS := protection_keys
+BINARIES_32 := $(VMTARGETS:%=%_32)
+BINARIES_64 := $(VMTARGETS:%=%_64)
ifeq ($(CAN_BUILD_WITH_NOPIE),1)
CFLAGS += -no-pie
@@ -112,7 +112,7 @@ $(BINARIES_32): CFLAGS += -m32 -mxsave
$(BINARIES_32): LDLIBS += -lrt -ldl -lm
$(BINARIES_32): $(OUTPUT)/%_32: %.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
+$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-32,$(t))))
endif
ifeq ($(CAN_BUILD_X86_64),1)
@@ -120,7 +120,7 @@ $(BINARIES_64): CFLAGS += -m64 -mxsave
$(BINARIES_64): LDLIBS += -lrt -ldl
$(BINARIES_64): $(OUTPUT)/%_64: %.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
+$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-64,$(t))))
endif
# x86_64 users should be encouraged to install 32-bit libraries