-rw-r--r--  .mailmap | 7
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-controller.yaml | 123
-rw-r--r--  Documentation/devicetree/bindings/net/fsl,fec.yaml | 8
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml | 10
-rw-r--r--  Documentation/networking/devlink/devlink-selftests.rst | 38
-rw-r--r--  Documentation/networking/ip-sysctl.rst | 19
-rw-r--r--  Documentation/virt/kvm/api.rst | 2
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/lan966x.dtsi | 2
-rw-r--r--  arch/arm/mach-pxa/corgi.c | 2
-rw-r--r--  arch/arm/mach-pxa/hx4700.c | 2
-rw-r--r--  arch/arm/mach-pxa/icontrol.c | 4
-rw-r--r--  arch/arm/mach-pxa/littleton.c | 2
-rw-r--r--  arch/arm/mach-pxa/magician.c | 2
-rw-r--r--  arch/arm/mach-pxa/spitz.c | 2
-rw-r--r--  arch/arm/mach-pxa/z2.c | 4
-rw-r--r--  arch/riscv/Makefile | 1
-rw-r--r--  arch/riscv/boot/dts/canaan/canaan_kd233.dts | 2
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts | 2
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts | 2
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maix_go.dts | 6
-rw-r--r--  arch/riscv/boot/dts/canaan/sipeed_maixduino.dts | 2
-rw-r--r--  arch/riscv/kernel/Makefile | 2
-rw-r--r--  arch/riscv/kernel/elf_kexec.c | 2
-rw-r--r--  arch/s390/include/asm/archrandom.h | 9
-rw-r--r--  arch/x86/Kconfig | 8
-rw-r--r--  arch/x86/Makefile | 1
-rw-r--r--  arch/x86/events/intel/lbr.c | 19
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 1
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 4
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 14
-rw-r--r--  arch/x86/kvm/x86.c | 8
-rw-r--r--  certs/Kconfig | 1
-rw-r--r--  drivers/acpi/cppc_acpi.c | 6
-rw-r--r--  drivers/clk/clk-lan966x.c | 2
-rw-r--r--  drivers/firewire/net.c | 14
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 22
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27
-rw-r--r--  drivers/gpu/drm/drm_gem_ttm_helper.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 45
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 374
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 56
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 7
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-dev.c | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 30
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mlxcpld.c | 2
-rw-r--r--  drivers/md/raid5.c | 4
-rw-r--r--  drivers/misc/lkdtm/Makefile | 9
-rw-r--r--  drivers/mmc/host/sdhci-omap.c | 14
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 6
-rw-r--r--  drivers/net/amt.c | 2
-rw-r--r--  drivers/net/can/at91_can.c | 6
-rw-r--r--  drivers/net/can/c_can/c_can.h | 2
-rw-r--r--  drivers/net/can/c_can/c_can_ethtool.c | 8
-rw-r--r--  drivers/net/can/c_can/c_can_main.c | 2
-rw-r--r--  drivers/net/can/can327.c | 13
-rw-r--r--  drivers/net/can/cc770/cc770.c | 6
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_base.c | 6
-rw-r--r--  drivers/net/can/dev/dev.c | 50
-rw-r--r--  drivers/net/can/dev/skb.c | 6
-rw-r--r--  drivers/net/can/flexcan/flexcan-core.c | 2
-rw-r--r--  drivers/net/can/flexcan/flexcan-ethtool.c | 8
-rw-r--r--  drivers/net/can/flexcan/flexcan.h | 2
-rw-r--r--  drivers/net/can/grcan.c | 6
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 6
-rw-r--r--  drivers/net/can/janz-ican3.c | 8
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 7
-rw-r--r--  drivers/net/can/m_can/m_can.c | 6
-rw-r--r--  drivers/net/can/mscan/mscan.c | 5
-rw-r--r--  drivers/net/can/pch_can.c | 6
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 48
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 6
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 6
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 6
-rw-r--r--  drivers/net/can/slcan/slcan-core.c | 461
-rw-r--r--  drivers/net/can/slcan/slcan-ethtool.c | 8
-rw-r--r--  drivers/net/can/slcan/slcan.h | 3
-rw-r--r--  drivers/net/can/softing/softing_main.c | 10
-rw-r--r--  drivers/net/can/spi/hi311x.c | 6
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 6
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 1
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c | 1
-rw-r--r--  drivers/net/can/sun4i_can.c | 6
-rw-r--r--  drivers/net/can/ti_hecc.c | 6
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 10
-rw-r--r--  drivers/net/can/usb/esd_usb.c | 8
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c | 34
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 8
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 1
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 29
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 6
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 1
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 41
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.h | 1
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 1
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 1
-rw-r--r--  drivers/net/can/usb/ucan.c | 6
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 10
-rw-r--r--  drivers/net/can/vcan.c | 8
-rw-r--r--  drivers/net/can/vxcan.c | 8
-rw-r--r--  drivers/net/can/xilinx_can.c | 6
-rw-r--r--  drivers/net/dsa/qca/Makefile | 1
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c (renamed from drivers/net/dsa/qca/qca8k.c) | 1259
-rw-r--r--  drivers/net/dsa/qca/qca8k-common.c | 1210
-rw-r--r--  drivers/net/dsa/qca/qca8k.h | 100
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 61
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 12
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_rx.c | 5
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_tx.c | 149
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_txrx.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 67
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 96
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 136
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 89
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 54
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 106
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 193
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/crdump.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 117
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 189
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 513
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 554
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 143
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 118
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 576
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 60
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 26
-rw-r--r--  drivers/net/ethernet/sfc/ef100.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c | 91
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rep.c | 199
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rep.h | 20
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rx.c | 46
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/filter.h | 18
-rw-r--r--  drivers/net/ethernet/sfc/mae.c | 304
-rw-r--r--  drivers/net/ethernet/sfc/mae.h | 20
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol_mae.h | 24
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 22
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/tc.c | 252
-rw-r--r--  drivers/net/ethernet/sfc/tc.h | 85
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 9
-rw-r--r--  drivers/net/geneve.c | 1
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.h | 2
-rw-r--r--  drivers/net/macsec.c | 33
-rw-r--r--  drivers/net/netdevsim/bpf.c | 8
-rw-r--r--  drivers/net/netdevsim/dev.c | 6
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 2
-rw-r--r--  drivers/net/sungem_phy.c | 1
-rw-r--r--  drivers/net/usb/catc.c | 44
-rw-r--r--  drivers/net/usb/cdc_subset.c | 10
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/usb/plusb.c | 2
-rw-r--r--  drivers/net/usb/usbnet.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 37
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 9
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath11k/ahb.c | 52
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 87
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 58
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c | 70
-rw-r--r--  drivers/net/wireless/ath/ath11k/pcic.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath11k/pcic.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c | 41
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.h | 25
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/Makefile | 3
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/debug.c | 39
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/debug.h | 1
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/firmware.c | 125
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/firmware.h | 84
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/hal.h | 68
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 86
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.c | 57
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.h | 3
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/main.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 49
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 41
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c | 12
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 15
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h | 16
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 1
-rw-r--r--  drivers/net/wireless/marvell/libertas/if_usb.c | 1
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/pcie.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwl8k.c | 2
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/hif.c | 6
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/hif.h | 1
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/netdev.c | 9
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/netdev.h | 1
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/sdio.c | 13
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/spi.c | 8
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/wlan.c | 9
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/wlan.h | 1
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/wlan_cfg.c | 6
-rw-r--r--  drivers/net/wireless/purelifi/plfxlc/usb.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 21
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/core.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw88/main.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtw89/pci.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852a_table.c | 896
-rw-r--r--  drivers/net/wireless/ti/wl12xx/main.c | 3
-rw-r--r--  drivers/pinctrl/Kconfig | 2
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 65
-rw-r--r--  drivers/pinctrl/pinctrl-ocelot.c | 214
-rw-r--r--  drivers/pinctrl/ralink/pinctrl-ralink.c | 2
-rw-r--r--  drivers/pinctrl/sunplus/sppctl.c | 3
-rw-r--r--  drivers/ptp/Kconfig | 1
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 2
-rw-r--r--  drivers/spi/spi-bcm2835.c | 12
-rw-r--r--  drivers/spi/spi-cadence.c | 2
-rw-r--r--  drivers/spi/spi-rspi.c | 4
-rw-r--r--  drivers/virt/coco/sev-guest/sev-guest.c | 9
-rw-r--r--  fs/io_uring.c | 14
-rw-r--r--  fs/ntfs/attrib.c | 8
-rw-r--r--  fs/ocfs2/ocfs2.h | 4
-rw-r--r--  fs/ocfs2/slot_map.c | 46
-rw-r--r--  fs/ocfs2/super.c | 21
-rw-r--r--  fs/read_write.c | 3
-rw-r--r--  fs/userfaultfd.c | 12
-rw-r--r--  include/asm-generic/io.h | 2
-rw-r--r--  include/asm-generic/tlb.h | 3
-rw-r--r--  include/drm/gpu_scheduler.h | 4
-rw-r--r--  include/linux/can/dev.h | 4
-rw-r--r--  include/linux/mm.h | 14
-rw-r--r--  include/net/addrconf.h | 3
-rw-r--r--  include/net/bluetooth/l2cap.h | 1
-rw-r--r--  include/net/devlink.h | 21
-rw-r--r--  include/net/firewire.h | 3
-rw-r--r--  include/net/inet_connection_sock.h | 10
-rw-r--r--  include/net/ip_tunnels.h | 1
-rw-r--r--  include/net/sock.h | 8
-rw-r--r--  include/net/tcp.h | 2
-rw-r--r--  include/net/tls.h | 2
-rw-r--r--  include/uapi/asm-generic/fcntl.h | 2
-rw-r--r--  include/uapi/linux/devlink.h | 29
-rw-r--r--  include/uapi/linux/kvm.h | 2
-rw-r--r--  include/uapi/linux/seg6_iptunnel.h | 2
-rw-r--r--  kernel/bpf/btf.c | 2
-rw-r--r--  kernel/bpf/devmap.c | 4
-rw-r--r--  kernel/bpf/trampoline.c | 9
-rw-r--r--  kernel/rcu/srcutree.c | 98
-rw-r--r--  kernel/sched/deadline.c | 5
-rw-r--r--  kernel/watch_queue.c | 58
-rw-r--r--  mm/gup.c | 6
-rw-r--r--  mm/hugetlb.c | 10
-rw-r--r--  mm/kfence/core.c | 18
-rw-r--r--  mm/memory.c | 9
-rw-r--r--  mm/memremap.c | 6
-rw-r--r--  mm/secretmem.c | 33
-rw-r--r--  mm/shmem.c | 7
-rw-r--r--  net/bluetooth/hci_sync.c | 6
-rw-r--r--  net/bluetooth/l2cap_core.c | 61
-rw-r--r--  net/bluetooth/mgmt.c | 1
-rw-r--r--  net/bridge/br_netlink.c | 8
-rw-r--r--  net/caif/caif_socket.c | 20
-rw-r--r--  net/core/devlink.c | 457
-rw-r--r--  net/core/filter.c | 3
-rw-r--r--  net/decnet/af_decnet.c | 4
-rw-r--r--  net/decnet/dn_route.c | 2
-rw-r--r--  net/dsa/switch.c | 1
-rw-r--r--  net/ipv4/fib_trie.c | 7
-rw-r--r--  net/ipv4/tcp.c | 23
-rw-r--r--  net/ipv4/tcp_input.c | 41
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/tcp_metrics.c | 10
-rw-r--r--  net/ipv4/tcp_output.c | 27
-rw-r--r--  net/ipv4/udp.c | 8
-rw-r--r--  net/ipv6/mcast.c | 14
-rw-r--r--  net/ipv6/ping.c | 6
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 140
-rw-r--r--  net/ipv6/tcp_ipv6.c | 4
-rw-r--r--  net/mptcp/options.c | 2
-rw-r--r--  net/mptcp/protocol.c | 8
-rw-r--r--  net/mptcp/subflow.c | 2
-rw-r--r--  net/netfilter/nf_tables_api.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 7
-rw-r--r--  net/netfilter/nft_queue.c | 27
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/sctp/associola.c | 5
-rw-r--r--  net/sctp/stream.c | 19
-rw-r--r--  net/sctp/stream_sched.c | 2
-rw-r--r--  net/tipc/socket.c | 2
-rw-r--r--  net/tls/tls_device.c | 86
-rw-r--r--  net/tls/tls_strp.c | 5
-rw-r--r--  net/tls/tls_sw.c | 39
-rw-r--r--  sound/soc/rockchip/rockchip_i2s.c | 160
-rw-r--r--  tools/bpf/bpftool/gen.c | 2
-rw-r--r--  tools/bpf/bpftool/prog.c | 2
-rw-r--r--  tools/include/uapi/asm-generic/fcntl.h | 11
-rw-r--r--  tools/include/uapi/linux/kvm.h | 2
-rw-r--r--  tools/lib/bpf/bpf.c | 9
-rw-r--r--  tools/lib/bpf/bpf.h | 11
-rw-r--r--  tools/lib/bpf/bpf_tracing.h | 15
-rw-r--r--  tools/lib/bpf/libbpf.c | 11
-rw-r--r--  tools/lib/bpf/libbpf.map | 1
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST | 6
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.s390x | 67
-rw-r--r--  tools/testing/selftests/bpf/config | 99
-rw-r--r--  tools/testing/selftests/bpf/config.s390x | 147
-rw-r--r--  tools/testing/selftests/bpf/config.x86_64 | 251
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/probe_user.c | 35
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/send_signal.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_tunnel.c | 17
-rw-r--r--  tools/testing/selftests/bpf/progs/test_probe_user.c | 29
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tunnel_kern.c | 80
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 7
-rwxr-xr-x  tools/testing/selftests/bpf/vmtest.sh | 53
-rw-r--r--  tools/testing/selftests/drivers/net/dsa/Makefile | 17
-rw-r--r--  tools/testing/selftests/gpio/Makefile | 2
-rw-r--r--  tools/testing/selftests/kvm/rseq_test.c | 8
-rw-r--r--  tools/testing/selftests/net/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh | 879
-rwxr-xr-x  tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh | 821
-rw-r--r--  tools/testing/selftests/net/tls.c | 26
410 files changed, 11990 insertions(+), 5066 deletions(-)
diff --git a/.mailmap b/.mailmap
index 13e4f504e17f..71577c396252 100644
--- a/.mailmap
+++ b/.mailmap
@@ -60,6 +60,10 @@ Arnd Bergmann <arnd@arndb.de>
Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
Axel Dyks <xl@xlsigned.net>
Axel Lin <axel.lin@gmail.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
Ben Gardner <bgardner@wabtec.com>
@@ -135,6 +139,8 @@ Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
Frank Zago <fzago@systemfabricworks.com>
Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
@@ -371,6 +377,7 @@ Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
Sebastian Reichel <sre@kernel.org> <sre@debian.org>
Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
+Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f2d26cb7e853..c0fdb04a0435 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5796,6 +5796,24 @@
expediting. Set to zero to disable automatic
expediting.
+ srcutree.srcu_max_nodelay [KNL]
+ Specifies the number of no-delay instances
+ per jiffy for which the SRCU grace period
+ worker thread will be rescheduled with zero
+ delay. Beyond this limit, worker thread will
+ be rescheduled with a sleep delay of one jiffy.
+
+ srcutree.srcu_max_nodelay_phase [KNL]
+ Specifies the per-grace-period phase, number of
+ non-sleeping polls of readers. Beyond this limit,
+ grace period worker thread will be rescheduled
+ with a sleep delay of one jiffy, between each
+ rescan of the readers, for a grace period phase.
+
+ srcutree.srcu_retry_check_delay [KNL]
+ Specifies number of microseconds of non-sleeping
+ delay between each non-sleeping poll of readers.
+
srcutree.small_contention_lim [KNL]
Specifies the number of update-side contention
events per jiffy will be tolerated before
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 56d9aca8c954..c138a1022879 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -155,70 +155,65 @@ properties:
- in-band-status
fixed-link:
- allOf:
- - if:
- type: array
- then:
- deprecated: true
- items:
- - minimum: 0
- maximum: 31
- description:
- Emulated PHY ID, choose any but unique to the all
- specified fixed-links
-
- - enum: [0, 1]
- description:
- Duplex configuration. 0 for half duplex or 1 for
- full duplex
-
- - enum: [10, 100, 1000, 2500, 10000]
- description:
- Link speed in Mbits/sec.
-
- - enum: [0, 1]
- description:
- Pause configuration. 0 for no pause, 1 for pause
-
- - enum: [0, 1]
- description:
- Asymmetric pause configuration. 0 for no asymmetric
- pause, 1 for asymmetric pause
-
-
- - if:
- type: object
- then:
- properties:
- speed:
- description:
- Link speed.
- $ref: /schemas/types.yaml#/definitions/uint32
- enum: [10, 100, 1000, 2500, 10000]
-
- full-duplex:
- $ref: /schemas/types.yaml#/definitions/flag
- description:
- Indicates that full-duplex is used. When absent, half
- duplex is assumed.
-
- pause:
- $ref: /schemas/types.yaml#definitions/flag
- description:
- Indicates that pause should be enabled.
-
- asym-pause:
- $ref: /schemas/types.yaml#/definitions/flag
- description:
- Indicates that asym_pause should be enabled.
-
- link-gpios:
- maxItems: 1
- description:
- GPIO to determine if the link is up
-
- required:
- - speed
+ oneOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ deprecated: true
+ items:
+ - minimum: 0
+ maximum: 31
+ description:
+ Emulated PHY ID, choose any but unique to the all
+ specified fixed-links
+
+ - enum: [0, 1]
+ description:
+ Duplex configuration. 0 for half duplex or 1 for
+ full duplex
+
+ - enum: [10, 100, 1000, 2500, 10000]
+ description:
+ Link speed in Mbits/sec.
+
+ - enum: [0, 1]
+ description:
+ Pause configuration. 0 for no pause, 1 for pause
+
+ - enum: [0, 1]
+ description:
+ Asymmetric pause configuration. 0 for no asymmetric
+ pause, 1 for asymmetric pause
+ - type: object
+ additionalProperties: false
+ properties:
+ speed:
+ description:
+ Link speed.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [10, 100, 1000, 2500, 10000]
+
+ full-duplex:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Indicates that full-duplex is used. When absent, half
+ duplex is assumed.
+
+ pause:
+ $ref: /schemas/types.yaml#definitions/flag
+ description:
+ Indicates that pause should be enabled.
+
+ asym-pause:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Indicates that asym_pause should be enabled.
+
+ link-gpios:
+ maxItems: 1
+ description:
+ GPIO to determine if the link is up
+
+ required:
+ - speed
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml
index 85a8d8fb6b8f..5cfb661be124 100644
--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -58,6 +58,11 @@ properties:
- fsl,imx8qxp-fec
- const: fsl,imx8qm-fec
- const: fsl,imx6sx-fec
+ - items:
+ - enum:
+ - fsl,imx8ulp-fec
+ - const: fsl,imx6ul-fec
+ - const: fsl,imx6q-fec
reg:
maxItems: 1
@@ -187,6 +192,7 @@ properties:
Should specify the gpio for phy reset.
phy-reset-duration:
+ $ref: /schemas/types.yaml#/definitions/uint32
deprecated: true
description:
Reset duration in milliseconds. Should present only if property
@@ -195,12 +201,14 @@ properties:
and 1 millisecond will be used instead.
phy-reset-active-high:
+ type: boolean
deprecated: true
description:
If present then the reset sequence using the GPIO specified in the
"phy-reset-gpios" property is reversed (H=reset state, L=operation state).
phy-reset-post-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
deprecated: true
description:
Post reset delay in milliseconds. If present then a delay of phy-reset-post-delay
diff --git a/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml b/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
index 1bcaf6ba822c..a191a04e681c 100644
--- a/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
@@ -58,7 +58,6 @@ properties:
spi-cpha: true
spi-cpol: true
- spi-max-frequency: true
required:
- compatible
@@ -85,6 +84,7 @@ allOf:
contains:
const: marvell,nfc-spi
then:
+ $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
break-control: false
flow-control: false
@@ -108,7 +108,7 @@ allOf:
spi-max-frequency: false
reg: false
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml b/Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
index ef1155038a2f..1dcbddbc5a74 100644
--- a/Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
@@ -30,8 +30,6 @@ properties:
reg:
maxItems: 1
- spi-max-frequency: true
-
uicc-present:
type: boolean
description: |
@@ -55,10 +53,11 @@ then:
properties:
spi-max-frequency: false
else:
+ $ref: /schemas/spi/spi-peripheral-props.yaml#
required:
- spi-max-frequency
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml b/Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
index 963d9531a856..647569051ed8 100644
--- a/Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
@@ -25,8 +25,6 @@ properties:
st95hfvin-supply:
description: ST95HF transceiver's Vin regulator supply
- spi-max-frequency: true
-
required:
- compatible
- enable-gpio
@@ -34,7 +32,10 @@ required:
- reg
- spi-max-frequency
-additionalProperties: false
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
index 404c8df99364..9cc236ec42f2 100644
--- a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
@@ -40,8 +40,6 @@ properties:
reg:
maxItems: 1
- spi-max-frequency: true
-
ti,enable-gpios:
minItems: 1
maxItems: 2
@@ -65,7 +63,10 @@ required:
- ti,enable-gpios
- vin-supply
-additionalProperties: false
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml b/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
index c11f23b20c4c..53b4153d9bfc 100644
--- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
@@ -75,6 +75,16 @@ properties:
items:
pattern: '^[A-Z][A-Z]-[A-Z][0-9A-Z]-[0-9]+$'
+ brcm,ccode-map-trivial:
+ description: |
+ Use a trivial mapping of ISO3166 country codes to brcmfmac firmware
+ country code and revision: cc -> { cc, 0 }. In other words, assume that
+ the CLM blob firmware uses ISO3166 country codes as well, and that all
+ revisions are zero. This property is mutually exclusive with
+ brcm,ccode-map. If both properties are specified, then brcm,ccode-map
+ takes precedence.
+ type: boolean
+
required:
- compatible
- reg
diff --git a/Documentation/networking/devlink/devlink-selftests.rst b/Documentation/networking/devlink/devlink-selftests.rst
new file mode 100644
index 000000000000..c0aa1f3aef0d
--- /dev/null
+++ b/Documentation/networking/devlink/devlink-selftests.rst
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+=================
+Devlink Selftests
+=================
+
+The ``devlink-selftests`` API allows executing selftests on the device.
+
+Tests Mask
+==========
+The ``devlink-selftests`` command should be run with a mask indicating
+the tests to be executed.
+
+Tests Description
+=================
+The following is a list of tests that drivers may execute.
+
+.. list-table:: List of tests
+ :widths: 5 90
+
+ * - Name
+ - Description
+ * - ``DEVLINK_SELFTEST_FLASH``
+ - Devices may have the firmware on non-volatile memory on the board, e.g.
+ flash. This particular test helps to run a flash selftest on the device.
+ Implementation of the test is left to the driver/firmware.
+
+example usage
+-------------
+
+.. code:: shell
+
+ # Query selftests supported on the devlink device
+ $ devlink dev selftests show DEV
+ # Query selftests supported on all devlink devices
+ $ devlink dev selftests show
+ # Executes selftests on the device
+ $ devlink dev selftests run DEV id flash
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 5879ef3bc2cb..56cd4ea059b2 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -636,6 +636,16 @@ tcp_recovery - INTEGER
Default: 0x1
+tcp_reflect_tos - BOOLEAN
+ For listening sockets, reuse the DSCP value of the initial SYN message
+ for outgoing packets. This allows to have both directions of a TCP
+ stream to use the same DSCP value, assuming DSCP remains unchanged for
+ the lifetime of the connection.
+
+ This options affects both IPv4 and IPv6.
+
+ Default: 0 (disabled)
+
tcp_reordering - INTEGER
Initial reordering level of packets in a TCP stream.
TCP stack can then dynamically adjust flow reordering level
@@ -2884,7 +2894,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
Default: 4K
sctp_wmem - vector of 3 INTEGERs: min, default, max
- Currently this tunable has no effect.
+ Only the first value ("min") is used, "default" and "max" are
+ ignored.
+
+ min: Minimum size of send buffer that can be used by SCTP sockets.
+ It is guaranteed to each SCTP socket (but not association) even
+ under moderate memory pressure.
+
+ Default: 4K
addr_scope_policy - INTEGER
Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 6e090fb96a0e..98a283930307 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -5658,7 +5658,7 @@ by a string of size ``name_size``.
#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
- #define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
+ #define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN
#define KVM_STATS_BASE_SHIFT 8
#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
diff --git a/MAINTAINERS b/MAINTAINERS
index 46b345ddc67c..2cfda104ba4e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15862,7 +15862,7 @@ PIN CONTROLLER - FREESCALE
M: Dong Aisheng <aisheng.dong@nxp.com>
M: Fabio Estevam <festevam@gmail.com>
M: Shawn Guo <shawnguo@kernel.org>
-M: Stefan Agner <stefan@agner.ch>
+M: Jacky Bai <ping.bai@nxp.com>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
L: linux-gpio@vger.kernel.org
S: Maintained
@@ -18470,6 +18470,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git
F: include/linux/sl?b*.h
F: mm/sl?b*
+SLCAN CAN NETWORK DRIVER
+M: Dario Binacchi <dario.binacchi@amarulasolutions.com>
+L: linux-can@vger.kernel.org
+S: Maintained
+F: drivers/net/can/slcan/
+
SLEEPABLE READ-COPY UPDATE (SRCU)
M: Lai Jiangshan <jiangshanlai@gmail.com>
M: "Paul E. McKenney" <paulmck@kernel.org>
diff --git a/Makefile b/Makefile
index 00fd80c5dd6e..b79c1c18149d 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
NAME = Superb Owl
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/lan966x.dtsi b/arch/arm/boot/dts/lan966x.dtsi
index 3cb02fffe716..38e90a31d2dd 100644
--- a/arch/arm/boot/dts/lan966x.dtsi
+++ b/arch/arm/boot/dts/lan966x.dtsi
@@ -38,7 +38,7 @@
sys_clk: sys_clk {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <162500000>;
+ clock-frequency = <165625000>;
};
cpu_clk: cpu_clk {
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index c546356d0f02..5738496717e2 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -549,7 +549,7 @@ static struct pxa2xx_spi_controller corgi_spi_info = {
};
static struct gpiod_lookup_table corgi_spi_gpio_table = {
- .dev_id = "pxa2xx-spi.1",
+ .dev_id = "spi1",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 2ae06edf413c..2fd665944103 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -635,7 +635,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
};
static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
- .dev_id = "pxa2xx-spi.2",
+ .dev_id = "spi2",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_HX4700_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
{ },
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 753fe166ab68..624088257cfc 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -140,7 +140,7 @@ struct platform_device pxa_spi_ssp4 = {
};
static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
- .dev_id = "pxa2xx-spi.3",
+ .dev_id = "spi3",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS1, "cs", 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS2, "cs", 1, GPIO_ACTIVE_LOW),
@@ -149,7 +149,7 @@ static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
};
static struct gpiod_lookup_table pxa_ssp4_gpio_table = {
- .dev_id = "pxa2xx-spi.4",
+ .dev_id = "spi4",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS3, "cs", 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS4, "cs", 1, GPIO_ACTIVE_LOW),
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index f98dc61e87af..98423a96f440 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -207,7 +207,7 @@ static struct spi_board_info littleton_spi_devices[] __initdata = {
};
static struct gpiod_lookup_table littleton_spi_gpio_table = {
- .dev_id = "pxa2xx-spi.2",
+ .dev_id = "spi2",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", LITTLETON_GPIO_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
{ },
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 20456a55c4c5..0827ebca1d38 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -994,7 +994,7 @@ static struct pxa2xx_spi_controller magician_spi_info = {
};
static struct gpiod_lookup_table magician_spi_gpio_table = {
- .dev_id = "pxa2xx-spi.2",
+ .dev_id = "spi2",
.table = {
/* NOTICE must be GPIO, incompatibility with hw PXA SPI framing */
GPIO_LOOKUP_IDX("gpio-pxa", GPIO14_MAGICIAN_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index dd88953adc9d..9964729cd428 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -578,7 +578,7 @@ static struct pxa2xx_spi_controller spitz_spi_info = {
};
static struct gpiod_lookup_table spitz_spi_gpio_table = {
- .dev_id = "pxa2xx-spi.2",
+ .dev_id = "spi2",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index d03520555497..c4d4162a7e6e 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -623,7 +623,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
};
static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
- .dev_id = "pxa2xx-spi.1",
+ .dev_id = "spi1",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", GPIO24_ZIPITZ2_WIFI_CS, "cs", 0, GPIO_ACTIVE_LOW),
{ },
@@ -631,7 +631,7 @@ static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
};
static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
- .dev_id = "pxa2xx-spi.2",
+ .dev_id = "spi2",
.table = {
GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_ZIPITZ2_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
{ },
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 34cf8a598617..a4c46a03d2e2 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -73,6 +73,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
endif
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses. While unaligned accesses are explicitly allowed in the
diff --git a/arch/riscv/boot/dts/canaan/canaan_kd233.dts b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
index 039b92abf046..f72540bd14a3 100644
--- a/arch/riscv/boot/dts/canaan/canaan_kd233.dts
+++ b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
@@ -35,7 +35,7 @@
gpio-keys {
compatible = "gpio-keys";
- key0 {
+ key {
label = "KEY0";
linux,code = <BTN_0>;
gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
index b9e30df127fe..8abdbe26a1d0 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -47,7 +47,7 @@
gpio-keys {
compatible = "gpio-keys";
- boot {
+ key-boot {
label = "BOOT";
linux,code = <BTN_0>;
gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
index 8d23401b0bbb..3c6df1ecf76f 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -52,7 +52,7 @@
gpio-keys {
compatible = "gpio-keys";
- boot {
+ key-boot {
label = "BOOT";
linux,code = <BTN_0>;
gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
index 24fd83b43d9d..03c9843d503e 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
@@ -46,19 +46,19 @@
gpio-keys {
compatible = "gpio-keys";
- up {
+ key-up {
label = "UP";
linux,code = <BTN_1>;
gpios = <&gpio1_0 7 GPIO_ACTIVE_LOW>;
};
- press {
+ key-press {
label = "PRESS";
linux,code = <BTN_0>;
gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
};
- down {
+ key-down {
label = "DOWN";
linux,code = <BTN_2>;
gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
index 25341f38292a..7164ad063178 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -23,7 +23,7 @@
gpio-keys {
compatible = "gpio-keys";
- boot {
+ key-boot {
label = "BOOT";
linux,code = <BTN_0>;
gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index c71d6591d539..33bb60a354cd 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -78,7 +78,7 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o
endif
obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_KEXEC) += kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o
obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 9cb85095fd45..0cb94992c15b 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -349,7 +349,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
{
const char *strtab, *name, *shstrtab;
const Elf_Shdr *sechdrs;
- Elf_Rela *relas;
+ Elf64_Rela *relas;
int i, r_type;
/* String & section header string table */
diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
index 2c6e1c6ecbe7..4120c428dc37 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -2,7 +2,7 @@
/*
* Kernel interface for the s390 arch_random_* functions
*
- * Copyright IBM Corp. 2017, 2020
+ * Copyright IBM Corp. 2017, 2022
*
* Author: Harald Freudenberger <freude@de.ibm.com>
*
@@ -14,6 +14,7 @@
#ifdef CONFIG_ARCH_RANDOM
#include <linux/static_key.h>
+#include <linux/preempt.h>
#include <linux/atomic.h>
#include <asm/cpacf.h>
@@ -32,7 +33,8 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
- if (static_branch_likely(&s390_arch_random_available)) {
+ if (static_branch_likely(&s390_arch_random_available) &&
+ in_task()) {
cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
atomic64_add(sizeof(*v), &s390_arch_random_counter);
return true;
@@ -42,7 +44,8 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
- if (static_branch_likely(&s390_arch_random_available)) {
+ if (static_branch_likely(&s390_arch_random_available) &&
+ in_task()) {
cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
atomic64_add(sizeof(*v), &s390_arch_random_counter);
return true;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7fff10e15969..52a7f91527fe 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2474,7 +2474,7 @@ config RETHUNK
bool "Enable return-thunks"
depends on RETPOLINE && CC_HAS_RETURN_THUNK
select OBJTOOL if HAVE_OBJTOOL
- default y
+ default y if X86_64
help
Compile the kernel with the return-thunks compiler option to guard
against kernel-to-user data leaks by avoiding return speculation.
@@ -2483,21 +2483,21 @@ config RETHUNK
config CPU_UNRET_ENTRY
bool "Enable UNRET on kernel entry"
- depends on CPU_SUP_AMD && RETHUNK
+ depends on CPU_SUP_AMD && RETHUNK && X86_64
default y
help
Compile the kernel with support for the retbleed=unret mitigation.
config CPU_IBPB_ENTRY
bool "Enable IBPB on kernel entry"
- depends on CPU_SUP_AMD
+ depends on CPU_SUP_AMD && X86_64
default y
help
Compile the kernel with support for the retbleed=ibpb mitigation.
config CPU_IBRS_ENTRY
bool "Enable IBRS on kernel entry"
- depends on CPU_SUP_INTEL
+ depends on CPU_SUP_INTEL && X86_64
default y
help
Compile the kernel with support for the spectre_v2=ibrs mitigation.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1f40dad30d50..7854685c5f25 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,6 +27,7 @@ RETHUNK_CFLAGS := -mfunction-return=thunk-extern
RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
endif
+export RETHUNK_CFLAGS
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 13179f31fe10..4f70fb6c2c1e 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -278,9 +278,9 @@ enum {
};
/*
- * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
- * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
- * TSX is not supported they have no consistent behavior:
+ * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x
+ * are the TSX flags when TSX is supported, but when TSX is not supported
+ * they have no consistent behavior:
*
* - For wrmsr(), bits 61:62 are considered part of the sign extension.
* - For HW updates (branch captures) bits 61:62 are always OFF and are not
@@ -288,7 +288,7 @@ enum {
*
* Therefore, if:
*
- * 1) LBR has TSX format
+ * 1) LBR format LBR_FORMAT_EIP_FLAGS2
* 2) CPU has no TSX support enabled
*
* ... then any value passed to wrmsr() must be sign extended to 63 bits and any
@@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quirk_needed(void)
bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
boot_cpu_has(X86_FEATURE_RTM);
- return !tsx_support && x86_pmu.lbr_has_tsx;
+ return !tsx_support;
}
static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
@@ -1609,9 +1609,6 @@ void intel_pmu_lbr_init_hsw(void)
x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
-
- if (lbr_from_signext_quirk_needed())
- static_branch_enable(&lbr_from_quirk_key);
}
/* skylake */
@@ -1702,7 +1699,11 @@ void intel_pmu_lbr_init(void)
switch (x86_pmu.intel_cap.lbr_format) {
case LBR_FORMAT_EIP_FLAGS2:
x86_pmu.lbr_has_tsx = 1;
- fallthrough;
+ x86_pmu.lbr_from_flags = 1;
+ if (lbr_from_signext_quirk_needed())
+ static_branch_enable(&lbr_from_quirk_key);
+ break;
+
case LBR_FORMAT_EIP_FLAGS:
x86_pmu.lbr_from_flags = 1;
break;
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 00f5227c8459..a77b915d36a8 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -302,6 +302,7 @@
#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 10a3bfc1eb23..38a3e86e665e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -297,6 +297,8 @@ do { \
alternative_msr_write(MSR_IA32_SPEC_CTRL, \
spec_ctrl_current() | SPEC_CTRL_IBRS, \
X86_FEATURE_USE_IBRS_FW); \
+ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \
+ X86_FEATURE_USE_IBPB_FW); \
} while (0)
#define firmware_restrict_branch_speculation_end() \
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index d6858533e6e5..62f6b8b7c4a5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -555,7 +555,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
dest = addr + insn.length + insn.immediate.value;
if (__static_call_fixup(addr, op, dest) ||
- WARN_ON_ONCE(dest != &__x86_return_thunk))
+ WARN_ONCE(dest != &__x86_return_thunk,
+ "missing return thunk: %pS-%pS: %*ph",
+ addr, dest, 5, addr))
continue;
DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index aa34f908c39f..6454bc767f0f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -975,6 +975,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
@@ -1415,6 +1416,8 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_IBRS:
setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+ pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
break;
case SPECTRE_V2_LFENCE:
@@ -1516,7 +1519,16 @@ static void __init spectre_v2_select_mitigation(void)
* the CPU supports Enhanced IBRS, kernel might un-intentionally not
* enable IBRS around firmware calls.
*/
- if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+ if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+ (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+ if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+ pr_info("Enabling Speculation Barrier for firmware calls\n");
+ }
+
+ } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 143e37298d8a..e5fa335a4ea7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6029,6 +6029,11 @@ split_irqchip_unlock:
r = 0;
break;
case KVM_CAP_X86_USER_SPACE_MSR:
+ r = -EINVAL;
+ if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
+ KVM_MSR_EXIT_REASON_UNKNOWN |
+ KVM_MSR_EXIT_REASON_FILTER))
+ break;
kvm->arch.user_space_msr_mask = cap->args[0];
r = 0;
break;
@@ -6183,6 +6188,9 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
return -EFAULT;
+ if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+ return -EINVAL;
+
for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
empty &= !filter.ranges[i].nmsrs;
diff --git a/certs/Kconfig b/certs/Kconfig
index 476755703cf8..bf9b511573d7 100644
--- a/certs/Kconfig
+++ b/certs/Kconfig
@@ -43,6 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
bool "Provide system-wide ring of trusted keys"
depends on KEYS
depends on ASYMMETRIC_KEY_TYPE
+ depends on X509_CERTIFICATE_PARSER
help
Provide a system keyring to which trusted keys can be added. Keys in
the keyring are considered to be trusted. Keys may be added at will
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6ff1901d7d43..3c6d4ef87be0 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -782,7 +782,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
- goto out_free;
+ if (!cpc_supported_by_cpu())
+ goto out_free;
}
addr = ioremap(gas_t->address, gas_t->bit_width/8);
@@ -809,7 +810,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
- goto out_free;
+ if (!cpc_supported_by_cpu())
+ goto out_free;
}
} else {
if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index d1535ac13e89..81cb90955d68 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev,
hw_data->hws[i] =
devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
- "lan966x", 0, base,
+ "lan966x", 0, gate_base,
clk_gate_desc[idx].bit_idx,
0, &clk_gate_lock);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index dcc141068128..af22be84034b 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -202,15 +202,6 @@ struct fwnet_packet_task {
};
/*
- * Get fifo address embedded in hwaddr
- */
-static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
-{
- return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
- | get_unaligned_be32(&ha->uc.fifo_lo);
-}
-
-/*
* saddr == NULL means use device source address.
* daddr == NULL means leave destination address (eg unresolved arp).
*/
@@ -1306,7 +1297,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
max_payload = peer->max_payload;
datagram_label_ptr = &peer->datagram_label;
- ptask->fifo_addr = fwnet_hwaddr_fifo(ha);
+ ptask->fifo_addr = get_unaligned_be48(ha->uc.fifo);
ptask->generation = generation;
ptask->dest_node = dest_node;
ptask->speed = peer->speed;
@@ -1494,8 +1485,7 @@ static int fwnet_probe(struct fw_unit *unit,
ha.uc.uniq_id = cpu_to_be64(card->guid);
ha.uc.max_rec = dev->card->max_receive;
ha.uc.sspd = dev->card->link_speed;
- ha.uc.fifo_hi = cpu_to_be16(dev->local_fifo >> 32);
- ha.uc.fifo_lo = cpu_to_be32(dev->local_fifo & 0xffffffff);
+ put_unaligned_be48(dev->local_fifo, ha.uc.fifo);
dev_addr_set(net, ha.u);
memset(net->broadcast, -1, net->addr_len);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 08bc52c3cdcb..ecd7d169470b 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
.reg_bits = 8,
.val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+
.readable_reg = pca953x_readable_register,
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
@@ -906,15 +909,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
{
DECLARE_BITMAP(val, MAX_LINE);
+ u8 regaddr;
int ret;
- ret = regcache_sync_region(chip->regmap, chip->regs->output,
- chip->regs->output + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret)
goto out;
- ret = regcache_sync_region(chip->regmap, chip->regs->direction,
- chip->regs->direction + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret)
goto out;
@@ -1127,14 +1133,14 @@ static int pca953x_regcache_sync(struct device *dev)
* sync these registers first and only then sync the rest.
*/
regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
- ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
- ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@@ -1144,7 +1150,7 @@ static int pca953x_regcache_sync(struct device *dev)
if (chip->driver_data & PCA_PCAL) {
regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
- regaddr + NBANK(chip));
+ regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
@@ -1153,7 +1159,7 @@ static int pca953x_regcache_sync(struct device *dev)
regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
- regaddr + NBANK(chip));
+ regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);
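The repeated `- 1` above is the whole fix: regcache_sync_region() takes an inclusive [min, max] register range, so syncing NBANK(chip) consecutive registers that start at regaddr must end at regaddr + NBANK(chip) - 1. A minimal sketch of that arithmetic (the wrapper name is illustrative):

#include <linux/regmap.h>

/* Illustrative wrapper: sync exactly 'count' consecutive registers starting
 * at 'first'.  Because the range is inclusive, the last register synced is
 * first + count - 1; passing first + count would touch one register too many.
 */
static int sync_register_block(struct regmap *map, unsigned int first,
			       unsigned int count)
{
	return regcache_sync_region(map, first, first + count - 1);
}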
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index b6d3a57e27ed..7f8e2fed2988 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
map[index] &= ~(0xFFFFFFFFul << offset);
- map[index] |= v << offset;
+ map[index] |= (unsigned long)v << offset;
}
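The one-character cast above is a correctness fix for 64-bit kernels: BITS_PER_LONG is 64, so offset can be 32, and shifting a 32-bit v by 32 discards the value (and is undefined behaviour) before it ever reaches the 64-bit map word. A minimal sketch of the corrected pattern (names are illustrative):

#include <linux/types.h>

/* Illustrative: place a 32-bit value into either half of a 64-bit word.
 * 'offset' is 0 for the low half or 32 for the high half.
 */
static void put_u32_at(unsigned long *word, u32 v, unsigned int offset)
{
	*word &= ~(0xFFFFFFFFul << offset);
	*word |= (unsigned long)v << offset;	/* widen first, then shift */
}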
static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6b6d46e29e6e..4608599ba6bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1364,16 +1364,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdkfd_process_info *process_info = vm->process_info;
- struct amdgpu_bo *pd = vm->root.bo;
if (!process_info)
return;
- /* Release eviction fence from PD */
- amdgpu_bo_reserve(pd, false);
- amdgpu_bo_fence(pd, NULL, false);
- amdgpu_bo_unreserve(pd);
-
/* Update process info */
mutex_lock(&process_info->lock);
process_info->n_vms--;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 714178f1b6c6..2168163aad2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -40,7 +40,7 @@ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
rhead);
-
+ mutex_destroy(&list->bo_list_mutex);
kvfree(list);
}
@@ -136,6 +136,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
+ mutex_init(&list->bo_list_mutex);
*result = list;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 529d52a204cf..9caea1688fc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -47,6 +47,10 @@ struct amdgpu_bo_list {
struct amdgpu_bo *oa_obj;
unsigned first_userptr;
unsigned num_entries;
+
+ /* Protect access during command submission.
+ */
+ struct mutex bo_list_mutex;
};
int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b28af04b0c3e..d8f1335bc68f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ mutex_lock(&p->bo_list->bo_list_mutex);
+
/* One for TTM and one for the CS job */
amdgpu_bo_list_for_each_entry(e, p->bo_list)
e->tv.num_shared = 2;
@@ -651,6 +653,7 @@ out_free_user_pages:
kvfree(e->user_pages);
e->user_pages = NULL;
}
+ mutex_unlock(&p->bo_list->bo_list_mutex);
}
return r;
}
@@ -690,9 +693,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
- if (error && backoff)
+ if (error && backoff) {
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
+ mutex_unlock(&parser->bo_list->bo_list_mutex);
+ }
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -832,12 +837,16 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r)
+ if (r) {
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
+ }
r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
- if (r)
+ if (r) {
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
+ }
}
r = amdgpu_vm_handle_moved(adev, vm);
@@ -1278,6 +1287,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
mutex_unlock(&p->adev->notifier_lock);
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return 0;
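Taken together, the amdgpu hunks above introduce one invariant: bo_list_mutex is taken once the BO list has been resolved in amdgpu_cs_parser_bos() and released on every path out of the submission, whether that is the error unwind, the reservation backoff or the successful submit. A schematic sketch of that shape (both callees are placeholders, not real amdgpu functions):

#include <linux/mutex.h>

/* Placeholders standing in for the real parser/submit steps. */
static int validate_and_update_vm(void) { return 0; }
static int push_job_to_hw(void) { return 0; }

/* Schematic only: once bo_list_mutex is taken, every exit path drops it. */
static int submit_locked(struct mutex *bo_list_mutex)
{
	int r;

	mutex_lock(bo_list_mutex);

	r = validate_and_update_vm();
	if (r)
		goto out_unlock;

	r = push_job_to_hw();

out_unlock:
	mutex_unlock(bo_list_mutex);
	return r;
}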
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 93ac33a8de9a..3087dd1a1856 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1653,7 +1653,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
@@ -1689,6 +1689,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+ * It is expected that DMUB will resend any pending notifications at this point, for
+ * example HPD from DPIA.
+ */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc))
+ dc_enable_dmub_outbox(adev->dm.dc);
+
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@@ -2678,9 +2685,6 @@ static int dm_resume(void *handle)
*/
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
- if (dc_enable_dmub_notifications(adev->dm.dc))
- amdgpu_dm_outbox_init(adev);
-
r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2698,6 +2702,11 @@ static int dm_resume(void *handle)
}
}
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
WARN_ON(!dc_commit_state(dm->dc, dc_state));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2719,13 +2728,15 @@ static int dm_resume(void *handle)
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
- /* Re-enable outbox interrupts for DPIA. */
- if (dc_enable_dmub_notifications(adev->dm.dc))
- amdgpu_dm_outbox_init(adev);
-
/* Before powering on DC we need to re-initialize DMUB. */
dm_dmub_hw_resume(adev);
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
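The display hunks above converge on one rule: check dc_is_dmub_outbox_supported(), and call dc_enable_dmub_outbox() only after the outbox IRQ handlers are registered and the DMUB hardware has been (re)initialized, so pending notifications such as DPIA HPD land in live handlers. A schematic sketch of the resume-path ordering (all three callees are placeholders, not the real DM functions):

/* Placeholders for dm_dmub_hw_init()/amdgpu_dm_outbox_init()/
 * dc_enable_dmub_outbox() in the hunks above.
 */
static int dmub_hw_bringup(void) { return 0; }
static void outbox_irq_init(void) { }
static void outbox_enable(void) { }

/* Schematic only: enable the outbox last, once firmware and handlers are up. */
static int display_resume_outbox(bool outbox_supported)
{
	int r = dmub_hw_bringup();

	if (r)
		return r;

	if (outbox_supported) {
		outbox_irq_init();
		outbox_enable();
	}
	return 0;
}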
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index d5962a34c01d..e5fc875990c4 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ int ret;
+
+ dma_resv_lock(gem->resv, NULL);
+ ret = ttm_bo_vmap(bo, map);
+ dma_resv_unlock(gem->resv);
- return ttm_bo_vmap(bo, map);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_ttm_vmap);
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ dma_resv_lock(gem->resv, NULL);
ttm_bo_vunmap(bo, map);
+ dma_resv_unlock(gem->resv);
}
EXPORT_SYMBOL(drm_gem_ttm_vunmap);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 09f82545789f..44e7339e7a4a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -273,10 +273,17 @@ struct intel_context {
u8 child_index;
/** @guc: GuC specific members for parallel submission */
struct {
- /** @wqi_head: head pointer in work queue */
+ /** @wqi_head: cached head pointer in work queue */
u16 wqi_head;
- /** @wqi_tail: tail pointer in work queue */
+ /** @wqi_tail: cached tail pointer in work queue */
u16 wqi_tail;
+ /** @wq_head: pointer to the actual head in work queue */
+ u32 *wq_head;
+ /** @wq_tail: pointer to the actual tail in work queue */
+ u32 *wq_tail;
+ /** @wq_status: pointer to the status in work queue */
+ u32 *wq_status;
+
/**
* @parent_page: page in context state (ce->state) used
* by parent for work queue, process descriptor
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 86f7a9ac1c39..2b0266cab66b 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -661,6 +661,16 @@ static inline void execlists_schedule_out(struct i915_request *rq)
i915_request_put(rq);
}
+static u32 map_i915_prio_to_lrc_desc_prio(int prio)
+{
+ if (prio > I915_PRIORITY_NORMAL)
+ return GEN12_CTX_PRIORITY_HIGH;
+ else if (prio < I915_PRIORITY_NORMAL)
+ return GEN12_CTX_PRIORITY_LOW;
+ else
+ return GEN12_CTX_PRIORITY_NORMAL;
+}
+
static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
@@ -669,7 +679,7 @@ static u64 execlists_update_context(struct i915_request *rq)
desc = ce->lrc.desc;
if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
- desc |= lrc_desc_priority(rq_prio(rq));
+ desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
/*
* WaIdleLiteRestore:bdw,skl
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 31be734010db..a390f0813c8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -111,16 +111,6 @@ enum {
#define XEHP_SW_COUNTER_SHIFT 58
#define XEHP_SW_COUNTER_WIDTH 6
-static inline u32 lrc_desc_priority(int prio)
-{
- if (prio > I915_PRIORITY_NORMAL)
- return GEN12_CTX_PRIORITY_HIGH;
- else if (prio < I915_PRIORITY_NORMAL)
- return GEN12_CTX_PRIORITY_LOW;
- else
- return GEN12_CTX_PRIORITY_NORMAL;
-}
-
static inline void lrc_runtime_start(struct intel_context *ce)
{
struct intel_context_stats *stats = &ce->stats;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 4ef9990ed7f8..29ef8afc8c2e 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -122,6 +122,9 @@ enum intel_guc_action {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 966e69a8b1c1..9feda105f913 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -170,6 +170,11 @@ struct intel_guc {
/** @ads_engine_usage_size: size of engine usage in the ADS */
u32 ads_engine_usage_size;
+ /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
+ struct i915_vma *lrc_desc_pool_v69;
+ /** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
+ void *lrc_desc_pool_vaddr_v69;
+
/**
* @context_lookup: used to resolve intel_context from guc_id, if a
* context is present in this structure it is registered with the GuC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 42cb7a9a6199..89a7e5ec0614 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -203,6 +203,20 @@ struct guc_wq_item {
u32 fence_id;
} __packed;
+struct guc_process_desc_v69 {
+ u32 stage_id;
+ u64 db_base_addr;
+ u32 head;
+ u32 tail;
+ u32 error_offset;
+ u64 wq_base_addr;
+ u32 wq_size_bytes;
+ u32 wq_status;
+ u32 engine_presence;
+ u32 priority;
+ u32 reserved[36];
+} __packed;
+
struct guc_sched_wq_desc {
u32 head;
u32 tail;
@@ -227,6 +241,37 @@ struct guc_ctxt_registration_info {
};
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69 BIT(0)
+
+/*
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * is not required.
+ */
+struct guc_lrc_desc_v69 {
+ u32 hw_context_desc;
+ u32 slpm_perf_mode_hint; /* SPLC v1 only */
+ u32 slpm_freq_hint;
+ u32 engine_submit_mask; /* In logical space */
+ u8 engine_class;
+ u8 reserved0[3];
+ u32 priority;
+ u32 process_desc;
+ u32 wq_addr;
+ u32 wq_size;
+ u32 context_flags; /* CONTEXT_REGISTRATION_* */
+ /* Time for one workload to execute. (in micro seconds) */
+ u32 execution_quantum;
+ /* Time to wait for a preemption request to complete before issuing a
+ * reset. (in micro seconds).
+ */
+ u32 preemption_timeout;
+ u32 policy_flags; /* CONTEXT_POLICY_* */
+ u32 reserved1[19];
+} __packed;
+
/* 32-bit KLV structure as used by policy updates and others */
struct guc_klv_generic_dw_t {
u32 kl;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 1726f0f19901..9ffb343d0f79 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -414,12 +414,15 @@ struct sync_semaphore {
};
struct parent_scratch {
- struct guc_sched_wq_desc wq_desc;
+ union guc_descs {
+ struct guc_sched_wq_desc wq_desc;
+ struct guc_process_desc_v69 pdesc;
+ } descs;
struct sync_semaphore go;
struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
- u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
+ u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
u32 wq[WQ_SIZE / sizeof(u32)];
@@ -456,17 +459,23 @@ __get_parent_scratch(struct intel_context *ce)
LRC_STATE_OFFSET) / sizeof(u32)));
}
+static struct guc_process_desc_v69 *
+__get_process_desc_v69(struct intel_context *ce)
+{
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+
+ return &ps->descs.pdesc;
+}
+
static struct guc_sched_wq_desc *
-__get_wq_desc(struct intel_context *ce)
+__get_wq_desc_v70(struct intel_context *ce)
{
struct parent_scratch *ps = __get_parent_scratch(ce);
- return &ps->wq_desc;
+ return &ps->descs.wq_desc;
}
-static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
- struct intel_context *ce,
- u32 wqi_size)
+static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
{
/*
* Check for space in work queue. Caching a value of head pointer in
@@ -476,7 +485,7 @@ static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
#define AVAILABLE_SPACE \
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
if (wqi_size > AVAILABLE_SPACE) {
- ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
+ ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
if (wqi_size > AVAILABLE_SPACE)
return NULL;
@@ -495,11 +504,55 @@ static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
return ce;
}
+static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
+{
+ struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
+
+ if (!base)
+ return NULL;
+
+ GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
+
+ return &base[index];
+}
+
+static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
+{
+ u32 size;
+ int ret;
+
+ size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
+ GUC_MAX_CONTEXT_ID);
+ ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
+ (void **)&guc->lrc_desc_pool_vaddr_v69);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
+{
+ if (!guc->lrc_desc_pool_vaddr_v69)
+ return;
+
+ guc->lrc_desc_pool_vaddr_v69 = NULL;
+ i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
+}
+
static inline bool guc_submission_initialized(struct intel_guc *guc)
{
return guc->submission_initialized;
}
+static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
+{
+ struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
+
+ if (desc)
+ memset(desc, 0, sizeof(*desc));
+}
+
static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{
return __get_context(guc, id);
@@ -526,6 +579,8 @@ static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
if (unlikely(!guc_submission_initialized(guc)))
return;
+ _reset_lrc_desc_v69(guc, id);
+
/*
* xarray API doesn't have xa_erase_irqsave wrapper, so calling
* the lower level functions directly.
@@ -611,7 +666,7 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
true, timeout);
}
-static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
static int try_context_registration(struct intel_context *ce, bool loop);
static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
@@ -639,7 +694,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
GEM_BUG_ON(context_guc_id_invalid(ce));
if (context_policy_required(ce)) {
- err = guc_context_policy_init(ce, false);
+ err = guc_context_policy_init_v70(ce, false);
if (err)
return err;
}
@@ -737,9 +792,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
return (WQ_SIZE - ce->parallel.guc.wqi_tail);
}
-static void write_wqi(struct guc_sched_wq_desc *wq_desc,
- struct intel_context *ce,
- u32 wqi_size)
+static void write_wqi(struct intel_context *ce, u32 wqi_size)
{
BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
@@ -750,13 +803,12 @@ static void write_wqi(struct guc_sched_wq_desc *wq_desc,
ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
(WQ_SIZE - 1);
- WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
+ WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
}
static int guc_wq_noop_append(struct intel_context *ce)
{
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
- u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
+ u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
if (!wqi)
@@ -775,7 +827,6 @@ static int __guc_wq_item_append(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_context *child;
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
unsigned int wqi_size = (ce->parallel.number_children + 4) *
sizeof(u32);
u32 *wqi;
@@ -795,7 +846,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
return ret;
}
- wqi = get_wq_pointer(wq_desc, ce, wqi_size);
+ wqi = get_wq_pointer(ce, wqi_size);
if (!wqi)
return -EBUSY;
@@ -810,7 +861,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
for_each_child(ce, child)
*wqi++ = child->ring->tail / sizeof(u64);
- write_wqi(wq_desc, ce, wqi_size);
+ write_wqi(ce, wqi_size);
return 0;
}
@@ -1868,20 +1919,34 @@ static void reset_fail_worker_func(struct work_struct *w);
int intel_guc_submission_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
if (guc->submission_initialized)
return 0;
+ if (guc->fw.major_ver_found < 70) {
+ ret = guc_lrc_desc_pool_create_v69(guc);
+ if (ret)
+ return ret;
+ }
+
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
- if (!guc->submission_state.guc_ids_bitmap)
- return -ENOMEM;
+ if (!guc->submission_state.guc_ids_bitmap) {
+ ret = -ENOMEM;
+ goto destroy_pool;
+ }
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
guc->timestamp.shift = gpm_timestamp_shift(gt);
guc->submission_initialized = true;
return 0;
+
+destroy_pool:
+ guc_lrc_desc_pool_destroy_v69(guc);
+
+ return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1890,6 +1955,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
return;
guc_flush_destroyed_contexts(guc);
+ guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
guc->submission_initialized = false;
@@ -2147,10 +2213,34 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
-static int __guc_action_register_multi_lrc(struct intel_guc *guc,
- struct intel_context *ce,
- struct guc_ctxt_registration_info *info,
- bool loop)
+static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
+ struct intel_context *ce,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ struct intel_context *child;
+ u32 action[4 + MAX_ENGINE_INSTANCE];
+ int len = 0;
+
+ GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+ action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+ action[len++] = guc_id;
+ action[len++] = ce->parallel.number_children + 1;
+ action[len++] = offset;
+ for_each_child(ce, child) {
+ offset += sizeof(struct guc_lrc_desc_v69);
+ action[len++] = offset;
+ }
+
+ return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
+ struct intel_context *ce,
+ struct guc_ctxt_registration_info *info,
+ bool loop)
{
struct intel_context *child;
u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
@@ -2190,9 +2280,24 @@ static int __guc_action_register_multi_lrc(struct intel_guc *guc,
return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
-static int __guc_action_register_context(struct intel_guc *guc,
- struct guc_ctxt_registration_info *info,
- bool loop)
+static int __guc_action_register_context_v69(struct intel_guc *guc,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_REGISTER_CONTEXT,
+ guc_id,
+ offset,
+ };
+
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ 0, loop);
+}
+
+static int __guc_action_register_context_v70(struct intel_guc *guc,
+ struct guc_ctxt_registration_info *info,
+ bool loop)
{
u32 action[] = {
INTEL_GUC_ACTION_REGISTER_CONTEXT,
@@ -2213,24 +2318,52 @@ static int __guc_action_register_context(struct intel_guc *guc,
0, loop);
}
-static void prepare_context_registration_info(struct intel_context *ce,
- struct guc_ctxt_registration_info *info);
+static void prepare_context_registration_info_v69(struct intel_context *ce);
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info);
-static int register_context(struct intel_context *ce, bool loop)
+static int
+register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+ u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
+ ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
+
+ prepare_context_registration_info_v69(ce);
+
+ if (intel_context_is_parent(ce))
+ return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
+ offset, loop);
+ else
+ return __guc_action_register_context_v69(guc, ce->guc_id.id,
+ offset, loop);
+}
+
+static int
+register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
{
struct guc_ctxt_registration_info info;
+
+ prepare_context_registration_info_v70(ce, &info);
+
+ if (intel_context_is_parent(ce))
+ return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
+ else
+ return __guc_action_register_context_v70(guc, &info, loop);
+}
+
+static int register_context(struct intel_context *ce, bool loop)
+{
struct intel_guc *guc = ce_to_guc(ce);
int ret;
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- prepare_context_registration_info(ce, &info);
-
- if (intel_context_is_parent(ce))
- ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
+ if (guc->fw.major_ver_found >= 70)
+ ret = register_context_v70(guc, ce, loop);
else
- ret = __guc_action_register_context(guc, &info, loop);
+ ret = register_context_v69(guc, ce, loop);
+
if (likely(!ret)) {
unsigned long flags;
@@ -2238,7 +2371,8 @@ static int register_context(struct intel_context *ce, bool loop)
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- guc_context_policy_init(ce, loop);
+ if (guc->fw.major_ver_found >= 70)
+ guc_context_policy_init_v70(ce, loop);
}
return ret;
@@ -2335,7 +2469,7 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
0, loop);
}
-static int guc_context_policy_init(struct intel_context *ce, bool loop)
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2394,8 +2528,108 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
return ret;
}
-static void prepare_context_registration_info(struct intel_context *ce,
- struct guc_ctxt_registration_info *info)
+static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+ struct guc_lrc_desc_v69 *desc)
+{
+ desc->policy_flags = 0;
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+
+ /* NB: For both of these, zero means disabled. */
+ desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+}
+
+static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
+{
+ /*
+ * this matches the mapping we do in map_i915_prio_to_guc_prio()
+ * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
+ */
+ switch (prio) {
+ default:
+ MISSING_CASE(prio);
+ fallthrough;
+ case GUC_CLIENT_PRIORITY_KMD_NORMAL:
+ return GEN12_CTX_PRIORITY_NORMAL;
+ case GUC_CLIENT_PRIORITY_NORMAL:
+ return GEN12_CTX_PRIORITY_LOW;
+ case GUC_CLIENT_PRIORITY_HIGH:
+ case GUC_CLIENT_PRIORITY_KMD_HIGH:
+ return GEN12_CTX_PRIORITY_HIGH;
+ }
+}
+
+static void prepare_context_registration_info_v69(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u32 ctx_id = ce->guc_id.id;
+ struct guc_lrc_desc_v69 *desc;
+ struct intel_context *child;
+
+ GEM_BUG_ON(!engine->mask);
+
+ /*
+ * Ensure the LRC and CT vmas are in the same region, as the write barrier
+ * is done based on the CT vma region.
+ */
+ GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+ i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+ desc = __get_lrc_desc_v69(guc, ctx_id);
+ desc->engine_class = engine_class_to_guc_class(engine->class);
+ desc->engine_submit_mask = engine->logical_mask;
+ desc->hw_context_desc = ce->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init_v69(engine, desc);
+
+ /*
+ * If context is a parent, we need to register a process descriptor
+ * describing a work queue and register all child contexts.
+ */
+ if (intel_context_is_parent(ce)) {
+ struct guc_process_desc_v69 *pdesc;
+
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+ desc->process_desc = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ desc->wq_addr = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ desc->wq_size = WQ_SIZE;
+
+ pdesc = __get_process_desc_v69(ce);
+ memset(pdesc, 0, sizeof(*(pdesc)));
+ pdesc->stage_id = ce->guc_id.id;
+ pdesc->wq_base_addr = desc->wq_addr;
+ pdesc->wq_size_bytes = desc->wq_size;
+ pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+ ce->parallel.guc.wq_head = &pdesc->head;
+ ce->parallel.guc.wq_tail = &pdesc->tail;
+ ce->parallel.guc.wq_status = &pdesc->wq_status;
+
+ for_each_child(ce, child) {
+ desc = __get_lrc_desc_v69(guc, child->guc_id.id);
+
+ desc->engine_class =
+ engine_class_to_guc_class(engine->class);
+ desc->hw_context_desc = child->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init_v69(engine, desc);
+ }
+
+ clear_children_join_go_memory(ce);
+ }
+}
+
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2420,6 +2654,8 @@ static void prepare_context_registration_info(struct intel_context *ce,
*/
info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+ if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
+ info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
/*
@@ -2443,10 +2679,14 @@ static void prepare_context_registration_info(struct intel_context *ce,
info->wq_base_hi = upper_32_bits(wq_base_offset);
info->wq_size = WQ_SIZE;
- wq_desc = __get_wq_desc(ce);
+ wq_desc = __get_wq_desc_v70(ce);
memset(wq_desc, 0, sizeof(*wq_desc));
wq_desc->wq_status = WQ_STATUS_ACTIVE;
+ ce->parallel.guc.wq_head = &wq_desc->head;
+ ce->parallel.guc.wq_tail = &wq_desc->tail;
+ ce->parallel.guc.wq_status = &wq_desc->wq_status;
+
clear_children_join_go_memory(ce);
}
}
@@ -2761,11 +3001,21 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- struct context_policy policy;
+ if (guc->fw.major_ver_found >= 70) {
+ struct context_policy policy;
- __guc_context_policy_start_klv(&policy, guc_id);
- __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
- __guc_context_set_context_policies(guc, &policy, true);
+ __guc_context_policy_start_klv(&policy, guc_id);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_set_context_policies(guc, &policy, true);
+ } else {
+ u32 action[] = {
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
+ guc_id,
+ preemption_timeout
+ };
+
+ intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ }
}
static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
@@ -3013,11 +3263,21 @@ static int guc_context_alloc(struct intel_context *ce)
static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce)
{
- struct context_policy policy;
+ if (guc->fw.major_ver_found >= 70) {
+ struct context_policy policy;
- __guc_context_policy_start_klv(&policy, ce->guc_id.id);
- __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
- __guc_context_set_context_policies(guc, &policy, true);
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_set_context_policies(guc, &policy, true);
+ } else {
+ u32 action[] = {
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
+ ce->guc_id.id,
+ ce->guc_state.prio,
+ };
+
+ guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ }
}
static void guc_context_set_prio(struct intel_guc *guc,
@@ -4527,17 +4787,19 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
guc_log_context_priority(p, ce);
if (intel_context_is_parent(ce)) {
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
struct intel_context *child;
drm_printf(p, "\t\tNumber children: %u\n",
ce->parallel.number_children);
- drm_printf(p, "\t\tWQI Head: %u\n",
- READ_ONCE(wq_desc->head));
- drm_printf(p, "\t\tWQI Tail: %u\n",
- READ_ONCE(wq_desc->tail));
- drm_printf(p, "\t\tWQI Status: %u\n\n",
- READ_ONCE(wq_desc->wq_status));
+
+ if (ce->parallel.guc.wq_status) {
+ drm_printf(p, "\t\tWQI Head: %u\n",
+ READ_ONCE(*ce->parallel.guc.wq_head));
+ drm_printf(p, "\t\tWQI Tail: %u\n",
+ READ_ONCE(*ce->parallel.guc.wq_tail));
+ drm_printf(p, "\t\tWQI Status: %u\n\n",
+ READ_ONCE(*ce->parallel.guc.wq_status));
+ }
if (ce->engine->emit_bb_start ==
emit_bb_start_parent_no_preempt_mid_batch) {
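One pattern repeats through the whole GuC hunk above: look at the firmware major version that was actually loaded and route to either the v70 KLV-based context-policy interface or the legacy v69 H2G actions backed by the LRC descriptor pool. A schematic sketch of that dispatch, mirroring register_context() and __guc_context_set_prio() (the two callees are placeholders, not real functions):

#include <linux/types.h>

/* Placeholders for the real per-version paths. */
static int guc_op_v70(void) { return 0; }	/* KLV context policies  */
static int guc_op_v69(void) { return 0; }	/* legacy per-field H2G  */

/* Schematic only: dispatch on the loaded firmware's major version. */
static int guc_do_op(u16 major_ver_found)
{
	if (major_ver_found >= 70)
		return guc_op_v70();
	return guc_op_v69();
}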
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 2ff55b9994bc..703f42ba5ddd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -70,6 +70,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \
fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1))
+#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
+ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3))
+
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \
@@ -105,6 +109,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
MODULE_FIRMWARE(uc_);
INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
/* The below structs and macros are used to iterate across the list of blobs */
@@ -149,6 +154,9 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
static const struct uc_fw_platform_requirement blobs_guc[] = {
INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
};
+ static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
+ INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
+ };
static const struct uc_fw_platform_requirement blobs_huc[] = {
INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
};
@@ -179,12 +187,29 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
const struct uc_fw_blob *blob = &fw_blobs[i].blob;
uc_fw->path = blob->path;
+ uc_fw->wanted_path = blob->path;
uc_fw->major_ver_wanted = blob->major;
uc_fw->minor_ver_wanted = blob->minor;
break;
}
}
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
+ const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
+ u32 count = ARRAY_SIZE(blobs_guc_fallback);
+
+ for (i = 0; i < count && p <= blobs[i].p; i++) {
+ if (p == blobs[i].p && rev >= blobs[i].rev) {
+ const struct uc_fw_blob *blob = &blobs[i].blob;
+
+ uc_fw->fallback.path = blob->path;
+ uc_fw->fallback.major_ver = blob->major;
+ uc_fw->fallback.minor_ver = blob->minor;
+ break;
+ }
+ }
+ }
+
/* make sure the list is ordered as expected */
if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
for (i = 1; i < fw_count; i++) {
@@ -338,7 +363,24 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
- err = request_firmware(&fw, uc_fw->path, dev);
+ err = firmware_request_nowarn(&fw, uc_fw->path, dev);
+ if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
+ err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
+ if (!err) {
+ drm_notice(&i915->drm,
+ "%s firmware %s is recommended, but only %s was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->wanted_path,
+ uc_fw->fallback.path);
+ drm_info(&i915->drm,
+ "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
+
+ uc_fw->path = uc_fw->fallback.path;
+ uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
+ uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
+ }
+ }
if (err)
goto fail;
@@ -437,8 +479,8 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
@@ -796,7 +838,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
drm_printf(p, "%s firmware: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
+ if (uc_fw->fallback.path) {
+ drm_printf(p, "%s firmware fallback: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
+ drm_printf(p, "fallback selected: %s\n",
+ str_yes_no(uc_fw->path == uc_fw->fallback.path));
+ }
drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status));
drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 3229018877d3..562acdf88adb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -74,6 +74,7 @@ struct intel_uc_fw {
const enum intel_uc_fw_status status;
enum intel_uc_fw_status __status; /* no accidental overwrites */
};
+ const char *wanted_path;
const char *path;
bool user_overridden;
size_t size;
@@ -98,6 +99,12 @@ struct intel_uc_fw {
u16 major_ver_found;
u16 minor_ver_found;
+ struct {
+ const char *path;
+ u16 major_ver;
+ u16 minor_ver;
+ } fallback;
+
u32 rsa_size;
u32 ucode_size;
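The intel_uc_fw changes above record a fallback blob during selection and reach for it only when the preferred firmware is absent and the user has not overridden the path, logging a notice when the fallback is used. A minimal sketch of that request-with-fallback flow, assuming only firmware_request_nowarn() from <linux/firmware.h> (the wrapper and struct here are illustrative):

#include <linux/firmware.h>

struct fw_choice {
	const char *wanted_path;	/* preferred blob */
	const char *fallback_path;	/* may be NULL    */
};

/* Illustrative only: try the wanted blob quietly, then the fallback. */
static int request_with_fallback(struct device *dev, const struct fw_choice *c,
				 const struct firmware **fw,
				 const char **loaded_path)
{
	int err = firmware_request_nowarn(fw, c->wanted_path, dev);

	*loaded_path = c->wanted_path;
	if (err && c->fallback_path) {
		err = firmware_request_nowarn(fw, c->fallback_path, dev);
		if (!err)
			*loaded_path = c->fallback_path;
	}
	return err;
}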
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index c849533ca83e..3f5750cc2673 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
ret = dcss_submodules_init(dcss);
if (ret) {
+ of_node_put(dcss->of_port);
dev_err(dev, "submodules initialization failed\n");
goto clks_err;
}
@@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
dcss_clocks_disable(dcss);
}
+ of_node_put(dcss->of_port);
+
pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index c96014464355..a189982601a4 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
desc->delay.hpd_reliable = reliable_ms;
of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
- desc->delay.hpd_reliable = absent_ms;
+ desc->delay.hpd_absent = absent_ms;
/* Power the panel on so we can read the EDID */
ret = pm_runtime_get_sync(dev);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 191c56064f19..6b25b2f4f5a3 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
- init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
- irq_work_queue(&job->work);
+ INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+ schedule_work(&job->work);
}
static struct dma_fence *
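The scheduler fix above swaps init_irq_work()/irq_work_queue() for INIT_WORK()/schedule_work(), so the kill-jobs callback now runs in process context where it is allowed to sleep (take mutexes, wait on fences). A minimal sketch of that pattern with an illustrative payload type:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_cleanup {
	struct work_struct work;
	/* ... payload being torn down ... */
};

static void deferred_cleanup_work(struct work_struct *wrk)
{
	struct deferred_cleanup *c = container_of(wrk, typeof(*c), work);

	/* Process context: sleeping is fine here, unlike in irq_work. */
	kfree(c);
}

static void schedule_deferred_cleanup(struct deferred_cleanup *c)
{
	INIT_WORK(&c->work, deferred_cleanup_work);
	schedule_work(&c->work);
}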
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 3d6f8ee355bf..630cfa4ddd46 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
*/
static irqreturn_t cdns_i2c_master_isr(void *ptr)
{
- unsigned int isr_status, avail_bytes, updatetx;
+ unsigned int isr_status, avail_bytes;
unsigned int bytes_to_send;
- bool hold_quirk;
+ bool updatetx;
struct cdns_i2c *id = ptr;
/* Signal completion only after everything is updated */
int done_flag = 0;
@@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* Check if transfer size register needs to be updated again for a
* large data receive operation.
*/
- updatetx = 0;
- if (id->recv_count > id->curr_recv_count)
- updatetx = 1;
-
- hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+ updatetx = id->recv_count > id->curr_recv_count;
/* When receiving, handle data interrupt and completion interrupt */
if (id->p_recv_buf &&
@@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
break;
}
- if (cdns_is_holdquirk(id, hold_quirk))
+ if (cdns_is_holdquirk(id, updatetx))
break;
}
@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* maintain transfer size non-zero while performing a large
* receive operation.
*/
- if (cdns_is_holdquirk(id, hold_quirk)) {
+ if (cdns_is_holdquirk(id, updatetx)) {
/* wait while fifo is full */
while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
(id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = id->recv_count;
}
- } else if (id->recv_count && !hold_quirk &&
- !id->curr_recv_count) {
-
- /* Set the slave address in address register*/
- cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
- CDNS_I2C_ADDR_OFFSET);
-
- if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
- cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
- CDNS_I2C_XFER_SIZE_OFFSET);
- id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
- } else {
- cdns_i2c_writereg(id->recv_count,
- CDNS_I2C_XFER_SIZE_OFFSET);
- id->curr_recv_count = id->recv_count;
- }
}
/* Clear hold (if not repeated start) and signal completion */
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e9e2db68b9fb..78fb1a4274a6 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -66,7 +66,7 @@
/* IMX I2C registers:
* the I2C register offset is different between SoCs,
- * to provid support for all these chips, split the
+ * to provide support for all these chips, split the
* register offset into a fixed base address and a
* variable shift value, then the full register offset
* will be calculated by
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 56aa424fd71d..815cc561386b 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -49,7 +49,7 @@
#define MLXCPLD_LPCI2C_NACK_IND 2
#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c
+#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0e
#define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
enum mlxcpld_i2c_frequency {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 20e53b167f81..c8539d0e12dd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7304,7 +7304,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
goto abort;
conf->mddev = mddev;
- if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
+ ret = -ENOMEM;
+ conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!conf->stripe_hashtbl)
goto abort;
/* We init hash_locks[0] separately so that it can be used
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 2e0aa74ac185..95ef971b5e1c 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o
lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
-KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n
-KCOV_INSTRUMENT_rodata.o := n
-CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO)
+
+KASAN_SANITIZE_rodata.o := n
+KCSAN_SANITIZE_rodata.o := n
+KCOV_INSTRUMENT_rodata.o := n
+OBJECT_FILES_NON_STANDARD_rodata.o := y
+CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 86e867ffbb10..033be559a730 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1298,8 +1298,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
/*
* omap_device_pm_domain has callbacks to enable the main
* functional clock, interface clock and also configure the
- * SYSCONFIG register of omap devices. The callback will be invoked
- * as part of pm_runtime_get_sync.
+ * SYSCONFIG register to clear any bootloader-set voltage
+ * capabilities before calling sdhci_setup_host(). The
+ * callback will be invoked as part of pm_runtime_get_sync.
*/
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 50);
@@ -1441,7 +1442,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- sdhci_runtime_suspend_host(host);
+ if (omap_host->con != -EINVAL)
+ sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
@@ -1458,10 +1460,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- if (omap_host->con != -EINVAL)
+ if (omap_host->con != -EINVAL) {
sdhci_omap_context_restore(omap_host);
-
- sdhci_runtime_resume_host(host, 0);
+ sdhci_runtime_resume_host(host, 0);
+ }
return 0;
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 889e40329956..93da23682d86 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -850,9 +850,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
unsigned int tRP_ps;
bool use_half_period;
int sample_delay_ps, sample_delay_factor;
- u16 busy_timeout_cycles;
+ unsigned int busy_timeout_cycles;
u8 wrn_dly_sel;
unsigned long clk_rate, min_rate;
+ u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
@@ -885,7 +886,8 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
- busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+ busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+ busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
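The type changes above matter because the busy timeout is now derived from the worst of tBERS_max/tPROG_max rather than tWB_max + tR_max, and erase times are orders of magnitude longer. A rough worked example (the ~700 ms erase time and 10 ns clock period are illustrative values, not taken from a datasheet):

#include <linux/kernel.h>	/* DIV_ROUND_UP_ULL() */
#include <linux/types.h>

/* Illustrative arithmetic only. */
static void busy_timeout_example(void)
{
	u64 tBERS_max_ps = 700ULL * 1000 * 1000 * 1000;	/* ~700 ms erase, in ps */
	u64 period_ps = 10000;				/* 10 ns GPMI clock     */
	u64 cycles = DIV_ROUND_UP_ULL(tBERS_max_ps, period_ps);

	/* cycles == 70,000,000: far beyond a u16 (65,535), hence the wider types. */
	(void)cycles;
}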
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index febfcf2d92af..9a247eb7679c 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -449,7 +449,7 @@ out:
dev_put(amt->dev);
}
-/* Non-existant group is created as INCLUDE {empty}:
+/* Non-existent group is created as INCLUDE {empty}:
*
* RFC 3376 - 5.1. Action on Change of Interface State
*
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 29ed0d3cd171..3a2d109a3792 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -1152,6 +1153,10 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops at91_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static ssize_t mb0_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1293,6 +1298,7 @@ static int at91_can_probe(struct platform_device *pdev)
}
dev->netdev_ops = &at91_netdev_ops;
+ dev->ethtool_ops = &at91_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index bd2f6dc01194..f23a03300a81 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -223,7 +223,7 @@ int c_can_power_up(struct net_device *dev);
int c_can_power_down(struct net_device *dev);
#endif
-void c_can_set_ethtool_ops(struct net_device *dev);
+extern const struct ethtool_ops c_can_ethtool_ops;
static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring)
{
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 8a826a6813bd..e41167eda673 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -24,11 +24,7 @@ static void c_can_get_ringparam(struct net_device *netdev,
ring->tx_pending = priv->msg_obj_tx_num;
}
-static const struct ethtool_ops c_can_ethtool_ops = {
+const struct ethtool_ops c_can_ethtool_ops = {
.get_ringparam = c_can_get_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void c_can_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &c_can_ethtool_ops;
-}
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index de38d8f7b5f7..dc8132862f33 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -1364,7 +1364,7 @@ int register_c_can_dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
- c_can_set_ethtool_ops(dev);
+ dev->ethtool_ops = &c_can_ethtool_ops;
return register_candev(dev);
}
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
index 5da7778d92dc..0aa1af31d0fe 100644
--- a/drivers/net/can/can327.c
+++ b/drivers/net/can/can327.c
@@ -10,7 +10,7 @@
* Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
*/
-#define pr_fmt(fmt) "can327: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
@@ -827,7 +827,7 @@ static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
/* BHs are already disabled, so no spin_lock_bh().
- * See Documentation/networking/netdevices.txt
+ * See Documentation/networking/netdevices.rst
*/
spin_lock(&elm->lock);
can327_send_frame(elm, frame);
@@ -836,6 +836,8 @@ static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb,
dev->stats.tx_packets++;
dev->stats.tx_bytes += frame->can_id & CAN_RTR_FLAG ? 0 : frame->len;
+ skb_tx_timestamp(skb);
+
out:
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -848,6 +850,10 @@ static const struct net_device_ops can327_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops can327_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static bool can327_is_valid_rx_char(u8 c)
{
static const bool lut_char_is_valid['z'] = {
@@ -1032,6 +1038,7 @@ static int can327_ldisc_open(struct tty_struct *tty)
/* Configure netdev interface */
elm->dev = dev;
dev->netdev_ops = &can327_netdev_ops;
+ dev->ethtool_ops = &can327_ethtool_ops;
/* Mark ldisc channel as alive */
elm->tty = tty;
@@ -1100,7 +1107,7 @@ static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
static struct tty_ldisc_ops can327_ldisc = {
.owner = THIS_MODULE,
- .name = "can327",
+ .name = KBUILD_MODNAME,
.num = N_CAN327,
.receive_buf = can327_ldisc_rx,
.write_wakeup = can327_ldisc_tx_wakeup,
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 797a954bb1a0..0b9dfc76e769 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -17,6 +17,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -836,6 +837,10 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops cc770_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_cc770dev(struct net_device *dev)
{
struct cc770_priv *priv = netdev_priv(dev);
@@ -846,6 +851,7 @@ int register_cc770dev(struct net_device *dev)
return err;
dev->netdev_ops = &cc770_netdev_ops;
+ dev->ethtool_ops = &cc770_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 6b281f6eb9b4..3c18d028bd8c 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
@@ -1301,6 +1302,10 @@ static const struct net_device_ops ctucan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ctucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int ctucan_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1377,6 +1382,7 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
set_drvdata_fnc(dev, ndev);
SET_NETDEV_DEV(ndev, dev);
ndev->netdev_ops = &ctucan_netdev_ops;
+ ndev->ethtool_ops = &ctucan_ethtool_ops;
/* Getting the can_clk info */
if (!can_clk_rate) {
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 523eaacfe29e..c1956b1e9faf 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -322,6 +322,56 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL_GPL(can_change_mtu);
+/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
+ * supporting hardware timestamps
+ */
+int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_ON &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_ON;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(can_eth_ioctl_hwts);
+
+/* generic implementation of ethtool_ops::get_ts_info for CAN devices
+ * supporting hardware timestamps
+ */
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts);
+
/* Common open function when the device gets opened.
*
* This function should be called in the open function of the device
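The two exported helpers above give hardware-timestamping CAN drivers a stock SIOCSHWTSTAMP/SIOCGHWTSTAMP handler and a get_ts_info that advertises the matching capabilities. A hedged sketch of how a driver would wire them up (the ops struct names are illustrative; the helpers' declarations are assumed to live in the CAN dev headers updated by this series):

#include <linux/can/dev.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Illustrative driver glue only. */
static const struct net_device_ops example_can_netdev_ops = {
	.ndo_change_mtu	= can_change_mtu,
	.ndo_eth_ioctl	= can_eth_ioctl_hwts,	/* SIOCSHWTSTAMP/SIOCGHWTSTAMP */
};

static const struct ethtool_ops example_can_ethtool_ops = {
	.get_ts_info	= can_ethtool_op_get_ts_info_hwts,	/* HW + SW timestamps */
};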
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 8bb62dd864c8..07e0feac8629 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -72,6 +72,9 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
/* save frame_len to reuse it when transmission is completed */
can_skb_prv(skb)->frame_len = frame_len;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
skb_tx_timestamp(skb);
/* save this skb for tx interrupt echo handling */
@@ -107,6 +110,9 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
+
/* get the real payload length for netdev statistics */
if (cf->can_id & CAN_RTR_FLAG)
*len_ptr = 0;
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index d060088047f1..f857968efed7 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -2113,7 +2113,7 @@ static int flexcan_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &flexcan_netdev_ops;
- flexcan_set_ethtool_ops(dev);
+ dev->ethtool_ops = &flexcan_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/flexcan/flexcan-ethtool.c b/drivers/net/can/flexcan/flexcan-ethtool.c
index 3ae535577700..50e86b2da532 100644
--- a/drivers/net/can/flexcan/flexcan-ethtool.c
+++ b/drivers/net/can/flexcan/flexcan-ethtool.c
@@ -100,15 +100,11 @@ static int flexcan_get_sset_count(struct net_device *netdev, int sset)
}
}
-static const struct ethtool_ops flexcan_ethtool_ops = {
+const struct ethtool_ops flexcan_ethtool_ops = {
.get_ringparam = flexcan_get_ringparam,
.get_strings = flexcan_get_strings,
.get_priv_flags = flexcan_get_priv_flags,
.set_priv_flags = flexcan_set_priv_flags,
.get_sset_count = flexcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void flexcan_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &flexcan_ethtool_ops;
-}
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 23fc09a7e10f..8621a8ea1dea 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -114,7 +114,7 @@ struct flexcan_priv {
void (*write)(u32 val, void __iomem *addr);
};
-void flexcan_set_ethtool_ops(struct net_device *dev);
+extern const struct ethtool_ops flexcan_ethtool_ops;
static inline bool
flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 24035a6187c9..6c37aab93eb3 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/spinlock.h>
@@ -1561,6 +1562,10 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops grcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int grcan_setup_netdev(struct platform_device *ofdev,
void __iomem *base,
int irq, u32 ambafreq, bool txbug)
@@ -1577,6 +1582,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
dev->irq = irq;
dev->flags |= IFF_ECHO;
dev->netdev_ops = &grcan_netdev_ops;
+ dev->ethtool_ops = &grcan_ethtool_ops;
dev->sysfs_groups[0] = &sysfs_grcan_group;
priv = netdev_priv(dev);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 64e3be8b73af..ad7a89b95da7 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -925,6 +926,10 @@ static const struct net_device_ops ifi_canfd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ifi_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int ifi_canfd_plat_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -962,6 +967,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
ndev->irq = irq;
ndev->flags |= IFF_ECHO; /* we support local echo */
ndev->netdev_ops = &ifi_canfd_netdev_ops;
+ ndev->ethtool_ops = &ifi_canfd_ethtool_ops;
priv = netdev_priv(ndev);
priv->ndev = ndev;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index ccb5c5405224..71a2caae0757 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
@@ -1277,6 +1278,8 @@ static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
if (!skb)
return;
+ skb_tx_timestamp(skb);
+
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
}
@@ -1752,6 +1755,10 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ican3_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/*
* Low-level CAN Device
*/
@@ -1923,6 +1930,7 @@ static int ican3_probe(struct platform_device *pdev)
mod->free_page = DPM_FREE_START;
ndev->netdev_ops = &ican3_netdev_ops;
+ ndev->ethtool_ops = &ican3_ethtool_ops;
ndev->flags |= IFF_ECHO;
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index dcd2c9d50d5e..ed54c0b3c7d4 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
@@ -919,10 +920,15 @@ static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
.ndo_open = kvaser_pciefd_open,
.ndo_stop = kvaser_pciefd_stop,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_pciefd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
int i;
@@ -939,6 +945,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can = netdev_priv(netdev);
netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
+ netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 713a4b0edf86..4709c012b1dc 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -9,6 +9,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -1829,10 +1830,15 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops m_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
+ dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 78a21ab63601..2119fbb287ef 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -616,6 +616,10 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops mscan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
@@ -676,6 +680,7 @@ struct net_device *alloc_mscandev(void)
priv = netdev_priv(dev);
dev->netdev_ops = &mscan_netdev_ops;
+ dev->ethtool_ops = &mscan_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 32804fed116c..0558ff67ec6a 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -6,6 +6,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -938,6 +939,10 @@ static const struct net_device_ops pch_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops pch_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void pch_can_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
@@ -1188,6 +1193,7 @@ static int pch_can_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &pch_can_netdev_ops;
+ ndev->ethtool_ops = &pch_can_ethtool_ops;
priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
netif_napi_add_weight(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index afb9adb3d5c2..f8420cc1d907 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -7,6 +7,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/ethtool.h>
#include "peak_canfd_user.h"
@@ -742,13 +743,59 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops peak_canfd_netdev_ops = {
.ndo_open = peak_canfd_open,
.ndo_stop = peak_canfd_close,
+ .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_canfd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static int peak_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops peak_canfd_ethtool_ops = {
+ .get_ts_info = peak_get_ts_info,
+};
+
struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
int echo_skb_max)
{
@@ -789,6 +836,7 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
ndev->flags |= IFF_ECHO;
ndev->netdev_ops = &peak_canfd_netdev_ops;
+ ndev->ethtool_ops = &peak_canfd_ethtool_ops;
ndev->dev_id = index;
return ndev;
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index d11db2112a4a..6ee968c59ac9 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/can/dev.h>
@@ -630,6 +631,10 @@ static const struct net_device_ops rcar_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops rcar_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
@@ -785,6 +790,7 @@ static int rcar_can_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->ethtool_ops = &rcar_can_ethtool_ops;
ndev->irq = irq;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index d3e569a02b4d..27085b796e75 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/can/dev.h>
@@ -1695,6 +1696,10 @@ static const struct net_device_ops rcar_canfd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops rcar_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
u32 fcan_freq)
{
@@ -1711,6 +1716,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv = netdev_priv(ndev);
ndev->netdev_ops = &rcar_canfd_netdev_ops;
+ ndev->ethtool_ops = &rcar_canfd_ethtool_ops;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
priv->base = gpriv->base;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 75a2f9bf8c16..98dfd5f295a7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -52,6 +52,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -654,6 +655,10 @@ static const struct net_device_ops sja1000_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops sja1000_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_sja1000dev(struct net_device *dev)
{
int ret;
@@ -663,6 +668,7 @@ int register_sja1000dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &sja1000_netdev_ops;
+ dev->ethtool_ops = &sja1000_ethtool_ops;
set_reset_mode(dev);
chipset_init(dev);
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index dc28e715bbe1..8d13fdf8c28a 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -1,11 +1,14 @@
/*
* slcan.c - serial line CAN interface driver (using tty line discipline)
*
- * This file is derived from linux/drivers/net/slip/slip.c
+ * This file is derived from linux/drivers/net/slip/slip.c and takes
+ * inspiration from linux/drivers/net/can/can327.c for the rework of
+ * the line discipline code.
*
* slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
* Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
* slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -35,8 +38,9 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
@@ -46,10 +50,6 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
@@ -63,30 +63,22 @@ MODULE_ALIAS_LDISC(N_SLCAN);
MODULE_DESCRIPTION("serial line CAN interface");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
-
-#define SLCAN_MAGIC 0x53CA
-
-static int maxdev = 10; /* MAX number of SLCAN channels;
- * This can be overridden with
- * insmod slcan.ko maxdev=nnn
- */
-module_param(maxdev, int, 0);
-MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
+MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
/* maximum rx buffer len: extended CAN frame with timestamp */
-#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1)
-
-#define SLC_CMD_LEN 1
-#define SLC_SFF_ID_LEN 3
-#define SLC_EFF_ID_LEN 8
-#define SLC_STATE_LEN 1
-#define SLC_STATE_BE_RXCNT_LEN 3
-#define SLC_STATE_BE_TXCNT_LEN 3
-#define SLC_STATE_FRAME_LEN (1 + SLC_CMD_LEN + SLC_STATE_BE_RXCNT_LEN + \
- SLC_STATE_BE_TXCNT_LEN)
+#define SLCAN_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1)
+
+#define SLCAN_CMD_LEN 1
+#define SLCAN_SFF_ID_LEN 3
+#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_STATE_LEN 1
+#define SLCAN_STATE_BE_RXCNT_LEN 3
+#define SLCAN_STATE_BE_TXCNT_LEN 3
+#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
struct slcan {
struct can_priv can;
- int magic;
/* Various fields. */
struct tty_struct *tty; /* ptr to TTY structure */
@@ -95,24 +87,21 @@ struct slcan {
struct work_struct tx_work; /* Flushes transmit buffer */
/* These are pointers to the malloc()ed frame buffers. */
- unsigned char rbuff[SLC_MTU]; /* receiver buffer */
+ unsigned char rbuff[SLCAN_MTU]; /* receiver buffer */
int rcount; /* received chars counter */
- unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
+ unsigned char xbuff[SLCAN_MTU]; /* transmitter buffer */
unsigned char *xhead; /* pointer to next XMIT byte */
int xleft; /* bytes left in XMIT queue */
unsigned long flags; /* Flag values/ mode etc */
-#define SLF_INUSE 0 /* Channel in use */
-#define SLF_ERROR 1 /* Parity, etc. error */
-#define SLF_XCMD 2 /* Command transmission */
+#define SLF_ERROR 0 /* Parity, etc. error */
+#define SLF_XCMD 1 /* Command transmission */
unsigned long cmd_flags; /* Command flags */
#define CF_ERR_RST 0 /* Reset errors on open */
wait_queue_head_t xcmd_wait; /* Wait queue for commands */
/* transmission */
};
-static struct net_device **slcan_devs;
-
static const u32 slcan_bitrate_const[] = {
10000, 20000, 50000, 100000, 125000,
250000, 500000, 800000, 1000000
@@ -179,7 +168,7 @@ int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on)
*************************************************************************/
/* Send one completely decapsulated can_frame to the network layer */
-static void slc_bump_frame(struct slcan *sl)
+static void slcan_bump_frame(struct slcan *sl)
{
struct sk_buff *skb;
struct can_frame *cf;
@@ -199,10 +188,10 @@ static void slc_bump_frame(struct slcan *sl)
fallthrough;
case 't':
/* store dlc ASCII value and terminate SFF CAN ID string */
- cf->len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN] = 0;
/* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
+ cmd += SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + 1;
break;
case 'R':
cf->can_id = CAN_RTR_FLAG;
@@ -210,16 +199,16 @@ static void slc_bump_frame(struct slcan *sl)
case 'T':
cf->can_id |= CAN_EFF_FLAG;
/* store dlc ASCII value and terminate EFF CAN ID string */
- cf->len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN] = 0;
/* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
+ cmd += SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN + 1;
break;
default:
goto decode_failed;
}
- if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
+ if (kstrtou32(sl->rbuff + SLCAN_CMD_LEN, 16, &tmpid))
goto decode_failed;
cf->can_id |= tmpid;
@@ -266,7 +255,7 @@ decode_failed:
* sb256256 : state bus-off: rx counter 256, tx counter 256
* sa057033 : state active, rx counter 57, tx counter 33
*/
-static void slc_bump_state(struct slcan *sl)
+static void slcan_bump_state(struct slcan *sl)
{
struct net_device *dev = sl->dev;
struct sk_buff *skb;
@@ -292,16 +281,16 @@ static void slc_bump_state(struct slcan *sl)
return;
}
- if (state == sl->can.state || sl->rcount < SLC_STATE_FRAME_LEN)
+ if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
return;
- cmd += SLC_STATE_BE_RXCNT_LEN + SLC_CMD_LEN + 1;
- cmd[SLC_STATE_BE_TXCNT_LEN] = 0;
+ cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
+ cmd[SLCAN_STATE_BE_TXCNT_LEN] = 0;
if (kstrtou32(cmd, 10, &txerr))
return;
*cmd = 0;
- cmd -= SLC_STATE_BE_RXCNT_LEN;
+ cmd -= SLCAN_STATE_BE_RXCNT_LEN;
if (kstrtou32(cmd, 10, &rxerr))
return;
@@ -330,7 +319,7 @@ static void slc_bump_state(struct slcan *sl)
* e1a : len 1, errors: ACK error
* e3bcO: len 3, errors: Bit0 error, CRC error, Tx overrun error
*/
-static void slc_bump_err(struct slcan *sl)
+static void slcan_bump_err(struct slcan *sl)
{
struct net_device *dev = sl->dev;
struct sk_buff *skb;
@@ -346,7 +335,7 @@ static void slc_bump_err(struct slcan *sl)
else
return;
- if ((len + SLC_CMD_LEN + 1) > sl->rcount)
+ if ((len + SLCAN_CMD_LEN + 1) > sl->rcount)
return;
skb = alloc_can_err_skb(dev, &cf);
@@ -354,7 +343,7 @@ static void slc_bump_err(struct slcan *sl)
if (skb)
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cmd += SLC_CMD_LEN + 1;
+ cmd += SLCAN_CMD_LEN + 1;
for (i = 0; i < len; i++, cmd++) {
switch (*cmd) {
case 'a':
@@ -443,7 +432,7 @@ static void slc_bump_err(struct slcan *sl)
netif_rx(skb);
}
-static void slc_bump(struct slcan *sl)
+static void slcan_bump(struct slcan *sl)
{
switch (sl->rbuff[0]) {
case 'r':
@@ -453,11 +442,11 @@ static void slc_bump(struct slcan *sl)
case 'R':
fallthrough;
case 'T':
- return slc_bump_frame(sl);
+ return slcan_bump_frame(sl);
case 'e':
- return slc_bump_err(sl);
+ return slcan_bump_err(sl);
case 's':
- return slc_bump_state(sl);
+ return slcan_bump_state(sl);
default:
return;
}
@@ -469,12 +458,12 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
sl->rcount > 4)
- slc_bump(sl);
+ slcan_bump(sl);
sl->rcount = 0;
} else {
if (!test_bit(SLF_ERROR, &sl->flags)) {
- if (sl->rcount < SLC_MTU) {
+ if (sl->rcount < SLCAN_MTU) {
sl->rbuff[sl->rcount++] = s;
return;
}
@@ -490,7 +479,7 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
*************************************************************************/
/* Encapsulate one can_frame and stuff into a TTY queue. */
-static void slc_encaps(struct slcan *sl, struct can_frame *cf)
+static void slcan_encaps(struct slcan *sl, struct can_frame *cf)
{
int actual, i;
unsigned char *pos;
@@ -507,11 +496,11 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
/* determine number of chars for the CAN-identifier */
if (cf->can_id & CAN_EFF_FLAG) {
id &= CAN_EFF_MASK;
- endpos = pos + SLC_EFF_ID_LEN;
+ endpos = pos + SLCAN_EFF_ID_LEN;
} else {
*pos |= 0x20; /* convert R/T to lower case for SFF */
id &= CAN_SFF_MASK;
- endpos = pos + SLC_SFF_ID_LEN;
+ endpos = pos + SLCAN_SFF_ID_LEN;
}
/* build 3 (SFF) or 8 (EFF) digit CAN identifier */
@@ -521,7 +510,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
id >>= 4;
}
- pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
+ pos += (cf->can_id & CAN_EFF_FLAG) ?
+ SLCAN_EFF_ID_LEN : SLCAN_SFF_ID_LEN;
*pos++ = cf->len + '0';
@@ -557,9 +547,8 @@ static void slcan_transmit(struct work_struct *work)
spin_lock_bh(&sl->lock);
/* First make sure we're connected. */
- if (!sl->tty || sl->magic != SLCAN_MAGIC ||
- (unlikely(!netif_running(sl->dev)) &&
- likely(!test_bit(SLF_XCMD, &sl->flags)))) {
+ if (unlikely(!netif_running(sl->dev)) &&
+ likely(!test_bit(SLF_XCMD, &sl->flags))) {
spin_unlock_bh(&sl->lock);
return;
}
@@ -594,17 +583,14 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
- struct slcan *sl;
+ struct slcan *sl = (struct slcan *)tty->disc_data;
- rcu_read_lock();
- sl = rcu_dereference(tty->disc_data);
- if (sl)
- schedule_work(&sl->tx_work);
- rcu_read_unlock();
+ schedule_work(&sl->tx_work);
}
/* Send a can_frame to a TTY queue. */
-static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
@@ -623,9 +609,11 @@ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
}
netif_stop_queue(sl->dev);
- slc_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */
+ slcan_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */
spin_unlock(&sl->lock);
+ skb_tx_timestamp(skb);
+
out:
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -666,30 +654,26 @@ static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd)
}
/* Netdevice UP -> DOWN routine */
-static int slc_close(struct net_device *dev)
+static int slcan_netdev_close(struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
int err;
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- if (sl->can.bittiming.bitrate &&
- sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
- spin_unlock_bh(&sl->lock);
- err = slcan_transmit_cmd(sl, "C\r");
- spin_lock_bh(&sl->lock);
- if (err)
- netdev_warn(dev,
- "failed to send close command 'C\\r'\n");
- }
-
- /* TTY discipline is running. */
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ if (sl->can.bittiming.bitrate &&
+ sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ err = slcan_transmit_cmd(sl, "C\r");
+ if (err)
+ netdev_warn(dev,
+ "failed to send close command 'C\\r'\n");
}
+
+ /* TTY discipline is running. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ flush_work(&sl->tx_work);
+
netif_stop_queue(dev);
sl->rcount = 0;
sl->xleft = 0;
- spin_unlock_bh(&sl->lock);
close_candev(dev);
sl->can.state = CAN_STATE_STOPPED;
if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN)
@@ -699,15 +683,12 @@ static int slc_close(struct net_device *dev)
}
/* Netdevice DOWN -> UP routine */
-static int slc_open(struct net_device *dev)
+static int slcan_netdev_open(struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
- unsigned char cmd[SLC_MTU];
+ unsigned char cmd[SLCAN_MTU];
int err, s;
- if (!sl->tty)
- return -ENODEV;
-
/* The baud rate is not set with the command
* `ip link set <iface> type can bitrate <baud>' and therefore
* can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing
@@ -722,8 +703,6 @@ static int slc_open(struct net_device *dev)
return err;
}
- sl->flags &= BIT(SLF_INUSE);
-
if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) {
if (sl->can.bittiming.bitrate == slcan_bitrate_const[s])
@@ -751,10 +730,20 @@ static int slc_open(struct net_device *dev)
}
}
- err = slcan_transmit_cmd(sl, "O\r");
- if (err) {
- netdev_err(dev, "failed to send open command 'O\\r'\n");
- goto cmd_transmit_failed;
+ if (sl->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ err = slcan_transmit_cmd(sl, "L\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send listen-only command 'L\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ } else {
+ err = slcan_transmit_cmd(sl, "O\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send open command 'O\\r'\n");
+ goto cmd_transmit_failed;
+ }
}
}
@@ -767,24 +756,11 @@ cmd_transmit_failed:
return err;
}
-static void slc_dealloc(struct slcan *sl)
-{
- int i = sl->dev->base_addr;
-
- free_candev(sl->dev);
- slcan_devs[i] = NULL;
-}
-
-static int slcan_change_mtu(struct net_device *dev, int new_mtu)
-{
- return -EINVAL;
-}
-
-static const struct net_device_ops slc_netdev_ops = {
- .ndo_open = slc_open,
- .ndo_stop = slc_close,
- .ndo_start_xmit = slc_xmit,
- .ndo_change_mtu = slcan_change_mtu,
+static const struct net_device_ops slcan_netdev_ops = {
+ .ndo_open = slcan_netdev_open,
+ .ndo_stop = slcan_netdev_close,
+ .ndo_start_xmit = slcan_netdev_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
/******************************************
@@ -804,7 +780,7 @@ static void slcan_receive_buf(struct tty_struct *tty,
{
struct slcan *sl = (struct slcan *)tty->disc_data;
- if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+ if (!netif_running(sl->dev))
return;
/* Read the characters out of the buffer */
@@ -819,80 +795,15 @@ static void slcan_receive_buf(struct tty_struct *tty,
}
}
-/************************************
- * slcan_open helper routines.
- ************************************/
-
-/* Collect hanged up channels */
-static void slc_sync(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- break;
-
- sl = netdev_priv(dev);
- if (sl->tty)
- continue;
- if (dev->flags & IFF_UP)
- dev_close(dev);
- }
-}
-
-/* Find a free SLCAN channel, and link in this `tty' line. */
-static struct slcan *slc_alloc(void)
-{
- int i;
- struct net_device *dev = NULL;
- struct slcan *sl;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- break;
- }
-
- /* Sorry, too many, all slots in use */
- if (i >= maxdev)
- return NULL;
-
- dev = alloc_candev(sizeof(*sl), 1);
- if (!dev)
- return NULL;
-
- snprintf(dev->name, sizeof(dev->name), "slcan%d", i);
- dev->netdev_ops = &slc_netdev_ops;
- dev->base_addr = i;
- slcan_set_ethtool_ops(dev);
- sl = netdev_priv(dev);
-
- /* Initialize channel control data */
- sl->magic = SLCAN_MAGIC;
- sl->dev = dev;
- sl->can.bitrate_const = slcan_bitrate_const;
- sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const);
- spin_lock_init(&sl->lock);
- INIT_WORK(&sl->tx_work, slcan_transmit);
- init_waitqueue_head(&sl->xcmd_wait);
- slcan_devs[i] = dev;
-
- return sl;
-}
-
/* Open the high-level part of the SLCAN channel.
* This function is called by the TTY module when the
- * SLCAN line discipline is called for. Because we are
- * sure the tty line exists, we only have to link it to
- * a free SLCAN channel...
+ * SLCAN line discipline is called for.
*
* Called in process context serialized from other ldisc calls.
*/
static int slcan_open(struct tty_struct *tty)
{
+ struct net_device *dev;
struct slcan *sl;
int err;
@@ -902,72 +813,50 @@ static int slcan_open(struct tty_struct *tty)
if (!tty->ops->write)
return -EOPNOTSUPP;
- /* RTnetlink lock is misused here to serialize concurrent
- * opens of slcan channels. There are better ways, but it is
- * the simplest one.
- */
- rtnl_lock();
+ dev = alloc_candev(sizeof(*sl), 1);
+ if (!dev)
+ return -ENFILE;
- /* Collect hanged up channels. */
- slc_sync();
+ sl = netdev_priv(dev);
- sl = tty->disc_data;
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ sl->rcount = 0;
+ sl->xleft = 0;
+ spin_lock_init(&sl->lock);
+ INIT_WORK(&sl->tx_work, slcan_transmit);
+ init_waitqueue_head(&sl->xcmd_wait);
- err = -EEXIST;
- /* First make sure we're not already connected. */
- if (sl && sl->magic == SLCAN_MAGIC)
- goto err_exit;
+ /* Configure CAN metadata */
+ sl->can.bitrate_const = slcan_bitrate_const;
+ sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const);
+ sl->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
- /* OK. Find a free SLCAN channel to use. */
- err = -ENFILE;
- sl = slc_alloc();
- if (!sl)
- goto err_exit;
+ /* Configure netdev interface */
+ sl->dev = dev;
+ dev->netdev_ops = &slcan_netdev_ops;
+ dev->ethtool_ops = &slcan_ethtool_ops;
+ /* Mark ldisc channel as alive */
sl->tty = tty;
tty->disc_data = sl;
- if (!test_bit(SLF_INUSE, &sl->flags)) {
- /* Perform the low-level SLCAN initialization. */
- sl->rcount = 0;
- sl->xleft = 0;
-
- set_bit(SLF_INUSE, &sl->flags);
-
- rtnl_unlock();
- err = register_candev(sl->dev);
- if (err) {
- pr_err("slcan: can't register candev\n");
- goto err_free_chan;
- }
- } else {
- rtnl_unlock();
+ err = register_candev(dev);
+ if (err) {
+ free_candev(dev);
+ pr_err("can't register candev\n");
+ return err;
}
- tty->receive_room = 65536; /* We don't flow control */
-
+ netdev_info(dev, "slcan on %s.\n", tty->name);
/* TTY layer expects 0 on success */
return 0;
-
-err_free_chan:
- rtnl_lock();
- sl->tty = NULL;
- tty->disc_data = NULL;
- clear_bit(SLF_INUSE, &sl->flags);
- slc_dealloc(sl);
- rtnl_unlock();
- return err;
-
-err_exit:
- rtnl_unlock();
-
- /* Count references from TTY module */
- return err;
}
/* Close down a SLCAN channel.
* This means flushing out any pending queues, and then returning. This
* call is serialized against other ldisc functions.
+ * Once this is called, no other ldisc function of ours is entered.
*
* We also use this method for a hangup event.
*/
@@ -975,28 +864,20 @@ static void slcan_close(struct tty_struct *tty)
{
struct slcan *sl = (struct slcan *)tty->disc_data;
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
- return;
+ /* unregister_netdev() calls .ndo_stop() so we don't have to.
+ * Our .ndo_stop() also flushes the TTY write wakeup handler,
+ * so we can safely set sl->tty = NULL after this.
+ */
+ unregister_candev(sl->dev);
+ /* Mark channel as dead */
spin_lock_bh(&sl->lock);
- rcu_assign_pointer(tty->disc_data, NULL);
+ tty->disc_data = NULL;
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
- synchronize_rcu();
- flush_work(&sl->tx_work);
-
- slc_close(sl->dev);
- unregister_candev(sl->dev);
- rtnl_lock();
- slc_dealloc(sl);
- rtnl_unlock();
-}
-
-static void slcan_hangup(struct tty_struct *tty)
-{
- slcan_close(tty);
+ netdev_info(sl->dev, "slcan off %s.\n", tty->name);
+ free_candev(sl->dev);
}
/* Perform I/O control on an active SLCAN channel. */
@@ -1006,10 +887,6 @@ static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
struct slcan *sl = (struct slcan *)tty->disc_data;
unsigned int tmp;
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC)
- return -EINVAL;
-
switch (cmd) {
case SIOCGIFNAME:
tmp = strlen(sl->dev->name) + 1;
@@ -1025,13 +902,12 @@ static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
}
}
-static struct tty_ldisc_ops slc_ldisc = {
+static struct tty_ldisc_ops slcan_ldisc = {
.owner = THIS_MODULE,
.num = N_SLCAN,
- .name = "slcan",
+ .name = KBUILD_MODNAME,
.open = slcan_open,
.close = slcan_close,
- .hangup = slcan_hangup,
.ioctl = slcan_ioctl,
.receive_buf = slcan_receive_buf,
.write_wakeup = slcan_write_wakeup,
@@ -1041,79 +917,22 @@ static int __init slcan_init(void)
{
int status;
- if (maxdev < 4)
- maxdev = 4; /* Sanity */
-
- pr_info("slcan: serial line CAN interface driver\n");
- pr_info("slcan: %d dynamic interface channels.\n", maxdev);
-
- slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL);
- if (!slcan_devs)
- return -ENOMEM;
+ pr_info("serial line CAN interface driver\n");
/* Fill in our line protocol discipline, and register it */
- status = tty_register_ldisc(&slc_ldisc);
- if (status) {
- pr_err("slcan: can't register line discipline\n");
- kfree(slcan_devs);
- }
+ status = tty_register_ldisc(&slcan_ldisc);
+ if (status)
+ pr_err("can't register line discipline\n");
+
return status;
}
static void __exit slcan_exit(void)
{
- int i;
- struct net_device *dev;
- struct slcan *sl;
- unsigned long timeout = jiffies + HZ;
- int busy = 0;
-
- if (!slcan_devs)
- return;
-
- /* First of all: check for active disciplines and hangup them.
- */
- do {
- if (busy)
- msleep_interruptible(100);
-
- busy = 0;
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- sl = netdev_priv(dev);
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- busy++;
- tty_hangup(sl->tty);
- }
- spin_unlock_bh(&sl->lock);
- }
- } while (busy && time_before(jiffies, timeout));
-
- /* FIXME: hangup is async so we should wait when doing this second
- * phase
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
*/
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
-
- sl = netdev_priv(dev);
- if (sl->tty)
- netdev_err(dev, "tty discipline still running\n");
-
- slc_close(dev);
- unregister_candev(dev);
- slc_dealloc(sl);
- }
-
- kfree(slcan_devs);
- slcan_devs = NULL;
-
- tty_unregister_ldisc(&slc_ldisc);
+ tty_unregister_ldisc(&slcan_ldisc);
}
module_init(slcan_init);
diff --git a/drivers/net/can/slcan/slcan-ethtool.c b/drivers/net/can/slcan/slcan-ethtool.c
index bf0afdc4e49d..f598c653fbfa 100644
--- a/drivers/net/can/slcan/slcan-ethtool.c
+++ b/drivers/net/can/slcan/slcan-ethtool.c
@@ -52,14 +52,10 @@ static int slcan_get_sset_count(struct net_device *netdev, int sset)
}
}
-static const struct ethtool_ops slcan_ethtool_ops = {
+const struct ethtool_ops slcan_ethtool_ops = {
.get_strings = slcan_get_strings,
.get_priv_flags = slcan_get_priv_flags,
.set_priv_flags = slcan_set_priv_flags,
.get_sset_count = slcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void slcan_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &slcan_ethtool_ops;
-}
diff --git a/drivers/net/can/slcan/slcan.h b/drivers/net/can/slcan/slcan.h
index d463c8d99e22..85cedf856db3 100644
--- a/drivers/net/can/slcan/slcan.h
+++ b/drivers/net/can/slcan/slcan.h
@@ -13,6 +13,7 @@
bool slcan_err_rst_on_open(struct net_device *ndev);
int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on);
-void slcan_set_ethtool_ops(struct net_device *ndev);
+
+extern const struct ethtool_ops slcan_ethtool_ops;
#endif /* _SLCAN_H */
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 8d27ac66ca7f..a5ef57f415f7 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -5,6 +5,7 @@
* - Kurt Van Dijck, EIA Electronics
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/io.h>
@@ -611,8 +612,12 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops softing_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const softing_btr_const = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -649,6 +654,7 @@ static struct net_device *softing_netdev_create(struct softing *card,
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &softing_netdev_ops;
+ netdev->ethtool_ops = &softing_ethtool_ops;
priv->can.do_set_mode = softing_candev_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
@@ -846,7 +852,7 @@ platform_resource_failed:
static struct platform_driver softing_driver = {
.driver = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
},
.probe = softing_pdev_probe,
.remove = softing_pdev_remove,
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 167114aae6dd..b87dc420428d 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -802,6 +803,10 @@ static const struct net_device_ops hi3110_netdev_ops = {
.ndo_start_xmit = hi3110_hard_start_xmit,
};
+static const struct ethtool_ops hi3110_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id hi3110_of_match[] = {
{
.compatible = "holt,hi3110",
@@ -856,6 +861,7 @@ static int hi3110_can_probe(struct spi_device *spi)
goto out_free;
net->netdev_ops = &hi3110_netdev_ops;
+ net->ethtool_ops = &hi3110_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 666a4505a55a..e750d13c8841 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
@@ -1248,6 +1249,10 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops mcp251x_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id mcp251x_of_match[] = {
{
.compatible = "microchip,mcp2510",
@@ -1313,6 +1318,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
goto out_free;
net->netdev_ops = &mcp251x_netdev_ops;
+ net->ethtool_ops = &mcp251x_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 7fc86ed405c6..68df6d4641b5 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1671,6 +1671,7 @@ static const struct net_device_ops mcp251xfd_netdev_ops = {
.ndo_open = mcp251xfd_open,
.ndo_stop = mcp251xfd_stop,
.ndo_start_xmit = mcp251xfd_start_xmit,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_change_mtu = can_change_mtu,
};
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index c991b30bc9f0..004eaf96262b 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -253,7 +253,7 @@ void mcp251xfd_dump(const struct mcp251xfd_priv *priv)
file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) *
sizeof(struct mcp251xfd_dump_object_reg);
- /* TEF ring, RX ring, TX rings */
+ /* TEF ring, RX rings, TX ring */
rings_num = 1 + priv->rx_ring_num + 1;
obj_num += rings_num;
file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX *
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
index 6c7a57f16cc6..3585f02575df 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -124,6 +124,7 @@ static const struct ethtool_ops mcp251xfd_ethtool_ops = {
.set_ringparam = mcp251xfd_ring_set_ringparam,
.get_coalesce = mcp251xfd_ring_get_coalesce,
.set_coalesce = mcp251xfd_ring_set_coalesce,
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv)
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index b90dfb429ccd..525309da1320 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -53,6 +53,7 @@
#include <linux/can/error.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -761,6 +762,10 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_start_xmit = sun4ican_start_xmit,
};
+static const struct ethtool_ops sun4ican_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct sun4ican_quirks sun4ican_quirks_a10 = {
.has_reset = false,
};
@@ -851,6 +856,7 @@ static int sun4ican_probe(struct platform_device *pdev)
}
dev->netdev_ops = &sun4ican_netdev_ops;
+ dev->ethtool_ops = &sun4ican_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index afa38771520e..ec0ffeeb2015 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
@@ -841,6 +842,10 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ti_hecc_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id ti_hecc_dt_ids[] = {
{
.compatible = "ti,am3517-hecc",
@@ -918,6 +923,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &ti_hecc_netdev_ops;
+ ndev->ethtool_ops = &ti_hecc_ethtool_ops;
priv->clk = clk_get(&pdev->dev, "hecc_ck");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index bbec3311d893..d1e1a459c045 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -879,8 +880,12 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ems_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const ems_usb_bittiming_const = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -990,6 +995,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
+ netdev->ethtool_ops = &ems_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -1074,7 +1080,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver ems_usb_driver = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.probe = ems_usb_probe,
.disconnect = ems_usb_disconnect,
.id_table = ems_usb_table,
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 177ed33e08d9..1bcfad11b1e4 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -5,6 +5,7 @@
* Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
* Copyright (C) 2022 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -882,6 +883,10 @@ static const struct net_device_ops esd_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops esd_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const esd_usb2_bittiming_const = {
.name = "esd_usb2",
.tseg1_min = ESD_USB2_TSEG1_MIN,
@@ -1015,6 +1020,7 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
netdev->flags |= IFF_ECHO; /* we support local echo */
netdev->netdev_ops = &esd_usb_netdev_ops;
+ netdev->ethtool_ops = &esd_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
netdev->dev_id = index;
@@ -1138,7 +1144,7 @@ static void esd_usb_disconnect(struct usb_interface *intf)
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver esd_usb_driver = {
- .name = "esd_usb",
+ .name = KBUILD_MODNAME,
.probe = esd_usb_probe,
.disconnect = esd_usb_disconnect,
.id_table = esd_usb_table,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 7353745f92d7..51294b717040 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -10,6 +10,7 @@
* Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
@@ -18,14 +19,11 @@
#include "es58x_core.h"
-#define DRV_VERSION "1.00"
MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>");
MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>");
MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL v2");
-#define ES58X_MODULE_NAME "etas_es58x"
#define ES58X_VENDOR_ID 0x108C
#define ES581_4_PRODUCT_ID 0x0159
#define ES582_1_PRODUCT_ID 0x0168
@@ -59,11 +57,11 @@ MODULE_DEVICE_TABLE(usb, es58x_id_table);
#define es58x_print_hex_dump(buf, len) \
print_hex_dump(KERN_DEBUG, \
- ES58X_MODULE_NAME " " __stringify(buf) ": ", \
+ KBUILD_MODNAME " " __stringify(buf) ": ", \
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
#define es58x_print_hex_dump_debug(buf, len) \
- print_hex_dump_debug(ES58X_MODULE_NAME " " __stringify(buf) ": ",\
+ print_hex_dump_debug(KBUILD_MODNAME " " __stringify(buf) ": ",\
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
/* The last two bytes of an ES58X command is a CRC16. The first two
@@ -1462,10 +1460,6 @@ static void es58x_read_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_read_bulk_callback, es58x_dev);
-
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret == -ENODEV) {
for (i = 0; i < es58x_dev->num_can_ch; i++)
@@ -1599,7 +1593,8 @@ static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev)
return NULL;
usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- buf, tx_buf_len, NULL, NULL);
+ buf, tx_buf_len, es58x_write_bulk_callback,
+ NULL);
return urb;
}
@@ -1632,9 +1627,7 @@ static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb,
int ret;
es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length);
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_write_bulk_callback, netdev);
+ urb->context = netdev;
usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
@@ -1981,7 +1974,12 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
static const struct net_device_ops es58x_netdev_ops = {
.ndo_open = es58x_open,
.ndo_stop = es58x_stop,
- .ndo_start_xmit = es58x_start_xmit
+ .ndo_start_xmit = es58x_start_xmit,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+};
+
+static const struct ethtool_ops es58x_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
/**
@@ -2088,6 +2086,7 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
netdev->netdev_ops = &es58x_netdev_ops;
+ netdev->ethtool_ops = &es58x_ethtool_ops;
netdev->flags |= IFF_ECHO; /* We support local echo */
netdev->dev_port = channel_idx;
@@ -2181,9 +2180,8 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep_in, *ep_out;
int ret;
- dev_info(dev,
- "Starting %s %s (Serial Number %s) driver version %s\n",
- udev->manufacturer, udev->product, udev->serial, DRV_VERSION);
+ dev_info(dev, "Starting %s %s (Serial Number %s)\n",
+ udev->manufacturer, udev->product, udev->serial);
ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
NULL, NULL);
@@ -2280,7 +2278,7 @@ static void es58x_disconnect(struct usb_interface *intf)
}
static struct usb_driver es58x_driver = {
- .name = ES58X_MODULE_NAME,
+ .name = KBUILD_MODNAME,
.probe = es58x_probe,
.disconnect = es58x_disconnect,
.id_table = es58x_id_table
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index d3a658b444b5..baf749c8cda3 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -946,6 +946,7 @@ static int gs_usb_set_phys_id(struct net_device *dev,
static const struct ethtool_ops gs_usb_ethtool_ops = {
.set_phys_id = gs_usb_set_phys_id,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static struct gs_can *gs_make_candev(unsigned int channel,
@@ -989,11 +990,12 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev = netdev_priv(netdev);
netdev->netdev_ops = &gs_usb_netdev_ops;
+ netdev->ethtool_ops = &gs_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
/* dev setup */
- strcpy(dev->bt_const.name, "gs_usb");
+ strcpy(dev->bt_const.name, KBUILD_MODNAME);
dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
@@ -1100,7 +1102,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
return ERR_PTR(rc);
}
- strcpy(dev->data_bt_const.name, "gs_usb");
+ strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min);
dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max);
dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min);
@@ -1270,7 +1272,7 @@ static const struct usb_device_id gs_usb_table[] = {
MODULE_DEVICE_TABLE(usb, gs_usb_table);
static struct usb_driver gs_usb_driver = {
- .name = "gs_usb",
+ .name = KBUILD_MODNAME,
.probe = gs_usb_probe,
.disconnect = gs_usb_disconnect,
.id_table = gs_usb_table,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index eefcbe3aadce..841da29cef93 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -39,6 +39,7 @@
#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
+#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index f211bfcb1d97..824cab80aa02 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/kernel.h>
@@ -89,7 +90,7 @@
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
- .quirks = 0,
+ .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
.ops = &kvaser_usb_hydra_dev_ops,
};
@@ -665,6 +666,22 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct net_device_ops kvaser_usb_netdev_ops_hwts = {
+ .ndo_open = kvaser_usb_open,
+ .ndo_stop = kvaser_usb_close,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
int i;
@@ -742,7 +759,13 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
netdev->flags |= IFF_ECHO;
- netdev->netdev_ops = &kvaser_usb_netdev_ops;
-
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) {
+ netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts;
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts;
+ } else {
+ netdev->netdev_ops = &kvaser_usb_netdev_ops;
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
+ }
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
@@ -869,7 +892,7 @@ static void kvaser_usb_disconnect(struct usb_interface *intf)
}
static struct usb_driver kvaser_usb_driver = {
- .name = "kvaser_usb",
+ .name = KBUILD_MODNAME,
.probe = kvaser_usb_probe,
.disconnect = kvaser_usb_disconnect,
.id_table = kvaser_usb_table,
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 792ab9da317d..69346c63021f 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -10,6 +10,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -758,6 +759,10 @@ static const struct net_device_ops mcba_netdev_ops = {
.ndo_start_xmit = mcba_usb_start_xmit,
};
+static const struct ethtool_ops mcba_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Microchip CANBUS has hardcoded bittiming values by default.
* This function sends request via USB to change the speed and align bittiming
* values for presentation purposes only
@@ -836,6 +841,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
priv->can.do_set_bittiming = mcba_net_set_bittiming;
netdev->netdev_ops = &mcba_netdev_ops;
+ netdev->ethtool_ops = &mcba_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index d07b7ee79e3e..687dd542f7f6 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -965,6 +965,7 @@ static int pcan_usb_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_ethtool_ops = {
.set_phys_id = pcan_usb_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/*
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 27b0a72fd885..8c9d53f6e24c 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -775,13 +775,54 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
return 0;
}
+static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
+ .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
/*
* create one device which is attached to CAN controller #ctrl_idx of the
* usb adapter.
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 9c90487b9c92..f6bdd8b3f290 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -145,5 +145,6 @@ int peak_usb_netif_rx(struct sk_buff *skb,
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
+int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 3d7e0e370505..2ea1500df393 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -1080,6 +1080,7 @@ static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
.set_phys_id = pcan_usb_fd_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/* describes the PCAN-USB FD adapter */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 457887113e75..5d8f6a40bb2c 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -1022,6 +1022,7 @@ static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
.set_phys_id = pcan_usb_pro_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
};
/*
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 5ae0d7c017cc..7c35f50fda4e 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -28,6 +28,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -1233,6 +1234,10 @@ static const struct net_device_ops ucan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Request to set bittiming
*
* This function generates an USB set bittiming message and transmits
@@ -1512,6 +1517,7 @@ static int ucan_probe(struct usb_interface *intf,
spin_lock_init(&up->context_lock);
spin_lock_init(&up->echo_skb_lock);
netdev->netdev_ops = &ucan_netdev_ops;
+ netdev->ethtool_ops = &ucan_ethtool_ops;
usb_set_intfdata(intf, up);
SET_NETDEV_DEV(netdev, &intf->dev);
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8b7cd69e20b0..64c00abe91cf 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -12,6 +12,7 @@
* who were very cooperative and answered my questions.
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -870,8 +871,12 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops usb_8dev_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const usb_8dev_bittiming_const = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -927,6 +932,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
CAN_CTRLMODE_CC_LEN8_DLC;
netdev->netdev_ops = &usb_8dev_netdev_ops;
+ netdev->ethtool_ops = &usb_8dev_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -997,7 +1003,7 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
}
static struct usb_driver usb_8dev_driver = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.probe = usb_8dev_probe,
.disconnect = usb_8dev_disconnect,
.id_table = usb_8dev_table,
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index a15619d883ec..36b6310a2e5b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -40,6 +40,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -99,6 +100,8 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
+ skb_tx_timestamp(skb);
+
if (!echo) {
/* no echo handling available inside this driver */
if (loop) {
@@ -146,6 +149,10 @@ static const struct net_device_ops vcan_netdev_ops = {
.ndo_change_mtu = vcan_change_mtu,
};
+static const struct ethtool_ops vcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
@@ -161,6 +168,7 @@ static void vcan_setup(struct net_device *dev)
dev->flags |= IFF_ECHO;
dev->netdev_ops = &vcan_netdev_ops;
+ dev->ethtool_ops = &vcan_ethtool_ops;
dev->needs_free_netdev = true;
}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 577a80300514..cffd107d8b28 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -9,6 +9,7 @@
* Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -53,6 +54,8 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
goto out_unlock;
}
+ skb_tx_timestamp(oskb);
+
skb = skb_clone(oskb, GFP_ATOMIC);
if (skb) {
consume_skb(oskb);
@@ -144,6 +147,10 @@ static const struct net_device_ops vxcan_netdev_ops = {
.ndo_change_mtu = vxcan_change_mtu,
};
+static const struct ethtool_ops vxcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vxcan_setup(struct net_device *dev)
{
struct can_ml_priv *can_ml;
@@ -155,6 +162,7 @@ static void vxcan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->flags = IFF_NOARP;
dev->netdev_ops = &vxcan_netdev_ops;
+ dev->ethtool_ops = &vxcan_ethtool_ops;
dev->needs_free_netdev = true;
can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index caa6b4cee63f..5d3172795ad0 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -12,6 +12,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -1540,6 +1541,10 @@ static const struct net_device_ops xcan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops xcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/**
* xcan_suspend - Suspend method for the driver
* @dev: Address of the device structure
@@ -1821,6 +1826,7 @@ static int xcan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &xcan_netdev_ops;
+ ndev->ethtool_ops = &xcan_ethtool_ops;
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
index 40bb7c27285b..701f1d199e93 100644
--- a/drivers/net/dsa/qca/Makefile
+++ b/drivers/net/dsa/qca/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+qca8k-y += qca8k-common.o qca8k-8xxx.o
diff --git a/drivers/net/dsa/qca/qca8k.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 1cbb05b0323f..1d3e7782a71f 100644
--- a/drivers/net/dsa/qca/qca8k.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -15,7 +15,6 @@
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
-#include <linux/if_bridge.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
@@ -24,57 +23,6 @@
#include "qca8k.h"
-#define MIB_DESC(_s, _o, _n) \
- { \
- .size = (_s), \
- .offset = (_o), \
- .name = (_n), \
- }
-
-static const struct qca8k_mib_desc ar8327_mib[] = {
- MIB_DESC(1, 0x00, "RxBroad"),
- MIB_DESC(1, 0x04, "RxPause"),
- MIB_DESC(1, 0x08, "RxMulti"),
- MIB_DESC(1, 0x0c, "RxFcsErr"),
- MIB_DESC(1, 0x10, "RxAlignErr"),
- MIB_DESC(1, 0x14, "RxRunt"),
- MIB_DESC(1, 0x18, "RxFragment"),
- MIB_DESC(1, 0x1c, "Rx64Byte"),
- MIB_DESC(1, 0x20, "Rx128Byte"),
- MIB_DESC(1, 0x24, "Rx256Byte"),
- MIB_DESC(1, 0x28, "Rx512Byte"),
- MIB_DESC(1, 0x2c, "Rx1024Byte"),
- MIB_DESC(1, 0x30, "Rx1518Byte"),
- MIB_DESC(1, 0x34, "RxMaxByte"),
- MIB_DESC(1, 0x38, "RxTooLong"),
- MIB_DESC(2, 0x3c, "RxGoodByte"),
- MIB_DESC(2, 0x44, "RxBadByte"),
- MIB_DESC(1, 0x4c, "RxOverFlow"),
- MIB_DESC(1, 0x50, "Filtered"),
- MIB_DESC(1, 0x54, "TxBroad"),
- MIB_DESC(1, 0x58, "TxPause"),
- MIB_DESC(1, 0x5c, "TxMulti"),
- MIB_DESC(1, 0x60, "TxUnderRun"),
- MIB_DESC(1, 0x64, "Tx64Byte"),
- MIB_DESC(1, 0x68, "Tx128Byte"),
- MIB_DESC(1, 0x6c, "Tx256Byte"),
- MIB_DESC(1, 0x70, "Tx512Byte"),
- MIB_DESC(1, 0x74, "Tx1024Byte"),
- MIB_DESC(1, 0x78, "Tx1518Byte"),
- MIB_DESC(1, 0x7c, "TxMaxByte"),
- MIB_DESC(1, 0x80, "TxOverSize"),
- MIB_DESC(2, 0x84, "TxByte"),
- MIB_DESC(1, 0x8c, "TxCollision"),
- MIB_DESC(1, 0x90, "TxAbortCol"),
- MIB_DESC(1, 0x94, "TxMultiCol"),
- MIB_DESC(1, 0x98, "TxSingleCol"),
- MIB_DESC(1, 0x9c, "TxExcDefer"),
- MIB_DESC(1, 0xa0, "TxDefer"),
- MIB_DESC(1, 0xa4, "TxLateCol"),
- MIB_DESC(1, 0xa8, "RXUnicast"),
- MIB_DESC(1, 0xac, "TXUnicast"),
-};
-
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
@@ -184,24 +132,6 @@ qca8k_set_page(struct qca8k_priv *priv, u16 page)
return 0;
}
-static int
-qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
-{
- return regmap_read(priv->regmap, reg, val);
-}
-
-static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return regmap_write(priv->regmap, reg, val);
-}
-
-static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-{
- return regmap_update_bits(priv->regmap, reg, mask, write_val);
-}
-
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data;
@@ -412,43 +342,6 @@ qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 wri
}
static int
-qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
- int i, count = len / sizeof(u32), ret;
-
- if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
- return 0;
-
- for (i = 0; i < count; i++) {
- ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int
-qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
- int i, count = len / sizeof(u32), ret;
- u32 tmp;
-
- if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
- return 0;
-
- for (i = 0; i < count; i++) {
- tmp = val[i];
-
- ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
@@ -534,30 +427,6 @@ exit:
return ret;
}
-static const struct regmap_range qca8k_readable_ranges[] = {
- regmap_reg_range(0x0000, 0x00e4), /* Global control */
- regmap_reg_range(0x0100, 0x0168), /* EEE control */
- regmap_reg_range(0x0200, 0x0270), /* Parser control */
- regmap_reg_range(0x0400, 0x0454), /* ACL */
- regmap_reg_range(0x0600, 0x0718), /* Lookup */
- regmap_reg_range(0x0800, 0x0b70), /* QM */
- regmap_reg_range(0x0c00, 0x0c80), /* PKT */
- regmap_reg_range(0x0e00, 0x0e98), /* L3 */
- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
-
-};
-
-static const struct regmap_access_table qca8k_readable_table = {
- .yes_ranges = qca8k_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
-};
-
static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
@@ -572,385 +441,6 @@ static struct regmap_config qca8k_regmap_config = {
};
static int
-qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
-{
- u32 val;
-
- return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
-}
-
-static int
-qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
-{
- u32 reg[3];
- int ret;
-
- /* load the ARL table into an array */
- ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
- if (ret)
- return ret;
-
- /* vid - 83:72 */
- fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
- /* aging - 67:64 */
- fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
- /* portmask - 54:48 */
- fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
- /* mac - 47:0 */
- fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
- fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
- fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
- fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
- fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
- fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
-
- return 0;
-}
-
-static void
-qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
- u8 aging)
-{
- u32 reg[3] = { 0 };
-
- /* vid - 83:72 */
- reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
- /* aging - 67:64 */
- reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
- /* portmask - 54:48 */
- reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
- /* mac - 47:0 */
- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
- reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
- reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
-
- /* load the array into the ARL table */
- qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
-}
-
-static int
-qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
-{
- u32 reg;
- int ret;
-
- /* Set the command and FDB index */
- reg = QCA8K_ATU_FUNC_BUSY;
- reg |= cmd;
- if (port >= 0) {
- reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
- }
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_FDB_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_ATU_FUNC_FULL)
- return -1;
- }
-
- return 0;
-}
-
-static int
-qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
-{
- int ret;
-
- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
- if (ret < 0)
- return ret;
-
- return qca8k_fdb_read(priv, fdb);
-}
-
-static int
-qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
- u16 vid, u8 aging)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static void
-qca8k_fdb_flush(struct qca8k_priv *priv)
-{
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
- mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
- const u8 *mac, u16 vid)
-{
- struct qca8k_fdb fdb = { 0 };
- int ret;
-
- mutex_lock(&priv->reg_mutex);
-
- qca8k_fdb_write(priv, vid, 0, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
- if (ret < 0)
- goto exit;
-
- ret = qca8k_fdb_read(priv, &fdb);
- if (ret < 0)
- goto exit;
-
- /* Rule exist. Delete first */
- if (!fdb.aging) {
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- if (ret)
- goto exit;
- }
-
- /* Add port to fdb portmask */
- fdb.port_mask |= port_mask;
-
- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
- const u8 *mac, u16 vid)
-{
- struct qca8k_fdb fdb = { 0 };
- int ret;
-
- mutex_lock(&priv->reg_mutex);
-
- qca8k_fdb_write(priv, vid, 0, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
- if (ret < 0)
- goto exit;
-
- /* Rule doesn't exist. Why delete? */
- if (!fdb.aging) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- if (ret)
- goto exit;
-
- /* Only port in the rule is this port. Don't re insert */
- if (fdb.port_mask == port_mask)
- goto exit;
-
- /* Remove port from port mask */
- fdb.port_mask &= ~port_mask;
-
- qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
-{
- u32 reg;
- int ret;
-
- /* Set the command and VLAN index */
- reg = QCA8K_VTU_FUNC1_BUSY;
- reg |= cmd;
- reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_VLAN_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_VTU_FUNC1_FULL)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int
-qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
-{
- u32 reg;
- int ret;
-
- /*
- We do the right thing with VLAN 0 and treat it as untagged while
- preserving the tag on egress.
- */
- if (vid == 0)
- return 0;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
- if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
- else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
-
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
-{
- u32 reg, mask;
- int ret, i;
- bool del;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
- reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
-
- /* Check if we're the last member to be removed */
- del = true;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
-
- if ((reg & mask) != mask) {
- del = false;
- break;
- }
- }
-
- if (del) {
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
- } else {
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
- }
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_mib_init(struct qca8k_priv *priv)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
- QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
- FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
- QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
- if (ret)
- goto exit;
-
- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static void
-qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
-{
- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- /* Port 0 and 6 have no internal PHY */
- if (port > 0 && port < 6)
- mask |= QCA8K_PORT_STATUS_LINK_AUTO;
-
- if (enable)
- regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
- else
- regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
-}
-
-static int
qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
struct sk_buff *read_skb, u32 *val)
{
@@ -1462,8 +952,8 @@ static int qca8k_find_cpu_port(struct dsa_switch *ds)
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
+ const struct qca8k_match_data *data = priv->info;
struct device_node *node = priv->dev->of_node;
- const struct qca8k_match_data *data;
u32 val = 0;
int ret;
@@ -1472,8 +962,6 @@ qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
* Should be applied by default but we set this just to make sure.
*/
if (priv->switch_id == QCA8K_ID_QCA8327) {
- data = of_device_get_match_data(priv->dev);
-
/* Set the correct package of 148 pin for QCA8327 */
if (data->reduced_package)
val |= QCA8327_PWS_PACKAGE148_EN;
@@ -1993,26 +1481,8 @@ static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
qpcs->port = port;
}
-static void
-qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
-{
- const struct qca8k_match_data *match_data;
- struct qca8k_priv *priv = ds->priv;
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- match_data = of_device_get_match_data(priv->dev);
-
- for (i = 0; i < match_data->mib_count; i++)
- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
- ETH_GSTRING_LEN);
-}
-
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
- const struct qca8k_match_data *match_data;
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv;
const struct qca8k_mib_desc *mib;
@@ -2031,10 +1501,9 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
if (port != mib_eth_data->req_port)
goto exit;
- match_data = device_get_match_data(priv->dev);
data = mib_eth_data->data;
- for (i = 0; i < match_data->mib_count; i++) {
+ for (i = 0; i < priv->info->mib_count; i++) {
mib = &ar8327_mib[i];
/* First 3 mib are present in the skb head */
@@ -2101,522 +1570,6 @@ exit:
return ret;
}
-static void
-qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- const struct qca8k_match_data *match_data;
- const struct qca8k_mib_desc *mib;
- u32 reg, i, val;
- u32 hi = 0;
- int ret;
-
- if (priv->mgmt_master &&
- qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
- return;
-
- match_data = of_device_get_match_data(priv->dev);
-
- for (i = 0; i < match_data->mib_count; i++) {
- mib = &ar8327_mib[i];
- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
-
- ret = qca8k_read(priv, reg, &val);
- if (ret < 0)
- continue;
-
- if (mib->size == 2) {
- ret = qca8k_read(priv, reg + 4, &hi);
- if (ret < 0)
- continue;
- }
-
- data[i] = val;
- if (mib->size == 2)
- data[i] |= (u64)hi << 32;
- }
-}
-
-static int
-qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
- const struct qca8k_match_data *match_data;
- struct qca8k_priv *priv = ds->priv;
-
- if (sset != ETH_SS_STATS)
- return 0;
-
- match_data = of_device_get_match_data(priv->dev);
-
- return match_data->mib_count;
-}
-
-static int
-qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
- u32 reg;
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
- if (ret < 0)
- goto exit;
-
- if (eee->eee_enabled)
- reg |= lpi_en;
- else
- reg &= ~lpi_en;
- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
-static void
-qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 stp_state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
- break;
- case BR_STATE_BLOCKING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
- break;
- case BR_STATE_LISTENING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
- break;
- case BR_STATE_LEARNING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- default:
- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
- break;
- }
-
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
-}
-
-static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge,
- bool *tx_fwd_offload,
- struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask, cpu_port;
- int i, ret;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = regmap_set_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
-
- /* Add all other ports to this ports portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
-
- return ret;
-}
-
-static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, i;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Remove this port to the portvlan mask of the other ports
- * in the bridge
- */
- regmap_clear_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- }
-
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
-}
-
-static void
-qca8k_port_fast_age(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = ds->priv;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
- mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
-{
- struct qca8k_priv *priv = ds->priv;
- unsigned int secs = msecs / 1000;
- u32 val;
-
- /* AGE_TIME reg is set in 7s step */
- val = secs / 7;
-
- /* Handle case with 0 as val to NOT disable
- * learning
- */
- if (!val)
- val = 1;
-
- return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
- QCA8K_ATU_AGE_TIME(val));
-}
-
-static int
-qca8k_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 1);
- priv->port_enabled_map |= BIT(port);
-
- if (dsa_is_user_port(ds, port))
- phy_support_asym_pause(phy);
-
- return 0;
-}
-
-static void
-qca8k_port_disable(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 0);
- priv->port_enabled_map &= ~BIT(port);
-}
-
-static int
-qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- /* We have only have a general MTU setting.
- * DSA always set the CPU port's MTU to the largest MTU of the slave
- * ports.
- * Setting MTU just for the CPU port is sufficient to correctly set a
- * value for every port.
- */
- if (!dsa_is_cpu_port(ds, port))
- return 0;
-
- /* To change the MAX_FRAME_SIZE the cpu ports must be off or
- * the switch panics.
- * Turn off both cpu ports before applying the new value to prevent
- * this.
- */
- if (priv->port_enabled_map & BIT(0))
- qca8k_port_set_status(priv, 0, 0);
-
- if (priv->port_enabled_map & BIT(6))
- qca8k_port_set_status(priv, 6, 0);
-
- /* Include L2 header / FCS length */
- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
-
- if (priv->port_enabled_map & BIT(0))
- qca8k_port_set_status(priv, 0, 1);
-
- if (priv->port_enabled_map & BIT(6))
- qca8k_port_set_status(priv, 6, 1);
-
- return ret;
-}
-
-static int
-qca8k_port_max_mtu(struct dsa_switch *ds, int port)
-{
- return QCA8K_MAX_MTU;
-}
-
-static int
-qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
- u16 port_mask, u16 vid)
-{
- /* Set the vid to the port vlan id if no vid is set */
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_add(priv, addr, port_mask, vid,
- QCA8K_ATU_STATUS_STATIC);
-}
-
-static int
-qca8k_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_del(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- struct qca8k_fdb _fdb = { 0 };
- int cnt = QCA8K_NUM_FDB_RECORDS;
- bool is_static;
- int ret = 0;
-
- mutex_lock(&priv->reg_mutex);
- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
- if (!_fdb.aging)
- break;
- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
- ret = cb(_fdb.mac, _fdb.vid, is_static, data);
- if (ret)
- break;
- }
- mutex_unlock(&priv->reg_mutex);
-
- return 0;
-}
-
-static int
-qca8k_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
-{
- struct qca8k_priv *priv = ds->priv;
- const u8 *addr = mdb->addr;
- u16 vid = mdb->vid;
-
- return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
-}
-
-static int
-qca8k_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct dsa_db db)
-{
- struct qca8k_priv *priv = ds->priv;
- const u8 *addr = mdb->addr;
- u16 vid = mdb->vid;
-
- return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
-}
-
-static int
-qca8k_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress, struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = ds->priv;
- int monitor_port, ret;
- u32 reg, val;
-
- /* Check for existent entry */
- if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
- return -EEXIST;
-
- ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
- if (ret)
- return ret;
-
- /* QCA83xx can have only one port set to mirror mode.
- * Check that the correct port is requested and return error otherwise.
- * When no mirror port is set, the values is set to 0xF
- */
- monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
- return -EEXIST;
-
- /* Set the monitor port */
- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
- mirror->to_local_port);
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (ret)
- return ret;
-
- if (ingress) {
- reg = QCA8K_PORT_LOOKUP_CTRL(port);
- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
- } else {
- reg = QCA8K_REG_PORT_HOL_CTRL1(port);
- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
- }
-
- ret = regmap_update_bits(priv->regmap, reg, val, val);
- if (ret)
- return ret;
-
- /* Track mirror port for tx and rx to decide when the
- * mirror port has to be disabled.
- */
- if (ingress)
- priv->mirror_rx |= BIT(port);
- else
- priv->mirror_tx |= BIT(port);
-
- return 0;
-}
-
-static void
-qca8k_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg, val;
- int ret;
-
- if (mirror->ingress) {
- reg = QCA8K_PORT_LOOKUP_CTRL(port);
- val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
- } else {
- reg = QCA8K_REG_PORT_HOL_CTRL1(port);
- val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
- }
-
- ret = regmap_clear_bits(priv->regmap, reg, val);
- if (ret)
- goto err;
-
- if (mirror->ingress)
- priv->mirror_rx &= ~BIT(port);
- else
- priv->mirror_tx &= ~BIT(port);
-
- /* No port set to send packet to mirror port. Disable mirror port */
- if (!priv->mirror_rx && !priv->mirror_tx) {
- val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
- if (ret)
- goto err;
- }
-err:
- dev_err(priv->dev, "Failed to del mirror port from %d", port);
-}
-
-static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- if (vlan_filtering) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
- } else {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
- }
-
- return ret;
-}
-
-static int
-qca8k_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
- if (ret) {
- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
- return ret;
- }
-
- if (pvid) {
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- QCA8K_EGREES_VLAN_PORT_MASK(port),
- QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid) |
- QCA8K_PORT_VLAN_SVID(vlan->vid));
- }
-
- return ret;
-}
-
-static int
-qca8k_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- ret = qca8k_vlan_del(priv, port, vlan->vid);
- if (ret)
- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
-
- return ret;
-}
-
static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
{
struct qca8k_priv *priv = ds->priv;
@@ -2640,174 +1593,6 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
return DSA_TAG_PROTO_QCA;
}
-static bool
-qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
-{
- struct dsa_port *dp;
- int members = 0;
-
- if (!lag.id)
- return false;
-
- dsa_lag_foreach_port(dp, ds->dst, &lag)
- /* Includes the port joining the LAG */
- members++;
-
- if (members > QCA8K_NUM_PORTS_FOR_LAG)
- return false;
-
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
- return false;
-
- if (info->hash_type != NETDEV_LAG_HASH_L2 &&
- info->hash_type != NETDEV_LAG_HASH_L23)
- return false;
-
- return true;
-}
-
-static int
-qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
-{
- struct net_device *lag_dev = lag.dev;
- struct qca8k_priv *priv = ds->priv;
- bool unique_lag = true;
- unsigned int i;
- u32 hash = 0;
-
- switch (info->hash_type) {
- case NETDEV_LAG_HASH_L23:
- hash |= QCA8K_TRUNK_HASH_SIP_EN;
- hash |= QCA8K_TRUNK_HASH_DIP_EN;
- fallthrough;
- case NETDEV_LAG_HASH_L2:
- hash |= QCA8K_TRUNK_HASH_SA_EN;
- hash |= QCA8K_TRUNK_HASH_DA_EN;
- break;
- default: /* We should NEVER reach this */
- return -EOPNOTSUPP;
- }
-
- /* Check if we are the unique configured LAG */
- dsa_lags_foreach_id(i, ds->dst)
- if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
- unique_lag = false;
- break;
- }
-
- /* Hash Mode is global. Make sure the same Hash Mode
- * is set to all the 4 possible lag.
- * If we are the unique LAG we can set whatever hash
- * mode we want.
- * To change hash mode it's needed to remove all LAG
- * and change the mode with the latest.
- */
- if (unique_lag) {
- priv->lag_hash_mode = hash;
- } else if (priv->lag_hash_mode != hash) {
- netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
- return -EOPNOTSUPP;
- }
-
- return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
- QCA8K_TRUNK_HASH_MASK, hash);
-}
-
-static int
-qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
- struct dsa_lag lag, bool delete)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret, id, i;
- u32 val;
-
- /* DSA LAG IDs are one-based, hardware is zero-based */
- id = lag.id - 1;
-
- /* Read current port member */
- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
- if (ret)
- return ret;
-
- /* Shift val to the correct trunk */
- val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
- val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
- if (delete)
- val &= ~BIT(port);
- else
- val |= BIT(port);
-
- /* Update port member. With empty portmap disable trunk */
- ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
- QCA8K_REG_GOL_TRUNK_MEMBER(id) |
- QCA8K_REG_GOL_TRUNK_EN(id),
- !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
- val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
-
- /* Search empty member if adding or port on deleting */
- for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
- ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
- if (ret)
- return ret;
-
- val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
-
- if (delete) {
- /* If port flagged to be disabled assume this member is
- * empty
- */
- if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
- continue;
-
- val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
- if (val != port)
- continue;
- } else {
- /* If port flagged to be enabled assume this member is
- * already set
- */
- if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
- continue;
- }
-
- /* We have found the member to add/remove */
- break;
- }
-
- /* Set port in the correct port mask or disable port if in delete mode */
- return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
- QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
- !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
- port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
-}
-
-static int
-qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
-{
- int ret;
-
- if (!qca8k_lag_can_offload(ds, lag, info))
- return -EOPNOTSUPP;
-
- ret = qca8k_lag_setup_hash(ds, lag, info);
- if (ret)
- return ret;
-
- return qca8k_lag_refresh_portmap(ds, port, lag, false);
-}
-
-static int
-qca8k_port_lag_leave(struct dsa_switch *ds, int port,
- struct dsa_lag lag)
-{
- return qca8k_lag_refresh_portmap(ds, port, lag, true);
-}
-
static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
bool operational)
@@ -3091,36 +1876,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.connect_tag_protocol = qca8k_connect_tag_protocol,
};
-static int qca8k_read_switch_id(struct qca8k_priv *priv)
-{
- const struct qca8k_match_data *data;
- u32 val;
- u8 id;
- int ret;
-
- /* get the switches ID from the compatible */
- data = of_device_get_match_data(priv->dev);
- if (!data)
- return -ENODEV;
-
- ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
- if (ret < 0)
- return -ENODEV;
-
- id = QCA8K_MASK_CTRL_DEVICE_ID(val);
- if (id != data->id) {
- dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
- return -ENODEV;
- }
-
- priv->switch_id = id;
-
- /* Save revision to communicate to the internal PHY driver */
- priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
-
- return 0;
-}
-
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
@@ -3134,6 +1889,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
+ priv->info = of_device_get_match_data(priv->dev);
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
@@ -3256,20 +2012,29 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
+static const struct qca8k_info_ops qca8xxx_ops = {
+ .autocast_mib = qca8k_get_ethtool_stats_eth,
+ .read_eth = qca8k_read_eth,
+ .write_eth = qca8k_write_eth,
+};
+
static const struct qca8k_match_data qca8327 = {
.id = QCA8K_ID_QCA8327,
.reduced_package = true,
.mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
.mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337,
.mib_count = QCA8K_QCA833X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
static const struct of_device_id qca8k_of_match[] = {
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
new file mode 100644
index 000000000000..bba95613e218
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXUnicast"),
+};
+
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+const struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+
+ if (priv->mgmt_master && priv->info->ops->read_eth &&
+ !priv->info->ops->read_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* TODO: remove these extra ops when we can support regmap bulk read/write */
+static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+ u32 tmp;
+
+ if (priv->mgmt_master && priv->info->ops->write_eth &&
+ !priv->info->ops->write_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ tmp = val[i];
+
+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
+}
+
+static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[3];
+ int ret;
+
+ /* load the ARL table into an array */
+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ if (ret)
+ return ret;
+
+ /* vid - 83:72 */
+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
+ /* aging - 67:64 */
+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
+ /* portmask - 54:48 */
+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
+ /* mac - 47:0 */
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
+
+ return 0;
+}
+
+static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
+ const u8 *mac, u8 aging)
+{
+ u32 reg[3] = { 0 };
+
+ /* vid - 83:72 */
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
+ /* aging - 67:64 */
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
+ /* portmask - 54:48 */
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
+ /* mac - 47:0 */
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
+
+ /* load the array into the ARL table */
+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+}
+
+static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
+ int port)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
+ }
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_ATU_FUNC_FULL)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
+ int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret < 0)
+ return ret;
+
+ return qca8k_fdb_read(priv, fdb);
+}
+
+static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+void qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule exist. Delete first */
+ if (!fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule doesn't exist. Why delete? */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+ /* Only port in the rule is this port. Don't re insert */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_vlan_access(struct qca8k_priv *priv,
+ enum qca8k_vlan_cmd cmd, u16 vid)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_VLAN_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_VTU_FUNC1_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
+ bool untagged)
+{
+ u32 reg;
+ int ret;
+
+ /* We do the right thing with VLAN 0 and treat it as untagged while
+ * preserving the tag on egress.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ if (untagged)
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
+ else
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
+
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+ u32 reg, mask;
+ int ret, i;
+ bool del;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+ break;
+ }
+ }
+
+ if (del) {
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ } else {
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+int qca8k_mib_init(struct qca8k_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+ QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ /* Port 0 and 6 have no internal PHY */
+ if (port > 0 && port < 6)
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
+
+ if (priv->mgmt_master && priv->info->ops->autocast_mib &&
+ priv->info->ops->autocast_mib(ds, port, data) > 0)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
+ continue;
+
+ if (mib->size == 2) {
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
+ continue;
+ }
+
+ data[i] = val;
+ if (mib->size == 2)
+ data[i] |= (u64)hi << 32;
+ }
+}
+
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return priv->info->mib_count;
+}
+
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *eee)
+{
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
+ goto exit;
+
+ if (eee->eee_enabled)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ /* Nothing to do on the port's MAC */
+ return 0;
+}
+
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int port_mask, cpu_port;
+ int i, ret;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (ret)
+ return ret;
+ if (i != port)
+ port_mask |= BIT(i);
+ }
+
+ /* Add all other ports to this port's portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int cpu_port, i;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Remove this port from the portvlan mask of the other ports
+ * in the bridge
+ */
+ regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ /* Set the cpu port to be the only one in the portvlan mask of
+ * this port
+ */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+}
+
+void qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* AGE_TIME reg is set in 7s step */
+ val = secs / 7;
+
+ /* Round a result of 0 up to 1 so that learning is not
+ * disabled
+ */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
+ QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
+
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_enabled_map |= BIT(port);
+
+ if (dsa_is_user_port(ds, port))
+ phy_support_asym_pause(phy);
+
+ return 0;
+}
+
+void qca8k_port_disable(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_enabled_map &= ~BIT(port);
+}
+
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ /* We only have a general MTU setting.
+ * DSA always sets the CPU port's MTU to the largest MTU of the slave
+ * ports.
+ * Setting the MTU just for the CPU port is sufficient to correctly set
+ * a value for every port.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ /* To change MAX_FRAME_SIZE the CPU ports must be off, or the
+ * switch panics.
+ * Turn off both CPU ports before applying the new value to prevent
+ * this.
+ */
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 0);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 0);
+
+ /* Include L2 header / FCS length */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 1);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 1);
+
+ return ret;
+}
+
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return QCA8K_MAX_MTU;
+}
+
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_del(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ bool is_static;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
+ ret = cb(_fdb.mac, _fdb.vid, is_static, data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* QCA83xx can have only one port set to mirror mode.
+ * Check that the correct port is requested and return an error otherwise.
+ * When no mirror port is set, the value reads as 0xF.
+ */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled.
+ */
+ if (ingress)
+ priv->mirror_rx |= BIT(port);
+ else
+ priv->mirror_tx |= BIT(port);
+
+ return 0;
+}
+
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg, val;
+ int ret;
+
+ if (mirror->ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_clear_bits(priv->regmap, reg, val);
+ if (ret)
+ goto err;
+
+ if (mirror->ingress)
+ priv->mirror_rx &= ~BIT(port);
+ else
+ priv->mirror_tx &= ~BIT(port);
+
+ /* No port is set to send packets to the mirror port. Disable the mirror port */
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ goto err;
+ }
+
+ return;
+
+err:
+ dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ if (vlan_filtering) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
+
+ if (pvid) {
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
+ if (ret)
+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+ return ret;
+}
+
+static bool qca8k_lag_can_offload(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct dsa_port *dp;
+ int members = 0;
+
+ if (!lag.id)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > QCA8K_NUM_PORTS_FOR_LAG)
+ return false;
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23)
+ return false;
+
+ return true;
+}
+
+static int qca8k_lag_setup_hash(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct net_device *lag_dev = lag.dev;
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
+ unsigned int i;
+ u32 hash = 0;
+
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L23:
+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
+ fallthrough;
+ case NETDEV_LAG_HASH_L2:
+ hash |= QCA8K_TRUNK_HASH_SA_EN;
+ hash |= QCA8K_TRUNK_HASH_DA_EN;
+ break;
+ default: /* We should NEVER reach this */
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are the only configured LAG */
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+ /* The hash mode is global. Make sure the same hash mode
+ * is set for all of the 4 possible LAGs.
+ * If we are the only LAG we can set whatever hash
+ * mode we want.
+ * To change the hash mode, all LAGs must be removed
+ * and re-created with the new mode.
+ */
+ if (unique_lag) {
+ priv->lag_hash_mode = hash;
+ } else if (priv->lag_hash_mode != hash) {
+ netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+ QCA8K_TRUNK_HASH_MASK, hash);
+}
+
+static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+ struct dsa_lag lag, bool delete)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret, id, i;
+ u32 val;
+
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
+
+ /* Read current port member */
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* Shift val to the correct trunk */
+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+ if (delete)
+ val &= ~BIT(port);
+ else
+ val |= BIT(port);
+
+ /* Update port member. With empty portmap disable trunk */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+ QCA8K_REG_GOL_TRUNK_EN(id),
+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+
+ /* Search for an empty member when adding, or for the port when deleting */
+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+ if (ret)
+ return ret;
+
+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+ if (delete) {
+ /* If the member is flagged as disabled, assume it is
+ * empty
+ */
+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+ if (val != port)
+ continue;
+ } else {
+ /* If the member is flagged as enabled, assume it is
+ * already set
+ */
+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+ }
+
+ /* We have found the member to add/remove */
+ break;
+ }
+
+ /* Set port in the correct port mask or disable port if in delete mode */
+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
+
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ int ret;
+
+ if (!qca8k_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ ret = qca8k_lag_setup_hash(ds, lag, info);
+ if (ret)
+ return ret;
+
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag)
+{
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
+int qca8k_read_switch_id(struct qca8k_priv *priv)
+{
+ u32 val;
+ u8 id;
+ int ret;
+
+ if (!priv->info)
+ return -ENODEV;
+
+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+ if (ret < 0)
+ return -ENODEV;
+
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
+ if (id != priv->info->id) {
+ dev_err(priv->dev,
+ "Switch id detected %x but expected %x",
+ id, priv->info->id);
+ return -ENODEV;
+ }
+
+ priv->switch_id = id;
+
+ /* Save revision to communicate to the internal PHY driver */
+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index ec58d0e80a70..e36ecc9777f4 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -324,10 +324,20 @@ enum qca8k_mid_cmd {
QCA8K_MIB_CAST = 3,
};
+struct qca8k_priv;
+
+struct qca8k_info_ops {
+ int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
+ /* TODO: remove these extra ops when we can support regmap bulk read/write */
+ int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+ int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
+};
+
struct qca8k_match_data {
u8 id;
bool reduced_package;
u8 mib_count;
+ const struct qca8k_info_ops *ops;
};
enum {
@@ -401,6 +411,7 @@ struct qca8k_priv {
struct qca8k_mdio_cache mdio_cache;
struct qca8k_pcs pcs_port_0;
struct qca8k_pcs pcs_port_6;
+ const struct qca8k_match_data *info;
};
struct qca8k_mib_desc {
@@ -416,4 +427,93 @@ struct qca8k_fdb {
u8 mac[6];
};
+/* Common setup function */
+extern const struct qca8k_mib_desc ar8327_mib[];
+extern const struct regmap_access_table qca8k_readable_table;
+int qca8k_mib_init(struct qca8k_priv *priv);
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable);
+int qca8k_read_switch_id(struct qca8k_priv *priv);
+
+/* Common read/write/rmw function */
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val);
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val);
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val);
+
+/* Common ops function */
+void qca8k_fdb_flush(struct qca8k_priv *priv);
+
+/* Common ethtool stats function */
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data);
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* Common eee function */
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+
+/* Common bridge function */
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+/* Common port enable/disable function */
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+void qca8k_port_disable(struct dsa_switch *ds, int port);
+
+/* Common MTU function */
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu);
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port);
+
+/* Common fast age function */
+void qca8k_port_fast_age(struct dsa_switch *ds, int port);
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
+
+/* Common FDB function */
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid);
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+
+/* Common MDB function */
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+
+/* Common port mirror function */
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+
+/* Common port VLAN function */
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+
+/* Common port LAG function */
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info);
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
#endif /* __QCA8K_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 6b3d4f4c2a75..14df8cfc2946 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -20,6 +20,8 @@
#include "bnxt_ulp.h"
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
+#include "bnxt_nvm_defs.h"
+#include "bnxt_ethtool.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
@@ -610,6 +612,63 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
return rc;
}
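+/* Flash self-test: read the VPD NVM item back and rewrite it unchanged */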
+static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ u32 datalen;
+ u16 index;
+ u8 *buf;
+
+ if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) || !datalen) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
+ return false;
+ }
+
+ buf = kzalloc(datalen, GFP_KERNEL);
+ if (!buf) {
+ NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
+ return false;
+ }
+
+ if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
+ goto err;
+ }
+
+ if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
+ BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
+ goto err;
+ }
+
+ kfree(buf);
+ return true;
+
+err:
+ kfree(buf);
+ return false;
+}
+
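+/* Only the FLASH self-test is supported */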
+static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
+}
+
+static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
+ unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
+ return bnxt_nvm_test(bp, extack) ?
+ DEVLINK_SELFTEST_STATUS_PASS :
+ DEVLINK_SELFTEST_STATUS_FAIL;
+
+ return DEVLINK_SELFTEST_STATUS_SKIP;
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -622,6 +681,8 @@ static const struct devlink_ops bnxt_dl_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = bnxt_dl_reload_down,
.reload_up = bnxt_dl_reload_up,
+ .selftest_check = bnxt_dl_selftest_check,
+ .selftest_run = bnxt_dl_selftest_run,
};
static const struct devlink_ops bnxt_vf_dl_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7191e5d74208..87eb5362ad70 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2176,14 +2176,14 @@ static void bnxt_print_admin_err(struct bnxt *bp)
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
-static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
- u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
- u32 dir_item_len, const u8 *data,
- size_t data_len)
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_write_input *req;
@@ -2836,8 +2836,8 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
return rc;
}
-static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
- u32 length, u8 *data)
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
@@ -2871,9 +2871,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length)
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
{
struct hwrm_nvm_find_dir_entry_output *output;
struct hwrm_nvm_find_dir_entry_input *req;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index a59284215e78..a8ecef8ab82c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -58,5 +58,17 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len);
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data);
#endif
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
index 0f6a549b9f67..29a6c2ede43a 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_rx.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
@@ -142,6 +142,7 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
int ref_ok, struct funeth_txq *xdp_q)
{
struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
struct xdp_buff xdp;
u32 act;
@@ -163,7 +164,9 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
case XDP_TX:
if (unlikely(!ref_ok))
goto pass;
- if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
+
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
goto xdp_error;
FUN_QSTAT_INC(q, xdp_tx);
q->xdp_flush |= FUN_XDP_FLUSH_TX;
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
index a97e3af00cb9..706d81e39a54 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_tx.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -16,23 +16,24 @@
#define FUN_XDP_CLEAN_BATCH 16
/* DMA-map a packet and return the (length, DMA_address) pairs for its
- * segments. If a mapping error occurs -ENOMEM is returned.
+ * segments. If a mapping error occurs -ENOMEM is returned. The packet
+ * consists of an skb_shared_info and one additional address/length pair.
*/
-static int map_skb(const struct sk_buff *skb, struct device *dev,
- dma_addr_t *addr, unsigned int *len)
+static int fun_map_pkt(struct device *dev, const struct skb_shared_info *si,
+ void *data, unsigned int data_len,
+ dma_addr_t *addr, unsigned int *len)
{
- const struct skb_shared_info *si;
const skb_frag_t *fp, *end;
- *len = skb_headlen(skb);
- *addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
+ *len = data_len;
+ *addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
return -ENOMEM;
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
+ if (!si)
+ return 0;
- for (fp = si->frags; fp < end; fp++) {
+ for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) {
*++len = skb_frag_size(fp);
*++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
@@ -44,7 +45,7 @@ unwind:
while (fp-- > si->frags)
dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
- dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE);
return -ENOMEM;
}
@@ -71,6 +72,33 @@ static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}
+/* Write a gather list to the Tx descriptor at @req from @ngle address/length
+ * pairs.
+ */
+static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
+ struct fun_eth_tx_req *req,
+ const dma_addr_t *addrs,
+ const unsigned int *lens,
+ unsigned int ngle)
+{
+ struct fun_dataop_gl *gle;
+ unsigned int i;
+
+ req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
+
+ for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
+ i < ngle && txq_to_end(q, gle); i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+
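+ /* If the gather list wrapped past the end of the ring, write the
+ * remaining entries starting at the ring base.
+ */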
+ if (txq_to_end(q, gle) == 0) {
+ gle = (struct fun_dataop_gl *)q->desc;
+ for ( ; i < ngle; i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+ }
+
+ return gle;
+}
+
static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
return *(__be16 *)&tcp_flag_word(th);
@@ -129,11 +157,13 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
struct fun_eth_tx_req *req;
struct fun_dataop_gl *gle;
const struct tcphdr *th;
- unsigned int ngle, i;
unsigned int l4_hlen;
+ unsigned int ngle;
u16 flags;
- if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
+ shinfo = skb_shinfo(skb);
+ if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
+ skb_headlen(skb), addrs, lens))) {
FUN_QSTAT_INC(q, tx_map_err);
return 0;
}
@@ -146,7 +176,6 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
req->repr_idn = 0;
req->encap_proto = 0;
- shinfo = skb_shinfo(skb);
if (likely(shinfo->gso_size)) {
if (skb->encapsulation) {
u16 ol4_ofst;
@@ -243,18 +272,9 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
}
ngle = shinfo->nr_frags + 1;
- req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);
- for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
- i < ngle && txq_to_end(q, gle); i++, gle++)
- fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
-
- if (txq_to_end(q, gle) == 0) {
- gle = (struct fun_dataop_gl *)q->desc;
- for ( ; i < ngle; i++, gle++)
- fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
- }
+ gle = fun_write_gl(q, req, addrs, lens, ngle);
if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
@@ -371,7 +391,7 @@ static u16 txq_hw_head(const struct funeth_txq *q)
/* Unmap the Tx packet starting at the given descriptor index and
* return the number of Tx descriptors it occupied.
*/
-static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx)
+static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
{
const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
unsigned int ngle = req->dataop.ngather;
@@ -419,7 +439,7 @@ static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
rmb();
do {
- unsigned int pkt_desc = unmap_skb(q, reclaim_idx);
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
struct sk_buff *skb = q->info[reclaim_idx].skb;
trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
@@ -461,20 +481,10 @@ int fun_txq_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx)
-{
- const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
- const struct fun_dataop_gl *gle;
-
- gle = (const struct fun_dataop_gl *)req->dataop.imm;
- dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
- be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
-}
-
-/* Reclaim up to @budget completed Tx descriptors from a TX XDP queue. */
+/* Reclaim up to @budget completed Tx packets from a TX XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
- unsigned int npkts = 0, head, reclaim_idx;
+ unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;
for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
@@ -486,37 +496,49 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
rmb();
do {
- fun_xdp_unmap(q, reclaim_idx);
- page_frag_free(q->info[reclaim_idx].vaddr);
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
+
+ xdp_return_frame(q->info[reclaim_idx].xdpf);
- trace_funeth_tx_free(q, reclaim_idx, 1, head);
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
- reclaim_idx = (reclaim_idx + 1) & q->mask;
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ ndesc += pkt_desc;
npkts++;
} while (reclaim_idx != head && npkts < budget);
}
- q->cons_cnt += npkts;
+ q->cons_cnt += ndesc;
return npkts;
}
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
{
+ unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;
+ const struct skb_shared_info *si = NULL;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t dma[MAX_SKB_FRAGS + 1];
struct fun_eth_tx_req *req;
- struct fun_dataop_gl *gle;
- unsigned int idx;
- dma_addr_t dma;
if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);
- if (!unlikely(fun_txq_avail(q))) {
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ si = xdp_get_shared_info_from_frame(xdpf);
+ tot_len = xdp_get_frame_len(xdpf);
+ nfrags += si->nr_frags;
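+ /* Descriptors needed: request header plus one gather entry per
+ * fragment, rounded up to whole SQEs.
+ */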
+ ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *
+ sizeof(struct fun_dataop_gl)),
+ FUNETH_SQE_SIZE);
+ }
+
+ if (unlikely(fun_txq_avail(q) < ndesc)) {
FUN_QSTAT_INC(q, tx_xdp_full);
return false;
}
- dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
+ if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
+ lens))) {
FUN_QSTAT_INC(q, tx_map_err);
return false;
}
@@ -524,26 +546,25 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
idx = q->prod_cnt & q->mask;
req = fun_tx_desc_addr(q, idx);
req->op = FUN_ETH_OP_TX;
- req->len8 = (sizeof(*req) + sizeof(*gle)) / 8;
+ req->len8 = 0;
req->flags = 0;
req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
req->repr_idn = 0;
req->encap_proto = 0;
fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
- req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);
- gle = (struct fun_dataop_gl *)req->dataop.imm;
- fun_dataop_gl_init(gle, 0, 0, len, dma);
+ fun_write_gl(q, req, dma, lens, nfrags);
- q->info[idx].vaddr = data;
+ q->info[idx].xdpf = xdpf;
u64_stats_update_begin(&q->syncp);
- q->stats.tx_bytes += len;
+ q->stats.tx_bytes += tot_len;
q->stats.tx_pkts++;
u64_stats_update_end(&q->syncp);
- trace_funeth_tx(q, len, idx, 1);
- q->prod_cnt++;
+ trace_funeth_tx(q, tot_len, idx, nfrags);
+ q->prod_cnt += ndesc;
return true;
}
@@ -566,12 +587,9 @@ int fun_xdp_xmit_frames(struct net_device *dev, int n,
if (unlikely(q_idx >= fp->num_xdpqs))
return -ENXIO;
- for (q = xdpqs[q_idx], i = 0; i < n; i++) {
- const struct xdp_frame *xdpf = frames[i];
-
- if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
+ for (q = xdpqs[q_idx], i = 0; i < n; i++)
+ if (!fun_xdp_tx(q, frames[i]))
break;
- }
if (unlikely(flags & XDP_XMIT_FLUSH))
fun_txq_wr_db(q);
@@ -586,7 +604,7 @@ static void fun_txq_purge(struct funeth_txq *q)
while (q->cons_cnt != q->prod_cnt) {
unsigned int idx = q->cons_cnt & q->mask;
- q->cons_cnt += unmap_skb(q, idx);
+ q->cons_cnt += fun_unmap_pkt(q, idx);
dev_kfree_skb_any(q->info[idx].skb);
}
netdev_tx_reset_queue(q->ndq);
@@ -597,9 +615,8 @@ static void fun_xdpq_purge(struct funeth_txq *q)
while (q->cons_cnt != q->prod_cnt) {
unsigned int idx = q->cons_cnt & q->mask;
- fun_xdp_unmap(q, idx);
- page_frag_free(q->info[idx].vaddr);
- q->cons_cnt++;
+ q->cons_cnt += fun_unmap_pkt(q, idx);
+ xdp_return_frame(q->info[idx].xdpf);
}
}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 1711f82cad71..53b7e95213a8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -96,8 +96,8 @@ struct funeth_txq_stats { /* per Tx queue SW counters */
struct funeth_tx_info { /* per Tx descriptor state */
union {
- struct sk_buff *skb; /* associated packet */
- void *vaddr; /* start address for XDP */
+ struct sk_buff *skb; /* associated packet (sk_buff path) */
+ struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
};
};
@@ -246,7 +246,7 @@ static inline int fun_irq_node(const struct fun_irq *p)
int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
int fun_txq_napi_poll(struct napi_struct *napi, int budget);
netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
int fun_xdp_xmit_frames(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index fb9f476fb33c..b36bf9c3e1e4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2033,11 +2033,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* non-zero req_queue_pairs says that user requested a new
* queue count via ethtool's set_channels, so use this
* value for queues distribution across traffic classes
+ * At least one queue pair is needed for the interface
+ * to be usable, hence the else branch below.
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix;
+ else
+ vsi->num_queue_pairs = 1;
}
/* Number of queues per enabled TC */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 36b440b1aaff..cc5b85afd437 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -248,8 +248,6 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
- struct ice_vsi *dflt_vsi; /* default VSI for this switch */
- u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_pf_state {
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 6a463b242c7d..e35371e61e07 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -133,8 +133,8 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
- if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
rule_added = true;
}
@@ -151,7 +151,7 @@ err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -411,7 +411,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 70335f6e8524..a6fff8ebaf9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -658,7 +658,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
rx_desc = ICE_RX_DESC(rx_ring, i);
if (!(rx_desc->wb.status_error0 &
- cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+ (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+ cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;
rx_buf = &rx_ring->rx_buf[i];
@@ -1292,7 +1293,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* promiscuous mode because it's not supported
*/
if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
- ice_is_any_vf_in_promisc(pf)) {
+ ice_is_any_vf_in_unicast_promisc(pf)) {
dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
/* toggle bit back to previous state */
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index bc357dfae306..a830f7f9aed0 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3006,8 +3006,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
- ice_clear_dflt_vsi(pf->first_sw);
+ if (ice_is_vsi_dflt_vsi(vsi))
+ ice_clear_dflt_vsi(vsi);
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
@@ -3690,116 +3690,97 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
- * @sw: switch to check if its default forwarding VSI is free
+ * @pi: port info of the switch with default VSI
*
- * Return true if the default forwarding VSI is already being used, else returns
- * false signalling that it's available to use.
+ * Return true if there is a VSI in the default forwarding VSI list
*/
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
- return (sw->dflt_vsi && sw->dflt_vsi_ena);
+ bool exists = false;
+
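+ /* Only whether any default VSI rule exists matters here; the return
+ * value for VSI handle 0 is ignored.
+ */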
+ ice_check_if_dflt_vsi(pi, 0, &exists);
+ return exists;
}
/**
* ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
- * @sw: switch for the default forwarding VSI to compare against
* @vsi: VSI to compare against default forwarding VSI
*
* If this VSI passed in is the default forwarding VSI then return true, else
* return false
*/
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
- return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
+ return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}
/**
* ice_set_dflt_vsi - set the default forwarding VSI
- * @sw: switch used to assign the default forwarding VSI
* @vsi: VSI getting set as the default forwarding VSI on the switch
*
* If the VSI passed in is already the default VSI and it's enabled just return
* success.
*
- * If there is already a default VSI on the switch and it's enabled then return
- * -EEXIST since there can only be one default VSI per switch.
- *
- * Otherwise try to set the VSI passed in as the switch's default VSI and
- * return the result.
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
*/
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
struct device *dev;
int status;
- if (!sw || !vsi)
+ if (!vsi)
return -EINVAL;
dev = ice_pf_to_dev(vsi->back);
/* the VSI passed in is already the default VSI */
- if (ice_is_vsi_dflt_vsi(sw, vsi)) {
+ if (ice_is_vsi_dflt_vsi(vsi)) {
dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
vsi->vsi_num);
return 0;
}
- /* another VSI is already the default VSI for this switch */
- if (ice_is_dflt_vsi_in_use(sw)) {
- dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
- sw->dflt_vsi->vsi_num);
- return -EEXIST;
- }
-
- status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
return status;
}
- sw->dflt_vsi = vsi;
- sw->dflt_vsi_ena = true;
-
return 0;
}
/**
* ice_clear_dflt_vsi - clear the default forwarding VSI
- * @sw: switch used to clear the default VSI
+ * @vsi: VSI to remove from filter list
*
* If the switch has no default VSI or it's not enabled then return error.
*
* Otherwise try to clear the default VSI and return the result.
*/
-int ice_clear_dflt_vsi(struct ice_sw *sw)
+int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
- struct ice_vsi *dflt_vsi;
struct device *dev;
int status;
- if (!sw)
+ if (!vsi)
return -EINVAL;
- dev = ice_pf_to_dev(sw->pf);
-
- dflt_vsi = sw->dflt_vsi;
+ dev = ice_pf_to_dev(vsi->back);
/* there is no default VSI configured */
- if (!ice_is_dflt_vsi_in_use(sw))
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info))
return -ENODEV;
- status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
- dflt_vsi->vsi_num, status);
+ vsi->vsi_num, status);
return -EIO;
}
- sw->dflt_vsi = NULL;
- sw->dflt_vsi_ena = false;
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 0095329949d4..8712b1d2ceec 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -102,13 +102,10 @@ int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
-
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_clear_dflt_vsi(struct ice_sw *sw);
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
+int ice_set_dflt_vsi(struct ice_vsi *vsi);
+int ice_clear_dflt_vsi(struct ice_vsi *vsi);
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
int ice_get_link_speed_kbps(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 313716615e98..9a3b14d42835 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -410,8 +410,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
- if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
- err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
+ err = ice_set_dflt_vsi(vsi);
if (err && err != -EEXIST) {
netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -424,8 +424,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* Clear Rx filter to remove traffic from wire */
- if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
- err = ice_clear_dflt_vsi(pf->first_sw);
+ if (ice_is_vsi_dflt_vsi(vsi)) {
+ err = ice_clear_dflt_vsi(vsi);
if (err) {
netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -3358,6 +3358,7 @@ static void ice_set_netdev_features(struct net_device *netdev)
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_LOOPBACK;
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
@@ -4656,6 +4657,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_set_safe_mode_caps(hw);
}
+ hw->ucast_shared = true;
+
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -5911,6 +5914,32 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
}
/**
+ * ice_set_loopback - turn on/off loopback mode on underlying PF
+ * @vsi: ptr to VSI
+ * @ena: flag to indicate the on/off setting
+ */
+static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
+{
+ bool if_running = netif_running(vsi->netdev);
+ int ret;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
+ return ret;
+ }
+ }
+ ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
+ if (ret)
+ netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
+ if (if_running)
+ ret = ice_up(vsi);
+
+ return ret;
+}
+
+/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -5918,44 +5947,41 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
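+ /* A set bit in changed means that feature was toggled in this call */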
+ netdev_features_t changed = netdev->features ^ features;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
- if (ice_is_safe_mode(vsi->back)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
+ if (ice_is_safe_mode(pf)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ dev_err(ice_pf_to_dev(pf),
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
- if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
- ice_vsi_manage_rss_lut(vsi, true);
- else if (!(features & NETIF_F_RXHASH) &&
- netdev->features & NETIF_F_RXHASH)
- ice_vsi_manage_rss_lut(vsi, false);
+ if (changed & NETIF_F_RXHASH)
+ ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
ret = ice_set_vlan_features(netdev, features);
if (ret)
return ret;
- if ((features & NETIF_F_NTUPLE) &&
- !(netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, true);
- ice_init_arfs(vsi);
- } else if (!(features & NETIF_F_NTUPLE) &&
- (netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, false);
- ice_clear_arfs(vsi);
+ if (changed & NETIF_F_NTUPLE) {
+ bool ena = !!(features & NETIF_F_NTUPLE);
+
+ ice_vsi_manage_fdir(vsi, ena);
+ ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
}
/* don't turn off hw_tc_offload when ADQ is already enabled */
@@ -5964,13 +5990,17 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return -EACCES;
}
- if ((features & NETIF_F_HW_TC) &&
- !(netdev->features & NETIF_F_HW_TC))
- set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
- else
- clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ if (changed & NETIF_F_HW_TC) {
+ bool ena = !!(features & NETIF_F_HW_TC);
- return 0;
+ ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+
+ return ret;
}
/**
@@ -6005,10 +6035,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
if (vsi->netdev) {
ice_set_rx_mode(vsi->netdev);
- err = ice_vsi_vlan_setup(vsi);
+ if (vsi->type != ICE_VSI_LB) {
+ err = ice_vsi_vlan_setup(vsi);
- if (err)
- return err;
+ if (err)
+ return err;
+ }
}
ice_vsi_cfg_dcb_rings(vsi);
@@ -6990,12 +7022,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
- if (pf->first_sw->dflt_vsi_ena)
- dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
- /* clear the default VSI configuration if it exists */
- pf->first_sw->dflt_vsi = NULL;
- pf->first_sw->dflt_vsi_ena = false;
-
ice_clear_pxe_mode(hw);
err = ice_init_nvm(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 86093b2511d8..3ba1408c56a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1310,39 +1310,6 @@ out_put_vf:
}
/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
- struct ice_sw_recipe *mac_recipe_list =
- &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *rule_head;
- struct mutex *rule_lock; /* protect MAC filter list access */
-
- rule_head = &mac_recipe_list->filt_rules;
- rule_lock = &mac_recipe_list->filt_rule_lock;
-
- mutex_lock(rule_lock);
- list_for_each_entry(list_itr, rule_head, list_entry) {
- u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(existing_mac, umac)) {
- mutex_unlock(rule_lock);
- return true;
- }
- }
-
- mutex_unlock(rule_lock);
-
- return false;
-}
-
-/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -1376,13 +1343,6 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (ret)
goto out_put_vf;
- if (ice_unicast_mac_exists(pf, mac)) {
- netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
- mac, vf_id, mac);
- ret = -EINVAL;
- goto out_put_vf;
- }
-
mutex_lock(&vf->cfg_lock);
/* VF is notified of its new MAC via the PF's response to the
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 4a6a8334a0e0..fce204693dbb 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1762,7 +1762,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
sw_buf->res_type =
@@ -2255,8 +2256,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->sw_id = swid;
pi->pf_vf_num = pf_vf_num;
pi->is_vf = is_vf;
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
@@ -2691,7 +2690,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
@@ -3873,7 +3873,7 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
- * @hw: pointer to the hardware structure
+ * @pi: pointer to the port_info structure
* @vsi_handle: VSI handle to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
@@ -3881,25 +3881,20 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info f_info;
- enum ice_adminq_opc opcode;
- u16 s_rule_size;
+ struct ice_hw *hw = pi->hw;
u16 hw_vsi_id;
int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
-
- s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
- s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
- if (!s_rule)
- return -ENOMEM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&f_info, 0, sizeof(f_info));
@@ -3907,54 +3902,80 @@ int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
f_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_info.vsi_handle = vsi_handle;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = hw->port_info->lport;
f_info.src_id = ICE_SRC_ID_LPORT;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_tx_vsi_rule_id;
}
+ f_list_entry.fltr_info = f_info;
if (set)
- opcode = ice_aqc_opc_add_sw_rules;
+ status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
else
- opcode = ice_aqc_opc_remove_sw_rules;
-
- ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
- if (status || !(f_info.flag & ICE_FLTR_TX_RX))
- goto out;
- if (set) {
- u16 index = le16_to_cpu(s_rule->index);
-
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_tx_vsi_rule_id = index;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_rx_vsi_rule_id = index;
- }
- } else {
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
+ status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
+
+ return status;
+}
+
+/**
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_handle: VSI handle to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ fm_entry->vsi_list_info &&
+ (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
+}
+
+/**
+ * ice_check_if_dflt_vsi - check if VSI is default VSI
+ * @pi: pointer to the port_info structure
+ * @vsi_handle: vsi handle to check for in filter list
+ * @rule_exists: indicates if there are any rules in the default VSI list
+ *
+ * checks if the VSI is in the default VSI list, and also reports
+ * whether any default VSI rule exists at all
+ */
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists)
+{
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_sw_recipe *recp_list;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ bool ret = false;
+
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
+ rule_lock = &recp_list->filt_rule_lock;
+ rule_head = &recp_list->filt_rules;
+
+ mutex_lock(rule_lock);
+
+ if (rule_exists && !list_empty(rule_head))
+ *rule_exists = true;
+
+ list_for_each_entry(fm_entry, rule_head, list_entry) {
+ if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
+ ret = true;
+ break;
}
}
-out:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
+ mutex_unlock(rule_lock);
+
+ return ret;
}
/**
@@ -4074,21 +4095,6 @@ int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
}
/**
- * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
- * @fm_entry: filter entry to inspect
- * @vsi_handle: VSI handle to compare with filter info
- */
-static bool
-ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
-{
- return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
- fm_entry->fltr_info.vsi_handle == vsi_handle) ||
- (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
- fm_entry->vsi_list_info &&
- (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
-}
-
-/**
* ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
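The ice_cfg_dflt_vsi()/ice_check_if_dflt_vsi() rework above drops the per-port dflt_tx/rx_vsi_num bookkeeping in favour of ordinary switch filter rules of type ICE_SW_LKUP_DFLT: whether a VSI is the default is now answered by walking that recipe's filter list under its lock. A minimal userspace sketch of the walk-under-lock membership check follows (plain pthreads and a hand-rolled list as stand-ins, not the driver's structures):

/* Illustrative stand-in for the ice_check_if_dflt_vsi() pattern: walk a
 * mutex-protected rule list, report membership and whether any rule exists.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fltr_entry {
	unsigned int vsi_handle;	/* VSI this rule forwards to */
	struct fltr_entry *next;
};

struct fltr_list {
	pthread_mutex_t lock;		/* protects 'head' */
	struct fltr_entry *head;
};

static struct fltr_list dflt_list = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Returns true if 'vsi_handle' is referenced by any rule; if 'rule_exists'
 * is non-NULL it is set when the list holds at least one rule.
 */
static bool check_if_dflt_vsi(struct fltr_list *list, unsigned int vsi_handle,
			      bool *rule_exists)
{
	struct fltr_entry *e;
	bool found = false;

	pthread_mutex_lock(&list->lock);
	if (rule_exists && list->head)
		*rule_exists = true;
	for (e = list->head; e; e = e->next) {
		if (e->vsi_handle == vsi_handle) {
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&list->lock);
	return found;
}

int main(void)
{
	struct fltr_entry rule = { .vsi_handle = 5 };
	bool rule_exists = false;

	pthread_mutex_lock(&dflt_list.lock);
	rule.next = dflt_list.head;
	dflt_list.head = &rule;
	pthread_mutex_unlock(&dflt_list.lock);

	printf("vsi 5 is default: %d\n",
	       check_if_dflt_vsi(&dflt_list, 5, &rule_exists));
	printf("vsi 7 is default: %d, any rule present: %d\n",
	       check_if_dflt_vsi(&dflt_list, 7, NULL), rule_exists);
	return 0;
}

As in ice_check_if_dflt_vsi(), the "any rule at all" flag is computed under the same lock as the membership walk, so the two answers stay consistent with each other.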
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 59488e3e9d6a..68d8e8a6a189 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -359,7 +359,13 @@ int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
/* Promisc/defport setup for VSIs */
-int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction);
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists);
+
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3f8b7274ed2f..836dce840712 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1751,11 +1751,13 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
protocol = vlan_get_protocol(skb);
- if (eth_p_mpls(protocol))
+ if (eth_p_mpls(protocol)) {
ip.hdr = skb_inner_network_header(skb);
- else
+ l4.hdr = skb_checksum_start(skb);
+ } else {
ip.hdr = skb_network_header(skb);
- l4.hdr = skb_checksum_start(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f2a518a1fd94..861b64322959 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -693,10 +693,6 @@ struct ice_port_info {
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
#define ICE_LPORT_MASK 0xff
- u16 dflt_tx_vsi_rule_id;
- u16 dflt_tx_vsi_num;
- u16 dflt_rx_vsi_rule_id;
- u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 7adf9ddf129e..8fd7c3e37f5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -271,13 +271,14 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
}
/**
- * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
+ * are in unicast promiscuous mode
* @pf: PF structure for accessing VF(s)
*
- * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * Return false if no VF(s) are in unicast promiscuous mode,
* else return true
*/
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
bool is_vf_promisc = false;
struct ice_vf *vf;
@@ -286,8 +287,7 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
rcu_read_lock();
ice_for_each_vf_rcu(pf, bkt, vf) {
/* found a VF that has promiscuous mode configured */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
is_vf_promisc = true;
break;
}
@@ -298,6 +298,73 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
}
/**
+ * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ * @ucast_m: promiscuous mask to apply to unicast
+ * @mcast_m: promiscuous mask to apply to multicast
+ *
+ * Decide which mask should be used for unicast and multicast filters,
+ * based on the presence of VLANs
+ */
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m)
+{
+ if (ice_vf_is_port_vlan_ena(vf) ||
+ ice_vsi_has_non_zero_vlans(vsi)) {
+ *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ } else {
+ *mcast_m = ICE_MCAST_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_PROMISC_BITS;
+ }
+}
+
+/**
+ * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ *
+ * Clear all promiscuous/allmulticast filters for a VF
+ */
+static int
+ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vf->pf;
+ u8 ucast_m, mcast_m;
+ int ret = 0;
+
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ } else {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+ }
+
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
+ }
+ }
+
+ if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
+ }
+ }
+ return ret;
+}
+
+/**
* ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
* @vf: the VF to configure
* @vsi: the VF's VSI
@@ -487,7 +554,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
struct ice_vsi *vsi;
struct device *dev;
struct ice_hw *hw;
- u8 promisc_m;
int err = 0;
bool rsd;
@@ -554,16 +620,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
*/
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
-
- if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
- dev_err(dev, "disabling promiscuous mode failed\n");
- }
+ ice_vf_clear_all_promisc_modes(vf, vsi);
ice_eswitch_del_vf_mac_rule(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 1b4380d6d949..52bd9a3816bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -214,7 +214,10 @@ struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
void ice_set_vf_state_qs_dis(struct ice_vf *vf);
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m);
int
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int
@@ -260,7 +263,7 @@ static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
}
-static inline bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+static inline bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index b2b5d2ee83a5..094e3c97a1ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -999,6 +999,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
int mcast_err = 0, ucast_err = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ u8 mcast_m, ucast_m;
struct device *dev;
int ret = 0;
@@ -1045,39 +1046,33 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
- bool set_dflt_vsi = alluni || allmulti;
+ if (alluni) {
+ /* in this case we're turning on promiscuous mode */
+ ret = ice_set_dflt_vsi(vsi);
+ } else {
+ /* in this case we're turning off promiscuous mode */
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ }
- if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
- /* only attempt to set the default forwarding VSI if
- * it's not currently set
- */
- ret = ice_set_dflt_vsi(pf->first_sw, vsi);
- else if (!set_dflt_vsi &&
- ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
- /* only attempt to free the default forwarding VSI if we
- * are the owner
- */
- ret = ice_clear_dflt_vsi(pf->first_sw);
+ /* in this case we're turning on/off only
+ * allmulticast
+ */
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
if (ret) {
- dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
- set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
+ dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
+ vf->vf_id, ret);
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
goto error_param;
}
} else {
- u8 mcast_m, ucast_m;
-
- if (ice_vf_is_port_vlan_ena(vf) ||
- ice_vsi_has_non_zero_vlans(vsi)) {
- mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
- ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
- } else {
- mcast_m = ICE_MCAST_PROMISC_BITS;
- ucast_m = ICE_UCAST_PROMISC_BITS;
- }
-
if (alluni)
ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
else
@@ -1102,6 +1097,9 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_states))
dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, mcast_err);
}
if (!ucast_err) {
@@ -1114,6 +1112,9 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_states))
dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, ucast_err);
}
error_param:
@@ -2971,7 +2972,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
struct virtchnl_vlan_filtering_caps *vfc,
struct virtchnl_vlan_filter_list_v2 *vfl)
{
- u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
+ u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
+ vfl->num_elements;
if (num_requested_filters > vfc->max_filters)
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 28b19945d716..e64318c110fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -28,6 +28,9 @@
#define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL
+#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL
+#define CN10K_MAX_BURST_SIZE 8453888ULL
+
/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
@@ -35,6 +38,9 @@
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
+#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
+#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
-static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
- u32 *burst_mantissa)
+static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+ u32 *burst_exp, u32 *burst_mantissa)
{
+ int max_burst, max_mantissa;
unsigned int tmp;
+ if (is_dev_otx2(nic->pdev)) {
+ max_burst = MAX_BURST_SIZE;
+ max_mantissa = MAX_BURST_MANTISSA;
+ } else {
+ max_burst = CN10K_MAX_BURST_SIZE;
+ max_mantissa = CN10K_MAX_BURST_MANTISSA;
+ }
+
/* Burst is calculated as
* ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
* Max supported burst size is 130,816 bytes.
*/
- burst = min_t(u32, burst, MAX_BURST_SIZE);
+ burst = min_t(u32, burst, max_burst);
if (burst) {
*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
tmp = burst - rounddown_pow_of_two(burst);
- if (burst < MAX_BURST_MANTISSA)
+ if (burst < max_mantissa)
*burst_mantissa = tmp * 2;
else
*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
} else {
*burst_exp = MAX_BURST_EXPONENT;
- *burst_mantissa = MAX_BURST_MANTISSA;
+ *burst_mantissa = max_mantissa;
}
}
-static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
u32 *mantissa, u32 *div_exp)
{
- unsigned int tmp;
+ u64 tmp;
/* Rate calculation by hardware
*
@@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
}
}
-static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
+ u64 maxrate, u32 burst)
{
- struct otx2_hw *hw = &nic->hw;
- struct nix_txschq_config *req;
u32 burst_exp, burst_mantissa;
u32 exp, mantissa, div_exp;
+ u64 regval = 0;
+
+ /* Get exponent and mantissa values from the desired rate */
+ otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
+ otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+ if (is_dev_otx2(nic->pdev)) {
+ regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ } else {
+ regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ }
+
+ return regval;
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
+ u32 burst, u64 maxrate)
+{
+ struct otx2_hw *hw = &nic->hw;
+ struct nix_txschq_config *req;
int txschq, err;
/* All SQs share the same TL4, so pick the first scheduler */
txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
- /* Get exponent and mantissa values from the desired rate */
- otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
- otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
-
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
if (!req) {
@@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
req->lvl = NIX_TXSCH_LVL_TL4;
req->num_regs = 1;
req->reg[0] = NIX_AF_TL4X_PIR(txschq);
- req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
- FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
- FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
- FIELD_PREP(TLX_RATE_EXPONENT, exp) |
- FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
err = otx2_sync_mbox_msg(&nic->mbox);
mutex_unlock(&nic->mbox.lock);
@@ -230,7 +264,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action *actions = &cls->rule->action;
struct flow_action_entry *entry;
- u32 rate;
+ u64 rate;
int err;
err = otx2_tc_validate_flow(nic, actions, extack);
@@ -256,7 +290,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
}
/* Convert bytes per second to Mbps */
rate = entry->police.rate_bytes_ps * 8;
- rate = max_t(u32, rate / 1000000, 1);
+ rate = max_t(u64, rate / 1000000, 1);
err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
if (err)
return err;
@@ -614,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
flow_spec->dport = match.key->dst;
flow_mask->dport = match.mask->dst;
- if (ip_proto == IPPROTO_UDP)
- req->features |= BIT_ULL(NPC_DPORT_UDP);
- else if (ip_proto == IPPROTO_TCP)
- req->features |= BIT_ULL(NPC_DPORT_TCP);
- else if (ip_proto == IPPROTO_SCTP)
- req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+ if (flow_mask->dport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
flow_spec->sport = match.key->src;
flow_mask->sport = match.mask->src;
- if (ip_proto == IPPROTO_UDP)
- req->features |= BIT_ULL(NPC_SPORT_UDP);
- else if (ip_proto == IPPROTO_TCP)
- req->features |= BIT_ULL(NPC_SPORT_TCP);
- else if (ip_proto == IPPROTO_SCTP)
- req->features |= BIT_ULL(NPC_SPORT_SCTP);
+
+ if (flow_mask->sport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
}
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
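Two details of the otx2_tc.c change can be verified outside the driver: the burst formula quoted in the code comment, burst = ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256, reproduces both MAX_BURST_SIZE (130,816 bytes, 8-bit mantissa) and the new CN10K_MAX_BURST_SIZE (8,453,888 bytes, 15-bit mantissa) when the fields saturate, and widening the rate variable to u64 is what keeps policer rates above 2^32 bits per second (about 4.29 Gbit/s) from being truncated. A standalone C check (userspace arithmetic only, not driver code):

/* Sanity-check of the burst formula quoted in otx2_tc.c and of the u64
 * rate widening.  Standalone userspace C with stand-in names.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t burst_bytes(uint64_t mantissa, unsigned int exponent)
{
	return ((256 + mantissa) << (1 + exponent)) / 256;
}

int main(void)
{
	/* OcteonTx2: 8-bit mantissa (max 0xFF), 4-bit exponent (max 0xF) */
	printf("otx2 max burst:  %llu\n",
	       (unsigned long long)burst_bytes(0xFF, 0xF));	/* 130816 */

	/* CN10K: 15-bit mantissa (max 0x7FFF), 4-bit exponent (max 0xF) */
	printf("cn10k max burst: %llu\n",
	       (unsigned long long)burst_bytes(0x7FFF, 0xF));	/* 8453888 */

	/* Why the rate variable was widened to u64: a 5 Gbit/s policer
	 * expressed in bits per second no longer fits in 32 bits.
	 */
	uint64_t rate_bytes_ps = 625000000ULL;	/* 5 Gbit/s in bytes/s */
	uint64_t bps = rate_bytes_ps * 8;
	printf("5 Gbit/s as bps: %llu%s\n", (unsigned long long)bps,
	       bps > 0xFFFFFFFFULL ? " (does not fit in u32)" : "");
	return 0;
}

Note that the unchanged in-code comment still quotes the 130,816-byte ceiling, which after this patch applies only to OcteonTx2 silicon.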
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index c267ca1ccdba..4b64bda3f9c2 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -847,7 +847,7 @@ static void prestera_event_handlers_unregister(struct prestera_switch *sw)
static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
{
struct device_node *base_mac_np;
- int ret;
+ int ret = 0;
if (sw->np) {
base_mac_np = of_parse_phandle(sw->np, "base-mac-provider", 0);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c370d6589596..d9426b01f462 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1001,7 +1001,7 @@ static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
- bool napi)
+ struct xdp_frame_bulk *bq, bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
@@ -1031,23 +1031,24 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
- if (tx_buf->type == MTK_TYPE_SKB) {
- if (tx_buf->data &&
- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
struct sk_buff *skb = tx_buf->data;
if (napi)
napi_consume_skb(skb, napi);
else
dev_kfree_skb_any(skb);
- }
- } else if (tx_buf->data) {
- struct xdp_frame *xdpf = tx_buf->data;
+ } else {
+ struct xdp_frame *xdpf = tx_buf->data;
- if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
- xdp_return_frame_rx_napi(xdpf);
- else
- xdp_return_frame(xdpf);
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else if (bq)
+ xdp_return_frame_bulk(xdpf, bq);
+ else
+ xdp_return_frame(xdpf);
+ }
}
tx_buf->flags = 0;
tx_buf->data = NULL;
@@ -1297,7 +1298,7 @@ err_dma:
tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
- mtk_tx_unmap(eth, tx_buf, false);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
@@ -1523,68 +1524,112 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
skb_free_frag(data);
}
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+ struct mtk_tx_dma_desc_info *txd_info,
+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+ void *data, u16 headroom, int index, bool dma_map)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd_pdma;
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info->addr = dma_map_single(eth->dma_dev, data,
+ txd_info->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+ return -ENOMEM;
+
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(data);
+
+ txd_info->addr = page_pool_get_dma_addr(page) +
+ sizeof(struct xdp_frame) + headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+ txd_info->size, DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+ index);
+
+ return 0;
+}
+
static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
struct net_device *dev, bool dma_map)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma_desc_info txd_info = {
.size = xdpf->len,
.first = true,
- .last = true,
+ .last = !xdp_frame_has_frags(xdpf),
};
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_tx_dma *txd, *txd_pdma;
- int err = 0, index = 0, n_desc = 1;
- struct mtk_tx_buf *tx_buf;
+ int err, index = 0, n_desc = 1, nr_frags;
+ struct mtk_tx_dma *htxd, *txd, *txd_pdma;
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+ void *data = xdpf->data;
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
return -EBUSY;
- if (unlikely(atomic_read(&ring->free_count) <= 1))
+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
return -EBUSY;
spin_lock(&eth->page_lock);
txd = ring->next_free;
if (txd == ring->last_free) {
- err = -ENOMEM;
- goto out;
+ spin_unlock(&eth->page_lock);
+ return -ENOMEM;
}
+ htxd = txd;
tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
- if (dma_map) { /* ndo_xdp_xmit */
- txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
- txd_info.size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
- err = -ENOMEM;
- goto out;
- }
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- } else {
- struct page *page = virt_to_head_page(xdpf->data);
+ for (;;) {
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ data, xdpf->headroom, index, dma_map);
+ if (err < 0)
+ goto unmap;
- txd_info.addr = page_pool_get_dma_addr(page) +
- sizeof(*xdpf) + xdpf->headroom;
- dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
- txd_info.size,
- DMA_BIDIRECTIONAL);
- }
- mtk_tx_set_dma_desc(dev, txd, &txd_info);
+ if (txd_info.last)
+ break;
- tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto unmap;
- txd_pdma = qdma_to_pdma(ring, txd);
- setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
- index++);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = skb_frag_size(&sinfo->frags[index]);
+ txd_info.last = index + 1 == nr_frags;
+ data = skb_frag_address(&sinfo->frags[index]);
+ index++;
+ }
/* store xdpf for cleanup */
- tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
- tx_buf->data = xdpf;
+ htx_buf->data = xdpf;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ txd_pdma = qdma_to_pdma(ring, txd);
if (index & 1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
@@ -1608,7 +1653,24 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
-out:
+
+ spin_unlock(&eth->page_lock);
+
+ return 0;
+
+unmap:
+ while (htxd != txd) {
+ txd_pdma = qdma_to_pdma(ring, htxd);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+ }
+
spin_unlock(&eth->page_lock);
return err;
@@ -1913,6 +1975,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
struct mtk_tx_dma *desc;
u32 cpu, dma;
@@ -1920,6 +1983,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
@@ -1937,25 +2001,23 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if (!tx_buf->data)
break;
- if (tx_buf->type == MTK_TYPE_SKB &&
- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- struct sk_buff *skb = tx_buf->data;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
- bytes[mac] += skb->len;
- done[mac]++;
- budget--;
- } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
- tx_buf->type == MTK_TYPE_XDP_NDO) {
+ bytes[mac] += skb->len;
+ done[mac]++;
+ }
budget--;
}
-
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = next_cpu;
}
+ xdp_flush_frame_bulk(&bq);
ring->last_free_ptr = cpu;
mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
@@ -1968,29 +2030,29 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
if (!tx_buf->data)
break;
- if (tx_buf->type == MTK_TYPE_SKB &&
- tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
- struct sk_buff *skb = tx_buf->data;
- bytes[0] += skb->len;
- done[0]++;
- budget--;
- } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
- tx_buf->type == MTK_TYPE_XDP_NDO) {
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[0] += skb->len;
+ done[0]++;
+ }
budget--;
}
-
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
@@ -1998,6 +2060,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
}
+ xdp_flush_frame_bulk(&bq);
ring->cpu_idx = cpu;
@@ -2207,7 +2270,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth, &ring->buf[i], false);
+ mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
}
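The mtk_tx_unmap() signature change threads an xdp_frame_bulk through both completion loops, so XDP frames that are not eligible for the NAPI fast path are batched via xdp_return_frame_bulk() and released with a single xdp_flush_frame_bulk() per poll instead of one return call per frame. A simplified userspace sketch of that accumulate-then-flush idea (a stand-in bulk structure, not the kernel xdp_frame_bulk API):

/* Simplified illustration of the batching pattern: queue objects during the
 * completion loop, release them in one batch at the end.  Stand-in types.
 */
#include <stdio.h>
#include <stdlib.h>

#define BULK_SIZE 16

struct bulk {
	void *q[BULK_SIZE];
	int count;
};

static void bulk_init(struct bulk *bq)
{
	bq->count = 0;
}

static void bulk_flush(struct bulk *bq)
{
	/* one release pass for the whole batch */
	for (int i = 0; i < bq->count; i++)
		free(bq->q[i]);
	if (bq->count)
		printf("flushed %d buffers in one batch\n", bq->count);
	bq->count = 0;
}

static void bulk_return(struct bulk *bq, void *obj)
{
	if (bq->count == BULK_SIZE)	/* batch full: flush early */
		bulk_flush(bq);
	bq->q[bq->count++] = obj;
}

int main(void)
{
	struct bulk bq;

	bulk_init(&bq);			/* before the completion loop */
	for (int i = 0; i < 40; i++)	/* the loop over completed descriptors */
		bulk_return(&bq, malloc(32));
	bulk_flush(&bq);		/* final flush once the ring is drained */
	return 0;
}

mtk_poll_tx_qdma() and mtk_poll_tx_pdma() follow the same shape: xdp_frame_bulk_init() before the loop, bulk returns inside it, xdp_flush_frame_bulk() after it.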
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 5b11557f1ae4..0eb7b83637d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -204,9 +204,13 @@ out:
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
int err = 0;
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
@@ -215,6 +219,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
err);
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
}
static void dump_err_buf(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index ac5468b77488..82a07a31cde7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -226,10 +226,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create cr-space region */
crdump->region_crspace =
- devlink_region_create(devlink,
- &region_cr_space_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- pci_resource_len(pdev, 0));
+ devl_region_create(devlink,
+ &region_cr_space_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_cr_space_str,
@@ -237,10 +237,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create fw-health region */
crdump->region_fw_health =
- devlink_region_create(devlink,
- &region_fw_health_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- HEALTH_BUFFER_SIZE);
+ devl_region_create(devlink,
+ &region_fw_health_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_fw_health_str,
@@ -253,6 +253,6 @@ void mlx4_crdump_end(struct mlx4_dev *dev)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
- devlink_region_destroy(crdump->region_fw_health);
- devlink_region_destroy(crdump->region_crspace);
+ devl_region_destroy(crdump->region_fw_health);
+ devl_region_destroy(crdump->region_crspace);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b187c210d4d6..78c5f40382c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3033,7 +3033,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
int err;
- err = devlink_port_register(devlink, &info->devlink_port, port);
+ err = devl_port_register(devlink, &info->devlink_port, port);
if (err)
return err;
@@ -3071,7 +3071,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3093,7 +3093,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3109,7 +3109,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
@@ -3333,6 +3333,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
{
+ struct devlink *devlink = priv_to_devlink(priv);
struct mlx4_dev *dev;
unsigned sum = 0;
int err;
@@ -3341,6 +3342,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
+ devl_assert_locked(devlink);
dev = &priv->dev;
INIT_LIST_HEAD(&priv->ctx_list);
@@ -3999,6 +4001,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
priv = devlink_priv(devlink);
dev = &priv->dev;
@@ -4026,6 +4029,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -4035,6 +4039,7 @@ err_params_unregister:
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
+ devl_unlock(devlink);
devlink_free(devlink);
return ret;
}
@@ -4056,8 +4061,11 @@ static void mlx4_unload_one(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int pci_dev_data;
+ struct devlink *devlink;
int p, i;
+ devlink = priv_to_devlink(priv);
+ devl_assert_locked(devlink);
if (priv->removed)
return;
@@ -4137,6 +4145,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_unregister(devlink);
+ devl_lock(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4172,6 +4181,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
+ devl_unlock(devlink);
devlink_free(devlink);
}
@@ -4292,15 +4302,20 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4333,6 +4348,7 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int err;
@@ -4340,6 +4356,8 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
@@ -4358,19 +4376,23 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
}
end:
mutex_unlock(&persist->interface_state_mutex);
-
+ devl_unlock(devlink);
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
mlx4_pci_disable_device(dev);
}
@@ -4385,12 +4407,16 @@ static int __maybe_unused mlx4_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(dev, "suspend was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return 0;
}
@@ -4402,6 +4428,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int ret = 0;
@@ -4409,6 +4436,8 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
@@ -4422,6 +4451,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
}
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return ret;
}
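The mlx4 hunks above establish one lock order for every path that can unload or reload the device: the devlink instance lock (devl_lock) is taken before persist->interface_state_mutex and released after it, in the error handler, PCI error/resume, shutdown, suspend and resume paths alike. A tiny pthread sketch of the same outer-then-inner ordering (hypothetical locks and paths, not the mlx4 structures):

/* Outer-then-inner lock ordering: the "devlink" lock is always taken before
 * the "interface state" mutex and released after it, on every path.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devlink_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t intf_state_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */

static void unload_path(const char *who)
{
	pthread_mutex_lock(&devlink_lock);	/* devl_lock()   */
	pthread_mutex_lock(&intf_state_lock);	/* mutex_lock()  */
	printf("%s: device unloaded under both locks\n", who);
	pthread_mutex_unlock(&intf_state_lock);
	pthread_mutex_unlock(&devlink_lock);	/* devl_unlock() */
}

static void *error_handler(void *arg)
{
	(void)arg;
	unload_path("error handler");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, error_handler, NULL);
	unload_path("shutdown");	/* same order on every path: no ABBA deadlock */
	pthread_join(t, NULL);
	return 0;
}

Keeping the order identical in every caller is what lets mlx4_load_one() and mlx4_unload_one() simply assert devl_assert_locked() instead of taking the lock themselves.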
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ccf2068d2e79..0571e40c6ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -335,13 +335,12 @@ static void del_adev(struct auxiliary_device *adev)
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
struct auxiliary_driver *adrv;
int ret = 0, i;
- devl_lock(devlink);
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
@@ -394,20 +393,18 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
}
priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
mutex_unlock(&mlx5_intf_mutex);
- devl_unlock(devlink);
return ret;
}
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
struct auxiliary_driver *adrv;
pm_message_t pm = {};
int i;
- devl_lock(devlink);
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
@@ -441,21 +438,17 @@ skip_suspend:
priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
- devl_unlock(devlink);
}
int mlx5_register_device(struct mlx5_core_dev *dev)
{
- struct devlink *devlink;
int ret;
- devlink = priv_to_devlink(dev);
- devl_lock(devlink);
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
mutex_unlock(&mlx5_intf_mutex);
- devl_unlock(devlink);
if (ret)
mlx5_unregister_device(dev);
@@ -464,15 +457,11 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
- struct devlink *devlink;
-
- devlink = priv_to_devlink(dev);
- devl_lock(devlink);
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
mutex_unlock(&mlx5_intf_mutex);
- devl_unlock(devlink);
}
static int add_drivers(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index f85166e587f2..66c6a7017695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -104,7 +104,16 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- return mlx5_fw_reset_wait_reset_done(dev);
+ err = mlx5_fw_reset_wait_reset_done(dev);
+ if (err)
+ return err;
+
+ mlx5_unload_one_devl_locked(dev);
+ err = mlx5_health_wait_pci_up(dev);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+
+ return err;
}
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
@@ -134,6 +143,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
+ int ret = 0;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
if (sf_dev_allocated) {
@@ -156,17 +166,21 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one(dev);
- return 0;
+ mlx5_unload_one_devl_locked(dev);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
- return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
- return mlx5_devlink_reload_fw_activate(devlink, extack);
+ ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+ else
+ ret = mlx5_devlink_reload_fw_activate(devlink, extack);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
+
+ return ret;
}
static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
@@ -174,24 +188,27 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int ret = 0;
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- return mlx5_load_one(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return mlx5_load_one(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
- return 0;
+ return ret;
}
static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
@@ -828,28 +845,28 @@ static int mlx5_devlink_traps_register(struct devlink *devlink)
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
- err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
if (err)
return err;
- err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
- &core_dev->priv);
+ err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
if (err)
goto err_trap_group;
return 0;
err_trap_group:
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
return err;
}
static void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
- devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
}
int mlx5_devlink_register(struct devlink *devlink)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b07228f69b91..bea4a1329474 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -921,7 +921,7 @@ struct mlx5e_priv {
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
- struct mlx5e_flow_steering fs;
+ struct mlx5e_flow_steering *fs;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -987,6 +987,8 @@ enum mlx5e_profile_feature {
MLX5E_PROFILE_FEATURE_PTP_RX,
MLX5E_PROFILE_FEATURE_PTP_TX,
MLX5E_PROFILE_FEATURE_QOS_HTB,
+ MLX5E_PROFILE_FEATURE_FS_VLAN,
+ MLX5E_PROFILE_FEATURE_FS_TC,
};
struct mlx5e_profile {
@@ -1022,7 +1024,6 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 6e3a90a959e9..9b8cdf2e68ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -15,29 +15,6 @@ enum {
MLX5E_TC_MISS_LEVEL,
};
-struct mlx5e_tc_table {
- /* Protects the dynamic assignment of the t parameter
- * which is the nic tc root table.
- */
- struct mutex t_lock;
- struct mlx5_flow_table *t;
- struct mlx5_flow_table *miss_t;
- struct mlx5_fs_chains *chains;
- struct mlx5e_post_act *post_act;
-
- struct rhashtable ht;
-
- struct mod_hdr_tbl mod_hdr;
- struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-
- struct notifier_block netdevice_nb;
- struct netdev_net_notifier netdevice_nn;
-
- struct mlx5_tc_ct_priv *ct;
- struct mapping_ctx *mapping;
-};
-
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
@@ -160,16 +137,20 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
struct mlx5e_accel_fs_tcp;
#endif
+struct mlx5e_profile;
struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
+ bool state_destroy;
+ bool vlan_strip_disable;
+ struct mlx5_core_dev *mdev;
struct mlx5_flow_namespace *ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
- struct mlx5e_tc_table tc;
+ struct mlx5e_tc_table *tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
@@ -200,13 +181,22 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-int mlx5e_fs_init(struct mlx5e_priv *priv);
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy);
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
-
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index 7aa25a5e29d7..e153d6119e02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -94,7 +94,7 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_udp = priv->fs.udp;
+ fs_udp = priv->fs->udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
@@ -121,10 +121,10 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ
struct mlx5e_fs_udp *fs_udp;
int err;
- fs_udp = priv->fs.udp;
+ fs_udp = priv->fs->udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -208,7 +208,7 @@ out:
static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+ struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -218,7 +218,7 @@ static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -278,10 +278,10 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- dest.ft = priv->fs.udp->tables[i].t;
+ dest.ft = priv->fs->udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -294,7 +294,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
{
- struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+ struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
int i;
if (!fs_udp)
@@ -309,20 +309,20 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
- priv->fs.udp = NULL;
+ priv->fs->udp = NULL;
}
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
{
int i, err;
- if (priv->fs.udp) {
- priv->fs.udp->ref_cnt++;
+ if (priv->fs->udp) {
+ priv->fs->udp->ref_cnt++;
return 0;
}
- priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
- if (!priv->fs.udp)
+ priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
+ if (!priv->fs->udp)
return -ENOMEM;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
@@ -335,16 +335,16 @@ int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_tables;
- priv->fs.udp->ref_cnt = 1;
+ priv->fs->udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
- fs_udp_destroy_table(priv->fs.udp, i);
+ fs_udp_destroy_table(priv->fs->udp, i);
- kfree(priv->fs.udp);
- priv->fs.udp = NULL;
+ kfree(priv->fs->udp);
+ priv->fs->udp = NULL;
return err;
}
@@ -371,7 +371,7 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_any = priv->fs.any;
+ fs_any = priv->fs->any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
@@ -398,10 +398,10 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv)
struct mlx5e_fs_any *fs_any;
int err;
- fs_any = priv->fs.any;
+ fs_any = priv->fs->any;
fs_any_t = &fs_any->table;
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -474,7 +474,7 @@ err:
static int fs_any_create_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fs.any->table;
+ struct mlx5e_flow_table *ft = &priv->fs->any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -484,7 +484,7 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -514,7 +514,7 @@ static int fs_any_disable(struct mlx5e_priv *priv)
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -530,10 +530,10 @@ static int fs_any_enable(struct mlx5e_priv *priv)
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.any->table.t;
+ dest.ft = priv->fs->any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -555,7 +555,7 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
{
- struct mlx5e_fs_any *fs_any = priv->fs.any;
+ struct mlx5e_fs_any *fs_any = priv->fs->any;
if (!fs_any)
return;
@@ -568,20 +568,20 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
fs_any_destroy_table(fs_any);
kfree(fs_any);
- priv->fs.any = NULL;
+ priv->fs->any = NULL;
}
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
{
int err;
- if (priv->fs.any) {
- priv->fs.any->ref_cnt++;
+ if (priv->fs->any) {
+ priv->fs->any->ref_cnt++;
return 0;
}
- priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
- if (!priv->fs.any)
+ priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
+ if (!priv->fs->any)
return -ENOMEM;
err = fs_any_create_table(priv);
@@ -592,14 +592,14 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_table;
- priv->fs.any->ref_cnt = 1;
+ priv->fs->any->ref_cnt = 1;
return 0;
err_destroy_table:
- fs_any_destroy_table(priv->fs.any);
+ fs_any_destroy_table(priv->fs->any);
- kfree(priv->fs.any);
- priv->fs.any = NULL;
+ kfree(priv->fs->any);
+ priv->fs->any = NULL;
return err;
}
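These hunks also show how the UDP/ANY redirect tables are shared: mlx5e_fs_tt_redirect_*_create() only bumps ref_cnt when the tables already exist, and *_destroy() frees them once the last reference is dropped. A minimal get-or-create/refcount sketch of that shape, in standalone userspace C with hypothetical names (not the driver's API):

#include <stdlib.h>

/* Sketch only: a shared object created on first use and freed on last put.
 * struct fs_table, fs_table_get() and fs_table_put() are made-up names. */
struct fs_table {
        int ref_cnt;
        /* hardware flow-table handles would live here */
};

/* Return the shared table, creating it on first use. */
static struct fs_table *fs_table_get(struct fs_table **slot)
{
        if (*slot) {
                (*slot)->ref_cnt++;     /* already created: take a reference */
                return *slot;
        }
        *slot = calloc(1, sizeof(**slot));
        if (!*slot)
                return NULL;
        (*slot)->ref_cnt = 1;
        return *slot;
}

/* Drop a reference; free and clear the slot when the last user is gone. */
static void fs_table_put(struct fs_table **slot)
{
        if (!*slot || --(*slot)->ref_cnt)
                return;
        free(*slot);
        *slot = NULL;
}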
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 78ad96cf4222..903de88bab53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -624,7 +624,7 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!ptp_fs->valid)
return;
@@ -641,7 +641,7 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
struct mlx5_flow_handle *rule;
int err;
@@ -808,13 +808,13 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
if (!ptp_fs)
return -ENOMEM;
- priv->fs.ptp_fs = ptp_fs;
+ priv->fs->ptp_fs = ptp_fs;
return 0;
}
void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 86fa0bdbee36..fac7e3ff2674 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -21,6 +21,7 @@
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
+#include "en/tc/act/act.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
@@ -511,6 +512,120 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
return 0;
}
+static int
+mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct flow_action_entry *action;
+ struct mlx5e_tc_act *act;
+ bool add = false;
+ int i;
+
+ /* There is currently no use case for more than one action (e.g. pedit).
+ * When there is, we will need to handle cleaning up multiple actions on error.
+ */
+ if (!flow_offload_has_one_action(&fl_act->action))
+ return -EOPNOTSUPP;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ flow_action_for_each(i, action, &fl_act->action) {
+ act = mlx5e_tc_act_get(action->id, ns_type);
+ if (!act)
+ continue;
+
+ if (!act->offload_action)
+ continue;
+
+ if (!act->offload_action(priv, fl_act, action))
+ add = true;
+ }
+
+ return add ? 0 : -EOPNOTSUPP;
+}
+
+static int
+mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->destroy_action)
+ return -EOPNOTSUPP;
+
+ return act->destroy_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->stats_action)
+ return -EOPNOTSUPP;
+
+ return act->stats_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ switch (fl_act->command) {
+ case FLOW_ACT_REPLACE:
+ return mlx5e_rep_indr_replace_act(rpriv, fl_act);
+ case FLOW_ACT_DESTROY:
+ return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
+ case FLOW_ACT_STATS:
+ return mlx5e_rep_indr_stats_act(rpriv, fl_act);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
+ enum tc_setup_type type,
+ void *data)
+{
+ if (!data)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_ACT:
+ return mlx5e_rep_indr_setup_act(rpriv, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
@@ -518,7 +633,7 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
- return -EOPNOTSUPP;
+ return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);
switch (type) {
case TC_SETUP_BLOCK:
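With this change, an indirect setup callback invoked without a netdev is routed through mlx5e_rep_indr_no_dev_setup() to mlx5e_rep_indr_setup_act(), which dispatches FLOW_ACT_REPLACE / FLOW_ACT_DESTROY / FLOW_ACT_STATS to the optional offload_action / destroy_action / stats_action hooks of the resolved mlx5e_tc_act, returning -EOPNOTSUPP when a hook is missing. A compact sketch of that dispatch shape, in standalone C with made-up names (not the driver API):

#include <errno.h>

/* Sketch only: dispatch an offload command to optional per-action hooks. */
enum act_command { ACT_REPLACE, ACT_DESTROY, ACT_STATS };

struct act_ops {
        int (*offload)(void *ctx);
        int (*destroy)(void *ctx);
        int (*stats)(void *ctx);
};

static int act_setup(const struct act_ops *ops, enum act_command cmd, void *ctx)
{
        switch (cmd) {
        case ACT_REPLACE:
                return ops->offload ? ops->offload(ctx) : -EOPNOTSUPP;
        case ACT_DESTROY:
                return ops->destroy ? ops->destroy(ctx) : -EOPNOTSUPP;
        case ACT_STATS:
                return ops->stats ? ops->stats(ctx) : -EOPNOTSUPP;
        default:
                return -EOPNOTSUPP;
        }
}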
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 095ff8ef80e2..e1570ff056ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -50,6 +50,16 @@ struct mlx5e_tc_act {
bool (*is_multi_table_act)(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr);
+
+ int (*offload_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act);
+
+ int (*destroy_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
+
+ int (*stats_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
};
struct mlx5e_tc_flow_action {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index 4726bcb46eec..69949ab830b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -21,7 +21,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index 4bd9c04a49e3..37522352e4b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -24,14 +24,9 @@ tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
}
static int
-tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
- const struct flow_action_entry *act,
- struct mlx5e_priv *priv,
- struct mlx5_flow_attr *attr)
+fill_meter_params_from_act(const struct flow_action_entry *act,
+ struct mlx5e_flow_meter_params *params)
{
- struct mlx5e_flow_meter_params *params;
-
- params = &attr->meter_attr.params;
params->index = act->hw_index;
if (act->police.rate_bytes_ps) {
params->mode = MLX5_RATE_LIMIT_BPS;
@@ -46,6 +41,21 @@ tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
return -EOPNOTSUPP;
}
+ return 0;
+}
+
+static int
+tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ int err;
+
+ err = fill_meter_params_from_act(act, &attr->meter_attr.params);
+ if (err)
+ return err;
+
attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
@@ -60,8 +70,84 @@ tc_act_is_multi_table_act_police(struct mlx5e_priv *priv,
return true;
}
+static int
+tc_act_police_offload(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ int err = 0;
+
+ err = fill_meter_params_from_act(act, &params);
+ if (err)
+ return err;
+
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter) && PTR_ERR(meter) == -ENOENT) {
+ meter = mlx5e_tc_meter_replace(priv->mdev, &params);
+ } else if (!IS_ERR(meter)) {
+ err = mlx5e_tc_meter_update(meter, &params);
+ mlx5e_tc_meter_put(meter);
+ }
+
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ err = PTR_ERR(meter);
+ }
+
+ return err;
+}
+
+static int
+tc_act_police_destroy(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+ /* the first put balances the get above; the second releases the meter */
+ mlx5e_tc_meter_put(meter);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+static int
+tc_act_police_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ u64 bytes, packets, drops, lastuse;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+
+ mlx5e_tc_meter_get_stats(meter, &bytes, &packets, &drops, &lastuse);
+ flow_stats_update(&fl_act->stats, bytes, packets, drops, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
struct mlx5e_tc_act mlx5e_tc_act_police = {
.can_offload = tc_act_can_offload_police,
.parse_action = tc_act_parse_police,
.is_multi_table_act = tc_act_is_multi_table_act_police,
+ .offload_action = tc_act_police_offload,
+ .destroy_action = tc_act_police_destroy,
+ .stats_action = tc_act_police_stats,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index ca33f673396f..a53e205f4a89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -6,7 +6,6 @@
#include "en/tc/post_act.h"
#include "meter.h"
#include "en/tc_priv.h"
-#include "post_meter.h"
#define MLX5_START_COLOR_SHIFT 28
#define MLX5_METER_MODE_SHIFT 24
@@ -47,8 +46,6 @@ struct mlx5e_flow_meters {
struct mlx5_core_dev *mdev;
struct mlx5e_post_act *post_act;
-
- struct mlx5e_post_meter_priv *post_meter;
};
static void
@@ -243,6 +240,7 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
struct mlx5_core_dev *mdev = flow_meters->mdev;
struct mlx5e_flow_meter_aso_obj *meters_obj;
struct mlx5e_flow_meter_handle *meter;
+ struct mlx5_fc *counter;
int err, pos, total;
u32 id;
@@ -250,6 +248,20 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
if (!meter)
return ERR_PTR(-ENOMEM);
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_red_counter;
+ }
+ meter->red_counter = counter;
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_green_counter;
+ }
+ meter->green_counter = counter;
+
meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
struct mlx5e_flow_meter_aso_obj,
entry);
@@ -295,6 +307,10 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
err_mem:
mlx5e_flow_meter_destroy_aso_obj(mdev, id);
err_create:
+ mlx5_fc_destroy(mdev, meter->green_counter);
+err_green_counter:
+ mlx5_fc_destroy(mdev, meter->red_counter);
+err_red_counter:
kfree(meter);
return ERR_PTR(err);
}
@@ -307,6 +323,9 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
struct mlx5e_flow_meter_aso_obj *meters_obj;
int n, pos;
+ mlx5_fc_destroy(mdev, meter->green_counter);
+ mlx5_fc_destroy(mdev, meter->red_counter);
+
meters_obj = meter->meters_obj;
pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
bitmap_clear(meters_obj->meters_map, pos, 1);
@@ -325,75 +344,155 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
kfree(meter);
}
+static struct mlx5e_flow_meter_handle *
+__mlx5e_tc_meter_get(struct mlx5e_flow_meters *flow_meters, u32 index)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ hash_for_each_possible(flow_meters->hashtbl, meter, hlist, index)
+ if (meter->params.index == index)
+ goto add_ref;
+
+ return ERR_PTR(-ENOENT);
+
+add_ref:
+ meter->refcnt++;
+
+ return meter;
+}
+
struct mlx5e_flow_meter_handle *
mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
{
struct mlx5e_flow_meters *flow_meters;
struct mlx5e_flow_meter_handle *meter;
- int err;
flow_meters = mlx5e_get_flow_meters(mdev);
if (!flow_meters)
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&flow_meters->sync_lock);
- hash_for_each_possible(flow_meters->hashtbl, meter, hlist, params->index)
- if (meter->params.index == params->index)
- goto add_ref;
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ mutex_unlock(&flow_meters->sync_lock);
- meter = __mlx5e_flow_meter_alloc(flow_meters);
- if (IS_ERR(meter)) {
- err = PTR_ERR(meter);
- goto err_alloc;
+ return meter;
+}
+
+static void
+__mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ if (--meter->refcnt == 0) {
+ hash_del(&meter->hlist);
+ __mlx5e_flow_meter_free(meter);
}
+}
+
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+
+ mutex_lock(&flow_meters->sync_lock);
+ __mlx5e_tc_meter_put(meter);
+ mutex_unlock(&flow_meters->sync_lock);
+}
+
+static struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = __mlx5e_flow_meter_alloc(flow_meters);
+ if (IS_ERR(meter))
+ return meter;
hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
meter->params.index = params->index;
-
-add_ref:
meter->refcnt++;
+ return meter;
+}
+
+static int
+__mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ int err = 0;
+
if (meter->params.mode != params->mode || meter->params.rate != params->rate ||
meter->params.burst != params->burst) {
err = mlx5e_tc_meter_modify(mdev, meter, params);
if (err)
- goto err_update;
+ goto out;
meter->params.mode = params->mode;
meter->params.rate = params->rate;
meter->params.burst = params->burst;
}
- mutex_unlock(&flow_meters->sync_lock);
- return meter;
+out:
+ return err;
+}
-err_update:
- if (--meter->refcnt == 0) {
- hash_del(&meter->hlist);
- __mlx5e_flow_meter_free(meter);
- }
-err_alloc:
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&flow_meters->sync_lock);
+ err = __mlx5e_tc_meter_update(meter, params);
mutex_unlock(&flow_meters->sync_lock);
- return ERR_PTR(err);
+ return err;
}
-void
-mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
{
- struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&flow_meters->sync_lock);
- if (--meter->refcnt == 0) {
- hash_del(&meter->hlist);
- __mlx5e_flow_meter_free(meter);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ if (IS_ERR(meter)) {
+ meter = mlx5e_tc_meter_alloc(flow_meters, params);
+ if (IS_ERR(meter)) {
+ err = PTR_ERR(meter);
+ goto err_get;
+ }
}
+
+ err = __mlx5e_tc_meter_update(meter, params);
+ if (err)
+ goto err_update;
+
mutex_unlock(&flow_meters->sync_lock);
+ return meter;
+
+err_update:
+ __mlx5e_tc_meter_put(meter);
+err_get:
+ mutex_unlock(&flow_meters->sync_lock);
+ return ERR_PTR(err);
}
-struct mlx5_flow_table *
-mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters)
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters)
{
- return mlx5e_post_meter_get_ft(flow_meters->post_meter);
+ return flow_meters->ns_type;
}
struct mlx5e_flow_meters *
@@ -432,12 +531,6 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv,
goto err_sq;
}
- flow_meters->post_meter = mlx5e_post_meter_init(priv, ns_type, post_act);
- if (IS_ERR(flow_meters->post_meter)) {
- err = PTR_ERR(flow_meters->post_meter);
- goto err_post_meter;
- }
-
mutex_init(&flow_meters->sync_lock);
INIT_LIST_HEAD(&flow_meters->partial_list);
INIT_LIST_HEAD(&flow_meters->full_list);
@@ -451,8 +544,6 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv,
return flow_meters;
-err_post_meter:
- mlx5_aso_destroy(flow_meters->aso);
err_sq:
mlx5_core_dealloc_pd(mdev, flow_meters->pdn);
err_out:
@@ -466,9 +557,23 @@ mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters)
if (IS_ERR_OR_NULL(flow_meters))
return;
- mlx5e_post_meter_cleanup(flow_meters->post_meter);
mlx5_aso_destroy(flow_meters->aso);
mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn);
-
kfree(flow_meters);
}
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse)
+{
+ u64 bytes1, packets1, lastuse1;
+ u64 bytes2, packets2, lastuse2;
+
+ mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1);
+ mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2);
+
+ *bytes = bytes1 + bytes2;
+ *packets = packets1 + packets2;
+ *drops = packets2;
+ *lastuse = max_t(u64, lastuse1, lastuse2);
+}
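Each meter now owns a green and a red flow counter, and mlx5e_tc_meter_get_stats() folds the two into the values reported to tc: bytes and packets are the sum of both colors, drops are the red (policed) packets, and lastuse is the most recent of the two. A standalone sketch of that folding, with illustrative names only:

#include <stdint.h>

/* Sketch only: fold per-color counters into tc-style stats. */
struct color_counter { uint64_t bytes, packets, lastuse; };

static void meter_fold_stats(const struct color_counter *green,
                             const struct color_counter *red,
                             uint64_t *bytes, uint64_t *packets,
                             uint64_t *drops, uint64_t *lastuse)
{
        *bytes   = green->bytes + red->bytes;
        *packets = green->packets + red->packets;
        *drops   = red->packets;        /* red-colored packets were dropped */
        *lastuse = green->lastuse > red->lastuse ? green->lastuse : red->lastuse;
}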
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 78885db5dc7d..6de6e8a16327 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -4,6 +4,7 @@
#ifndef __MLX5_EN_FLOW_METER_H__
#define __MLX5_EN_FLOW_METER_H__
+struct mlx5e_post_meter_priv;
struct mlx5e_flow_meter_aso_obj;
struct mlx5e_flow_meters;
struct mlx5_flow_attr;
@@ -30,11 +31,15 @@ struct mlx5e_flow_meter_handle {
int refcnt;
struct hlist_node hlist;
struct mlx5e_flow_meter_params params;
+
+ struct mlx5_fc *green_counter;
+ struct mlx5_fc *red_counter;
};
struct mlx5e_meter_attr {
struct mlx5e_flow_meter_params params;
struct mlx5e_flow_meter_handle *meter;
+ struct mlx5e_post_meter_priv *post_meter;
};
int
@@ -46,9 +51,14 @@ struct mlx5e_flow_meter_handle *
mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
void
mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter);
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params);
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
-struct mlx5_flow_table *
-mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters);
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters);
struct mlx5e_flow_meters *
mlx5e_flow_meters_init(struct mlx5e_priv *priv,
@@ -57,4 +67,8 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv,
void
mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters);
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+
#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
index efa20356764e..8b77e822810e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -84,9 +84,11 @@ mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
static int
mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
struct mlx5e_post_meter_priv *post_meter,
- struct mlx5e_post_act *post_act)
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
{
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
@@ -98,10 +100,13 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter_id = mlx5_fc_id(red_counter);
- rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, NULL, 0);
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1);
if (IS_ERR(rule)) {
mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
err = PTR_ERR(rule);
@@ -111,11 +116,14 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5e_tc_post_act_get_ft(post_act);
-
- rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, &dest, 1);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = mlx5e_tc_post_act_get_ft(post_act);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(green_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
err = PTR_ERR(rule);
@@ -155,7 +163,9 @@ mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
- struct mlx5e_post_act *post_act)
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
{
struct mlx5e_post_meter_priv *post_meter;
int err;
@@ -172,7 +182,8 @@ mlx5e_post_meter_init(struct mlx5e_priv *priv,
if (err)
goto err_fg;
- err = mlx5e_post_meter_rules_create(priv, post_meter, post_act);
+ err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter,
+ red_counter);
if (err)
goto err_rules;
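The green rule now forwards to the post-action table and counts in the same rule by passing a two-entry destination array, while the red rule keeps a single counter destination next to its drop action. A simplified sketch of building such a destination array (stand-in types, not the mlx5 flow API):

#include <stdint.h>

/* Sketch only: one forward destination plus one counter destination. */
enum dest_type { DEST_FLOW_TABLE, DEST_COUNTER };

struct flow_dest {
        enum dest_type type;
        union {
                void    *ft;            /* next flow table */
                uint32_t counter_id;    /* flow counter to increment */
        };
};

static int build_green_dests(struct flow_dest dest[2],
                             void *post_act_ft, uint32_t green_counter_id)
{
        dest[0].type = DEST_FLOW_TABLE;
        dest[0].ft = post_act_ft;               /* keep forwarding the packet */
        dest[1].type = DEST_COUNTER;
        dest[1].counter_id = green_counter_id;  /* and count it */
        return 2;                               /* number of destinations */
}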
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
index c74f3cbd810d..34d0e4b9fc7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -20,7 +20,9 @@ mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
- struct mlx5e_post_act *post_act);
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter);
void
mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index d2bdfd6872bc..10c9a8a79d00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -11,7 +11,6 @@
#define MLX5E_TC_MAX_SPLITS 1
-#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
@@ -44,6 +43,8 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_act_parse_state parse_state;
};
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
+
/* Helper struct for accessing a struct containing list_head array.
* Containing struct
* |- Helper array
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 04c0a5e1c89a..1839f1ab1ddd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -194,4 +194,14 @@ static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
}
+
+static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
+{
+ return mlx5e_ktls_init_tx(priv);
+}
+
+static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
+{
+ mlx5e_ktls_cleanup_tx(priv);
+}
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 3ae6067c7e6b..20a4f1e585af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -86,7 +86,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -158,10 +158,10 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule;
int err = 0;
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -267,7 +267,7 @@ out:
static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
+ struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -277,7 +277,7 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -326,10 +326,10 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- dest.ft = priv->fs.accel_tcp->tables[i].t;
+ dest.ft = priv->fs->accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -344,7 +344,7 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
{
struct mlx5e_accel_fs_tcp *fs_tcp;
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
@@ -357,7 +357,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
{
int i;
- if (!priv->fs.accel_tcp)
+ if (!priv->fs->accel_tcp)
return;
accel_fs_tcp_disable(priv);
@@ -365,8 +365,8 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(priv, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(priv->fs->accel_tcp);
+ priv->fs->accel_tcp = NULL;
}
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
@@ -376,8 +376,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
- if (!priv->fs.accel_tcp)
+ priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
+ if (!priv->fs->accel_tcp)
return -ENOMEM;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
@@ -396,7 +396,7 @@ err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(priv, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(priv->fs->accel_tcp);
+ priv->fs->accel_tcp = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 8315e8f603d7..f8113fd23265 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -184,13 +184,13 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
fs_prot = &accel_esp->fs_prot[type];
fs_prot->default_dest =
- mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
@@ -205,7 +205,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -249,7 +249,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
@@ -271,7 +271,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index d016624fbc9d..948400dee525 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -42,6 +42,8 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
@@ -62,6 +64,8 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
atomic64_t tx_tls_del;
+ atomic64_t tx_tls_pool_alloc;
+ atomic64_t tx_tls_pool_free;
atomic64_t rx_tls_ctx;
atomic64_t rx_tls_del;
};
@@ -69,6 +73,7 @@ struct mlx5e_tls_sw_stats {
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
+ struct mlx5e_tls_tx_pool *tx_pool;
};
int mlx5e_ktls_init(struct mlx5e_priv *priv);
@@ -83,6 +88,15 @@ static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
+static inline int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+}
+
static inline int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 2ab46c4247ff..7c1c0eb16787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -41,6 +41,8 @@
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_free) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index fba21edf88d8..6b6c7044b64a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -35,30 +35,70 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += 1; /* fence nop */
return stop_room;
}
+static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
+{
+ MLX5_SET(tisc, tisc, tls_en, 1);
+ MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+}
+
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
- MLX5_SET(tisc, tisc, tls_en, 1);
+ return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
+}
+
+static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
- return mlx5e_create_tis(mdev, in, tisn);
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
}
struct mlx5e_ktls_offload_context_tx {
- struct tls_offload_context_tx *tx_ctx;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
- struct mlx5e_tls_sw_stats *sw_stats;
+ /* fast path */
u32 expected_seq;
u32 tisn;
- u32 key_id;
bool ctx_post_pending;
+ /* control / resync */
+ struct list_head list_node; /* member of the pool */
+ struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ struct tls_offload_context_tx *tx_ctx;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ u32 key_id;
+ u8 create_err : 1;
};
static void
@@ -82,28 +122,368 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
return *ctx;
}
+/* struct for callback API management */
+struct mlx5e_async_ctx {
+ struct mlx5_async_work context;
+ struct mlx5_async_ctx async_ctx;
+ struct work_struct work;
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct completion complete;
+ int err;
+ union {
+ u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
+ u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
+ };
+};
+
+static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+{
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+ if (!bulk_async)
+ return NULL;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
+ init_completion(&async->complete);
+ }
+
+ return bulk_async;
+}
+
+static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
+ }
+ kvfree(bulk_async);
+}
+
+static void create_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ if (status) {
+ async->err = status;
+ priv_tx->create_err = 1;
+ goto out;
+ }
+
+ priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
+out:
+ complete(&async->complete);
+}
+
+static void destroy_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ complete(&async->complete);
+ kfree(priv_tx);
+}
+
+static struct mlx5e_ktls_offload_context_tx *
+mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
+ struct mlx5e_async_ctx *async)
+{
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ int err;
+
+ priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
+ if (!priv_tx)
+ return ERR_PTR(-ENOMEM);
+
+ priv_tx->mdev = mdev;
+ priv_tx->sw_stats = sw_stats;
+
+ if (!async) {
+ err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
+ if (err)
+ goto err_out;
+ } else {
+ async->priv_tx = priv_tx;
+ err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+ async->out_create, sizeof(async->out_create),
+ create_tis_callback, &async->context);
+ if (err)
+ goto err_out;
+ }
+
+ return priv_tx;
+
+err_out:
+ kfree(priv_tx);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ struct mlx5e_async_ctx *async)
+{
+ if (priv_tx->create_err) {
+ complete(&async->complete);
+ kfree(priv_tx);
+ return;
+ }
+ async->priv_tx = priv_tx;
+ mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
+ &async->async_ctx,
+ async->out_destroy, sizeof(async->out_destroy),
+ destroy_tis_callback, &async->context);
+}
+
+static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
+ struct list_head *list, int size)
+{
+ struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = mlx5e_bulk_async_init(mdev, size);
+ if (!bulk_async)
+ return;
+
+ i = 0;
+ list_for_each_entry(obj, list, list_node) {
+ mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+ i++;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ wait_for_completion(&async->complete);
+ }
+ mlx5e_bulk_async_cleanup(bulk_async, size);
+}
+
+/* Recycling pool API */
+
+#define MLX5E_TLS_TX_POOL_BULK (16)
+#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
+#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)
+
+struct mlx5e_tls_tx_pool {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ struct mutex lock; /* Protects access to the pool */
+ struct list_head list;
+ size_t size;
+
+ struct workqueue_struct *wq;
+ struct work_struct create_work;
+ struct work_struct destroy_work;
+};
+
+static void create_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, create_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_async_ctx *bulk_async;
+ LIST_HEAD(local_list);
+ int i, j, err = 0;
+
+ bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
+ if (!bulk_async)
+ return;
+
+ for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+ list_add(&obj->list_node, &local_list);
+ }
+
+ for (j = 0; j < i; j++) {
+ struct mlx5e_async_ctx *async = &bulk_async[j];
+
+ wait_for_completion(&async->complete);
+ if (!err && async->err)
+ err = async->err;
+ }
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
+ mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+ if (err)
+ goto err_out;
+
+ mutex_lock(&pool->lock);
+ if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ goto err_out;
+ }
+ list_splice(&local_list, &pool->list);
+ pool->size += MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+ return;
+
+err_out:
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static void destroy_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ mutex_lock(&pool->lock);
+ if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ return;
+ }
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
+ struct mlx5e_tls_sw_stats *sw_stats)
+{
+ struct mlx5e_tls_tx_pool *pool;
+
+ BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);
+
+ pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
+ if (!pool->wq)
+ goto err_free;
+
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->lock);
+
+ INIT_WORK(&pool->create_work, create_work);
+ INIT_WORK(&pool->destroy_work, destroy_work);
+
+ pool->mdev = mdev;
+ pool->sw_stats = sw_stats;
+
+ return pool;
+
+err_free:
+ kvfree(pool);
+ return NULL;
+}
+
+static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ }
+ if (pool->size) {
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
+ atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
+ }
+}
+
+static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ mlx5e_tls_tx_pool_list_cleanup(pool);
+ destroy_workqueue(pool->wq);
+ kvfree(pool);
+}
+
+static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
+{
+ mutex_lock(&pool->lock);
+ list_add(&obj->list_node, &pool->list);
+ if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, &pool->destroy_work);
+ mutex_unlock(&pool->lock);
+}
+
+static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
+{
+ struct mlx5e_ktls_offload_context_tx *obj;
+
+ mutex_lock(&pool->lock);
+ if (unlikely(pool->size == 0)) {
+ /* pool is empty:
+ * - trigger the populating work, and
+ * - serve the current context via the regular blocking API.
+ */
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
+ if (!IS_ERR(obj))
+ atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
+ return obj;
+ }
+
+ obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
+ list_node);
+ list_del(&obj->list_node);
+ if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ return obj;
+}
+
+/* End of pool API */
+
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct mlx5e_tls_tx_pool *pool;
struct tls_context *tls_ctx;
- struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int err;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
- priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
- if (!priv_tx)
- return -ENOMEM;
+ priv_tx = pool_pop(pool);
+ if (IS_ERR(priv_tx))
+ return PTR_ERR(priv_tx);
- err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
+ err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
if (err)
goto err_create_key;
- priv_tx->sw_stats = &priv->tls->sw_stats;
priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,36 +491,29 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
- err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
- if (err)
- goto err_create_tis;
-
priv_tx->ctx_post_pending = true;
atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
return 0;
-err_create_tis:
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
err_create_key:
- kfree(priv_tx);
+ pool_push(pool, priv_tx);
return err;
}
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
- struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_tx_pool *pool;
struct mlx5e_priv *priv;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
- mlx5e_destroy_tis(mdev, priv_tx->tisn);
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
- kfree(priv_tx);
+ mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+ pool_push(pool, priv_tx);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
@@ -201,6 +574,16 @@ post_progress_params(struct mlx5e_txqsq *sq,
sq->pc += num_wqebbs;
}
+static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ tx_fill_wi(sq, pi, 1, 0, NULL);
+
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+}
+
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
@@ -212,6 +595,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
post_static_params(sq, priv_tx, fence_first_post);
post_progress_params(sq, priv_tx, progress_fence);
+ tx_post_fence_nop(sq);
}
struct tx_sync_info {
@@ -304,7 +688,7 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
}
static int
-tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
+tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_data_seg *dseg;
@@ -326,7 +710,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
cseg->tis_tir_num = cpu_to_be32(tisn << 8);
- cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -361,67 +744,39 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats->tls_dump_bytes += wi->num_bytes;
}
-static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
- tx_fill_wi(sq, pi, 1, 0, NULL);
-
- mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
-}
-
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_txqsq *sq,
int datalen,
u32 seq)
{
- struct mlx5e_sq_stats *stats = sq->stats;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- int i = 0;
+ int i;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
- if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
- if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
- stats->tls_skip_no_sync_data++;
- return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
- }
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
+ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
+ /* We might get here with ret == FAIL if a retransmission
+ * reaches the driver after the relevant record is acked.
+ * It should be safe to drop the packet in this case.
*/
- stats->tls_drop_no_sync_data++;
- goto err_out;
- }
-
- stats->tls_ooo++;
+ return ret;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
- /* If no dump WQE was sent, we need to have a fence NOP WQE before the
- * actual data xmit.
- */
- if (!info.nr_frags) {
- tx_post_fence_nop(sq);
- return MLX5E_KTLS_SYNC_DONE;
- }
-
- for (; i < info.nr_frags; i++) {
+ for (i = 0; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
orig_fsz = skb_frag_size(f);
do {
- bool fence = !(i || frag_offset);
unsigned int fsz;
n++;
fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
skb_frag_size_set(f, fsz);
- if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+ if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
page_ref_add(skb_frag_page(f), n - 1);
goto err_out;
}
@@ -469,24 +824,27 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
- if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
+ if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- }
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(priv_tx->expected_seq != seq)) {
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+ stats->tls_ooo++;
+
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ stats->tls_skip_no_sync_data++;
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
- fallthrough;
+ goto err_out;
case MLX5E_KTLS_SYNC_FAIL:
+ stats->tls_drop_no_sync_data++;
goto err_out;
}
}
@@ -505,3 +863,24 @@ err_out:
dev_kfree_skb_any(skb);
return false;
}
+
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return 0;
+
+ priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
+ if (!priv->tls->tx_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return;
+
+ mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
+ priv->tls->tx_pool = NULL;
+}
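The new TX pool recycles pre-created TIS contexts across connections: pool_pop() kicks the refill work when the pool is empty or crosses the low watermark (and falls back to a blocking allocation when empty), while pool_push() kicks the trim work at the high watermark. A standalone model of that watermark behaviour, without workqueues and with hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define POOL_LOW   64
#define POOL_HIGH  256

/* Sketch only: a watermark-driven recycling pool. queue_work() is replaced
 * by two flags the caller would act on. */
struct obj { struct obj *next; };

struct pool {
        pthread_mutex_t lock;
        struct obj *head;
        size_t size;
        bool need_refill;       /* stand-in for queue_work(create_work) */
        bool need_trim;         /* stand-in for queue_work(destroy_work) */
};

static struct obj *pool_pop(struct pool *p)
{
        struct obj *o = NULL;

        pthread_mutex_lock(&p->lock);
        if (p->size == 0) {
                p->need_refill = true;          /* empty: kick the refill path */
        } else {
                o = p->head;
                p->head = o->next;
                if (--p->size == POOL_LOW)
                        p->need_refill = true;  /* crossed the low watermark */
        }
        pthread_mutex_unlock(&p->lock);
        return o;       /* NULL means: allocate directly via the blocking path */
}

static void pool_push(struct pool *p, struct obj *o)
{
        pthread_mutex_lock(&p->lock);
        o->next = p->head;
        p->head = o;
        if (++p->size == POOL_HIGH)
                p->need_trim = true;            /* crossed the high watermark */
        pthread_mutex_unlock(&p->lock);
}

Recycling amortizes TIS create/destroy over many short-lived TLS connections, which is also why the pool is trimmed only in bulk once it grows past the high watermark.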
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 49cca6bd49a1..cd7f245dcf14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -147,9 +147,9 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
+ dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
@@ -172,10 +172,10 @@ static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
int i;
arfs_del_rules(priv);
- destroy_workqueue(priv->fs.arfs->wq);
+ destroy_workqueue(priv->fs->arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
}
}
@@ -185,13 +185,13 @@ void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
return;
_mlx5e_cleanup_tables(priv);
- kvfree(priv->fs.arfs);
+ kvfree(priv->fs->arfs);
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
+ struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
@@ -321,7 +321,7 @@ out:
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -332,7 +332,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -361,14 +361,14 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
- priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
- if (!priv->fs.arfs)
+ priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
+ if (!priv->fs->arfs)
return -ENOMEM;
- spin_lock_init(&priv->fs.arfs->arfs_lock);
- INIT_LIST_HEAD(&priv->fs.arfs->rules);
- priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs.arfs->wq)
+ spin_lock_init(&priv->fs->arfs->arfs_lock);
+ INIT_LIST_HEAD(&priv->fs->arfs->rules);
+ priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs->arfs->wq)
goto err;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
@@ -381,7 +381,7 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
err_des:
_mlx5e_cleanup_tables(priv);
err:
- kvfree(priv->fs.arfs);
+ kvfree(priv->fs->arfs);
return err;
}
@@ -396,8 +396,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +408,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -425,12 +425,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -474,7 +474,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -592,9 +592,9 @@ static void arfs_handle_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -647,7 +647,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,7 +691,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
struct flow_keys fk;
@@ -725,7 +725,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
+ queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d2f0773f95c6..e2a9b9be5c1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -37,13 +37,13 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en.h"
-#include "en_rep.h"
+#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type);
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai);
enum {
@@ -132,9 +132,8 @@ struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
return vlan->ft.t;
}
-static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
+static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
{
- struct net_device *ndev = priv->netdev;
int max_list_size;
int list_size;
u16 *vlans;
@@ -143,15 +142,15 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
list_size++;
- max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
+ max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
if (list_size > max_list_size) {
- netdev_warn(ndev,
- "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
- list_size, max_list_size);
+ mlx5_core_warn(fs->mdev,
+ "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+ list_size, max_list_size);
list_size = max_list_size;
}
@@ -160,16 +159,16 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
}
- err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
+ err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
if (err)
- netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
- err);
+ mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n",
+ err);
kvfree(vlans);
return err;
@@ -183,18 +182,18 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
-static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.l2.ft.t;
+ dest.ft = fs->l2.ft.t;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -204,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
* disabled in match value means both S & C tags
* don't exist (untagged of both)
*/
- rule_p = &priv->fs.vlan->untagged_rule;
+ rule_p = &fs->vlan->untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- rule_p = &priv->fs.vlan->any_cvlan_rule;
+ rule_p = &fs->vlan->any_cvlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- rule_p = &priv->fs.vlan->any_svlan_rule;
+ rule_p = &fs->vlan->any_svlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- rule_p = &priv->fs.vlan->active_svlans_rule[vid];
+ rule_p = &fs->vlan->active_svlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -231,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
vid);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
- rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
+ rule_p = &fs->vlan->active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -250,13 +249,13 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__);
}
return err;
}
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
struct mlx5_flow_spec *spec;
@@ -267,68 +266,68 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
return -ENOMEM;
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
- err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
+ err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);
kvfree(spec);
return err;
}
-static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
- enum mlx5e_vlan_rule_type rule_type, u16 vid)
+static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->fs.vlan->untagged_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
- priv->fs.vlan->untagged_rule = NULL;
+ if (fs->vlan->untagged_rule) {
+ mlx5_del_flow_rules(fs->vlan->untagged_rule);
+ fs->vlan->untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- if (priv->fs.vlan->any_cvlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
- priv->fs.vlan->any_cvlan_rule = NULL;
+ if (fs->vlan->any_cvlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_cvlan_rule);
+ fs->vlan->any_cvlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- if (priv->fs.vlan->any_svlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
- priv->fs.vlan->any_svlan_rule = NULL;
+ if (fs->vlan->any_svlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_svlan_rule);
+ fs->vlan->any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- if (priv->fs.vlan->active_svlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
- priv->fs.vlan->active_svlans_rule[vid] = NULL;
+ if (fs->vlan->active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]);
+ fs->vlan->active_svlans_rule[vid] = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- if (priv->fs.vlan->active_cvlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
- priv->fs.vlan->active_cvlans_rule[vid] = NULL;
+ if (fs->vlan->active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]);
+ fs->vlan->active_cvlans_rule[vid] = NULL;
}
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
break;
}
}
-static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
{
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
-static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
+static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
{
int err;
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
if (err)
return err;
- return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static struct mlx5_flow_handle *
@@ -354,101 +353,101 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.vlan->trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
- __func__, err);
+ priv->fs->vlan->trap_rule = NULL;
+ mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.vlan->trap_rule = rule;
+ priv->fs->vlan->trap_rule = rule;
return 0;
}
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan->trap_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
- priv->fs.vlan->trap_rule = NULL;
+ if (priv->fs->vlan->trap_rule) {
+ mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
+ priv->fs->vlan->trap_rule = NULL;
}
}
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.l2.trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
- __func__, err);
+ priv->fs->l2.trap_rule = NULL;
+ mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.l2.trap_rule = rule;
+ priv->fs->l2.trap_rule = rule;
return 0;
}
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.l2.trap_rule) {
- mlx5_del_flow_rules(priv->fs.l2.trap_rule);
- priv->fs.l2.trap_rule = NULL;
+ if (priv->fs->l2.trap_rule) {
+ mlx5_del_flow_rules(priv->fs->l2.trap_rule);
+ priv->fs->l2.trap_rule = NULL;
}
}
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan->cvlan_filter_disabled)
+ if (!priv->fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = false;
+ priv->fs->vlan->cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan->cvlan_filter_disabled)
+ if (priv->fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = true;
+ priv->fs->vlan->cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
int err;
- set_bit(vid, priv->fs.vlan->active_cvlans);
+ set_bit(vid, fs->vlan->active_cvlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
if (err)
- clear_bit(vid, priv->fs.vlan->active_cvlans);
+ clear_bit(vid, fs->vlan->active_cvlans);
return err;
}
-static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev, u16 vid)
{
- struct net_device *netdev = priv->netdev;
int err;
- set_bit(vid, priv->fs.vlan->active_svlans);
+ set_bit(vid, fs->vlan->active_svlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
if (err) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
+ clear_bit(vid, fs->vlan->active_svlans);
return err;
}
@@ -457,86 +456,91 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
return err;
}
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q)
- return mlx5e_vlan_rx_add_cvid(priv, vid);
+ return mlx5e_vlan_rx_add_cvid(fs, vid);
else if (be16_to_cpu(proto) == ETH_P_8021AD)
- return mlx5e_vlan_rx_add_svid(priv, vid);
+ return mlx5e_vlan_rx_add_svid(fs, netdev, vid);
return -EOPNOTSUPP;
}
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q) {
- clear_bit(vid, priv->fs.vlan->active_cvlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ clear_bit(vid, fs->vlan->active_cvlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
- netdev_update_features(dev);
+ clear_bit(vid, fs->vlan->active_svlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_update_features(netdev);
}
return 0;
}
-static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_add_any_vid_rules(priv);
+ if (fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_add_any_vid_rules(fs);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
int i;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+ WARN_ON_ONCE(priv->fs->state_destroy);
mlx5e_remove_vlan_trap(priv);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_del_any_vid_rules(priv);
+ if (priv->fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_del_any_vid_rules(priv->fs);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_hash_node *hn)
{
u8 action = hn->action;
@@ -547,9 +551,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
switch (action) {
case MLX5E_ACTION_ADD:
- mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
if (!is_multicast_ether_addr(mac_addr)) {
- l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
+ l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
hn->mpfs = !l2_err;
}
hn->action = MLX5E_ACTION_NONE;
@@ -557,52 +561,50 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
case MLX5E_ACTION_DEL:
if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
- l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
- mlx5e_del_l2_flow_rule(priv, &hn->ai);
+ l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
+ mlx5e_del_l2_flow_rule(fs, &hn->ai);
mlx5e_del_l2_from_hash(hn);
break;
}
if (l2_err)
- netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
+ mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n",
+ action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
-static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(netdev);
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
- priv->netdev->dev_addr);
-
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev);
}
-static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
+static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
+ struct net_device *ndev,
u8 addr_array[][ETH_ALEN], int size)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
- struct net_device *ndev = priv->netdev;
struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list;
struct hlist_node *tmp;
int i = 0;
int hi;
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr);
- else if (priv->fs.l2.broadcast_enabled)
+ else if (fs->l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -614,7 +616,8 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
}
}
-static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
+static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
int list_type)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
@@ -627,19 +630,19 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err;
int hi;
- size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
+ size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ?
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++;
if (size > max_size) {
- netdev_warn(priv->netdev,
- "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
- is_uc ? "UC" : "MC", size, max_size);
+ mlx5_core_warn(fs->mdev,
+ "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+ is_uc ? "UC" : "MC", size, max_size);
size = max_size;
}
@@ -649,65 +652,67 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
err = -ENOMEM;
goto out;
}
- mlx5e_fill_addr_array(priv, list_type, addr_array, size);
+ mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
}
- err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
+ err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
if (err)
- netdev_err(priv->netdev,
- "Failed to modify vport %s list err(%d)\n",
- is_uc ? "UC" : "MC", err);
+ mlx5_core_err(fs->mdev,
+ "Failed to modify vport %s list err(%d)\n",
+ is_uc ? "UC" : "MC", err);
kfree(addr_array);
}
-static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
+static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_l2_table *ea = &priv->fs.l2;
+ struct mlx5e_l2_table *ea = &fs->l2;
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
- mlx5_modify_nic_vport_promisc(priv->mdev, 0,
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
+ mlx5_modify_nic_vport_promisc(fs->mdev, 0,
ea->allmulti_enabled,
ea->promisc_enabled);
}
-static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
+ mlx5e_execute_l2_action(fs, hn);
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
+ mlx5e_execute_l2_action(fs, hn);
}
-static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
- mlx5e_sync_netdev_addr(priv);
+ if (fs->state_destroy)
+ mlx5e_sync_netdev_addr(fs, netdev);
- mlx5e_apply_netdev_addr(priv);
+ mlx5e_apply_netdev_addr(fs);
}
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
-static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
+static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
{
- struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+ struct mlx5_flow_table *ft = fs->promisc.ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -718,22 +723,22 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
if (!spec)
return -ENOMEM;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
- rule_p = &priv->fs.promisc.rule;
+ rule_p = &fs->promisc.rule;
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
+ mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
}
-static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
+static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+ struct mlx5e_flow_table *ft = &fs->promisc.ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -742,14 +747,14 @@ static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
+ mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err);
return err;
}
- err = mlx5e_add_promisc_rule(priv);
+ err = mlx5e_add_promisc_rule(fs);
if (err)
goto err_destroy_promisc_table;
@@ -762,34 +767,31 @@ err_destroy_promisc_table:
return err;
}
-static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
+static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
+ if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
return;
- mlx5_del_flow_rules(priv->fs.promisc.rule);
- priv->fs.promisc.rule = NULL;
+ mlx5_del_flow_rules(fs->promisc.rule);
+ fs->promisc.rule = NULL;
}
-static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+ if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
return;
- mlx5e_del_promisc_rule(priv);
- mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
- priv->fs.promisc.ft.t = NULL;
+ mlx5e_del_promisc_rule(fs);
+ mlx5_destroy_flow_table(fs->promisc.ft.t);
+ fs->promisc.ft.t = NULL;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
+ struct mlx5e_l2_table *ea = &fs->l2;
- struct mlx5e_l2_table *ea = &priv->fs.l2;
- struct net_device *ndev = priv->netdev;
-
- bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
- bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
- bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool rx_mode_enable = fs->state_destroy;
+ bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
@@ -801,32 +803,32 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
int err;
if (enable_promisc) {
- err = mlx5e_create_promisc_table(priv);
+ err = mlx5e_create_promisc_table(fs);
if (err)
enable_promisc = false;
- if (!priv->channels.params.vlan_strip_disable && !err)
- netdev_warn_once(ndev,
- "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
+ if (!fs->vlan_strip_disable && !err)
+ mlx5_core_warn_once(fs->mdev,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
}
if (enable_allmulti)
- mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
- mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);
- mlx5e_handle_netdev_addr(priv);
+ mlx5e_handle_netdev_addr(fs, netdev);
if (disable_broadcast)
- mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
+ mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
if (disable_allmulti)
- mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
+ mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
if (disable_promisc)
- mlx5e_destroy_promisc_table(priv);
+ mlx5e_destroy_promisc_table(fs);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
ea->broadcast_enabled = broadcast_enabled;
- mlx5e_vport_context_update(priv);
+ mlx5e_vport_context_update(fs, netdev);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
@@ -841,9 +843,9 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0;
}
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
- ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
+ ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
@@ -861,7 +863,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -884,7 +886,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -898,18 +900,18 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
}
ttc_params->inner_ttc = tunnel;
- if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
- mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
+ mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
}
}
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai)
{
if (!IS_ERR_OR_NULL(ai->rule)) {
@@ -918,10 +920,10 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
}
}
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
@@ -939,7 +941,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
switch (type) {
case MLX5E_FULLMATCH:
@@ -957,8 +959,8 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
- netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
- __func__, mv_dmac);
+ mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n",
+ __func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
@@ -1044,12 +1046,12 @@ err_destroy_groups:
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
- mlx5e_destroy_flow_table(&priv->fs.l2.ft);
+ mlx5e_destroy_flow_table(&priv->fs->l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
- struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+ struct mlx5e_l2_table *l2_table = &priv->fs->l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -1060,7 +1062,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_L2_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1180,20 +1182,20 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
return err;
}
-static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
+static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft;
int err;
- ft = &priv->fs.vlan->ft;
+ ft = &fs->vlan->ft;
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
ft_attr.level = MLX5E_VLAN_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t))
return PTR_ERR(ft->t);
@@ -1207,7 +1209,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
if (err)
goto err_free_g;
- mlx5e_add_vlan_rules(priv);
+ mlx5e_fs_add_vlan_rules(fs);
return 0;
@@ -1222,33 +1224,33 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+ mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return;
- mlx5_destroy_ttc_table(priv->fs.inner_ttc);
+ mlx5_destroy_ttc_table(priv->fs->inner_ttc);
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
}
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct ttc_params ttc_params = {};
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return 0;
mlx5e_set_inner_ttc_params(priv, &ttc_params);
- priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
- &ttc_params);
- if (IS_ERR(priv->fs.inner_ttc))
- return PTR_ERR(priv->fs.inner_ttc);
+ priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev,
+ &ttc_params);
+ if (IS_ERR(priv->fs->inner_ttc))
+ return PTR_ERR(priv->fs->inner_ttc);
return 0;
}
@@ -1257,9 +1259,9 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
mlx5e_set_ttc_params(priv, &ttc_params, true);
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc))
- return PTR_ERR(priv->fs.ttc);
+ priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params);
+ if (IS_ERR(priv->fs->ttc))
+ return PTR_ERR(priv->fs->ttc);
return 0;
}
@@ -1267,45 +1269,44 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!priv->fs->ns)
return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n",
+ err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
err = mlx5e_create_inner_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev,
- "Failed to create inner ttc table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev,
+ "Failed to create inner ttc table, err=%d\n", err);
goto err_destroy_arfs_tables;
}
err = mlx5e_create_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n",
+ err);
goto err_destroy_inner_ttc_table;
}
err = mlx5e_create_l2_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n",
+ err);
goto err_destroy_ttc_table;
}
- err = mlx5e_create_vlan_table(priv);
+ err = mlx5e_fs_create_vlan_table(priv->fs);
if (err) {
- netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n",
+ err);
goto err_destroy_l2_table;
}
@@ -1342,16 +1343,69 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
}
-int mlx5e_fs_init(struct mlx5e_priv *priv)
+static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
+ if (!fs->vlan)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
{
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
+ kvfree(fs->vlan);
+}
+
+static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->tc = mlx5e_tc_table_alloc();
+ if (IS_ERR(fs->tc))
return -ENOMEM;
return 0;
}
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_tc_table_free(fs->tc);
+}
+
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy)
+{
+ struct mlx5e_flow_steering *fs;
+ int err;
+
+ fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ goto err;
+
+ fs->mdev = mdev;
+ fs->state_destroy = state_destroy;
+ if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
+ err = mlx5e_fs_vlan_alloc(fs);
+ if (err)
+ goto err_free_fs;
+ }
+
+ if (mlx5e_profile_feature_cap(profile, FS_TC)) {
+ err = mlx5e_fs_tc_alloc(fs);
+ if (err)
+ goto err_free_vlan;
+ }
+
+ return fs;
+err_free_vlan:
+ mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
+err:
+ return NULL;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
+ mlx5e_fs_tc_free(fs);
+ mlx5e_fs_vlan_free(fs);
+ kvfree(fs);
}
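With en_fs.c converted, the flow-steering context is allocated and freed explicitly instead of being embedded in struct mlx5e_priv. A minimal sketch of the new lifecycle from a profile's init path, mirroring the en_main.c and en_rep.c hunks that follow (error handling trimmed; the surrounding function and variable names are assumed for illustration):

	struct mlx5e_flow_steering *fs;

	fs = mlx5e_fs_init(priv->profile, mdev,
			   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
	if (!fs)
		return -ENOMEM;
	priv->fs = fs;

	/* ... create tables, attach the netdev, run ... */

	mlx5e_fs_cleanup(priv->fs);
	priv->fs = NULL;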
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 9466202fd97b..3e4bc7836ef4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -81,18 +81,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
- eth_ft = &priv->fs.ethtool.l2_ft[prio];
+ eth_ft = &priv->fs->ethtool.l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
@@ -383,14 +383,14 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_rule *iter;
- struct list_head *head = &priv->fs.ethtool.rules;
+ struct list_head *head = &priv->fs->ethtool.rules;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
- priv->fs.ethtool.tot_num_rules++;
+ priv->fs->ethtool.tot_num_rules++;
list_add(&rule->list, head);
}
@@ -507,7 +507,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
- priv->fs.ethtool.tot_num_rules--;
+ priv->fs->ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
@@ -517,7 +517,7 @@ static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
{
struct mlx5e_ethtool_rule *iter;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
@@ -788,7 +788,7 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
@@ -831,13 +831,13 @@ void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
- list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+ list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
del_ethtool_rule(priv, iter);
}
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
- INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+ INIT_LIST_HEAD(&priv->fs->ethtool.rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
@@ -963,7 +963,7 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ info->rule_cnt = priv->fs->ethtool.tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 180b2f418339..d858667736a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3144,6 +3144,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_mqprio_rl_free(priv->mqprio_rl);
priv->mqprio_rl = NULL;
}
+ mlx5e_accel_cleanup_tx(priv);
mlx5e_destroy_tises(priv);
}
@@ -3777,20 +3778,45 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
+ priv->fs->vlan_strip_disable = !enable;
priv->channels.params.vlan_strip_disable = !enable;
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
- if (err)
+ if (err) {
+ priv->fs->vlan_strip_disable = enable;
priv->channels.params.vlan_strip_disable = enable;
-
+ }
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
+}
+
#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
@@ -3888,8 +3914,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!priv->fs.vlan ||
- !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
+ if (!priv->fs->vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
@@ -5012,6 +5038,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_flow_steering *fs;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -5019,11 +5046,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
- err = mlx5e_fs_init(priv);
- if (err) {
+ fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!fs) {
+ err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
return err;
}
+ priv->fs = fs;
err = mlx5e_ipsec_init(priv);
if (err)
@@ -5042,7 +5072,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_fs_cleanup(priv);
+ mlx5e_fs_cleanup(priv->fs);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -5147,9 +5177,17 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
return err;
}
+ err = mlx5e_accel_init_tx(priv);
+ if (err)
+ goto err_destroy_tises;
+
mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+ return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
@@ -5157,7 +5195,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, netdev);
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
@@ -5242,7 +5280,9 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
- BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
+ BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};
static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
@@ -5292,6 +5332,14 @@ int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
}
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
+}
+
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
@@ -5469,6 +5517,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
int err;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
@@ -5528,6 +5578,8 @@ err_cleanup_tx:
out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
}
@@ -5537,6 +5589,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (profile->disable)
profile->disable(priv);
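The same two-line pattern that mirrors MLX5E_STATE_DESTROYING into fs->state_destroy now appears in attach, in its error path, and in detach. A possible consolidation, sketched purely for illustration — the helper name is an assumption and is not part of this patch:

	static void mlx5e_fs_sync_destroy_state(struct mlx5e_priv *priv)
	{
		if (priv->fs)
			priv->fs->state_destroy =
				!test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	}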
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index ae90b06d21e2..4c1599de652c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -718,6 +718,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
+ mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
}
@@ -728,8 +729,8 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
mlx5e_set_ttc_params(priv, &ttc_params, false);
@@ -738,9 +739,9 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc)) {
- err = PTR_ERR(priv->fs.ttc);
+ priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+ if (IS_ERR(priv->fs->ttc)) {
+ err = PTR_ERR(priv->fs->ttc);
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
@@ -760,7 +761,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
- rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
return 0;
}
@@ -835,11 +836,20 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+ }
+
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
@@ -873,13 +883,15 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -888,7 +900,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2e12280a936f..f154bda668ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -71,6 +71,30 @@
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
+struct mlx5e_tc_table {
+ /* Protects the dynamic assignment of the t parameter
+ * which is the nic tc root table.
+ */
+ struct mutex t_lock;
+ struct mlx5e_priv *priv;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_table *miss_t;
+ struct mlx5_fs_chains *chains;
+ struct mlx5e_post_act *post_act;
+
+ struct rhashtable ht;
+
+ struct mod_hdr_tbl mod_hdr;
+ struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
+
+ struct mlx5_tc_ct_priv *ct;
+ struct mapping_ctx *mapping;
+};
+
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
@@ -108,6 +132,24 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
+{
+ struct mlx5e_tc_table *tc;
+
+ tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
+ return tc ? tc : ERR_PTR(-ENOMEM);
+}
+
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
+{
+ kvfree(tc);
+}
+
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
+{
+ return tc->chains;
+}
+
/* To avoid false lock dependency warning set the tc_ht lock
* class different than the lock class of the ht being used when deleting
* last flow from a group and then deleting a group, we get into del_sw_flow_group()
@@ -280,7 +322,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}
- return priv->fs.tc.ct;
+ return priv->fs->tc->ct;
}
static struct mlx5e_tc_psample *
@@ -314,7 +356,7 @@ get_post_action(struct mlx5e_priv *priv)
return uplink_priv->post_act;
}
- return priv->fs.tc.post_act;
+ return priv->fs->tc->post_act;
}
struct mlx5_flow_handle *
@@ -356,19 +398,42 @@ static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
+ struct mlx5e_post_act *post_act = get_post_action(priv);
+ struct mlx5e_post_meter_priv *post_meter;
+ enum mlx5_flow_namespace_type ns_type;
struct mlx5e_flow_meter_handle *meter;
- meter = mlx5e_tc_meter_get(priv->mdev, &attr->meter_attr.params);
+ meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
return PTR_ERR(meter);
}
+ ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
+ post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
+ meter->red_counter);
+ if (IS_ERR(post_meter)) {
+ mlx5_core_err(priv->mdev, "Failed to init post meter\n");
+ goto err_meter_init;
+ }
+
attr->meter_attr.meter = meter;
- attr->dest_ft = mlx5e_tc_meter_get_post_meter_ft(meter->flow_meters);
+ attr->meter_attr.post_meter = post_meter;
+ attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
+
+err_meter_init:
+ mlx5e_tc_meter_put(meter);
+ return PTR_ERR(post_meter);
+}
+
+static void
+mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
+{
+ mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
+ mlx5e_tc_meter_put(attr->meter_attr.meter);
}
struct mlx5_flow_handle *
@@ -428,7 +493,7 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
if (attr->meter_attr.meter)
- mlx5e_tc_meter_put(attr->meter_attr.meter);
+ mlx5e_tc_del_flow_meter(attr);
}
int
@@ -546,7 +611,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
- &priv->fs.tc.mod_hdr;
+ &priv->fs->tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
@@ -764,7 +829,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
- mlx5_get_ttc_flow_table(priv->fs.ttc)->id);
+ mlx5_get_ttc_flow_table(priv->fs->ttc)->id);
return 0;
@@ -854,7 +919,7 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
- hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
@@ -869,10 +934,10 @@ static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
/* no more hairpin flows for us, release the hairpin pair */
- if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
@@ -956,10 +1021,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
@@ -971,7 +1036,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
return -ENOMEM;
}
@@ -983,9 +1048,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
- hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -1061,10 +1126,10 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
@@ -1073,6 +1138,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft;
int dest_ix = 0;
+ nic_chains = mlx5e_nic_chains(tc);
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = nic_attr->flow_tag;
@@ -1097,7 +1163,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
}
dest_ix++;
}
@@ -1125,7 +1191,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs.tc.t);
+ rule = ERR_CAST(priv->fs->tc->t);
goto err_ft_get;
}
}
@@ -1227,7 +1293,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);
mlx5_del_flow_rules(rule);
@@ -1244,7 +1310,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *attr = flow->attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
flow_flag_clear(flow, OFFLOADED);
@@ -1256,13 +1322,13 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
- mutex_lock(&priv->fs.tc.t_lock);
+ mutex_lock(&priv->fs->tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
- mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
- priv->fs.tc.t = NULL;
+ mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
+ priv->fs->tc->t = NULL;
}
- mutex_unlock(&priv->fs.tc.t_lock);
+ mutex_unlock(&priv->fs->tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -3998,7 +4064,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
- return &priv->fs.tc.ht;
+ return &priv->fs->tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
@@ -4717,11 +4783,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
+ mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
+ hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
@@ -4736,7 +4802,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5e_flow_steering *fs;
struct mlx5e_priv *peer_priv;
struct mlx5e_tc_table *tc;
struct mlx5e_priv *priv;
@@ -4747,8 +4812,7 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
- fs = container_of(tc, struct mlx5e_flow_steering, tc);
- priv = container_of(fs, struct mlx5e_priv, fs);
+ priv = tc->priv;
peer_priv = netdev_priv(ndev);
if (priv == peer_priv ||
!(priv->netdev->features & NETIF_F_HW_TC))
@@ -4777,7 +4841,7 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
- struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
+ struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;
@@ -4799,12 +4863,12 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_flow_table(priv->fs.tc.miss_t);
+ mlx5_destroy_flow_table(priv->fs->tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
@@ -4815,6 +4879,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
mutex_init(&tc->t_lock);
mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
+ tc->priv = priv;
err = rhashtable_init(&tc->ht, &tc_ht_params);
if (err)
@@ -4844,7 +4909,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = priv->fs.tc.miss_t;
+ attr.default_ft = priv->fs->tc->miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
@@ -4854,7 +4919,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
}
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
- tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
+ tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
@@ -4893,7 +4958,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
@@ -5098,7 +5163,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
int err;
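mlx5e_tc_table_alloc() above returns an encoded error pointer rather than NULL on allocation failure, so callers are expected to test it with IS_ERR()/PTR_ERR() from <linux/err.h>. A self-contained sketch of that idiom follows; the local ERR_PTR/IS_ERR copies are simplified stand-ins and tc_table_alloc() is a hypothetical analogue, not driver code.

/* sketch.c -- illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline bool IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct tc_table { int dummy; };

static struct tc_table *tc_table_alloc(void)
{
	struct tc_table *tc = calloc(1, sizeof(*tc));

	return tc ? tc : ERR_PTR(-ENOMEM);	/* same shape as the hunk above */
}

int main(void)
{
	struct tc_table *tc = tc_table_alloc();

	if (IS_ERR(tc)) {
		fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(tc));
		return 1;
	}
	free(tc);
	return 0;
}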
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 517f2252b5ff..6ce1ab6b86b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -356,6 +356,8 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
#endif
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
@@ -376,6 +378,8 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
+static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
+static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
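The header change follows the usual config-stub idiom: with CONFIG_MLX5_CLS_ACT disabled, mlx5e_tc_table_alloc()/mlx5e_tc_table_free() collapse to static inline no-ops so callers need no #ifdefs (note the stub returns NULL while the real allocator returns an ERR_PTR, so callers presumably have to tolerate both). A stand-alone analogue with hypothetical names:

/* sketch.c -- illustrative only. */
#include <stdio.h>
#include <stdlib.h>

/* #define CONFIG_FEATURE 1 */
struct feature { int x; };

#ifdef CONFIG_FEATURE
static struct feature *feature_alloc(void) { return calloc(1, sizeof(struct feature)); }
static void feature_free(struct feature *f) { free(f); }
#else
static inline struct feature *feature_alloc(void) { return NULL; }
static inline void feature_free(struct feature *f) { (void)f; }
#endif

int main(void)
{
	struct feature *f = feature_alloc();	/* caller code is identical either way */

	printf("feature %s\n", f ? "enabled" : "compiled out");
	feature_free(f);
	return 0;
}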
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index dc1e01e93d5a..27f791feb517 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -152,7 +152,7 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_tcp_all_headers(skb);
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
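The en_tx.c fix switches the inline-header size (ihs) for encapsulated TSO to the inner TCP headers; per my reading of the helpers in <linux/tcp.h>, skb_inner_tcp_all_headers() is roughly the inner transport offset plus the inner TCP header length. A purely illustrative arithmetic sketch with made-up header sizes:

/* sketch.c -- illustrative arithmetic only, not driver code. */
#include <stdio.h>

int main(void)
{
	int outer_eth = 14, outer_ip = 20, udp = 8, vxlan = 8;	/* example tunnel */
	int inner_eth = 14, inner_ip = 20, inner_tcp = 20;

	int inner_transport_offset = outer_eth + outer_ip + udp + vxlan +
				     inner_eth + inner_ip;
	/* rough analogue of skb_inner_tcp_all_headers(): everything up to and
	 * including the inner TCP header is replicated per segment.
	 */
	int ihs = inner_transport_offset + inner_tcp;

	printf("encapsulated TSO ihs = %d bytes\n", ihs);
	return 0;
}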
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 30a6c9fbf1b6..6aa58044b949 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1300,20 +1300,19 @@ abort:
*/
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
- struct devlink *devlink;
bool toggle_lag;
int ret;
if (!mlx5_esw_allowed(esw))
return 0;
+ devl_assert_locked(priv_to_devlink(esw->dev));
+
toggle_lag = !mlx5_esw_is_fdb_created(esw);
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
- devlink = priv_to_devlink(esw->dev);
- devl_lock(devlink);
down_write(&esw->mode_lock);
if (!mlx5_esw_is_fdb_created(esw)) {
ret = mlx5_eswitch_enable_locked(esw, num_vfs);
@@ -1327,7 +1326,6 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
esw->esw_funcs.num_vfs = num_vfs;
}
up_write(&esw->mode_lock);
- devl_unlock(devlink);
if (toggle_lag)
mlx5_lag_enable_change(esw->dev);
@@ -1338,13 +1336,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
/* When disabling sriov, free driver level resources. */
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
- struct devlink *devlink;
-
if (!mlx5_esw_allowed(esw))
return;
- devlink = priv_to_devlink(esw->dev);
- devl_lock(devlink);
+ devl_assert_locked(priv_to_devlink(esw->dev));
down_write(&esw->mode_lock);
/* If driver is unloaded, this function is called twice by remove_one()
* and mlx5_unload(). Prevent the second call.
@@ -1373,7 +1368,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
unlock:
up_write(&esw->mode_lock);
- devl_unlock(devlink);
}
/* Free resources for corresponding eswitch mode. It is called by devlink
@@ -1407,18 +1401,14 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
- struct devlink *devlink;
-
if (!mlx5_esw_allowed(esw))
return;
+ devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
- devlink = priv_to_devlink(esw->dev);
- devl_lock(devlink);
down_write(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw);
up_write(&esw->mode_lock);
- devl_unlock(devlink);
mlx5_lag_enable_change(esw->dev);
}
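The eswitch hunks drop the local devl_lock()/devl_unlock() pairs and instead assert that the caller already holds the devlink instance lock, moving the locking responsibility up the call chain. A minimal stand-alone analogue of that contract (a pthread mutex plus a flag, not the kernel's lockdep-backed devl_assert_locked()):

/* sketch.c -- illustrative only. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devlink_lock = PTHREAD_MUTEX_INITIALIZER;
static int devlink_locked;

static void devl_lock_sketch(void)   { pthread_mutex_lock(&devlink_lock); devlink_locked = 1; }
static void devl_unlock_sketch(void) { devlink_locked = 0; pthread_mutex_unlock(&devlink_lock); }
static void devl_assert_locked_sketch(void) { assert(devlink_locked); }

static void eswitch_disable_sketch(void)
{
	devl_assert_locked_sketch();	/* callee no longer takes the lock itself */
	printf("disable eswitch under devlink lock\n");
}

int main(void)
{
	devl_lock_sketch();		/* responsibility moved to the caller */
	eswitch_disable_sketch();
	devl_unlock_sketch();
	return 0;
}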
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 052af4901c0b..e8896f368362 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -149,6 +149,9 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
+ mlx5_unload_one(dev);
+ if (mlx5_health_wait_pci_up(dev))
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
mlx5_load_one(dev, false);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -183,15 +186,9 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
reset_reload_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- int err;
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
- err = mlx5_health_wait_pci_up(dev);
- if (err)
- mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
- fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
}
@@ -395,7 +392,6 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
}
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
done:
fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 659021c31cbd..2cf2c9948446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -666,16 +666,20 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
struct mlx5_fw_reporter_ctx fw_reporter_ctx;
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
+ struct devlink *devlink;
struct mlx5_priv *priv;
health = container_of(work, struct mlx5_core_health, fatal_report_work);
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
+ devlink = priv_to_devlink(dev);
enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ devl_lock(devlink);
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
+ devl_unlock(devlink);
return;
}
fw_reporter_ctx.err_synd = health->synd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0a99a020a3b2..c02b7b08fb4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -322,10 +322,10 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!priv->fs->ns)
return -EINVAL;
err = mlx5e_arfs_create_tables(priv);
@@ -364,9 +364,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+ }
+
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
mlx5e_create_q_counters(priv);
@@ -397,6 +406,8 @@ err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -408,6 +419,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+ mlx5e_fs_cleanup(priv->fs);
}
/* The stats groups order is opposite to the update_stats() order calls */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8b621c1ddd14..1de9b39a6359 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1304,8 +1304,10 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
int mlx5_init_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
int err = 0;
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
@@ -1334,6 +1336,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
goto err_register;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return 0;
err_register:
@@ -1348,11 +1351,15 @@ function_teardown:
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return err;
}
void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
mlx5_unregister_device(dev);
@@ -1371,13 +1378,15 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
}
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
{
int err = 0;
u64 timeout;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -1419,8 +1428,20 @@ out:
return err;
}
-void mlx5_unload_one(struct mlx5_core_dev *dev)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ int ret;
+
+ devl_lock(devlink);
+ ret = mlx5_load_one_devl_locked(dev, recovery);
+ devl_unlock(devlink);
+ return ret;
+}
+
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
mlx5_detach_device(dev);
@@ -1438,6 +1459,15 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
+ mlx5_unload_one_devl_locked(dev);
+ devl_unlock(devlink);
+}
+
static const int types[] = {
MLX5_CAP_GENERAL,
MLX5_CAP_GENERAL_2,
@@ -1902,7 +1932,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev);
+ mlx5_unload_one_devl_locked(dev);
}
int mlx5_recover_device(struct mlx5_core_dev *dev)
@@ -1913,7 +1943,7 @@ int mlx5_recover_device(struct mlx5_core_dev *dev)
return -EIO;
}
- return mlx5_load_one(dev, true);
+ return mlx5_load_one_devl_locked(dev, true);
}
static struct pci_driver mlx5_core_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 9cc7afea2758..ad61b86d5769 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -290,7 +290,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 5757cd6e1819..ee2e1b7c1310 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -154,13 +154,16 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
+ devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
@@ -173,10 +176,13 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int num_vfs = pci_num_vf(dev->pdev);
pci_disable_sriov(pdev);
+ devl_lock(devlink);
mlx5_device_disable_sriov(dev, num_vfs, true);
+ devl_unlock(devlink);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 1383550f44c1..b1dfad274a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -22,6 +22,7 @@ enum dr_action_valid_state {
DR_ACTION_STATE_PUSH_VLAN,
DR_ACTION_STATE_NON_TERM,
DR_ACTION_STATE_TERM,
+ DR_ACTION_STATE_ASO,
DR_ACTION_STATE_MAX,
};
@@ -42,6 +43,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
+ [DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
};
@@ -71,6 +73,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -85,6 +88,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -93,6 +97,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -105,6 +110,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -118,6 +124,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
@@ -128,6 +135,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -145,6 +153,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -163,18 +178,21 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -185,6 +203,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -196,6 +215,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -206,6 +226,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -219,6 +240,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -240,6 +271,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -253,6 +285,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -261,6 +294,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -272,6 +306,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -284,6 +319,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -296,6 +332,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -312,6 +349,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -331,6 +375,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -338,6 +383,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -345,6 +391,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -356,6 +403,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -368,6 +416,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -379,6 +428,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -393,6 +443,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -738,6 +799,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.reformat.param_0 = action->reformat->param_0;
attr.reformat.param_1 = action->reformat->param_1;
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ attr.aso_flow_meter.obj_id = action->aso->obj_id;
+ attr.aso_flow_meter.offset = action->aso->offset;
+ attr.aso_flow_meter.dest_reg_id = action->aso->dest_reg_id;
+ attr.aso_flow_meter.init_color = action->aso->init_color;
+ break;
default:
mlx5dr_err(dmn, "Unsupported action type %d\n", action_type);
return -EINVAL;
@@ -798,6 +865,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
+ [DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
};
static struct mlx5dr_action *
@@ -1830,6 +1898,34 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return action;
}
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id,
+ u8 dest_reg_id, u8 aso_type,
+ u8 init_color, u8 meter_id)
+{
+ struct mlx5dr_action *action;
+
+ if (aso_type != MLX5_EXE_ASO_FLOW_METER)
+ return NULL;
+
+ if (init_color > MLX5_FLOW_METER_COLOR_UNDEFINED)
+ return NULL;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_ASO_FLOW_METER);
+ if (!action)
+ return NULL;
+
+ action->aso->obj_id = obj_id;
+ action->aso->offset = meter_id;
+ action->aso->dest_reg_id = dest_reg_id;
+ action->aso->init_color = init_color;
+ action->aso->dmn = dmn;
+
+ refcount_inc(&dmn->refcount);
+
+ return action;
+}
+
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
@@ -1881,6 +1977,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
case DR_ACTION_TYP_SAMPLER:
refcount_dec(&action->sampler->dmn->refcount);
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ refcount_dec(&action->aso->dmn->refcount);
+ break;
default:
break;
}
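The per-domain next_action_state tables above encode which action may follow which: the current state and the action type index the next state, and combinations left out of a row reject that ordering. The new DR_ACTION_STATE_ASO rows slot the flow-meter ASO action into that ordering. A toy, self-contained version of the same table-driven check (states, actions and transitions are simplified, not the driver's full matrix):

/* sketch.c -- illustrative only. */
#include <stdio.h>

enum state  { ST_INVALID, ST_NON_TERM, ST_ASO, ST_TERM, ST_MAX };
enum action { ACT_CTR, ACT_ASO_FLOW_METER, ACT_FT, ACT_MAX };

static const enum state next_state[ST_MAX][ACT_MAX] = {
	[ST_NON_TERM] = {
		[ACT_CTR]            = ST_NON_TERM,
		[ACT_ASO_FLOW_METER] = ST_ASO,
		[ACT_FT]             = ST_TERM,
	},
	[ST_ASO] = {
		[ACT_CTR]            = ST_ASO,
		[ACT_FT]             = ST_TERM,
		/* no ACT_ASO_FLOW_METER entry: a second meter is rejected */
	},
};

int main(void)
{
	enum action rule[] = { ACT_CTR, ACT_ASO_FLOW_METER, ACT_FT };
	enum state st = ST_NON_TERM;

	for (unsigned i = 0; i < sizeof(rule) / sizeof(rule[0]); i++) {
		st = next_state[st][rule[i]];
		if (st == ST_INVALID) {
			printf("invalid action order at step %u\n", i);
			return 1;
		}
	}
	printf("action sequence accepted, final state %d\n", st);
	return 0;
}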
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index fcb962c6db2e..ee677a5c76be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -89,6 +89,7 @@ enum dr_ste_v1_action_id {
DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
DR_STE_V1_ACTION_ID_TRAILER = 0x13,
DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
DR_STE_V1_ACTION_ID_MAX = 0x21,
@@ -129,6 +130,10 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
};
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -494,6 +499,27 @@ static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
+ u32 object_id,
+ u32 offset,
+ u8 dest_reg_id,
+ u8 init_color)
+{
+ MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
+ DR_STE_V1_ACTION_ID_ASO);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
+ object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
+ /* Convert reg_c index to HW 64bit index */
+ MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
+ (dest_reg_id - 1) / 2);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
+ offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
+ init_color);
+}
+
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
@@ -629,6 +655,21 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -802,6 +843,21 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
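dr_ste_v1_set_aso_flow_meter() above packs the meter reference into the STE double action: the object id plus offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ selects the ASO context, offset modulo the same constant selects the line inside that object, and the reg_c index is folded into a 64-bit register index. A worked arithmetic sketch with example inputs:

/* sketch.c -- illustrative arithmetic only. */
#include <stdio.h>

#define ASO_FLOW_METER_NUM_PER_OBJ 2	/* mirrors MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ */

int main(void)
{
	unsigned int obj_id = 0x100, offset = 5, dest_reg_id = 5;	/* example inputs */

	unsigned int aso_context_number = obj_id + offset / ASO_FLOW_METER_NUM_PER_OBJ;
	unsigned int line_id = offset % ASO_FLOW_METER_NUM_PER_OBJ;
	unsigned int hw_reg = (dest_reg_id - 1) / 2;	/* reg_c index -> 64-bit reg index */

	printf("context %#x, line %u, hw reg %u\n", aso_context_number, line_id, hw_reg);
	return 0;
}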
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 98320e3945ad..feed227944b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -127,6 +127,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
+ DR_ACTION_TYP_ASO_FLOW_METER,
DR_ACTION_TYP_MAX,
};
@@ -271,6 +272,13 @@ struct mlx5dr_ste_actions_attr {
int count;
u32 headers[MLX5DR_MAX_VLANS];
} vlans;
+
+ struct {
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+ } aso_flow_meter;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1035,6 +1043,14 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
+struct mlx5dr_action_aso_flow_meter {
+ struct mlx5dr_domain *dmn;
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -1049,6 +1065,7 @@ struct mlx5dr_action {
struct mlx5dr_action_vport *vport;
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
+ struct mlx5dr_action_aso_flow_meter *aso;
};
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 6a9abba92df6..75672663359d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -500,6 +500,27 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action =
+ mlx5dr_action_create_aso(domain,
+ fte->action.exe_aso.object_id,
+ fte->action.exe_aso.return_reg_id,
+ fte->action.exe_aso.type,
+ fte->action.exe_aso.flow_meter.init_color,
+ fte->action.exe_aso.flow_meter.meter_idx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 9604b2091358..fb078fa0f0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -574,4 +574,30 @@ struct mlx5_ifc_dr_action_hw_copy_bits {
u8 reserved_at_38[0x8];
};
+enum {
+ MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2,
+};
+
+struct mlx5_ifc_ste_aso_flow_meter_action_bits {
+ u8 reserved_at_0[0xc];
+ u8 action[0x1];
+ u8 initial_color[0x2];
+ u8 line_id[0x1];
+};
+
+struct mlx5_ifc_ste_double_action_aso_v1_bits {
+ u8 action_id[0x8];
+ u8 aso_context_number[0x18];
+
+ u8 dest_reg_id[0x2];
+ u8 change_ordering_tag[0x1];
+ u8 aso_check_ordering[0x1];
+ u8 aso_context_type[0x4];
+ u8 reserved_at_28[0x8];
+ union {
+ u8 aso_fields[0x10];
+ struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter;
+ };
+};
+
#endif /* MLX5_IFC_DR_H */
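A quick bit-budget check of the new layouts above: the double action should occupy exactly 64 bits, and the flow_meter member should match the 16-bit aso_fields union it overlays. The widths are copied from the struct definitions; the program only sums them.

/* sketch.c -- bit-budget check, illustrative only. */
#include <stdio.h>

int main(void)
{
	int dword0 = 0x8 + 0x18;	/* action_id + aso_context_number */
	int dword1 = 0x2 + 0x1 + 0x1 + 0x4 + 0x8 + 0x10;	/* dest_reg_id .. aso_fields union */
	int flow_meter = 0xc + 0x1 + 0x2 + 0x1;	/* reserved + action + initial_color + line_id */

	printf("aso double action: %d + %d = %d bits, flow_meter member: %d bits\n",
	       dword0, dword1, dword0 + dword1, flow_meter);
	return (dword0 + dword1 == 64 && flow_meter == 0x10) ? 0 : 1;
}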
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7626c85643b1..ecffc2cbe6fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -131,6 +131,14 @@ struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
+ u32 obj_id,
+ u8 return_reg_id,
+ u8 aso_type,
+ u8 init_color,
+ u8 meter_id);
+
int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index a48f893cf7b0..75553eb2c7f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1499,9 +1499,7 @@ mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
return -EOPNOTSUPP;
- devl_lock(devlink);
mlxsw_core_bus_device_unregister(mlxsw_core, true);
- devl_unlock(devlink);
return 0;
}
@@ -1515,12 +1513,10 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_re
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
- devl_lock(devlink);
err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
devlink, extack);
- devl_unlock(devlink);
return err;
}
@@ -3335,6 +3331,24 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
+
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver->sdq_supports_cqe_v2;
+}
+EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
{
mlxsw_core->emad.enable_string_tlv = true;
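mlxsw_core_read_utc_sec()/mlxsw_core_read_utc_nsec() expose two independent 32-bit reads, so a consumer that wants one coherent timestamp has to cope with a seconds rollover between the two reads. The sketch below shows one common re-read approach; it is illustrative only and not necessarily how the driver consumes these helpers (the register reads are constant stand-ins).

/* sketch.c -- illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_utc_sec(void)  { return 1700000000u; }	/* stand-ins for the bus ops */
static uint32_t read_utc_nsec(void) { return 123456789u; }

static uint64_t read_utc_ns(void)
{
	uint32_t sec, nsec, sec2;

	do {
		sec = read_utc_sec();
		nsec = read_utc_nsec();
		sec2 = read_utc_sec();
	} while (sec != sec2);		/* rolled over mid-read: try again */

	return (uint64_t)sec * 1000000000ull + nsec;
}

int main(void)
{
	printf("utc = %llu ns\n", (unsigned long long)read_utc_ns());
	return 0;
}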
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 7213e4528298..02d9cc2ef0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -427,6 +427,7 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
+ bool sdq_supports_cqe_v2;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -437,6 +438,11 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
@@ -476,6 +482,8 @@ struct mlxsw_bus {
u8 *p_status);
u32 (*read_frc_h)(void *bus_priv);
u32 (*read_frc_l)(void *bus_priv);
+ u32 (*read_utc_sec)(void *bus_priv);
+ u32 (*read_utc_nsec)(void *bus_priv);
u8 features;
};
@@ -550,11 +558,17 @@ enum mlxsw_devlink_param_id {
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
};
+struct mlxsw_cqe_ts {
+ u8 sec;
+ u32 nsec;
+};
+
struct mlxsw_skb_cb {
union {
struct mlxsw_tx_info tx_info;
struct mlxsw_rx_md_info rx_md_info;
};
+ struct mlxsw_cqe_ts cqe_ts;
};
static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
index 49fee038a99c..af37e650a8ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/types.h>
-#include <linux/err.h>
#include <linux/auxiliary_bus.h>
#include <linux/idr.h>
#include <linux/gfp.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 83659fb0559a..50527adc5b5a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -103,6 +103,8 @@ struct mlxsw_pci {
struct pci_dev *pdev;
u8 __iomem *hw_addr;
u64 free_running_clock_offset;
+ u64 utc_sec_offset;
+ u64 utc_nsec_offset;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -456,9 +458,9 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
{
q->u.cq.v = mlxsw_pci->max_cqe_ver;
- /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
- q->num < mlxsw_pci->num_sdq_cqs)
+ q->num < mlxsw_pci->num_sdq_cqs &&
+ !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
q->u.cq.v = MLXSW_PCI_CQE_V1;
}
@@ -511,9 +513,26 @@ static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
return ioread32be(mlxsw_pci->hw_addr + off);
}
+static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
+ struct sk_buff *skb,
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+{
+ if (cqe_v != MLXSW_PCI_CQE_V2)
+ return;
+
+ if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) !=
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC)
+ return;
+
+ mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
+ mlxsw_skb_cb(skb)->cqe_ts.nsec =
+ mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
+}
+
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
+ enum mlxsw_pci_cqe_v cqe_v,
char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -533,6 +552,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
if (unlikely(!tx_info.is_emad &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
tx_info.local_port);
skb = NULL;
@@ -653,6 +673,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
@@ -704,7 +726,7 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, ncqe);
+ wqe_counter, q->u.cq.v, ncqe);
q->u.cq.comp_sdq_count++;
} else {
struct mlxsw_pci_queue *rdq;
@@ -1537,6 +1559,24 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->free_running_clock_offset =
mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
+ if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_sec_bar;
+ }
+
+ mlxsw_pci->utc_sec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_nsec_bar;
+ }
+
+ mlxsw_pci->utc_nsec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
+
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1601,6 +1641,8 @@ err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
+err_utc_nsec_bar:
+err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
@@ -1830,6 +1872,20 @@ static u32 mlxsw_pci_read_frc_l(void *bus_priv)
return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
}
+static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
+}
+
+static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
+}
+
static const struct mlxsw_bus mlxsw_pci_bus = {
.kind = "pci",
.init = mlxsw_pci_init,
@@ -1839,6 +1895,8 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
.read_frc_h = mlxsw_pci_read_frc_h,
.read_frc_l = mlxsw_pci_read_frc_l,
+ .read_utc_sec = mlxsw_pci_read_utc_sec,
+ .read_utc_nsec = mlxsw_pci_read_utc_nsec,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
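mlxsw_pci_skb_cb_ts_set() above stores the CQEv2 timestamp in the skb control block, and per the core.h hunk the seconds field is only 8 bits wide (mlxsw_cqe_ts.sec is a u8). A consumer therefore has to widen the truncated seconds against the full UTC counter; the sketch below shows one plausible way to do that for events in the recent past, and is not presented as the driver's actual reconstruction.

/* sketch.c -- illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint64_t widen_sec(uint8_t cqe_sec, uint64_t cur_sec)
{
	uint64_t sec = (cur_sec & ~0xffull) | cqe_sec;

	if (sec > cur_sec)	/* 8-bit field wrapped since the event */
		sec -= 0x100;
	return sec;
}

int main(void)
{
	/* event at ...03 captured as 0x03, read back when the clock shows ...05 */
	printf("%llu\n", (unsigned long long)widen_sec(0x03, 0x1005));
	/* event just before a wrap: 0xff widened while the clock shows ...02 of the next 256 s window */
	printf("%llu\n", (unsigned long long)widen_sec(0xff, 0x1102));
	return 0;
}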
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 641078060b02..1e240cdd9cbd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -29,6 +29,7 @@
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
+#include <linux/ptp_classify.h>
#include "spectrum.h"
#include "pci.h"
@@ -230,8 +231,8 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
@@ -246,6 +247,82 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+int
+mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr;
+ u16 max_fid;
+ int err;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ err = -ENOMEM;
+ goto err_skb_cow_head;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
+ err = -EIO;
+ goto err_res_valid;
+ }
+ max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ return 0;
+
+err_res_valid:
+err_skb_cow_head:
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
+{
+ unsigned int type;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ type = ptp_classify_raw(skb);
+ return !!ptp_parse_header(skb, type);
+}
+
+static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
+ * need special handling and cannot be transmitted as regular control
+ * packets.
+ */
+ if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
+ return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
+ mlxsw_sp_port, skb,
+ tx_info);
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
switch (state) {
@@ -648,12 +725,6 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
@@ -664,7 +735,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- mlxsw_sp_txhdr_construct(skb, &tx_info);
+ err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
+ &tx_info);
+ if (err)
+ return NETDEV_TX_OK;
+
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
*/
@@ -2666,6 +2741,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2682,6 +2758,24 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+};
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
+ .clock_init = mlxsw_sp2_ptp_clock_init,
+ .clock_fini = mlxsw_sp2_ptp_clock_fini,
+ .init = mlxsw_sp2_ptp_init,
+ .fini = mlxsw_sp2_ptp_fini,
+ .receive = mlxsw_sp2_ptp_receive,
+ .transmitted = mlxsw_sp2_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp2_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -3327,7 +3421,7 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
- mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
@@ -3831,6 +3925,7 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
+ .sdq_supports_cqe_v2 = false,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
@@ -3869,6 +3964,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
@@ -3907,6 +4003,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp4_driver = {
@@ -3943,6 +4040,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 50a9380b76e9..c8ff2a6d7e90 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -243,6 +243,10 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+ int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
};
static inline struct mlxsw_sp_upper *
@@ -700,6 +704,12 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 5116d7ebe258..2e0b704b8a31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -11,6 +11,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
+#include <linux/refcount.h>
#include "spectrum.h"
#include "spectrum_ptp.h"
@@ -39,6 +40,14 @@ struct mlxsw_sp1_ptp_state {
u32 gc_cycle;
};
+struct mlxsw_sp2_ptp_state {
+ struct mlxsw_sp_ptp_state common;
+ refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
+ * enabled.
+ */
+ struct hwtstamp_config config;
+};
+
struct mlxsw_sp1_ptp_key {
u16 local_port;
u8 message_type;
@@ -85,6 +94,13 @@ mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
common);
}
+static struct mlxsw_sp2_ptp_state *
+mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
+ common);
+}
+
static struct mlxsw_sp1_ptp_clock *
mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
{
@@ -328,6 +344,153 @@ void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
kfree(clock);
}
+static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ u32 utc_sec1, utc_sec2, utc_nsec;
+
+ utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (utc_sec1 != utc_sec2) {
+ /* Wrap around. */
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
+}
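+
The read above samples the seconds register on both sides of the nanoseconds read so a roll-over of the seconds between the two reads can be detected and the nanoseconds re-sampled. A minimal self-contained sketch of the same pattern, where reg_read_sec()/reg_read_nsec() are hypothetical stand-ins for the bus read callbacks rather than real driver APIs:

	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	/* Illustrative register accessors, not part of the driver. */
	extern uint32_t reg_read_sec(void);
	extern uint32_t reg_read_nsec(void);

	static uint64_t read_utc_ns(void)
	{
		uint32_t sec1 = reg_read_sec();
		uint32_t nsec = reg_read_nsec();
		uint32_t sec2 = reg_read_sec();

		if (sec1 != sec2)
			/* Seconds advanced mid-read; re-sample nsec so it is
			 * known to belong to the second reported by sec2.
			 */
			nsec = reg_read_nsec();

		return (uint64_t)sec2 * NSEC_PER_SEC + nsec;
	}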
+
+static int
+mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+ u32 sec, nsec_rem;
+
+ sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
+ 0, sec, nsec_rem, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+ /* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
+ * reversed: positive values decrease the frequency. Adjust the sign of
+ * PPB to match this behavior.
+ */
+ return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
+}
+
+static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ /* HW time adjustment range is s16. If out of range, set time instead. */
+ if (delta < S16_MIN || delta > S16_MAX) {
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
+ nsec += delta;
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+ }
+
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
+ 0, 0, 0, delta);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec = timespec64_to_ns(ts);
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlxsw_sp_clock",
+ .max_adj = MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
+ .adjfine = mlxsw_sp2_ptp_adjfine,
+ .adjtime = mlxsw_sp2_ptp_adjtime,
+ .gettimex64 = mlxsw_sp2_ptp_gettimex,
+ .settime64 = mlxsw_sp2_ptp_settime,
+};
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ struct mlxsw_sp_ptp_clock *clock;
+ int err;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->core = mlxsw_sp->core;
+
+ clock->ptp_info = mlxsw_sp2_ptp_clock_info;
+
+ err = mlxsw_sp2_ptp_phc_settime(clock, 0);
+ if (err) {
+ dev_err(dev, "setting UTC time failed %d\n", err);
+ goto err_ptp_phc_settime;
+ }
+
+ clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
+ if (IS_ERR(clock->ptp)) {
+ err = PTR_ERR(clock->ptp);
+ dev_err(dev, "ptp_clock_register failed %d\n", err);
+ goto err_ptp_clock_register;
+ }
+
+ return clock;
+
+err_ptp_clock_register:
+err_ptp_phc_settime:
+ kfree(clock);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+{
+ ptp_clock_unregister(clock->ptp);
+ kfree(clock);
+}
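+
Once ptp_clock_register() above succeeds, the clock is exposed to userspace as a /dev/ptpN character device. A minimal sketch of reading it with clock_gettime(); "/dev/ptp0" is only an example path, the actual index for a port is reported by `ethtool -T <port>`:

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		struct timespec ts;
		int fd = open("/dev/ptp0", O_RDONLY);

		if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
			perror("phc");
			return 1;
		}
		printf("phc time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		close(fd);
		return 0;
	}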
+
static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
u8 *p_domain_number,
u8 *p_message_type,
@@ -835,10 +998,44 @@ static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 event_message_type;
+ int err;
+
+ /* Deliver these message types as PTP0. */
+ event_message_type = BIT(PTP_MSGTYPE_SYNC) |
+ BIT(PTP_MSGTYPE_DELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_RESP);
+
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ event_message_type);
+ if (err)
+ return err;
+
+ /* Everything else is PTP1. */
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+ ~event_message_type);
+ if (err)
+ goto err_mtptpt1_set;
+
+ return 0;
+
+err_mtptpt1_set:
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ return err;
+}
+
+static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+}
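+
For reference, with the message-type values as defined in include/linux/ptp_classify.h (SYNC = 0x0, DELAY_REQ = 0x1, PDELAY_REQ = 0x2, PDELAY_RESP = 0x3), the event mask built above works out to the low nibble:

	/* BIT(0x0) | BIT(0x1) | BIT(0x2) | BIT(0x3) == 0x000f: the four PTP
	 * event message types are trapped as PTP0, everything else (the
	 * general messages, ~0x000f) as PTP1.
	 */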
+
struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp1_ptp_state *ptp_state;
- u16 message_type;
int err;
err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
@@ -857,22 +1054,9 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_hashtable_init;
- /* Delive these message types as PTP0. */
- message_type = BIT(PTP_MSGTYPE_SYNC) |
- BIT(PTP_MSGTYPE_DELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_RESP);
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
- message_type);
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
if (err)
- goto err_mtptpt_set;
-
- /* Everything else is PTP1. */
- message_type = ~message_type;
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
- message_type);
- if (err)
- goto err_mtptpt1_set;
+ goto err_ptp_traps_set;
err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
if (err)
@@ -884,10 +1068,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
return &ptp_state->common;
err_fifo_clr:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
-err_mtptpt1_set:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
-err_mtptpt_set:
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+err_ptp_traps_set:
rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
kfree(ptp_state);
@@ -904,8 +1086,7 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
rhltable_free_and_destroy(&ptp_state->unmatched_ht,
&mlxsw_sp1_ptp_unmatched_free_fn, NULL);
kfree(ptp_state);
@@ -1176,3 +1357,354 @@ void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
*data++ = *(u64 *)(stats + offset);
}
}
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+ if (!ptp_state)
+ return ERR_PTR(-ENOMEM);
+
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
+ if (err)
+ goto err_ptp_traps_set;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ return &ptp_state->common;
+
+err_ptp_traps_set:
+ kfree(ptp_state);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+ kfree(ptp_state);
+}
+
+static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
+ u8 cqe_ts_sec)
+{
+ u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (cqe_ts_sec > (utc_sec & 0xff))
+ /* A CQE seconds value above the low byte of UTC (UTC & 0xff) means the
+ * latter has wrapped since the time stamp was collected.
+ */
+ utc_sec -= 256;
+
+ utc_sec &= ~0xff;
+ utc_sec |= cqe_ts_sec;
+
+ return utc_sec;
+}
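+
The helper above splices the 8-bit seconds value captured in the CQE into the free-running UTC seconds, stepping back one 256 s window when the CQE byte is ahead of the current low byte. A worked example with made-up values (not taken from the hardware spec):

	/* utc_sec read now         = 0x00012a01 (low byte 0x01)
	 * cqe_ts_sec from the CQE  = 0xff
	 * 0xff > 0x01, so UTC wrapped past ...ff -> ...00 after the stamp was
	 * taken; step back one window:   0x00012a01 - 256      = 0x00012901
	 * splice in the CQE byte: (0x00012901 & ~0xff) | 0xff  = 0x000129ff
	 * i.e. the packet was stamped 2 seconds before the current read.
	 */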
+
+static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_skb_cb *cb,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ u64 ts_sec, ts_nsec, nsec;
+
+ WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
+
+ /* The time stamp in the CQE is represented by 38 bits, which is a short
+ * representation of UTC time. Software should create the full time
+ * stamp using the global UTC clock. The seconds field has only 8 bits in
+ * the CQE; to create the full time stamp, use the current UTC time and fix
+ * the seconds according to the relation between UTC seconds and CQE
+ * seconds.
+ */
+ ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
+ ts_nsec = cb->cqe_ts.nsec;
+
+ nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
+
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ *skb_hwtstamps(skb) = hwtstamps;
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ skb_tstamp_tx(skb, &hwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ *config = ptp_state->config;
+ return 0;
+}
+
+static int
+mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
+{
+ enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+ enum hwtstamp_tx_types tx_type = config->tx_type;
+ u16 ing_types = 0x00;
+ u16 egr_types = 0x00;
+
+ *p_rx_filter = rx_filter;
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ing_types = 0x00;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ /* In Spectrum-2 and above, all packets get a time stamp by
+ * default and the driver fills the time stamp only for event
+ * packets. Return all event types even if only specific types
+ * were requested.
+ */
+ ing_types = 0x0f;
+ *p_rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ egr_types = 0x00;
+ break;
+ case HWTSTAMP_TX_ON:
+ egr_types = 0x0f;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ *p_ing_types = ing_types;
+ *p_egr_types = egr_types;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
+ u16 ing_types, u16 egr_types)
+{
+ char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
+
+ mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
+ egr_types);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
+}
+
+static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
+ u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ing_types, u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ASSERT_RTNL();
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
+ egr_types, new_config);
+ if (err)
+ return err;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ASSERT_RTNL();
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
+ if (err)
+ goto err_ptp_disable;
+
+ return 0;
+
+err_ptp_disable:
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+ return err;
+}
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ enum hwtstamp_rx_filters rx_filter;
+ struct hwtstamp_config new_config;
+ u16 new_ing_types, new_egr_types;
+ bool ptp_enabled;
+ int err;
+
+ err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
+ &new_egr_types, &rx_filter);
+ if (err)
+ return err;
+
+ new_config.flags = config->flags;
+ new_config.tx_type = config->tx_type;
+ new_config.rx_filter = rx_filter;
+
+ ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
+ mlxsw_sp_port->ptp.egr_types;
+
+ if ((new_ing_types || new_egr_types) && !ptp_enabled) {
+ err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
+ new_egr_types, new_config);
+ if (err)
+ return err;
+ } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
+ err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
+ if (err)
+ return err;
+ }
+
+ mlxsw_sp_port->ptp.ing_types = new_ing_types;
+ mlxsw_sp_port->ptp.egr_types = new_egr_types;
+
+ /* Notify the ioctl caller what we are actually timestamping. */
+ config->rx_filter = rx_filter;
+
+ return 0;
+}
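+
Seen from userspace, the handler above means that requesting any specific PTP v2 event filter enables stamping of all event messages, acknowledged as HWTSTAMP_FILTER_SOME. A minimal sketch of driving it via the standard SIOCSHWTSTAMP ioctl; the interface name "swp1" is an example and error handling is reduced to perror() for brevity:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "swp1", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
			perror("SIOCSHWTSTAMP");
		else
			/* Expect HWTSTAMP_FILTER_SOME: all event messages stamped. */
			printf("granted rx_filter: %d\n", cfg.rx_filter);
		close(fd);
		return 0;
	}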
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
+ * their correction field correctly set on the egress port they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
+ */
+ if (!skb_vlan_tagged(skb)) {
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return -ENOMEM;
+ }
+ }
+
+ return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
+ tx_info);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c06cd1384bca..2d1628fdefc1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -57,6 +57,40 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port);
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -136,7 +170,14 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index)
{
}
-#endif
+
+static inline int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
@@ -184,16 +225,25 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
-{
-}
-
static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
+{
+}
+
static inline int mlxsw_sp2_get_stats_count(void)
{
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e31f8fbbc696..df2ab5cbd49b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
}
/* If the chain is ended by an load/store pair then this
- * could serve as the new head of the the next chain.
+ * could serve as the new head of the next chain.
*/
if (curr_pair_is_memcpy(meta1, meta2)) {
head_ld_meta = meta1;
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 4c759488fc77..bb06fa228367 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -8,7 +8,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o mae.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
+ mae.o tc.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ab979fd11133..ee734b69150f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2538,23 +2538,33 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
if (rc)
return rc;
+ down_write(&efx->filter_sem);
rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
if (rc)
- return rc;
+ goto out_unlock;
list_for_each_entry(vlan, &nic_data->vlan_list, list) {
rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
if (rc)
goto fail_add_vlan;
}
- return 0;
+ goto out_unlock;
fail_add_vlan:
efx_mcdi_filter_table_remove(efx);
+out_unlock:
+ up_write(&efx->filter_sem);
return rc;
}
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
+}
+
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
@@ -3211,9 +3221,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
- up_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc)
@@ -3243,9 +3251,7 @@ restore_vadaptor:
if (rc2)
goto reset_nic;
restore_filters:
- down_write(&efx->filter_sem);
rc2 = efx_ef10_filter_table_probe(efx);
- up_write(&efx->filter_sem);
if (rc2)
goto reset_nic;
@@ -3275,8 +3281,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_stop(efx->net_dev);
mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
- efx_mcdi_filter_table_remove(efx);
+ efx_ef10_filter_table_remove(efx);
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr);
@@ -3286,7 +3291,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
sizeof(inbuf), NULL, 0, NULL);
efx_ef10_filter_table_probe(efx);
- up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
if (was_enabled)
@@ -4092,7 +4096,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index 425017fbcb25..71aab3d0480f 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -431,6 +431,9 @@ static void ef100_pci_remove(struct pci_dev *pci_dev)
probe_data = container_of(efx, struct efx_probe_data, efx);
ef100_remove_netdev(probe_data);
+#ifdef CONFIG_SFC_SRIOV
+ efx_fini_struct_tc(efx);
+#endif
ef100_remove(efx);
efx_fini_io(efx);
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 9e65de1ab889..17b9d37218cb 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -329,6 +329,10 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)
ef100_unregister_netdev(efx);
+#ifdef CONFIG_SFC_SRIOV
+ efx_fini_tc(efx);
+#endif
+
down_write(&efx->filter_sem);
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 4625d35269e6..8061efdaf82c 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -24,6 +24,8 @@
#include "ef100_tx.h"
#include "ef100_sriov.h"
#include "ef100_netdev.h"
+#include "tc.h"
+#include "mae.h"
#include "rx_common.h"
#define EF100_MAX_VIS 4096
@@ -374,26 +376,46 @@ static int ef100_filter_table_up(struct efx_nic *efx)
{
int rc;
+ down_write(&efx->filter_sem);
rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
- if (rc) {
- efx_mcdi_filter_table_down(efx);
- return rc;
- }
+ if (rc)
+ goto fail_unspec;
rc = efx_mcdi_filter_add_vlan(efx, 0);
- if (rc) {
- efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
- efx_mcdi_filter_table_down(efx);
- }
+ if (rc)
+ goto fail_vlan0;
+ /* Drop the lock: we've finished altering table existence, and
+ * filter insertion will need to take the lock for read.
+ */
+ up_write(&efx->filter_sem);
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx_tc_insert_rep_filters(efx);
+ /* Rep filter failure is nonfatal */
+ if (rc)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to insert representor filters, rc %d\n",
+ rc);
+#endif
+ return 0;
+fail_vlan0:
+ efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+fail_unspec:
+ efx_mcdi_filter_table_down(efx);
+ up_write(&efx->filter_sem);
return rc;
}
static void ef100_filter_table_down(struct efx_nic *efx)
{
+#ifdef CONFIG_SFC_SRIOV
+ efx_tc_remove_rep_filters(efx);
+#endif
+ down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
efx_mcdi_filter_table_down(efx);
+ up_write(&efx->filter_sem);
}
/* Other
@@ -704,6 +726,31 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G;
}
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef100_get_base_mport(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ u32 selector, id;
+ int rc;
+
+ /* Construct mport selector for "physical network port" */
+ efx_mae_mport_wire(efx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_lookup_mport(efx, selector, &id);
+ if (rc)
+ return rc;
+ /* The ID should always fit in 16 bits, because that's how wide the
+ * corresponding fields in the RX prefix & TX override descriptor are
+ */
+ if (id >> 16)
+ netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
+ id);
+ nic_data->base_mport = id;
+ nic_data->have_mport = true;
+ return 0;
+}
+#endif
+
static int compare_versions(const char *a, const char *b)
{
int a_major, a_minor, a_point, a_patch;
@@ -1064,6 +1111,34 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
+ if (!nic_data->grp_mae)
+ return 0;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx_init_struct_tc(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ef100_get_base_mport(efx);
+ if (rc) {
+ netif_warn(efx, probe, net_dev,
+ "Failed to probe base mport rc %d; representors will not function\n",
+ rc);
+ }
+
+ rc = efx_init_tc(efx);
+ if (rc) {
+ /* Either we don't have an MAE at all (i.e. legacy v-switching),
+ * or we do but we failed to probe it. In the latter case, we
+ * may not have set up default rules, in which case we won't be
+ * able to pass any traffic. However, we don't fail the probe,
+ * because the user might need to use the netdevice to apply
+ * configuration changes to fix whatever's wrong with the MAE.
+ */
+ netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
+ rc);
+ }
+#endif
return 0;
fail:
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
index 40f84a275057..0295933145fa 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.h
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -72,6 +72,8 @@ struct ef100_nic_data {
u8 port_id[ETH_ALEN];
DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
u64 stats[EF100_STAT_COUNT];
+ u32 base_mport;
+ bool have_mport; /* base_mport was populated successfully */
bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs;
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index d07539f091b8..73ae4656a6e7 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -13,15 +13,24 @@
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
+#include "rx_common.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
+#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
+
static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
unsigned int i)
{
efv->parent = efx;
efv->idx = i;
INIT_LIST_HEAD(&efv->list);
+ efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efv->dflt.acts.list);
+ INIT_LIST_HEAD(&efv->rx_list);
+ spin_lock_init(&efv->rx_lock);
efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -29,6 +38,25 @@ static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
return 0;
}
+static int efx_ef100_rep_open(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&efv->napi);
+ return 0;
+}
+
+static int efx_ef100_rep_close(struct net_device *net_dev)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ napi_disable(&efv->napi);
+ netif_napi_del(&efv->napi);
+ return 0;
+}
+
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -79,10 +107,26 @@ static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
return 0;
}
+static void efx_ef100_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct efx_rep *efv = netdev_priv(dev);
+
+ stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
+ stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
+ stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
+ stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
+ stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
+ stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
+}
+
static const struct net_device_ops efx_ef100_rep_netdev_ops = {
+ .ndo_open = efx_ef100_rep_open,
+ .ndo_stop = efx_ef100_rep_close,
.ndo_start_xmit = efx_ef100_rep_xmit,
.ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
.ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
+ .ndo_get_stats64 = efx_ef100_rep_get_stats64,
};
static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
@@ -106,10 +150,37 @@ static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
efv->msg_enable = msg_enable;
}
+static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ ring->rx_max_pending = U32_MAX;
+ ring->rx_pending = efv->rx_pring_size;
+}
+
+static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
+ return -EINVAL;
+
+ efv->rx_pring_size = ring->rx_pending;
+ return 0;
+}
+
static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
.get_drvinfo = efx_ef100_rep_get_drvinfo,
.get_msglevel = efx_ef100_rep_ethtool_get_msglevel,
.set_msglevel = efx_ef100_rep_ethtool_set_msglevel,
+ .get_ringparam = efx_ef100_rep_ethtool_get_ringparam,
+ .set_ringparam = efx_ef100_rep_ethtool_set_ringparam,
};
static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
@@ -159,6 +230,7 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
u32 selector;
int rc;
+ efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
/* Construct mport selector for corresponding VF */
efx_mae_mport_vf(efx, efv->idx, &selector);
/* Look up actual mport ID */
@@ -169,7 +241,14 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
/* mport label should fit in 16 bits */
WARN_ON(efv->mport >> 16);
- return 0;
+ return efx_tc_configure_default_rule_rep(efv);
+}
+
+static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
+{
+ struct efx_nic *efx = efv->parent;
+
+ efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}
static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
@@ -181,6 +260,7 @@ static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
list_del(&efv->list);
spin_unlock_bh(&efx->vf_reps_lock);
rtnl_unlock();
+ synchronize_rcu();
free_netdev(efv->net_dev);
}
@@ -202,19 +282,21 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
pci_err(efx->pci_dev,
"Failed to configure representor for VF %d, rc %d\n",
i, rc);
- goto fail;
+ goto fail1;
}
rc = register_netdev(efv->net_dev);
if (rc) {
pci_err(efx->pci_dev,
"Failed to register representor for VF %d, rc %d\n",
i, rc);
- goto fail;
+ goto fail2;
}
pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
efv->net_dev->name);
return 0;
-fail:
+fail2:
+ efx_ef100_deconfigure_rep(efv);
+fail1:
efx_ef100_rep_destroy_netdev(efv);
return rc;
}
@@ -228,6 +310,7 @@ void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
return;
netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
unregister_netdev(rep_dev);
+ efx_ef100_deconfigure_rep(efv);
efx_ef100_rep_destroy_netdev(efv);
}
@@ -242,3 +325,111 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx)
list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
efx_ef100_vfrep_destroy(efx, efv);
}
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
+{
+ struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
+ unsigned int read_index;
+ struct list_head head;
+ struct sk_buff *skb;
+ bool need_resched;
+ int spent = 0;
+
+ INIT_LIST_HEAD(&head);
+ /* Grab up to 'weight' pending SKBs */
+ spin_lock_bh(&efv->rx_lock);
+ read_index = efv->write_index;
+ while (spent < weight && !list_empty(&efv->rx_list)) {
+ skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
+ list_del(&skb->list);
+ list_add_tail(&skb->list, &head);
+ spent++;
+ }
+ spin_unlock_bh(&efv->rx_lock);
+ /* Receive them */
+ netif_receive_skb_list(&head);
+ if (spent < weight)
+ if (napi_complete_done(napi, spent)) {
+ spin_lock_bh(&efv->rx_lock);
+ efv->read_index = read_index;
+ /* If write_index advanced while we were doing the
+ * RX, then storing our read_index won't re-prime the
+ * fake-interrupt. In that case, we need to schedule
+ * NAPI again to consume the additional packet(s).
+ */
+ need_resched = efv->write_index != read_index;
+ spin_unlock_bh(&efv->rx_lock);
+ if (need_resched)
+ napi_schedule(&efv->napi);
+ }
+ return spent;
+}
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
+{
+ u8 *eh = efx_rx_buf_va(rx_buf);
+ struct sk_buff *skb;
+ bool primed;
+
+ /* Don't allow too many queued SKBs to build up, as they consume
+ * GFP_ATOMIC memory. If we overrun, just start dropping.
+ */
+ if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
+ atomic64_inc(&efv->stats.rx_dropped);
+ if (net_ratelimit())
+ netif_dbg(efv->parent, rx_err, efv->net_dev,
+ "nodesc-dropped packet of length %u\n",
+ rx_buf->len);
+ return;
+ }
+
+ skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
+ if (!skb) {
+ atomic64_inc(&efv->stats.rx_dropped);
+ if (net_ratelimit())
+ netif_dbg(efv->parent, rx_err, efv->net_dev,
+ "noskb-dropped packet of length %u\n",
+ rx_buf->len);
+ return;
+ }
+ memcpy(skb->data, eh, rx_buf->len);
+ __skb_put(skb, rx_buf->len);
+
+ skb_record_rx_queue(skb, 0); /* rep is single-queue */
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efv->net_dev);
+
+ skb_checksum_none_assert(skb);
+
+ atomic64_inc(&efv->stats.rx_packets);
+ atomic64_add(rx_buf->len, &efv->stats.rx_bytes);
+
+ /* Add it to the rx list */
+ spin_lock_bh(&efv->rx_lock);
+ primed = efv->read_index == efv->write_index;
+ list_add_tail(&skb->list, &efv->rx_list);
+ efv->write_index++;
+ spin_unlock_bh(&efv->rx_lock);
+ /* Trigger rx work */
+ if (primed)
+ napi_schedule(&efv->napi);
+}
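+
The representor RX path above uses two free-running counters instead of a ring: the producer advances write_index and only schedules NAPI when the consumer had caught up, while the poll routine publishes its progress by storing the write_index it saw into read_index and re-schedules itself if more packets arrived meanwhile. A stripped-down model of that hand-off; pseudo_queue and kick_consumer() are illustrative names, not driver APIs:

	struct pseudo_queue {
		unsigned int write_index;	/* advanced by the producer */
		unsigned int read_index;	/* published by the consumer */
	};

	void kick_consumer(struct pseudo_queue *q);	/* plays the role of napi_schedule() */

	static void producer_enqueue(struct pseudo_queue *q)
	{
		/* Idle only when the consumer has caught up with the producer. */
		int was_idle = q->read_index == q->write_index;

		q->write_index++;
		if (was_idle)
			kick_consumer(q);	/* first packet after idle primes the poll */
	}

	static void consumer_drain(struct pseudo_queue *q)
	{
		unsigned int seen = q->write_index;

		/* ... consume everything enqueued up to 'seen' ... */

		q->read_index = seen;
		if (q->write_index != seen)
			kick_consumer(q);	/* more arrived while draining; go again */
	}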
+
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
+{
+ struct efx_rep *efv, *out = NULL;
+
+ /* spinlock guards against list mutation while we're walking it;
+ * but caller must also hold rcu_read_lock() to ensure the netdev
+ * isn't freed after we drop the spinlock.
+ */
+ spin_lock_bh(&efx->vf_reps_lock);
+ list_for_each_entry(efv, &efx->vf_reps, list)
+ if (efv->mport == mport) {
+ out = efv;
+ break;
+ }
+ spin_unlock_bh(&efx->vf_reps_lock);
+ return out;
+}
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
index d47fd8ff6220..070f700893c1 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.h
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -14,6 +14,7 @@
#define EF100_REP_H
#include "net_driver.h"
+#include "tc.h"
struct efx_rep_sw_stats {
atomic64_t rx_packets, tx_packets;
@@ -29,7 +30,14 @@ struct efx_rep_sw_stats {
* @msg_enable: log message enable flags
* @mport: m-port ID of corresponding VF
* @idx: VF index
+ * @write_index: number of packets enqueued to @rx_list
+ * @read_index: number of packets consumed from @rx_list
+ * @rx_pring_size: max length of RX list
+ * @dflt: default-rule for MAE switching
* @list: entry on efx->vf_reps
+ * @rx_list: list of SKBs queued for receive in NAPI poll
+ * @rx_lock: protects @rx_list
+ * @napi: NAPI control structure
* @stats: software traffic counters for netdev stats
*/
struct efx_rep {
@@ -38,7 +46,13 @@ struct efx_rep {
u32 msg_enable;
u32 mport;
unsigned int idx;
+ unsigned int write_index, read_index;
+ unsigned int rx_pring_size;
+ struct efx_tc_flow_rule dflt;
struct list_head list;
+ struct list_head rx_list;
+ spinlock_t rx_lock;
+ struct napi_struct napi;
struct efx_rep_sw_stats stats;
};
@@ -46,4 +60,10 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
void efx_ef100_fini_vfreps(struct efx_nic *efx);
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
+/* Returns the representor corresponding to a VF m-port, or NULL
+ * @mport is an m-port label, *not* an m-port ID!
+ * Caller must hold rcu_read_lock().
+ */
+struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
#endif /* EF100_REP_H */
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 85207acf7dee..65bbe37753e6 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -55,10 +55,14 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
void __ef100_rx_packet(struct efx_channel *channel)
{
- struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
+ channel->rx_pkt_index);
struct efx_nic *efx = channel->efx;
+ struct ef100_nic_data *nic_data;
u8 *eh = efx_rx_buf_va(rx_buf);
__wsum csum = 0;
+ u16 ing_port;
u32 *prefix;
prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
@@ -76,6 +80,37 @@ void __ef100_rx_packet(struct efx_channel *channel)
goto out;
}
+ ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));
+
+ nic_data = efx->nic_data;
+
+ if (nic_data->have_mport && ing_port != nic_data->base_mport) {
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_rep *efv;
+
+ rcu_read_lock();
+ efv = efx_ef100_find_rep_by_mport(efx, ing_port);
+ if (efv) {
+ if (efv->net_dev->flags & IFF_UP)
+ efx_ef100_rep_rx_packet(efv, rx_buf);
+ rcu_read_unlock();
+ /* Representor Rx doesn't care about PF Rx buffer
+ * ownership; it just makes a copy. So we are done
+ * with the Rx buffer from the PF's point of view and should
+ * free it.
+ */
+ goto free_rx_buffer;
+ }
+ rcu_read_unlock();
+#endif
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Unrecognised ing_port %04x (base %04x), dropping\n",
+ ing_port, nic_data->base_mport);
+ channel->n_rx_mport_bad++;
+ goto free_rx_buffer;
+ }
+
if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
++channel->n_rx_ip_hdr_chksum_err;
@@ -87,17 +122,16 @@ void __ef100_rx_packet(struct efx_channel *channel)
}
if (channel->type->receive_skb) {
- struct efx_rx_queue *rx_queue =
- efx_channel_get_rx_queue(channel);
-
/* no support for special channels yet, so just discard */
WARN_ON_ONCE(1);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
- goto out;
+ goto free_rx_buffer;
}
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
+ goto out;
+free_rx_buffer:
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
out:
channel->rx_pkt_n_frags = 0;
}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 92550c7e85ce..9aae0d8b713f 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -501,14 +501,11 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev);
- down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
- if (rc) {
- up_write(&vf->efx->filter_sem);
+ if (rc)
return rc;
- }
}
rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
@@ -539,12 +536,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
if (vf->efx) {
/* VF cannot use the vport_id that the PF created */
rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
- if (rc) {
- up_write(&vf->efx->filter_sem);
+ if (rc)
return rc;
- }
vf->efx->type->filter_table_probe(vf->efx);
- up_write(&vf->efx->filter_sem);
efx_net_open(vf->efx->net_dev);
efx_device_attach_if_not_resetting(vf->efx);
}
@@ -580,7 +574,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_net_stop(vf->efx->net_dev);
mutex_lock(&vf->efx->mac_lock);
- down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
@@ -654,7 +647,6 @@ restore_filters:
if (rc2)
goto reset_nic_up_write;
- up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock);
rc2 = efx_net_open(vf->efx->net_dev);
@@ -666,10 +658,8 @@ restore_filters:
return rc;
reset_nic_up_write:
- if (vf->efx) {
- up_write(&vf->efx->filter_sem);
+ if (vf->efx)
mutex_unlock(&vf->efx->mac_lock);
- }
reset_nic:
if (vf->efx) {
netif_err(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 58ad9d665805..bc840ede3053 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -91,6 +91,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mport_bad),
#ifdef CONFIG_RFS_ACCEL
EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 40b2af8bfb81..4d928839d292 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -88,6 +88,7 @@ enum efx_filter_priority {
* the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
+ * @EFX_FILTER_FLAG_VPORT_ID: Virtual port ID for adapter switching.
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
@@ -95,6 +96,7 @@ enum efx_filter_flags {
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
+ EFX_FILTER_FLAG_VPORT_ID = 0x20,
};
/** enum efx_encap_type - types of encapsulation
@@ -127,6 +129,9 @@ enum efx_encap_type {
* MCFW context_id.
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
+ * @vport_id: Virtual port ID associated with RX queue, for adapter switching,
+ * if %EFX_FILTER_FLAG_VPORT_ID is set. This is an MCFW vport_id, or on
+ * EF100 an mport selector.
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
@@ -156,6 +161,7 @@ struct efx_filter_spec {
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
+ u32 vport_id;
u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid;
@@ -292,6 +298,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
return 0;
}
+/**
+ * efx_filter_set_vport_id - override virtual port id relating to filter
+ * @spec: Specification to initialise
+ * @vport_id: firmware ID of the virtual port
+ */
+static inline void efx_filter_set_vport_id(struct efx_filter_spec *spec,
+ u32 vport_id)
+{
+ spec->flags |= EFX_FILTER_FLAG_VPORT_ID;
+ spec->vport_id = vport_id;
+}
+
static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
enum efx_encap_type encap_type)
{
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 011ebd46ada5..97627f5e3674 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -11,7 +11,65 @@
#include "mae.h"
#include "mcdi.h"
-#include "mcdi_pcol.h"
+#include "mcdi_pcol_mae.h"
+
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ if (WARN_ON_ONCE(!id))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!label))
+ return -EINVAL;
+
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_TYPE,
+ MC_CMD_MAE_MPORT_ALLOC_ALIAS_IN_MPORT_TYPE_ALIAS);
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_ALLOC_ALIAS_IN_DELIVER_MPORT,
+ MAE_MPORT_SELECTOR_ASSIGNED);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_MPORT_ID);
+ *label = MCDI_DWORD(outbuf, MAE_MPORT_ALLOC_ALIAS_OUT_LABEL);
+ return 0;
+}
+
+int efx_mae_free_mport(struct efx_nic *efx, u32 id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_FREE_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_MAE_MPORT_FREE_OUT_LEN);
+ MCDI_SET_DWORD(inbuf, MAE_MPORT_FREE_IN_MPORT_ID, id);
+ return efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_2(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_PPORT,
+ MAE_MPORT_SELECTOR_PPORT_ID, efx->port_num);
+ *out = EFX_DWORD_VAL(mport);
+}
+
+void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_3(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
+ MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
+ MAE_MPORT_SELECTOR_FUNC_VF_ID, MAE_MPORT_SELECTOR_FUNC_VF_ID_NULL);
+ *out = EFX_DWORD_VAL(mport);
+}
void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
{
@@ -24,6 +82,17 @@ void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
*out = EFX_DWORD_VAL(mport);
}
+/* Constructs an mport selector from an mport ID, because they're not the same */
+void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
+{
+ efx_dword_t mport;
+
+ EFX_POPULATE_DWORD_2(mport,
+ MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_MPORT_ID,
+ MAE_MPORT_SELECTOR_MPORT_ID, mport_id);
+ *out = EFX_DWORD_VAL(mport);
+}
+
/* id is really only 24 bits wide */
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
{
@@ -42,3 +111,236 @@ int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
*id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
return 0;
}
+
+static bool efx_mae_asl_id(u32 id)
+{
+ return !!(id & BIT(31));
+}
+
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
+ MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
+ MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
+ MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
+ if (act->deliver)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DELIVER,
+ act->dest_mport);
+ BUILD_BUG_ON(MAE_MPORT_SELECTOR_NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ act->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
+ /* We rely on the high bit of AS IDs always being clear.
+ * The firmware API guarantees this, but let's check it ourselves.
+ */
+ if (WARN_ON_ONCE(efx_mae_asl_id(act->fw_id))) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ return -EIO;
+ }
+ return 0;
+}
+
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_FREE_IN_AS_ID, fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_FREE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what action-sets exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_FREE_OUT_FREED_AS_ID) != fw_id))
+ return -EIO;
+ return 0;
+}
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+ struct efx_tc_action_set *act;
+ size_t inlen, outlen, i = 0;
+ efx_dword_t *inbuf;
+ int rc;
+
+ list_for_each_entry(act, &acts->list, list)
+ i++;
+ if (i == 0)
+ return -EINVAL;
+ if (i == 1) {
+ /* Don't wrap an ASL around a single AS, just use the AS_ID
+ * directly. ASLs are a more limited resource.
+ */
+ act = list_first_entry(&acts->list, struct efx_tc_action_set, list);
+ acts->fw_id = act->fw_id;
+ return 0;
+ }
+ if (i > MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2)
+ return -EOPNOTSUPP; /* Too many actions */
+ inlen = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(i);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+ i = 0;
+ list_for_each_entry(act, &acts->list, list) {
+ MCDI_SET_ARRAY_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS,
+ i, act->fw_id);
+ i++;
+ }
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, i);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_ALLOC, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto out_free;
+ if (outlen < sizeof(outbuf)) {
+ rc = -EIO;
+ goto out_free;
+ }
+ acts->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+ /* We rely on the high bit of ASL IDs always being set.
+ * The firmware API guarantees this, but let's check it ourselves.
+ */
+ if (WARN_ON_ONCE(!efx_mae_asl_id(acts->fw_id))) {
+ efx_mae_free_action_set_list(efx, acts);
+ rc = -EIO;
+ }
+out_free:
+ kfree(inbuf);
+ return rc;
+}
+
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ /* If this is just an AS_ID with no ASL wrapper, then there is
+ * nothing for us to free. (The AS will be freed later.)
+ */
+ if (efx_mae_asl_id(acts->fw_id)) {
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_LIST_FREE_IN_ASL_ID,
+ acts->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_LIST_FREE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what action-set-lists exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) != acts->fw_id))
+ return -EIO;
+ }
+ /* We're probably about to free @acts, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ acts->fw_id = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL;
+ return 0;
+}
+
+static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+ const struct efx_tc_match *match)
+{
+ if (match->mask.ingress_port) {
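+ /* Only an exact match (all-ones mask) on the ingress port is
+ * supported; any partial mask is rejected.
+ */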
+ if (~match->mask.ingress_port)
+ return -EOPNOTSUPP;
+ MCDI_STRUCT_SET_DWORD(match_crit,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR,
+ match->value.ingress_port);
+ }
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
+ match->mask.ingress_port);
+ return 0;
+}
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+ u32 prio, u32 acts_id, u32 *id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
+ MCDI_DECLARE_STRUCT_PTR(match_crit);
+ MCDI_DECLARE_STRUCT_PTR(response);
+ size_t outlen;
+ int rc;
+
+ if (!id)
+ return -EINVAL;
+
+ match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
+ response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
+ if (efx_mae_asl_id(acts_id)) {
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID, acts_id);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
+ MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
+ } else {
+ /* We only had one AS, so we didn't wrap it in an ASL */
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
+ MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID, acts_id);
+ }
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
+ rc = efx_mae_populate_match_criteria(match_crit, match);
+ if (rc)
+ return rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
+ return 0;
+}
+
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_DELETE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_DELETE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_DELETE_IN_AR_ID, id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_DELETE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ /* FW freed a different ID than we asked for, should also never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what rules exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_ACTION_RULE_DELETE_OUT_DELETED_AR_ID) != id))
+ return -EIO;
+ return 0;
+}
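
The action-set and action-set-list helpers above all hand back a single u32 fw_id that can name either kind of object; bit 31 distinguishes them, which is what lets efx_mae_insert_rule() take either handle through the same acts_id parameter. A minimal, self-contained sketch of that convention (illustration only; the names below are invented and not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* MAE firmware handles share one 32-bit ID space: action-set-list
 * (ASL) IDs always have bit 31 set and plain action-set (AS) IDs
 * always have it clear, so one u32 can carry either and the consumer
 * can recover which table a handle refers to.
 */
bool fw_id_is_asl(uint32_t fw_id)
{
	return fw_id & (UINT32_C(1) << 31);
}
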
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 27e69e8a54b6..0369be4d8983 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -14,9 +14,29 @@
/* MCDI interface for the ef100 Match-Action Engine */
#include "net_driver.h"
+#include "tc.h"
+#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
+int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
+int efx_mae_free_mport(struct efx_nic *efx, u32 id);
+
+void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
+void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
+void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
+int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
+
+int efx_mae_alloc_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts);
+int efx_mae_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts);
+
+int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
+ u32 prio, u32 acts_id, u32 *id);
+int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
+
#endif /* EF100_MAE_H */
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index f74f6ce8b27d..26bc69f76801 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -205,6 +205,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+#define _MCDI_STRUCT_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
@@ -214,6 +216,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
#define MCDI_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
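
The new _MCDI_STRUCT_DWORD()/MCDI_STRUCT_SET_DWORD() pair differs from the existing command-level macros only in how the field-offset token is built: MCDI_SET_DWORD() pastes an MC_CMD_ prefix onto the field name, whereas the STRUCT variants use the name verbatim, which is what lets mae.c above address fields of an embedded sub-structure (the rule-insert match criteria and response) through a struct pointer. A toy, compilable analogue of that token-pasting difference (every name here is invented for illustration):

#include <stdio.h>

#define MC_CMD_DEMO_FIELD_OFST	4	/* command-level field */
#define DEMO_SUBFIELD_OFST	8	/* struct-level field */

#define CMD_FIELD_OFST(_field)		(MC_CMD_ ## _field ## _OFST)
#define STRUCT_FIELD_OFST(_field)	(_field ## _OFST)

int main(void)
{
	printf("%d %d\n", CMD_FIELD_OFST(DEMO_FIELD),
	       STRUCT_FIELD_OFST(DEMO_SUBFIELD));
	return 0;
}
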
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 1523be77b9db..4ff6586116ee 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -221,7 +221,10 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
+ if (flags & EFX_FILTER_FLAG_VPORT_ID)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, spec->vport_id);
+ else
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
@@ -488,6 +491,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id;
+ saved_spec->vport_id = spec->vport_id;
}
} else if (!replacing) {
kfree(saved_spec);
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index 06426aa9f2f3..c0d6558b9fd2 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -89,6 +89,7 @@ struct efx_mcdi_filter_table {
*/
bool mc_chaining;
bool vlan_filter;
+ /* Entries on the vlan_list are added/removed under filter_sem */
struct list_head vlan_list;
};
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol_mae.h b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
new file mode 100644
index 000000000000..ff6d80c8e486
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_pcol_mae.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2019-2022 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef MCDI_PCOL_MAE_H
+#define MCDI_PCOL_MAE_H
+/* MCDI definitions for Match-Action Engine functionality that are
+ * missing from the main mcdi_pcol.h
+ */
+
+/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
+ * following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
+ */
+/* enum: A counter ID that is guaranteed never to represent a real counter */
+#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff
+
+#endif /* MCDI_PCOL_MAE_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 4cde54cf77b9..7ef823d7a89a 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -478,6 +478,8 @@ enum efx_sync_events_state {
* @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
* @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
+ * @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
+ * not recognised
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -540,6 +542,7 @@ struct efx_channel {
unsigned int n_rx_xdp_bad_drops;
unsigned int n_rx_xdp_tx;
unsigned int n_rx_xdp_redirect;
+ unsigned int n_rx_mport_bad;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -975,6 +978,7 @@ enum efx_xdp_tx_queues_mode {
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
* xdp_rxq_info structures?
* @netdev_notifier: Netdevice notifier.
+ * @tc: state for TC offload (EF100).
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem
@@ -1158,6 +1162,7 @@ struct efx_nic {
bool xdp_rxq_info_failed;
struct notifier_block netdev_notifier;
+ struct efx_tc_state *tc;
unsigned int mem_bar;
u32 reg_base;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 4625f85acab2..10ad0b93d283 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1100,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
if (tx_queue && tx_queue->timestamping) {
+ /* This code invokes normal driver TX code which is always
+ * protected from softirqs when called from generic TX code,
+ * which in turn disables preemption. See __dev_queue_xmit, which
+ * uses rcu_read_lock_bh to disable preemption for RCU and to
+ * disable softirqs. We do not need RCU reader
+ * protection here.
+ *
+ * Although it is theoretically safe for the current PTP TX/RX
+ * code to run without disabling softirqs, there are three good
+ * reasons for doing so:
+ *
+ * 1) The code invoked is mainly implemented for non-PTP
+ * packets and it is always executed with softirqs
+ * disabled.
+ * 2) This being a single PTP packet, better to not
+ * interrupt its processing by softirqs which can lead
+ * to high latencies.
+ * 3) netdev_xmit_more checks that preemption is disabled and
+ * triggers a BUG_ON if not.
+ */
+ local_bh_disable();
efx_enqueue_skb(tx_queue, skb);
+ local_bh_enable();
} else {
WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index bd21d6ac778a..4826e6a7e4ce 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -793,7 +793,6 @@ int efx_probe_filters(struct efx_nic *efx)
int rc;
mutex_lock(&efx->mac_lock);
- down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
goto out_unlock;
@@ -830,7 +829,6 @@ int efx_probe_filters(struct efx_nic *efx)
}
#endif
out_unlock:
- up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -846,9 +844,7 @@ void efx_remove_filters(struct efx_nic *efx)
channel->rps_flow_id = NULL;
}
#endif
- down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
- up_write(&efx->filter_sem);
}
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
new file mode 100644
index 000000000000..0c0aeb91f500
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc.h"
+#include "mae.h"
+#include "ef100_rep.h"
+#include "efx.h"
+
+static void efx_tc_free_action_set(struct efx_nic *efx,
+ struct efx_tc_action_set *act, bool in_hw)
+{
+ /* Failure paths calling this on the 'running action' set in_hw=false,
+ * because if the alloc had succeeded we'd've put it in acts.list and
+ * not still have it in act.
+ */
+ if (in_hw) {
+ efx_mae_free_action_set(efx, act->fw_id);
+ /* in_hw is true iff we are on an acts.list; make sure to
+ * remove ourselves from that list before we are freed.
+ */
+ list_del(&act->list);
+ }
+ kfree(act);
+}
+
+static void efx_tc_free_action_set_list(struct efx_nic *efx,
+ struct efx_tc_action_set_list *acts,
+ bool in_hw)
+{
+ struct efx_tc_action_set *act, *next;
+
+ /* Failure paths set in_hw=false, because usually the acts didn't get
+ * to efx_mae_alloc_action_set_list(); if they did, the failure tree
+ * has a separate efx_mae_free_action_set_list() before calling us.
+ */
+ if (in_hw)
+ efx_mae_free_action_set_list(efx, acts);
+ /* Any act that's on the list will be in_hw even if the list isn't */
+ list_for_each_entry_safe(act, next, &acts->list, list)
+ efx_tc_free_action_set(efx, act, true);
+ /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
+}
+
+static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
+{
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
+ u32 eg_port, struct efx_tc_flow_rule *rule)
+{
+ struct efx_tc_action_set_list *acts = &rule->acts;
+ struct efx_tc_match *match = &rule->match;
+ struct efx_tc_action_set *act;
+ int rc;
+
+ match->value.ingress_port = ing_port;
+ match->mask.ingress_port = ~0;
+ act = kzalloc(sizeof(*act), GFP_KERNEL);
+ if (!act)
+ return -ENOMEM;
+ act->deliver = 1;
+ act->dest_mport = eg_port;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc)
+ goto fail1;
+ EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
+ list_add_tail(&act->list, &acts->list);
+ rc = efx_mae_alloc_action_set_list(efx, acts);
+ if (rc)
+ goto fail2;
+ rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
+ acts->fw_id, &rule->fw_id);
+ if (rc)
+ goto fail3;
+ return 0;
+fail3:
+ efx_mae_free_action_set_list(efx, acts);
+fail2:
+ list_del(&act->list);
+ efx_mae_free_action_set(efx, act->fw_id);
+fail1:
+ kfree(act);
+ return rc;
+}
+
+static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_uplink(efx, &ing_port);
+ efx_mae_mport_wire(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
+{
+ struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_wire(efx, &ing_port);
+ efx_mae_mport_uplink(efx, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
+{
+ struct efx_tc_flow_rule *rule = &efv->dflt;
+ struct efx_nic *efx = efv->parent;
+ u32 ing_port, eg_port;
+
+ efx_mae_mport_mport(efx, efv->mport, &ing_port);
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
+ return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
+}
+
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule)
+{
+ if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
+ efx_tc_delete_rule(efx, rule);
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+}
+
+static int efx_tc_configure_rep_mport(struct efx_nic *efx)
+{
+ u32 rep_mport_label;
+ int rc;
+
+ rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
+ if (rc)
+ return rc;
+ pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
+ efx->tc->reps_mport_id, rep_mport_label);
+ /* Use mport *selector* as vport ID */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &efx->tc->reps_mport_vport_id);
+ return 0;
+}
+
+static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
+{
+ efx_mae_free_mport(efx, efx->tc->reps_mport_id);
+ efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
+}
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx)
+{
+ struct efx_filter_spec promisc, allmulti;
+ int rc;
+
+ if (efx->type->is_vf)
+ return 0;
+ if (!efx->tc)
+ return 0;
+ efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_uc_def(&promisc);
+ efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &promisc, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_uc = rc;
+ efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
+ efx_filter_set_mc_def(&allmulti);
+ efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
+ rc = efx_filter_insert_filter(efx, &allmulti, false);
+ if (rc < 0)
+ return rc;
+ efx->tc->reps_filter_mc = rc;
+ return 0;
+}
+
+void efx_tc_remove_rep_filters(struct efx_nic *efx)
+{
+ if (efx->type->is_vf)
+ return;
+ if (!efx->tc)
+ return;
+ if (efx->tc->reps_filter_mc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
+ efx->tc->reps_filter_mc = -1;
+ if (efx->tc->reps_filter_uc >= 0)
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
+ efx->tc->reps_filter_uc = -1;
+}
+
+int efx_init_tc(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_tc_configure_default_rule_pf(efx);
+ if (rc)
+ return rc;
+ rc = efx_tc_configure_default_rule_wire(efx);
+ if (rc)
+ return rc;
+ return efx_tc_configure_rep_mport(efx);
+}
+
+void efx_fini_tc(struct efx_nic *efx)
+{
+ /* We can get called even if efx_init_struct_tc() failed */
+ if (!efx->tc)
+ return;
+ efx_tc_deconfigure_rep_mport(efx);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
+}
+
+int efx_init_struct_tc(struct efx_nic *efx)
+{
+ if (efx->type->is_vf)
+ return 0;
+
+ efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
+ if (!efx->tc)
+ return -ENOMEM;
+
+ efx->tc->reps_filter_uc = -1;
+ efx->tc->reps_filter_mc = -1;
+ INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
+ efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
+ efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ return 0;
+}
+
+void efx_fini_struct_tc(struct efx_nic *efx)
+{
+ if (!efx->tc)
+ return;
+
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ kfree(efx->tc);
+ efx->tc = NULL;
+}
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
new file mode 100644
index 000000000000..309123c6b386
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ * Copyright 2020-2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_H
+#define EFX_TC_H
+#include "net_driver.h"
+
+struct efx_tc_action_set {
+ u16 deliver:1;
+ u32 dest_mport;
+ u32 fw_id; /* index of this entry in firmware actions table */
+ struct list_head list;
+};
+
+struct efx_tc_match_fields {
+ /* L1 */
+ u32 ingress_port;
+};
+
+struct efx_tc_match {
+ struct efx_tc_match_fields value;
+ struct efx_tc_match_fields mask;
+};
+
+struct efx_tc_action_set_list {
+ struct list_head list;
+ u32 fw_id;
+};
+
+struct efx_tc_flow_rule {
+ struct efx_tc_match match;
+ struct efx_tc_action_set_list acts;
+ u32 fw_id;
+};
+
+enum efx_tc_rule_prios {
+ EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
+ EFX_TC_PRIO__NUM
+};
+
+/**
+ * struct efx_tc_state - control plane data for TC offload
+ *
+ * @reps_mport_id: MAE port allocated for representor RX
+ * @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
+ * @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
+ * @reps_mport_vport_id: vport_id for representor RX filters
+ * @dflt: Match-action rules for default switching; at priority
+ * %EFX_TC_PRIO_DFLT. Named by *ingress* port
+ * @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
+ * @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
+ */
+struct efx_tc_state {
+ u32 reps_mport_id, reps_mport_vport_id;
+ s32 reps_filter_uc, reps_filter_mc;
+ struct {
+ struct efx_tc_flow_rule pf;
+ struct efx_tc_flow_rule wire;
+ } dflt;
+};
+
+struct efx_rep;
+
+int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
+void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ struct efx_tc_flow_rule *rule);
+
+int efx_tc_insert_rep_filters(struct efx_nic *efx);
+void efx_tc_remove_rep_filters(struct efx_nic *efx);
+
+int efx_init_tc(struct efx_nic *efx);
+void efx_fini_tc(struct efx_nic *efx);
+
+int efx_init_struct_tc(struct efx_nic *efx);
+void efx_fini_struct_tc(struct efx_nic *efx);
+
+#endif /* EFX_TC_H */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index ca8ab290013c..d42e1afb6521 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -688,18 +688,19 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret)
- return ret;
+ goto err_remove_config_dt;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret) {
- stmmac_remove_config_dt(pdev, plat_dat);
+ if (ret)
goto err_drv_probe;
- }
return 0;
err_drv_probe:
mediatek_dwmac_clks_config(priv_plat, false);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
return ret;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 2495a5719e1c..018d365f9deb 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -815,6 +815,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
fl4->saddr = info->key.u.ipv4.src;
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
+ fl4->flowi4_flags = info->key.flow_flags;
tos = info->key.tos;
if ((tos == 1) && !geneve->cfg.collect_md) {
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 3233d145fd87..495e85abe50b 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -214,7 +214,7 @@ struct ipa_init_modem_driver_req {
/* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
* QMI response, but contains other information as well. Currently we
- * simply wait for the the INIT_DRIVER transaction to complete and
+ * simply wait for the INIT_DRIVER transaction to complete and
* ignore any other data that might be returned.
*/
struct ipa_init_modem_driver_rsp {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c881e1bf6f6e..f1683ce6b561 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -243,6 +243,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
static bool send_sci(const struct macsec_secy *secy)
{
@@ -1697,7 +1698,7 @@ static bool validate_add_rxsa(struct nlattr **attrs)
return false;
if (attrs[MACSEC_SA_ATTR_PN] &&
- *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1753,7 +1754,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
}
pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
- if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ if (tb_sa[MACSEC_SA_ATTR_PN] &&
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
rtnl_unlock();
@@ -1769,7 +1771,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SA_ATTR_SALT);
+ MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@@ -1842,7 +1844,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
return 0;
cleanup:
- kfree(rx_sa);
+ macsec_rxsa_put(rx_sa);
rtnl_unlock();
return err;
}
@@ -1939,7 +1941,7 @@ static bool validate_add_txsa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2011,7 +2013,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SA_ATTR_SALT);
+ MACSEC_SALT_LEN);
rtnl_unlock();
return -EINVAL;
}
@@ -2085,7 +2087,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
cleanup:
secy->operational = was_operational;
- kfree(tx_sa);
+ macsec_txsa_put(tx_sa);
rtnl_unlock();
return err;
}
@@ -2293,7 +2295,7 @@ static bool validate_upd_sa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -3745,9 +3747,6 @@ static int macsec_changelink_common(struct net_device *dev,
secy->operational = tx_sa && tx_sa->active;
}
- if (data[IFLA_MACSEC_WINDOW])
- secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
-
if (data[IFLA_MACSEC_ENCRYPT])
tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
@@ -3793,6 +3792,16 @@ static int macsec_changelink_common(struct net_device *dev,
}
}
+ if (data[IFLA_MACSEC_WINDOW]) {
+ secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+ /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
+ * for XPN cipher suites
+ */
+ if (secy->xpn &&
+ secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
+ return -EINVAL;
+ }
+
return 0;
}
@@ -3822,7 +3831,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
ret = macsec_changelink_common(dev, data);
if (ret)
- return ret;
+ goto cleanup;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index a43820212932..50854265864d 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -351,10 +351,12 @@ nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
- nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].key)
return -ENOMEM;
- nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].value) {
kfree(nmap->entry[idx].key);
nmap->entry[idx].key = NULL;
@@ -496,7 +498,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
if (offmap->map.map_flags)
return -EINVAL;
- nmap = kzalloc(sizeof(*nmap), GFP_USER);
+ nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT);
if (!nmap)
return -ENOMEM;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 5802e80e8fe1..e88f783c297e 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -948,18 +948,15 @@ static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- devl_lock(devlink);
if (nsim_dev->dont_allow_reload) {
/* For testing purposes, user set debugfs dont_allow_reload
* value to true. So forbid it.
*/
NL_SET_ERR_MSG_MOD(extack, "User forbid the reload for testing purposes");
- devl_unlock(devlink);
return -EOPNOTSUPP;
}
nsim_dev_reload_destroy(nsim_dev);
- devl_unlock(devlink);
return 0;
}
@@ -970,19 +967,16 @@ static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_actio
struct nsim_dev *nsim_dev = devlink_priv(devlink);
int ret;
- devl_lock(devlink);
if (nsim_dev->fail_reload) {
/* For testing purposes, user set debugfs fail_reload
* value to true. Fail right away.
*/
NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes");
- devl_unlock(devlink);
return -EINVAL;
}
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
ret = nsim_dev_reload_create(nsim_dev, extack);
- devl_unlock(devlink);
return ret;
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index ab0af1d2531f..70f88eae2a9e 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -986,7 +986,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
*/
ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
if (ret < 0)
- return false;
+ return ret;
if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
int speed_value;
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index ff22b6b1c686..36803d932dff 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -450,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy)
int can_low_power = 1;
if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
can_low_power = 0;
+ of_node_put(np);
if (can_low_power) {
/* Enable automatic low-power */
sungem_phy_write(phy, 0x1c, 0x9002);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 1e5c15363a2e..843893482abd 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -8,13 +8,13 @@
*
* Based on the work of
* Donald Becker
- *
+ *
* Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
* - adds support for Belkin F5U011
*/
/*
- *
+ *
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
@@ -54,7 +54,7 @@ static const char driver_name[] = "catc";
/*
* Some defines.
- */
+ */
#define STATS_UPDATE (HZ) /* Time between stats updates */
#define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */
@@ -332,7 +332,7 @@ static void catc_irq_done(struct urb *urb)
dev_err(&catc->usbdev->dev,
"submit(rx_urb) status %d\n", res);
}
- }
+ }
}
resubmit:
res = usb_submit_urb (urb, GFP_ATOMIC);
@@ -538,7 +538,7 @@ static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
unsigned long flags;
spin_lock_irqsave(&catc->ctrl_lock, flags);
-
+
q = catc->ctrl_queue + catc->ctrl_head;
q->dir = dir;
@@ -639,7 +639,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
memset(catc->multicast, 0xff, 64);
rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
- }
+ }
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
@@ -806,7 +806,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
- if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
+ if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
ret = -ENOMEM;
@@ -814,17 +814,17 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
- if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
+ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
dev_dbg(dev, "Testing for f5u011\n");
- catc->is_f5u011 = 1;
+ catc->is_f5u011 = 1;
atomic_set(&catc->recq_sz, 0);
pktsz = RX_PKT_SZ;
} else {
pktsz = RX_MAX_BURST * (PKT_SZ + 2);
}
-
+
usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
NULL, NULL, 0, catc_ctrl_done, catc);
@@ -854,7 +854,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
*buf = 0x87654321;
catc_write_mem(catc, 0xfa80, buf, 4);
catc_read_mem(catc, 0x7a80, buf, 4);
-
+
switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
@@ -873,32 +873,32 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
}
kfree(buf);
-
+
dev_dbg(dev, "Getting MAC from SEEROM.\n");
-
+
catc_get_mac(catc, macbuf);
eth_hw_addr_set(netdev, macbuf);
-
+
dev_dbg(dev, "Setting MAC into registers.\n");
-
+
for (i = 0; i < 6; i++)
catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
-
+
dev_dbg(dev, "Filling the multicast list.\n");
-
+
eth_broadcast_addr(broadcast);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
-
+
dev_dbg(dev, "Clearing error counters.\n");
-
+
for (i = 0; i < 8; i++)
catc_set_reg(catc, EthStats + i, 0);
catc->last_stats = jiffies;
-
+
dev_dbg(dev, "Enabling.\n");
-
+
catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
catc_set_reg(catc, LEDCtrl, LEDLink);
@@ -908,7 +908,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc_reset(catc);
catc_get_mac(catc, macbuf);
eth_hw_addr_set(netdev, macbuf);
-
+
dev_dbg(dev, "Setting RX Mode\n");
catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
catc->rxmode[1] = 0;
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 32637df0f4cc..f4a44f05c6ab 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -120,7 +120,7 @@ static const struct driver_info an2720_info = {
#endif /* CONFIG_USB_AN2720 */
-
+
#ifdef CONFIG_USB_BELKIN
#define HAVE_HARDWARE
@@ -140,7 +140,7 @@ static const struct driver_info belkin_info = {
#endif /* CONFIG_USB_BELKIN */
-
+
#ifdef CONFIG_USB_EPSON2888
#define HAVE_HARDWARE
@@ -167,7 +167,7 @@ static const struct driver_info epson2888_info = {
#endif /* CONFIG_USB_EPSON2888 */
-
+
/*-------------------------------------------------------------------------
*
* info from Jonathan McDowell <noodles@earth.li>
@@ -181,7 +181,7 @@ static const struct driver_info kc2190_info = {
};
#endif /* CONFIG_USB_KC2190 */
-
+
#ifdef CONFIG_USB_ARMLINUX
#define HAVE_HARDWARE
@@ -222,7 +222,7 @@ static const struct driver_info blob_info = {
#endif /* CONFIG_USB_ARMLINUX */
-
+
/*-------------------------------------------------------------------------*/
#ifndef HAVE_HARDWARE
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 9b2bc1993ece..c9efb7df892e 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -221,7 +221,7 @@ struct kaweth_device
dma_addr_t rxbufferhandle;
__u8 *rx_buf;
-
+
struct sk_buff *tx_skb;
__u8 *firmware_buf;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 17c9c63b8eeb..2c82fbcaab22 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -18,7 +18,7 @@
/*
- * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/
+ * Prolific PL-2301/PL-2302 driver ... http://www.prolific.com.tw/
*
* The protocol and handshaking used here should be bug-compatible
* with the Linux 2.2 "plusb" driver, by Deti Fliegl.
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index e415465068ca..aaa89b4cfd50 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -381,7 +381,7 @@ insanity:
}
EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
-
+
/*-------------------------------------------------------------------------
*
* Network Device Driver (peer link to "Host Device", from USB host)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 356cf8dd4164..ec8e1b3108c3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -242,9 +242,15 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Work struct for refilling if we run low on memory. */
+ /* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill;
+ /* Is delayed refill enabled? */
+ bool refill_enabled;
+
+ /* The lock to synchronize the access to refill_enabled */
+ spinlock_t refill_lock;
+
/* Work struct for config space updates */
struct work_struct config_work;
@@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = true;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = false;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
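+/* Note on lock flavour: the refill path in virtnet_receive() below takes
+ * refill_lock with plain spin_lock(), because NAPI poll already runs with
+ * softirqs disabled; the two helpers above run in process context and so
+ * use spin_lock_bh() to stop that softirq-side path from deadlocking
+ * against them on the same CPU.
+ */
+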
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
- if (!try_fill_recv(vi, rq, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+ spin_lock(&vi->refill_lock);
+ if (vi->refill_enabled)
+ schedule_delayed_work(&vi->refill, 0);
+ spin_unlock(&vi->refill_lock);
+ }
}
u64_stats_update_begin(&rq->stats.syncp);
@@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i, err;
+ enable_delayed_refill(vi);
+
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
@@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;
+ /* Make sure NAPI doesn't schedule refill work */
+ disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
@@ -2792,6 +2820,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ enable_delayed_refill(vi);
+
if (netif_running(vi->dev)) {
err = virtnet_open(vi->dev);
if (err)
@@ -3535,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+ spin_lock_init(&vi->refill_lock);
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index dd831adbc1d1..53b3b241e027 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2075,17 +2075,8 @@ vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
if (rxd_done < budget) {
- struct Vmxnet3_RxCompDesc *rcd;
-#ifdef __BIG_ENDIAN_BITFIELD
- struct Vmxnet3_RxCompDesc rxComp;
-#endif
napi_complete_done(napi, rxd_done);
vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
- /* after unmasking the interrupt, check if any descriptors were completed */
- vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
- &rxComp);
- if (rcd->gen == rq->comp_ring.gen && napi_reschedule(napi))
- vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
}
return rxd_done;
}
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 8b0710b576c2..90811ab851fd 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2243,7 +2243,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
__be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
- struct dst_cache *dst_cache,
+ __u8 flow_flags, struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
@@ -2270,6 +2270,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.saddr = *saddr;
fl4.fl4_dport = dport;
fl4.fl4_sport = sport;
+ fl4.flowi4_flags = flow_flags;
rt = ip_route_output_key(vxlan->net, &fl4);
if (!IS_ERR(rt)) {
@@ -2459,7 +2460,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
- __u8 tos, ttl;
+ __u8 tos, ttl, flow_flags = 0;
int ifindex;
int err;
u32 flags = vxlan->cfg.flags;
@@ -2525,6 +2526,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
dst = &remote_ip;
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+ flow_flags = info->key.flow_flags;
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
@@ -2555,7 +2557,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
dst->sin.sin_addr.s_addr,
&local_ip.sin.sin_addr.s_addr,
- dst_port, src_port,
+ dst_port, src_port, flow_flags,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@@ -3061,7 +3063,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
&info->key.u.ipv4.src, dport, sport,
- &info->dst_cache, info);
+ info->key.flow_flags, &info->dst_cache,
+ info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index d7d33d5cdfc5..c47414710138 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -140,8 +140,53 @@ ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
return ab->pci.msi.irqs[vector];
}
+static inline u32
+ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start = 0;
+
+ /* If offset lies within DP register range, use 1st window */
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = ATH11K_PCI_WINDOW_START;
+ /* If offset lies within CE register range, use 2nd window */
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = 2 * ATH11K_PCI_WINDOW_START;
+
+ return window_start;
+}
+
+static void
+ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ u32 window_start;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+}
+
+static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start;
+ u32 val;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ return val;
+}
+
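+/* The "(offset ^ BASE) < ATH11K_PCI_WINDOW_RANGE_MASK" tests above are a
+ * compact way of asking whether offset falls inside the window starting
+ * at BASE: assuming BASE is aligned to the window size, the XOR clears
+ * the shared high bits, so the result stays below the range mask only
+ * when offset lies in the same window-sized block as BASE.
+ */
+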
static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
+ .wakeup = NULL,
+ .release = NULL,
.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
+ .window_write32 = ath11k_ahb_window_write32_wcn6750,
+ .window_read32 = ath11k_ahb_window_read32_wcn6750,
};
static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
@@ -971,11 +1016,16 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
}
ab->hif.ops = hif_ops;
- ab->pci.ops = pci_ops;
ab->pdev = pdev;
ab->hw_rev = hw_rev;
platform_set_drvdata(pdev, ab);
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_core_free;
+ }
+
ret = ath11k_core_pre_init(ab);
if (ret)
goto err_core_free;
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index c8e0bc935838..c3e9e4f7bc24 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -54,9 +54,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
.svc_to_ce_map_len = 21,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -107,8 +104,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = true,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
},
@@ -133,9 +128,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
.svc_to_ce_map_len = 19,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -183,8 +175,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = true,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
},
@@ -209,9 +199,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 48,
- .rfkill_cfg = 0,
- .rfkill_on_level = 1,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -258,8 +245,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
},
@@ -284,9 +269,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
.svc_to_ce_map_len = 18,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
@@ -333,8 +315,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = true,
.hybrid_bus_type = false,
- .dp_window_idx = 3,
- .ce_window_idx = 2,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
},
@@ -359,9 +339,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -408,8 +385,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
},
@@ -434,9 +409,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -482,8 +454,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = false,
.hybrid_bus_type = false,
- .dp_window_idx = 0,
- .ce_window_idx = 0,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
},
@@ -508,9 +478,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
- .rfkill_pin = 0,
- .rfkill_cfg = 0,
- .rfkill_on_level = 0,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 1,
@@ -556,8 +523,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fixed_mem_region = false,
.static_window_map = true,
.hybrid_bus_type = true,
- .dp_window_idx = 1,
- .ce_window_idx = 2,
.fixed_fw_mem = true,
.support_off_channel_tx = false,
},
@@ -1402,27 +1367,6 @@ static int ath11k_core_start_firmware(struct ath11k_base *ab,
return ret;
}
-static int ath11k_core_rfkill_config(struct ath11k_base *ab)
-{
- struct ath11k *ar;
- int ret = 0, i;
-
- if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
- return 0;
-
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
-
- ret = ath11k_mac_rfkill_config(ar);
- if (ret && ret != -EOPNOTSUPP) {
- ath11k_warn(ab, "failed to configure rfkill: %d", ret);
- return ret;
- }
- }
-
- return ret;
-}
-
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
@@ -1475,13 +1419,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
goto err_core_stop;
}
ath11k_hif_irq_enable(ab);
-
- ret = ath11k_core_rfkill_config(ab);
- if (ret && ret != -EOPNOTSUPP) {
- ath11k_err(ab, "failed to config rfkill: %d\n", ret);
- goto err_core_stop;
- }
-
mutex_unlock(&ab->core_lock);
return 0;
@@ -1550,7 +1487,6 @@ void ath11k_core_halt(struct ath11k *ar)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
- cancel_work_sync(&ab->rfkill_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
@@ -1558,28 +1494,6 @@ void ath11k_core_halt(struct ath11k *ar)
idr_init(&ar->txmgmt_idr);
}
-static void ath11k_rfkill_work(struct work_struct *work)
-{
- struct ath11k_base *ab = container_of(work, struct ath11k_base, rfkill_work);
- struct ath11k *ar;
- bool rfkill_radio_on;
- int i;
-
- spin_lock_bh(&ab->base_lock);
- rfkill_radio_on = ab->rfkill_radio_on;
- spin_unlock_bh(&ab->base_lock);
-
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- if (!ar)
- continue;
-
- /* notify cfg80211 radio state change */
- ath11k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
- wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
- }
-}
-
static void ath11k_update_11d(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
@@ -1891,7 +1805,6 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
init_waitqueue_head(&ab->qmi.cold_boot_waitq);
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
- INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work);
INIT_WORK(&ab->reset_work, ath11k_core_reset);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 2bd5eb9df4d4..afad8f55e433 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -929,10 +929,6 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
- struct work_struct rfkill_work;
-
- /* true means radio is on */
- bool rfkill_radio_on;
/* To synchronize 11d scan vdev id */
struct mutex vdev_id_11d_lock;
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 77dc5c851c9b..bb5ac940e470 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -153,9 +153,6 @@ struct ath11k_hw_params {
u32 svc_to_ce_map_len;
bool single_pdev_only;
- u32 rfkill_pin;
- u32 rfkill_cfg;
- u32 rfkill_on_level;
bool rxdma1_enable;
int num_rxmda_per_pdev;
@@ -201,8 +198,6 @@ struct ath11k_hw_params {
bool fixed_mem_region;
bool static_window_map;
bool hybrid_bus_type;
- u8 dp_window_idx;
- u8 ce_window_idx;
bool fixed_fw_mem;
bool support_off_channel_tx;
};
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index d83d3c944594..7e91e347c9ff 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -5611,63 +5611,6 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
return 0;
}
-int ath11k_mac_rfkill_config(struct ath11k *ar)
-{
- struct ath11k_base *ab = ar->ab;
- u32 param;
- int ret;
-
- if (ab->hw_params.rfkill_pin == 0)
- return -EOPNOTSUPP;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
- ab->hw_params.rfkill_pin, ab->hw_params.rfkill_cfg,
- ab->hw_params.rfkill_on_level);
-
- param = FIELD_PREP(WMI_RFKILL_CFG_RADIO_LEVEL,
- ab->hw_params.rfkill_on_level) |
- FIELD_PREP(WMI_RFKILL_CFG_GPIO_PIN_NUM,
- ab->hw_params.rfkill_pin) |
- FIELD_PREP(WMI_RFKILL_CFG_PIN_AS_GPIO,
- ab->hw_params.rfkill_cfg);
-
- ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
- param, ar->pdev->pdev_id);
- if (ret) {
- ath11k_warn(ab,
- "failed to set rfkill config 0x%x: %d\n",
- param, ret);
- return ret;
- }
-
- return 0;
-}
-
-int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable)
-{
- enum wmi_rfkill_enable_radio param;
- int ret;
-
- if (enable)
- param = WMI_RFKILL_ENABLE_RADIO_ON;
- else
- param = WMI_RFKILL_ENABLE_RADIO_OFF;
-
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac %d rfkill enable %d",
- ar->pdev_idx, param);
-
- ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE,
- param, ar->pdev->pdev_id);
- if (ret) {
- ath11k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n",
- param, ret);
- return ret;
- }
-
- return 0;
-}
-
static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -5922,7 +5865,6 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
- cancel_work_sync(&ar->ab->rfkill_work);
if (ar->state_11d == ATH11K_11D_PREPARING) {
ar->state_11d = ATH11K_11D_IDLE;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 57ebfc592b00..2a0d3afb0c99 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -148,8 +148,6 @@ u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
void __ath11k_mac_scan_finish(struct ath11k *ar);
void ath11k_mac_scan_finish(struct ath11k *ar);
-int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable);
-int ath11k_mac_rfkill_config(struct ath11k *ar);
struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 487a303b3077..5bd34a6273d9 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -50,6 +50,22 @@ static void ath11k_pci_bus_release(struct ath11k_base *ab)
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}
+static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
+{
+ if (!ab->hw_params.static_window_map)
+ return ATH11K_PCI_WINDOW_START;
+
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within DP register range, use 3rd window */
+ return 3 * ATH11K_PCI_WINDOW_START;
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within CE register range, use 2nd window */
+ return 2 * ATH11K_PCI_WINDOW_START;
+ else
+ return ATH11K_PCI_WINDOW_START;
+}
+
static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
struct ath11k_base *ab = ab_pci->ab;
@@ -70,26 +86,39 @@ static void
ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 window_start = ATH11K_PCI_WINDOW_START;
+ u32 window_start;
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
+ window_start = ath11k_pci_get_window_start(ab, offset);
+
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
}
static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
- u32 window_start = ATH11K_PCI_WINDOW_START;
- u32 val;
+ u32 window_start, val;
- spin_lock_bh(&ab_pci->window_lock);
- ath11k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + window_start +
- (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
- spin_unlock_bh(&ab_pci->window_lock);
+ window_start = ath11k_pci_get_window_start(ab, offset);
+
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
return val;
}
@@ -110,6 +139,8 @@ static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
};
static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
+ .wakeup = NULL,
+ .release = NULL,
.get_msi_irq = ath11k_pci_get_msi_irq,
.window_write32 = ath11k_pci_window_write32,
.window_read32 = ath11k_pci_window_read32,
@@ -697,6 +728,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
+ const struct ath11k_pci_ops *pci_ops;
int ret;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
@@ -754,10 +786,10 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
- ab->pci.ops = &ath11k_pci_ops_qca6390;
+ pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
- ab->pci.ops = &ath11k_pci_ops_qcn9074;
+ pci_ops = &ath11k_pci_ops_qcn9074;
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
@@ -787,7 +819,7 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- ab->pci.ops = &ath11k_pci_ops_qca6390;
+ pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -796,6 +828,12 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index cf12b98c480d..1adf20ebef27 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -140,23 +140,8 @@ int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
-static inline u32 ath11k_pcic_get_window_start(struct ath11k_base *ab,
- u32 offset)
-{
- u32 window_start = 0;
-
- if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
- window_start = ab->hw_params.dp_window_idx * ATH11K_PCI_WINDOW_START;
- else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
- ATH11K_PCI_WINDOW_RANGE_MASK)
- window_start = ab->hw_params.ce_window_idx * ATH11K_PCI_WINDOW_START;
-
- return window_start;
-}
-
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
- u32 window_start;
int ret = 0;
/* for offset beyond BAR + 4K - 32, may
@@ -166,15 +151,10 @@ void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
- if (offset < ATH11K_PCI_WINDOW_START) {
+ if (offset < ATH11K_PCI_WINDOW_START)
iowrite32(value, ab->mem + offset);
- } else if (ab->hw_params.static_window_map) {
- window_start = ath11k_pcic_get_window_start(ab, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
- } else if (ab->pci.ops->window_write32) {
+ else
ab->pci.ops->window_write32(ab, offset, value);
- }
if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
@@ -185,9 +165,8 @@ EXPORT_SYMBOL(ath11k_pcic_write32);
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
- u32 val = 0;
- u32 window_start;
int ret = 0;
+ u32 val;
/* for offset beyond BAR + 4K - 32, may
* need to wakeup the device to access.
@@ -196,15 +175,10 @@ u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
- if (offset < ATH11K_PCI_WINDOW_START) {
+ if (offset < ATH11K_PCI_WINDOW_START)
val = ioread32(ab->mem + offset);
- } else if (ab->hw_params.static_window_map) {
- window_start = ath11k_pcic_get_window_start(ab, offset);
- val = ioread32(ab->mem + window_start +
- (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
- } else if (ab->pci.ops->window_read32) {
+ else
val = ab->pci.ops->window_read32(ab, offset);
- }
if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
@@ -516,11 +490,6 @@ static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
- if (!ab->pci.ops->get_msi_irq) {
- WARN_ONCE(1, "get_msi_irq pci op not defined");
- return -EOPNOTSUPP;
- }
-
return ab->pci.ops->get_msi_irq(ab, vector);
}
@@ -746,3 +715,19 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
+
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops)
+{
+ if (!pci_ops)
+ return 0;
+
+ /* Return error if mandatory pci_ops callbacks are missing */
+ if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
+ !pci_ops->window_read32)
+ return -EINVAL;
+
+ ab->pci.ops = pci_ops;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
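The new ath11k_pcic_register_pci_ops() refuses an ops table that lacks any callback the common code now calls unconditionally, which is what lets ath11k_pcic_get_msi_irq() drop its NULL check above. Below is a hedged sketch of that validate-then-store registration pattern; struct and function names are hypothetical, not the driver's.

#include <errno.h>
#include <stdio.h>

/* Hypothetical ops table mirroring the idea, not the driver's struct. */
struct bus_ops {
	int  (*wakeup)(void *ctx);                                       /* optional */
	void (*release)(void *ctx);                                      /* optional */
	unsigned int (*get_irq)(void *ctx, unsigned int vec);            /* mandatory */
	void (*write32)(void *ctx, unsigned int off, unsigned int val);  /* mandatory */
	unsigned int (*read32)(void *ctx, unsigned int off);             /* mandatory */
};

struct bus_dev {
	const struct bus_ops *ops;
};

/* Reject registration if a mandatory callback is missing, so the hot-path
 * callers can invoke them without per-call NULL checks. */
static int register_bus_ops(struct bus_dev *dev, const struct bus_ops *ops)
{
	if (!ops)
		return 0;
	if (!ops->get_irq || !ops->write32 || !ops->read32)
		return -EINVAL;
	dev->ops = ops;
	return 0;
}

int main(void)
{
	struct bus_dev dev = { 0 };
	struct bus_ops incomplete = { 0 };

	printf("incomplete table: %d\n", register_bus_ops(&dev, &incomplete)); /* -EINVAL */
	printf("no table:         %d\n", register_bus_ops(&dev, NULL));        /* 0 */
	return 0;
}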
diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h
index c53d86289a8e..0afbb34510db 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.h
+++ b/drivers/net/wireless/ath/ath11k/pcic.h
@@ -43,4 +43,6 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 5d9437ea92cf..88ee4f9d19da 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -129,8 +129,6 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
[WMI_TAG_STATS_EVENT]
= { .min_len = sizeof(struct wmi_stats_event) },
- [WMI_TAG_RFKILL_EVENT] = {
- .min_len = sizeof(struct wmi_rfkill_state_change_ev) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
@@ -533,8 +531,6 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
cap->num_msdu_desc = ev->num_msdu_desc;
- ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info);
-
return 0;
}
@@ -7566,40 +7562,6 @@ exit:
kfree(tb);
}
-static void ath11k_rfkill_state_change_event(struct ath11k_base *ab,
- struct sk_buff *skb)
-{
- const struct wmi_rfkill_state_change_ev *ev;
- const void **tb;
- int ret;
-
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return;
- }
-
- ev = tb[WMI_TAG_RFKILL_EVENT];
- if (!ev) {
- kfree(tb);
- return;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
- ev->gpio_pin_num,
- ev->int_type,
- ev->radio_state);
-
- spin_lock_bh(&ab->base_lock);
- ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON);
- spin_unlock_bh(&ab->base_lock);
-
- queue_work(ab->workqueue, &ab->rfkill_work);
- kfree(tb);
-}
-
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
struct sk_buff *skb)
@@ -7995,9 +7957,6 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_11D_NEW_COUNTRY_EVENTID:
ath11k_reg_11d_new_cc_event(ab, skb);
break;
- case WMI_RFKILL_STATE_CHANGE_EVENTID:
- ath11k_rfkill_state_change_event(ab, skb);
- break;
case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb);
break;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index b1fad4707dc6..4da248ffa318 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -5328,31 +5328,6 @@ struct target_resource_config {
u32 twt_ap_sta_count;
};
-enum wmi_sys_cap_info_flags {
- WMI_SYS_CAP_INFO_RXTX_LED = BIT(0),
- WMI_SYS_CAP_INFO_RFKILL = BIT(1),
-};
-
-#define WMI_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0)
-#define WMI_RFKILL_CFG_RADIO_LEVEL BIT(6)
-#define WMI_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7)
-
-enum wmi_rfkill_enable_radio {
- WMI_RFKILL_ENABLE_RADIO_ON = 0,
- WMI_RFKILL_ENABLE_RADIO_OFF = 1,
-};
-
-enum wmi_rfkill_radio_state {
- WMI_RFKILL_RADIO_STATE_OFF = 1,
- WMI_RFKILL_RADIO_STATE_ON = 2,
-};
-
-struct wmi_rfkill_state_change_ev {
- u32 gpio_pin_num;
- u32 int_type;
- u32 radio_state;
-} __packed;
-
enum wmi_debug_log_param {
WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
index 27413703ad69..26bec795b372 100644
--- a/drivers/net/wireless/ath/wcn36xx/Makefile
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -5,6 +5,7 @@ wcn36xx-y += main.o \
txrx.o \
smd.o \
pmc.o \
- debug.o
+ debug.o \
+ firmware.o
wcn36xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index 6af306ae41ad..58b3c0501bfd 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -21,6 +21,7 @@
#include "wcn36xx.h"
#include "debug.h"
#include "pmc.h"
+#include "firmware.h"
#ifdef CONFIG_WCN36XX_DEBUGFS
@@ -136,6 +137,42 @@ static const struct file_operations fops_wcn36xx_dump = {
.write = write_file_dump,
};
+static ssize_t read_file_firmware_feature_caps(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wcn36xx *wcn = file->private_data;
+ size_t len = 0, buf_len = 2048;
+ char *buf;
+ int i;
+ int ret;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wcn->hal_mutex);
+ for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
+ len += scnprintf(buf + len, buf_len - len, "%s\n",
+ wcn36xx_firmware_get_cap_name(i));
+ }
+ if (len >= buf_len)
+ break;
+ }
+ mutex_unlock(&wcn->hal_mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_wcn36xx_firmware_feat_caps = {
+ .open = simple_open,
+ .read = read_file_firmware_feature_caps,
+};
+
#define ADD_FILE(name, mode, fop, priv_data) \
do { \
struct dentry *d; \
@@ -163,6 +200,8 @@ void wcn36xx_debugfs_init(struct wcn36xx *wcn)
ADD_FILE(bmps_switcher, 0600, &fops_wcn36xx_bmps, wcn);
ADD_FILE(dump, 0200, &fops_wcn36xx_dump, wcn);
+ ADD_FILE(firmware_feat_caps, 0200,
+ &fops_wcn36xx_firmware_feat_caps, wcn);
}
void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
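The firmware_feat_caps debugfs read added above accumulates one line per capability into a fixed 2048-byte buffer with scnprintf() and stops once the buffer fills, then hands the result to simple_read_from_buffer(). The sketch below shows the same bounded-accumulation idiom in userspace C; the names and the snprintf clamping are illustrative, since scnprintf() (unlike snprintf()) already returns the number of bytes actually written.

#include <stdio.h>
#include <string.h>

/* Append lines into a fixed buffer without ever writing past its end, and
 * stop early once it is full. */
static size_t dump_names(char *buf, size_t buf_len,
			 const char *const *names, size_t count)
{
	size_t len = 0, i;

	for (i = 0; i < count && len < buf_len; i++) {
		int n = snprintf(buf + len, buf_len - len, "%s\n", names[i]);

		if (n < 0)
			break;
		/* snprintf reports what it *wanted* to write; clamp it. */
		len += ((size_t)n < buf_len - len) ? (size_t)n : buf_len - len - 1;
		if (len >= buf_len - 1)
			break;
	}
	return len;
}

int main(void)
{
	const char *caps[] = { "SCAN_OFFLOAD", "BCN_FILTER", "MU_MIMO" };
	char buf[64];
	size_t len = dump_names(buf, sizeof(buf), caps, 3);

	fwrite(buf, 1, len, stdout);
	return 0;
}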
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
index 46307aa562d3..7116d96e0543 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.h
+++ b/drivers/net/wireless/ath/wcn36xx/debug.h
@@ -31,6 +31,7 @@ struct wcn36xx_dfs_entry {
struct dentry *rootdir;
struct wcn36xx_dfs_file file_bmps_switcher;
struct wcn36xx_dfs_file file_dump;
+ struct wcn36xx_dfs_file file_firmware_feat_caps;
};
void wcn36xx_debugfs_init(struct wcn36xx *wcn);
diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.c b/drivers/net/wireless/ath/wcn36xx/firmware.c
new file mode 100644
index 000000000000..4b7f439e4db5
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/firmware.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "wcn36xx.h"
+#include "firmware.h"
+
+#define DEFINE(s)[s] = #s
+
+static const char * const wcn36xx_firmware_caps_names[] = {
+ DEFINE(MCC),
+ DEFINE(P2P),
+ DEFINE(DOT11AC),
+ DEFINE(SLM_SESSIONIZATION),
+ DEFINE(DOT11AC_OPMODE),
+ DEFINE(SAP32STA),
+ DEFINE(TDLS),
+ DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
+ DEFINE(WLANACTIVE_OFFLOAD),
+ DEFINE(BEACON_OFFLOAD),
+ DEFINE(SCAN_OFFLOAD),
+ DEFINE(ROAM_OFFLOAD),
+ DEFINE(BCN_MISS_OFFLOAD),
+ DEFINE(STA_POWERSAVE),
+ DEFINE(STA_ADVANCED_PWRSAVE),
+ DEFINE(AP_UAPSD),
+ DEFINE(AP_DFS),
+ DEFINE(BLOCKACK),
+ DEFINE(PHY_ERR),
+ DEFINE(BCN_FILTER),
+ DEFINE(RTT),
+ DEFINE(RATECTRL),
+ DEFINE(WOW),
+ DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
+ DEFINE(SPECULATIVE_PS_POLL),
+ DEFINE(SCAN_SCH),
+ DEFINE(IBSS_HEARTBEAT_OFFLOAD),
+ DEFINE(WLAN_SCAN_OFFLOAD),
+ DEFINE(WLAN_PERIODIC_TX_PTRN),
+ DEFINE(ADVANCE_TDLS),
+ DEFINE(BATCH_SCAN),
+ DEFINE(FW_IN_TX_PATH),
+ DEFINE(EXTENDED_NSOFFLOAD_SLOT),
+ DEFINE(CH_SWITCH_V1),
+ DEFINE(HT40_OBSS_SCAN),
+ DEFINE(UPDATE_CHANNEL_LIST),
+ DEFINE(WLAN_MCADDR_FLT),
+ DEFINE(WLAN_CH144),
+ DEFINE(NAN),
+ DEFINE(TDLS_SCAN_COEXISTENCE),
+ DEFINE(LINK_LAYER_STATS_MEAS),
+ DEFINE(MU_MIMO),
+ DEFINE(EXTENDED_SCAN),
+ DEFINE(DYNAMIC_WMM_PS),
+ DEFINE(MAC_SPOOFED_SCAN),
+ DEFINE(BMU_ERROR_GENERIC_RECOVERY),
+ DEFINE(DISA),
+ DEFINE(FW_STATS),
+ DEFINE(WPS_PRBRSP_TMPL),
+ DEFINE(BCN_IE_FLT_DELTA),
+ DEFINE(TDLS_OFF_CHANNEL),
+ DEFINE(RTT3),
+ DEFINE(MGMT_FRAME_LOGGING),
+ DEFINE(ENHANCED_TXBD_COMPLETION),
+ DEFINE(LOGGING_ENHANCEMENT),
+ DEFINE(EXT_SCAN_ENHANCED),
+ DEFINE(MEMORY_DUMP_SUPPORTED),
+ DEFINE(PER_PKT_STATS_SUPPORTED),
+ DEFINE(EXT_LL_STAT),
+ DEFINE(WIFI_CONFIG),
+ DEFINE(ANTENNA_DIVERSITY_SELECTION),
+};
+
+#undef DEFINE
+
+const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x)
+{
+ if (x >= ARRAY_SIZE(wcn36xx_firmware_caps_names))
+ return "UNKNOWN";
+ return wcn36xx_firmware_caps_names[x];
+}
+
+void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return -EINVAL;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+
+ return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+}
+
+void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap)
+{
+ int arr_idx, bit_idx;
+
+ if (cap < 0 || cap > 127) {
+ wcn36xx_warn("error cap idx %d\n", cap);
+ return;
+ }
+
+ arr_idx = cap / 32;
+ bit_idx = cap % 32;
+ bitmap[arr_idx] &= ~(1 << bit_idx);
+}
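The wcn36xx_firmware_{set,get,clear}_feat_caps() helpers moved into this new file treat the 128 capability bits as an array of four 32-bit words, indexing by cap / 32 and cap % 32. A standalone sketch of the same word-and-bit indexing, with generic names, for reference:

#include <stdint.h>
#include <stdio.h>

#define MAX_CAPS  128
#define CAP_WORDS (MAX_CAPS / 32)

static void cap_set(uint32_t *bitmap, unsigned int cap)
{
	if (cap >= MAX_CAPS)
		return;
	bitmap[cap / 32] |= 1u << (cap % 32);   /* word index, then bit index */
}

static int cap_test(const uint32_t *bitmap, unsigned int cap)
{
	if (cap >= MAX_CAPS)
		return 0;
	return !!(bitmap[cap / 32] & (1u << (cap % 32)));
}

static void cap_clear(uint32_t *bitmap, unsigned int cap)
{
	if (cap >= MAX_CAPS)
		return;
	bitmap[cap / 32] &= ~(1u << (cap % 32));
}

int main(void)
{
	uint32_t caps[CAP_WORDS] = { 0 };

	cap_set(caps, 10);                 /* e.g. SCAN_OFFLOAD = 10 */
	printf("bit 10 set:     %d\n", cap_test(caps, 10));
	cap_clear(caps, 10);
	printf("bit 10 cleared: %d\n", cap_test(caps, 10));
	return 0;
}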
diff --git a/drivers/net/wireless/ath/wcn36xx/firmware.h b/drivers/net/wireless/ath/wcn36xx/firmware.h
new file mode 100644
index 000000000000..f991cf959f82
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/firmware.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _FIRMWARE_H_
+#define _FIRMWARE_H_
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum wcn36xx_firmware_feat_caps {
+ MCC = 0,
+ P2P = 1,
+ DOT11AC = 2,
+ SLM_SESSIONIZATION = 3,
+ DOT11AC_OPMODE = 4,
+ SAP32STA = 5,
+ TDLS = 6,
+ P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+ WLANACTIVE_OFFLOAD = 8,
+ BEACON_OFFLOAD = 9,
+ SCAN_OFFLOAD = 10,
+ ROAM_OFFLOAD = 11,
+ BCN_MISS_OFFLOAD = 12,
+ STA_POWERSAVE = 13,
+ STA_ADVANCED_PWRSAVE = 14,
+ AP_UAPSD = 15,
+ AP_DFS = 16,
+ BLOCKACK = 17,
+ PHY_ERR = 18,
+ BCN_FILTER = 19,
+ RTT = 20,
+ RATECTRL = 21,
+ WOW = 22,
+ WLAN_ROAM_SCAN_OFFLOAD = 23,
+ SPECULATIVE_PS_POLL = 24,
+ SCAN_SCH = 25,
+ IBSS_HEARTBEAT_OFFLOAD = 26,
+ WLAN_SCAN_OFFLOAD = 27,
+ WLAN_PERIODIC_TX_PTRN = 28,
+ ADVANCE_TDLS = 29,
+ BATCH_SCAN = 30,
+ FW_IN_TX_PATH = 31,
+ EXTENDED_NSOFFLOAD_SLOT = 32,
+ CH_SWITCH_V1 = 33,
+ HT40_OBSS_SCAN = 34,
+ UPDATE_CHANNEL_LIST = 35,
+ WLAN_MCADDR_FLT = 36,
+ WLAN_CH144 = 37,
+ NAN = 38,
+ TDLS_SCAN_COEXISTENCE = 39,
+ LINK_LAYER_STATS_MEAS = 40,
+ MU_MIMO = 41,
+ EXTENDED_SCAN = 42,
+ DYNAMIC_WMM_PS = 43,
+ MAC_SPOOFED_SCAN = 44,
+ BMU_ERROR_GENERIC_RECOVERY = 45,
+ DISA = 46,
+ FW_STATS = 47,
+ WPS_PRBRSP_TMPL = 48,
+ BCN_IE_FLT_DELTA = 49,
+ TDLS_OFF_CHANNEL = 51,
+ RTT3 = 52,
+ MGMT_FRAME_LOGGING = 53,
+ ENHANCED_TXBD_COMPLETION = 54,
+ LOGGING_ENHANCEMENT = 55,
+ EXT_SCAN_ENHANCED = 56,
+ MEMORY_DUMP_SUPPORTED = 57,
+ PER_PKT_STATS_SUPPORTED = 58,
+ EXT_LL_STAT = 60,
+ WIFI_CONFIG = 61,
+ ANTENNA_DIVERSITY_SELECTION = 62,
+
+ MAX_FEATURE_SUPPORTED = 128,
+};
+
+void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
+ enum wcn36xx_firmware_feat_caps cap);
+
+const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x);
+
+#endif /* _FIRMWARE_H_ */
+
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index a1afe1f85f0e..f1a43fd1d957 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -4758,74 +4758,6 @@ struct wcn36xx_hal_set_power_params_resp {
u32 status;
} __packed;
-/* Capability bitmap exchange definitions and macros starts */
-
-enum place_holder_in_cap_bitmap {
- MCC = 0,
- P2P = 1,
- DOT11AC = 2,
- SLM_SESSIONIZATION = 3,
- DOT11AC_OPMODE = 4,
- SAP32STA = 5,
- TDLS = 6,
- P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
- WLANACTIVE_OFFLOAD = 8,
- BEACON_OFFLOAD = 9,
- SCAN_OFFLOAD = 10,
- ROAM_OFFLOAD = 11,
- BCN_MISS_OFFLOAD = 12,
- STA_POWERSAVE = 13,
- STA_ADVANCED_PWRSAVE = 14,
- AP_UAPSD = 15,
- AP_DFS = 16,
- BLOCKACK = 17,
- PHY_ERR = 18,
- BCN_FILTER = 19,
- RTT = 20,
- RATECTRL = 21,
- WOW = 22,
- WLAN_ROAM_SCAN_OFFLOAD = 23,
- SPECULATIVE_PS_POLL = 24,
- SCAN_SCH = 25,
- IBSS_HEARTBEAT_OFFLOAD = 26,
- WLAN_SCAN_OFFLOAD = 27,
- WLAN_PERIODIC_TX_PTRN = 28,
- ADVANCE_TDLS = 29,
- BATCH_SCAN = 30,
- FW_IN_TX_PATH = 31,
- EXTENDED_NSOFFLOAD_SLOT = 32,
- CH_SWITCH_V1 = 33,
- HT40_OBSS_SCAN = 34,
- UPDATE_CHANNEL_LIST = 35,
- WLAN_MCADDR_FLT = 36,
- WLAN_CH144 = 37,
- NAN = 38,
- TDLS_SCAN_COEXISTENCE = 39,
- LINK_LAYER_STATS_MEAS = 40,
- MU_MIMO = 41,
- EXTENDED_SCAN = 42,
- DYNAMIC_WMM_PS = 43,
- MAC_SPOOFED_SCAN = 44,
- BMU_ERROR_GENERIC_RECOVERY = 45,
- DISA = 46,
- FW_STATS = 47,
- WPS_PRBRSP_TMPL = 48,
- BCN_IE_FLT_DELTA = 49,
- TDLS_OFF_CHANNEL = 51,
- RTT3 = 52,
- MGMT_FRAME_LOGGING = 53,
- ENHANCED_TXBD_COMPLETION = 54,
- LOGGING_ENHANCEMENT = 55,
- EXT_SCAN_ENHANCED = 56,
- MEMORY_DUMP_SUPPORTED = 57,
- PER_PKT_STATS_SUPPORTED = 58,
- EXT_LL_STAT = 60,
- WIFI_CONFIG = 61,
- ANTENNA_DIVERSITY_SELECTION = 62,
-
- MAX_FEATURE_SUPPORTED = 128,
-};
-
#define WCN36XX_HAL_CAPS_SIZE 4
struct wcn36xx_hal_feat_caps_msg {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index dc59cafd29e3..6b8d2889d73f 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -28,6 +28,7 @@
#include <net/ipv6.h>
#include "wcn36xx.h"
#include "testmode.h"
+#include "firmware.h"
unsigned int wcn36xx_dbg_mask;
module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
@@ -192,88 +193,15 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
-#define DEFINE(s) [s] = #s
-
-static const char * const wcn36xx_caps_names[] = {
- DEFINE(MCC),
- DEFINE(P2P),
- DEFINE(DOT11AC),
- DEFINE(SLM_SESSIONIZATION),
- DEFINE(DOT11AC_OPMODE),
- DEFINE(SAP32STA),
- DEFINE(TDLS),
- DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
- DEFINE(WLANACTIVE_OFFLOAD),
- DEFINE(BEACON_OFFLOAD),
- DEFINE(SCAN_OFFLOAD),
- DEFINE(ROAM_OFFLOAD),
- DEFINE(BCN_MISS_OFFLOAD),
- DEFINE(STA_POWERSAVE),
- DEFINE(STA_ADVANCED_PWRSAVE),
- DEFINE(AP_UAPSD),
- DEFINE(AP_DFS),
- DEFINE(BLOCKACK),
- DEFINE(PHY_ERR),
- DEFINE(BCN_FILTER),
- DEFINE(RTT),
- DEFINE(RATECTRL),
- DEFINE(WOW),
- DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
- DEFINE(SPECULATIVE_PS_POLL),
- DEFINE(SCAN_SCH),
- DEFINE(IBSS_HEARTBEAT_OFFLOAD),
- DEFINE(WLAN_SCAN_OFFLOAD),
- DEFINE(WLAN_PERIODIC_TX_PTRN),
- DEFINE(ADVANCE_TDLS),
- DEFINE(BATCH_SCAN),
- DEFINE(FW_IN_TX_PATH),
- DEFINE(EXTENDED_NSOFFLOAD_SLOT),
- DEFINE(CH_SWITCH_V1),
- DEFINE(HT40_OBSS_SCAN),
- DEFINE(UPDATE_CHANNEL_LIST),
- DEFINE(WLAN_MCADDR_FLT),
- DEFINE(WLAN_CH144),
- DEFINE(NAN),
- DEFINE(TDLS_SCAN_COEXISTENCE),
- DEFINE(LINK_LAYER_STATS_MEAS),
- DEFINE(MU_MIMO),
- DEFINE(EXTENDED_SCAN),
- DEFINE(DYNAMIC_WMM_PS),
- DEFINE(MAC_SPOOFED_SCAN),
- DEFINE(BMU_ERROR_GENERIC_RECOVERY),
- DEFINE(DISA),
- DEFINE(FW_STATS),
- DEFINE(WPS_PRBRSP_TMPL),
- DEFINE(BCN_IE_FLT_DELTA),
- DEFINE(TDLS_OFF_CHANNEL),
- DEFINE(RTT3),
- DEFINE(MGMT_FRAME_LOGGING),
- DEFINE(ENHANCED_TXBD_COMPLETION),
- DEFINE(LOGGING_ENHANCEMENT),
- DEFINE(EXT_SCAN_ENHANCED),
- DEFINE(MEMORY_DUMP_SUPPORTED),
- DEFINE(PER_PKT_STATS_SUPPORTED),
- DEFINE(EXT_LL_STAT),
- DEFINE(WIFI_CONFIG),
- DEFINE(ANTENNA_DIVERSITY_SELECTION),
-};
-
-#undef DEFINE
-
-static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
-{
- if (x >= ARRAY_SIZE(wcn36xx_caps_names))
- return "UNKNOWN";
- return wcn36xx_caps_names[x];
-}
-
static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
{
int i;
for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
- if (get_feat_caps(wcn->fw_feat_caps, i))
- wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n", wcn36xx_get_cap_name(i));
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n",
+ wcn36xx_firmware_get_cap_name(i));
+ }
}
}
@@ -705,7 +633,7 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
{
struct wcn36xx *wcn = hw->priv;
- if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* fallback to mac80211 software scan */
return 1;
}
@@ -743,7 +671,7 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
- if (get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* ieee80211_scan_completed will be called on FW scan
* indication */
wcn36xx_smd_stop_hw_scan(wcn);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 46ab21824d63..566f0b9c1584 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/rpmsg.h>
#include "smd.h"
+#include "firmware.h"
struct wcn36xx_cfg_val {
u32 cfg_id;
@@ -295,7 +296,7 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
sta_params->vht_capable = sta->deflink.vht_cap.vht_supported;
sta_params->vht_ldpc_enabled =
is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC);
- if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
+ if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
sta_params->vht_tx_mu_beamformee_capable =
is_cap_supported(caps, IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
if (sta_params->vht_tx_mu_beamformee_capable)
@@ -2431,49 +2432,6 @@ out:
return ret;
}
-void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
- bitmap[arr_idx] |= (1 << bit_idx);
-}
-
-int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return -EINVAL;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
-
- return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
-}
-
-void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
-{
- int arr_idx, bit_idx;
-
- if (cap < 0 || cap > 127) {
- wcn36xx_warn("error cap idx %d\n", cap);
- return;
- }
-
- arr_idx = cap / 32;
- bit_idx = cap % 32;
- bitmap[arr_idx] &= ~(1 << bit_idx);
-}
-
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
@@ -2482,11 +2440,12 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
- set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
if (wcn->rf_id == RF_IRIS_WCN3680) {
- set_feat_caps(msg_body.feat_caps, DOT11AC);
- set_feat_caps(msg_body.feat_caps, WLAN_CH144);
- set_feat_caps(msg_body.feat_caps, ANTENNA_DIVERSITY_SELECTION);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, DOT11AC);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, WLAN_CH144);
+ wcn36xx_firmware_set_feat_caps(msg_body.feat_caps,
+ ANTENNA_DIVERSITY_SELECTION);
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -3300,7 +3259,7 @@ int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
size_t payload_size;
int ret;
- if (!get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
+ if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
return -EOPNOTSUPP;
mutex_lock(&wcn->hal_mutex);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 3fd598ac2a27..cf15cde2a364 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -125,9 +125,6 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5);
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
-void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
-int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
-void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index fe84362718de..04d1aa0e2d35 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1010,7 +1010,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
void *cmd;
int cmdlen = len - sizeof(struct wmi_cmd_hdr);
u16 cmdid;
- int rc, rc1;
+ int rc1;
if (cmdlen < 0 || *ppos != 0)
return -EINVAL;
@@ -1027,7 +1027,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
- return rc;
+ return len;
}
static const struct file_operations fops_wmi = {
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 008ee1f98af4..b2539a916fd0 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -105,7 +105,7 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
-static int b43_modparam_pio = 0;
+static int b43_modparam_pio;
module_param_named(pio, b43_modparam_pio, int, 0644);
MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index cbc21c7c3138..4022c544aefe 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -2944,7 +2944,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
}
-b43legacy_mac_suspend(dev);
+ b43legacy_mac_suspend(dev);
free_irq(dev->dev->irq, dev);
b43legacydbg(wl, "Wireless interface stopped\n");
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 9c598ea97499..d639bb8b51ae 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -784,9 +784,11 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}
-#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
+ if (!IS_ENABLED(CONFIG_PM_SLEEP))
+ return 0;
+
sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
if (!sdiodev->freezer)
return -ENOMEM;
@@ -802,6 +804,7 @@ static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
if (sdiodev->freezer) {
WARN_ON(atomic_read(&sdiodev->freezer->freezing));
kfree(sdiodev->freezer);
+ sdiodev->freezer = NULL;
}
}
@@ -833,7 +836,8 @@ static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
- return atomic_read(&sdiodev->freezer->freezing);
+ return IS_ENABLED(CONFIG_PM_SLEEP) &&
+ atomic_read(&sdiodev->freezer->freezing);
}
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
@@ -847,23 +851,15 @@ void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
- atomic_inc(&sdiodev->freezer->thread_count);
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ atomic_inc(&sdiodev->freezer->thread_count);
}
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
- atomic_dec(&sdiodev->freezer->thread_count);
-}
-#else
-static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
-{
- return 0;
-}
-
-static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
-{
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ atomic_dec(&sdiodev->freezer->thread_count);
}
-#endif /* CONFIG_PM_SLEEP */
int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
@@ -875,13 +871,9 @@ int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
brcmf_sdiod_freezer_detach(sdiodev);
- /* Disable Function 2 */
- sdio_claim_host(sdiodev->func2);
- sdio_disable_func(sdiodev->func2);
- sdio_release_host(sdiodev->func2);
-
- /* Disable Function 1 */
+ /* Disable functions 2 then 1. */
sdio_claim_host(sdiodev->func1);
+ sdio_disable_func(sdiodev->func2);
sdio_disable_func(sdiodev->func1);
sdio_release_host(sdiodev->func1);
@@ -911,7 +903,7 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
if (ret) {
brcmf_err("Failed to set F1 blocksize\n");
sdio_release_host(sdiodev->func1);
- goto out;
+ return ret;
}
switch (sdiodev->func2->device) {
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
@@ -933,7 +925,7 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
sdio_release_host(sdiodev->func1);
- goto out;
+ return ret;
} else {
brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
}
@@ -1136,7 +1128,6 @@ notsup:
brcmf_dbg(SDIO, "WOWL not supported\n");
}
-#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
struct sdio_func *func;
@@ -1204,11 +1195,9 @@ static int brcmf_ops_sdio_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops brcmf_sdio_pm_ops = {
- .suspend = brcmf_ops_sdio_suspend,
- .resume = brcmf_ops_sdio_resume,
-};
-#endif /* CONFIG_PM_SLEEP */
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmf_sdio_pm_ops,
+ brcmf_ops_sdio_suspend,
+ brcmf_ops_sdio_resume);
static struct sdio_driver brcmf_sdmmc_driver = {
.probe = brcmf_ops_sdio_probe,
@@ -1217,9 +1206,7 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.id_table = brcmf_sdmmc_ids,
.drv = {
.owner = THIS_MODULE,
-#ifdef CONFIG_PM_SLEEP
- .pm = &brcmf_sdio_pm_ops,
-#endif /* CONFIG_PM_SLEEP */
+ .pm = pm_sleep_ptr(&brcmf_sdio_pm_ops),
.coredump = brcmf_dev_coredump,
},
};
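The bcmsdh.c hunks above drop the CONFIG_PM_SLEEP #ifdef blocks and guard the freezer paths with IS_ENABLED(CONFIG_PM_SLEEP) instead, so both branches are always compiled (and type-checked) while the dead one is optimized away. Below is a small userspace analogue of that pattern; FEATURE_X_ENABLED is a made-up stand-in for a Kconfig symbol.

#include <stdio.h>

/* Stand-in for a Kconfig-style switch; set it at build time with
 * -DFEATURE_X_ENABLED=1 to enable the optional path. */
#ifndef FEATURE_X_ENABLED
#define FEATURE_X_ENABLED 0
#endif

static void feature_work(void)
{
	printf("doing optional feature work\n");
}

int main(void)
{
	/* The branch is an ordinary runtime 'if' on a compile-time constant:
	 * the compiler still sees both sides, then drops the dead one, which
	 * is the advantage over wrapping the call in #ifdef. */
	if (FEATURE_X_ENABLED)
		feature_work();
	else
		printf("feature compiled out\n");
	return 0;
}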
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 3ae6779fe153..db45da33adfd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -7481,6 +7481,9 @@ int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg,
static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
{
+ if (drvr->settings->trivial_ccode_map)
+ return true;
+
switch (drvr->bus_if->chip) {
case BRCM_CC_4345_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index fe01da9e620d..7485e784be2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -190,6 +190,31 @@ done:
return err;
}
+int brcmf_c_set_cur_etheraddr(struct brcmf_if *ifp, const u8 *addr)
+{
+ s32 err;
+
+ err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", addr, ETH_ALEN);
+ if (err < 0)
+ bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err);
+
+ return err;
+}
+
+/* On some boards there is no eeprom to hold the nvram, in this case instead
+ * a board specific nvram is loaded from /lib/firmware. On most boards the
+ * macaddr setting in the /lib/firmware nvram file is ignored because the
+ * wifibt chip has a unique MAC programmed into the chip itself.
+ * But in some cases the actual MAC from the /lib/firmware nvram file gets
+ * used, leading to MAC conflicts.
+ * The MAC addresses in the troublesome nvram files seem to all come from
+ * the same nvram file template, so we only need to check for 1 known
+ * address to detect this.
+ */
+static const u8 brcmf_default_mac_address[ETH_ALEN] = {
+ 0x00, 0x90, 0x4c, 0xc5, 0x12, 0x38
+};
+
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -204,12 +229,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
if (is_valid_ether_addr(ifp->mac_addr)) {
/* set mac address */
- err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
- ETH_ALEN);
- if (err < 0) {
- bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err);
+ err = brcmf_c_set_cur_etheraddr(ifp, ifp->mac_addr);
+ if (err < 0)
goto done;
- }
} else {
/* retrieve mac address */
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
@@ -218,6 +240,15 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
goto done;
}
+
+ if (ether_addr_equal_unaligned(ifp->mac_addr, brcmf_default_mac_address)) {
+ bphy_err(drvr, "Default MAC is used, replacing with random MAC to avoid conflicts\n");
+ eth_random_addr(ifp->mac_addr);
+ ifp->ndev->addr_assign_type = NET_ADDR_RANDOM;
+ err = brcmf_c_set_cur_etheraddr(ifp, ifp->mac_addr);
+ if (err < 0)
+ goto done;
+ }
}
memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 15accc88d5c0..6c5a22a32a96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -38,6 +38,7 @@ extern struct brcmf_mp_global_t brcmf_mp_global;
* @fcmode: FWS flow control.
* @roamoff: Firmware roaming off?
* @ignore_probe_fail: Ignore probe failure.
+ * @trivial_ccode_map: Assume firmware uses ISO3166 country codes with rev 0
* @country_codes: If available, pointer to struct for translating country codes
* @bus: Bus specific platform data. Only SDIO at the mmoment.
*/
@@ -48,6 +49,7 @@ struct brcmf_mp_device {
bool roamoff;
bool iapp;
bool ignore_probe_fail;
+ bool trivial_ccode_map;
struct brcmfmac_pd_cc *country_codes;
const char *board_type;
unsigned char mac[ETH_ALEN];
@@ -65,6 +67,7 @@ void brcmf_release_module_param(struct brcmf_mp_device *module_param);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_set_cur_etheraddr(struct brcmf_if *ifp, const u8 *addr);
#ifdef CONFIG_DMI
void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 87aef211b35f..bd164a0821f9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -233,16 +233,12 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct sockaddr *sa = (struct sockaddr *)addr;
- struct brcmf_pub *drvr = ifp->drvr;
int err;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
- err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data,
- ETH_ALEN);
- if (err < 0) {
- bphy_err(drvr, "Setting cur_etheraddr failed, %d\n", err);
- } else {
+ err = brcmf_c_set_cur_etheraddr(ifp, sa->sa_data);
+ if (err >= 0) {
brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
eth_hw_addr_set(ifp->ndev, ifp->mac_addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 083ac58f466d..79388d49c256 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -24,6 +24,12 @@ static int brcmf_of_get_country_codes(struct device *dev,
count = of_property_count_strings(np, "brcm,ccode-map");
if (count < 0) {
+ /* If no explicit country code map is specified, check whether
+ * the trivial map should be used.
+ */
+ settings->trivial_ccode_map =
+ of_property_read_bool(np, "brcm,ccode-map-trivial");
+
/* The property is optional, so return success if it doesn't
* exist. Otherwise propagate the error code.
*/
@@ -72,7 +78,6 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root) {
- int i;
char *board_type;
const char *tmp;
@@ -84,10 +89,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
of_node_put(root);
return;
}
- for (i = 0; i < board_type[i]; i++) {
- if (board_type[i] == '/')
- board_type[i] = '-';
- }
+ strreplace(board_type, '/', '-');
settings->board_type = board_type;
of_node_put(root);
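The of.c hunk above replaces an open-coded loop (whose bound was the character value itself) with the kernel's strreplace() to turn every '/' into '-' in the board-type string. A minimal userspace equivalent of that helper, assuming the same return-pointer-to-NUL convention, is sketched below.

#include <stdio.h>

/* Minimal strreplace() equivalent: swap every 'old' for 'new' in place and
 * return a pointer to the terminating NUL. */
static char *str_replace(char *s, char old, char new)
{
	for (; *s; s++)
		if (*s == old)
			*s = new;
	return s;
}

int main(void)
{
	char board[] = "vendor/board/rev2";

	str_replace(board, '/', '-');
	printf("%s\n", board);   /* vendor-board-rev2 */
	return 0;
}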
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 2136c3c434ae..8968809399c7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4020,15 +4020,14 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
*/
brcmf_sdiod_sgtable_alloc(sdiodev);
-#ifdef CONFIG_PM_SLEEP
/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
* is true or when platform data OOB irq is true).
*/
- if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
+ if (IS_ENABLED(CONFIG_PM_SLEEP) &&
+ (sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
(sdiodev->settings->bus.sdio.oob_irq_supported)))
sdiodev->bus_if->wowl_supported = true;
-#endif
if (brcmf_sdio_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
@@ -4152,7 +4151,6 @@ int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
static int brcmf_sdio_bus_reset(struct device *dev)
{
- int ret = 0;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -4169,14 +4167,7 @@ static int brcmf_sdio_bus_reset(struct device *dev)
sdio_release_host(sdiodev->func1);
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
-
- ret = brcmf_sdiod_probe(sdiodev);
- if (ret) {
- brcmf_err("Failed to probe after sdio device reset: ret %d\n",
- ret);
- }
-
- return ret;
+ return 0;
}
static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 15d2c02fa3ec..47351ff458ca 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -346,26 +346,10 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state);
-#ifdef CONFIG_PM_SLEEP
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev);
-#else
-static inline bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
-{
- return false;
-}
-static inline void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
-{
-}
-static inline void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
-{
-}
-static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
-{
-}
-#endif /* CONFIG_PM_SLEEP */
int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev);
int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 777c568f35a5..8c5b97fb1941 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1105,10 +1105,10 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
- IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index);
+ IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
- IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread);
+ IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index b296f4965895..ff0d3b3df140 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1861,6 +1861,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
iwl_mvm_txq_from_mac80211(sta->txq[i]);
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
}
}
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 5d6dc1dd050d..32fdc4150b60 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -287,6 +287,7 @@ static int if_usb_probe(struct usb_interface *intf,
return 0;
err_get_fw:
+ usb_put_dev(udev);
lbs_remove_card(priv);
err_add_card:
if_usb_reset_device(cardp);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index d5fb29400bad..43bdcbc9ef39 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -3373,7 +3373,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
} else {
mwifiex_dbg(adapter, INFO,
"%s(): calling free_irq()\n", __func__);
- free_irq(card->dev->irq, &card->share_irq_ctx);
+ free_irq(card->dev->irq, &card->share_irq_ctx);
if (card->msi_enable)
pci_disable_msi(pdev);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 76004bda0c02..9b91580c4f92 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1549,7 +1549,7 @@ done:
/*
* This function decode sdio aggreation pkt.
*
- * Based on the the data block size and pkt_len,
+ * Based on the data block size and pkt_len,
* skb data will be decoded to few packets.
*/
static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index c1bf8998109c..4dc7e2e53b81 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -1880,7 +1880,7 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
* packets ever exceeds the ampdu_min_traffic threshold, we will allow
* an ampdu stream to be started.
*/
- if (jiffies - tx_stats->start_time > HZ) {
+ if (time_after(jiffies, (unsigned long)tx_stats->start_time + HZ)) {
tx_stats->pkts = 0;
tx_stats->start_time = 0;
} else
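The mwl8k change above swaps a raw `jiffies - start > HZ` style comparison for time_after(), which stays correct when the tick counter wraps. The sketch below shows the underlying signed-difference trick with plain unsigned longs; it is a userspace illustration of the idea, not the kernel macro itself.

#include <stdio.h>

/* Wrap-safe "a is after b": compute the unsigned difference and look at its
 * sign when reinterpreted as signed, as time_after() does for jiffies. */
static int ticks_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long start = (unsigned long)-5;   /* 5 ticks before the wrap */
	unsigned long now = 10;                    /* 15 ticks later, post-wrap */

	printf("naive now > start:        %d\n", now > start);            /* 0: wrong */
	printf("ticks_after(now, start):  %d\n", ticks_after(now, start)); /* 1: correct */
	return 0;
}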
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 5c2c7f1dbffd..3ac373d29d93 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1312,12 +1312,11 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
if (idx != 0)
return -ENOENT;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
-
ret = wilc_get_rssi(vif, &sinfo->signal);
if (ret)
return ret;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
memcpy(mac, vif->priv.associated_bss, ETH_ALEN);
return 0;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 021e0db80bd2..b89519ab9205 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -635,7 +635,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
conn_info->req_ies_len = 0;
}
-static inline void host_int_handle_disconnect(struct wilc_vif *vif)
+inline void wilc_handle_disconnect(struct wilc_vif *vif)
{
struct host_if_drv *hif_drv = vif->hif_drv;
@@ -647,8 +647,6 @@ static inline void host_int_handle_disconnect(struct wilc_vif *vif)
if (hif_drv->conn_info.conn_result)
hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
0, hif_drv->conn_info.arg);
- else
- netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
eth_zero_addr(hif_drv->assoc_bssid);
@@ -684,7 +682,7 @@ static void handle_rcvd_gnrl_async_info(struct work_struct *work)
host_int_parse_assoc_resp_info(vif, mac_info->status);
} else if (mac_info->status == WILC_MAC_STATUS_DISCONNECTED) {
if (hif_drv->hif_state == HOST_IF_CONNECTED) {
- host_int_handle_disconnect(vif);
+ wilc_handle_disconnect(vif);
} else if (hif_drv->usr_scan_req.scan_result) {
del_timer(&hif_drv->scan_timer);
handle_scan_done(vif, SCAN_EVENT_ABORTED);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index d8dd94dcfe14..69ba1d469e9f 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -215,4 +215,5 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto);
int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index);
+inline void wilc_handle_disconnect(struct wilc_vif *vif);
#endif
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index fcc4e61592ee..9b319a455b96 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -97,12 +97,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
- if (vif->mode == WILC_STATION_MODE)
+ if (vif->iftype == WILC_STATION_MODE)
if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
goto out;
}
- if (vif->mode == WILC_AP_MODE)
+ if (vif->iftype == WILC_AP_MODE)
if (ether_addr_equal_unaligned(h->addr1, vif->bssid)) {
ndev = vif->ndev;
goto out;
@@ -122,7 +122,7 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
else
eth_zero_addr(vif->bssid);
- vif->mode = mode;
+ vif->iftype = mode;
}
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
@@ -472,7 +472,7 @@ static int wlan_initialize_threads(struct net_device *dev)
"%s-tx", dev->name);
if (IS_ERR(wilc->txq_thread)) {
netdev_err(dev, "couldn't create TXQ thread\n");
- wilc->close = 0;
+ wilc->close = 1;
return PTR_ERR(wilc->txq_thread);
}
wait_for_completion(&wilc->txq_thread_started);
@@ -780,6 +780,7 @@ static int wilc_mac_close(struct net_device *ndev)
if (vif->ndev) {
netif_stop_queue(vif->ndev);
+ wilc_handle_disconnect(vif);
wilc_deinit_host_int(vif->ndev);
}
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 822e65d00f53..43c085c74b7a 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -177,7 +177,6 @@ struct wilc_vif {
u8 bssid[ETH_ALEN];
struct host_if_drv *hif_drv;
struct net_device *ndev;
- u8 mode;
struct timer_list during_ip_timer;
struct timer_list periodic_rssi;
struct rf_info periodic_stat;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 7962c11cfe84..600cc57e9da2 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -26,6 +26,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
struct wilc_sdio {
bool irq_gpio;
u32 block_size;
+ bool isinit;
int has_thrpt_enh3;
};
@@ -193,6 +194,13 @@ static int wilc_sdio_reset(struct wilc *wilc)
return 0;
}
+static bool wilc_sdio_is_init(struct wilc *wilc)
+{
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+
+ return sdio_priv->isinit;
+}
+
static int wilc_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -581,6 +589,9 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
static int wilc_sdio_deinit(struct wilc *wilc)
{
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+
+ sdio_priv->isinit = false;
return 0;
}
@@ -700,6 +711,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume)
sdio_priv->has_thrpt_enh3);
}
+ sdio_priv->isinit = true;
return 0;
}
@@ -981,6 +993,7 @@ static const struct wilc_hif_func wilc_hif_sdio = {
.enable_interrupt = wilc_sdio_enable_interrupt,
.disable_interrupt = wilc_sdio_disable_interrupt,
.hif_reset = wilc_sdio_reset,
+ .hif_is_init = wilc_sdio_is_init,
};
static int wilc_sdio_resume(struct device *dev)
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 2ae8dd3411ac..b0fc5e68feec 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -1029,6 +1029,13 @@ static int wilc_spi_reset(struct wilc *wilc)
return result;
}
+static bool wilc_spi_is_init(struct wilc *wilc)
+{
+ struct wilc_spi *spi_priv = wilc->bus_data;
+
+ return spi_priv->isinit;
+}
+
static int wilc_spi_deinit(struct wilc *wilc)
{
struct wilc_spi *spi_priv = wilc->bus_data;
@@ -1250,4 +1257,5 @@ static const struct wilc_hif_func wilc_hif_spi = {
.hif_block_rx_ext = wilc_spi_read,
.hif_sync_ext = wilc_spi_sync_ext,
.hif_reset = wilc_spi_reset,
+ .hif_is_init = wilc_spi_is_init,
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index f3f504d12873..947d9a0a494e 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -1481,9 +1481,12 @@ int wilc_wlan_init(struct net_device *dev)
wilc->quit = 0;
- if (wilc->hif_func->hif_init(wilc, false)) {
- ret = -EIO;
- goto fail;
+ if (!wilc->hif_func->hif_is_init(wilc)) {
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ ret = wilc->hif_func->hif_init(wilc, false);
+ release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ if (ret)
+ goto fail;
}
if (!wilc->tx_buffer)
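/*
 * Sketch of the init-once pattern the wlan.c hunk above introduces: the bus
 * glue (sdio.c/spi.c) tracks an "isinit" flag and exposes it through the new
 * hif_is_init() op, so the core only runs hif_init() on a bus that is not
 * already up, and does so while holding the bus lock. Simplified illustration
 * with a hypothetical helper name, not the driver's actual code.
 */
static int wilc_hif_init_once(struct wilc *wilc)
{
	int ret = 0;

	if (!wilc->hif_func->hif_is_init(wilc)) {
		acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
		ret = wilc->hif_func->hif_init(wilc, false);
		release_bus(wilc, WILC_BUS_RELEASE_ONLY);
	}

	return ret;
}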
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index b45e72789a0e..a72cd5cac81d 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -373,6 +373,7 @@ struct wilc_hif_func {
int (*enable_interrupt)(struct wilc *nic);
void (*disable_interrupt)(struct wilc *nic);
int (*hif_reset)(struct wilc *wilc);
+ bool (*hif_is_init)(struct wilc *wilc);
};
#define WILC_MAX_CFG_FRAME_SIZE 1468
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index dba301378b7f..131388886acb 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -22,6 +22,7 @@ static const struct wilc_cfg_byte g_cfg_byte[] = {
{WID_STATUS, 0},
{WID_RSSI, 0},
{WID_LINKSPEED, 0},
+ {WID_TX_POWER, 0},
{WID_WOWLAN_TRIGGER, 0},
{WID_NIL, 0}
};
@@ -180,9 +181,10 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
i++;
if (cfg->s[i].id == wid)
- memcpy(cfg->s[i].str, &info[2], info[2] + 2);
+ memcpy(cfg->s[i].str, &info[2],
+ get_unaligned_le16(&info[2]) + 2);
- len = 2 + info[2];
+ len = 2 + get_unaligned_le16(&info[2]);
break;
default:
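/*
 * Illustration of the string-WID length fix above: the response frame stores
 * a 16-bit little-endian size at info[2], so the old single-byte read
 * (info[2]) truncated any length above 255 and mis-advanced the parser.
 * Minimal sketch with a hypothetical helper name; assumes the same layout as
 * the hunk above (bytes 0..1 WID id, bytes 2..3 LE16 size, payload follows).
 */
#include <linux/types.h>
#include <asm/unaligned.h>

static u16 wilc_wid_str_size(const u8 *info)
{
	return get_unaligned_le16(&info[2]);	/* payload size, without the +2 size field */
}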
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 8519cf0adfff..39e54b3787d6 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -562,7 +562,7 @@ static void sta_queue_cleanup_timer_callb(struct timer_list *t)
if (tx->station[sidx].flag & STATION_HEARTBEAT_FLAG) {
tx->station[sidx].flag ^= STATION_HEARTBEAT_FLAG;
} else {
- memset(tx->station[sidx].mac, 0, ETH_ALEN);
+ eth_zero_addr(tx->station[sidx].mac);
tx->station[sidx].flag = 0;
}
}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 33a1d9143038..c66f0726b253 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -6658,7 +6658,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (!hw) {
ret = -ENOMEM;
priv = NULL;
- goto exit;
+ goto err_put_dev;
}
priv = hw->priv;
@@ -6680,24 +6680,24 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ret = rtl8xxxu_parse_usb(priv, interface);
if (ret)
- goto exit;
+ goto err_set_intfdata;
ret = rtl8xxxu_identify_chip(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to identify chip\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = rtl8xxxu_read_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = priv->fops->parse_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to parse EFuse\n");
- goto exit;
+ goto err_set_intfdata;
}
rtl8xxxu_print_chipinfo(priv);
@@ -6705,12 +6705,12 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ret = priv->fops->load_firmware(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to load firmware\n");
- goto exit;
+ goto err_set_intfdata;
}
ret = rtl8xxxu_init_device(hw);
if (ret)
- goto exit;
+ goto err_set_intfdata;
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -6760,12 +6760,12 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (ret) {
dev_err(&udev->dev, "%s: Failed to register: %i\n",
__func__, ret);
- goto exit;
+ goto err_set_intfdata;
}
return 0;
-exit:
+err_set_intfdata:
usb_set_intfdata(interface, NULL);
if (priv) {
@@ -6773,9 +6773,10 @@ exit:
mutex_destroy(&priv->usb_buf_mutex);
mutex_destroy(&priv->h2c_mutex);
}
- usb_put_dev(udev);
ieee80211_free_hw(hw);
+err_put_dev:
+ usb_put_dev(udev);
return ret;
}
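/*
 * The relabelled error path above follows the usual probe unwind idiom: each
 * failure point jumps to the label that undoes only what has already been set
 * up, in reverse order, so the reference taken with usb_get_dev() is dropped
 * even when the very first allocation fails, while later teardown only runs
 * once its object exists. Minimal sketch with hypothetical example_* names,
 * not the rtl8xxxu code itself.
 */
#include <linux/usb.h>
#include <linux/slab.h>

struct example_priv {
	struct usb_device *udev;
};

static int example_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
	struct example_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_put_dev;		/* nothing else to undo yet */
	}
	priv->udev = udev;
	usb_set_intfdata(interface, priv);

	ret = usb_set_interface(udev, 0, 0);	/* stand-in for later setup steps */
	if (ret)
		goto err_free_priv;

	return 0;

err_free_priv:
	usb_set_intfdata(interface, NULL);
	kfree(priv);
err_put_dev:
	usb_put_dev(udev);
	return ret;
}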
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 837febe4dfa4..ca01270944fe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1703,7 +1703,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = 0;
eth_zero_addr(mac_addr);
/*
- *mac80211 will delete entrys one by one,
+ *mac80211 will delete entries one by one,
*so don't use rtl_cam_reset_all_entry
*or clear all entry here.
*/
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 985ee36efc0f..76dc9da88f6c 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -1992,6 +1992,10 @@ int rtw_core_init(struct rtw_dev *rtwdev)
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!rtwdev->tx_wq) {
+ rtw_warn(rtwdev, "alloc_workqueue rtw_tx_wq failed\n");
+ return -ENOMEM;
+ }
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 73b3b7e9fe6f..c68fec9eb5a6 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -3111,7 +3111,7 @@ void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
if (rtwpci->under_recovery) {
- rtwpci->intrs[0] = 0;
+ rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN;
rtwpci->intrs[1] = 0;
} else {
rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
index 99479bbb0939..320bcd4852c6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
@@ -1281,7 +1281,6 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x018, 0x00011124},
{0x000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x055, 0x00080000},
{0x056, 0x0008FFF0},
{0x057, 0x0000C485},
@@ -20496,7 +20495,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20516,7 +20515,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20542,7 +20541,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20562,7 +20561,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20588,7 +20587,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20608,7 +20607,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20622,17 +20621,17 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0xB0000000, 0x00000000},
{0x033, 0x0000002E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20644,15 +20643,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20664,21 +20663,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000002F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20690,15 +20689,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20710,21 +20709,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20736,15 +20735,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20756,21 +20755,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000031},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20782,15 +20781,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20802,21 +20801,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000032},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20828,15 +20827,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20848,21 +20847,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000033},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20874,15 +20873,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20894,21 +20893,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000034},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20920,15 +20919,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20940,21 +20939,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000035},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -20966,15 +20965,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -20986,21 +20985,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000036},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21012,15 +21011,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21032,21 +21031,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000037},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21058,15 +21057,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21078,21 +21077,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000038},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21104,15 +21103,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21124,21 +21123,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000039},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21150,15 +21149,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21170,21 +21169,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003A},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21196,15 +21195,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21216,21 +21215,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003B},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21242,15 +21241,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21262,21 +21261,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003C},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21288,15 +21287,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21308,21 +21307,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003D},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21334,15 +21333,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21354,21 +21353,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21380,15 +21379,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21400,21 +21399,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -21426,15 +21425,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -21446,7 +21445,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
@@ -21596,8 +21595,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
{0x087, 0x00000427},
{0xB0000000, 0x00000000},
{0x002, 0x00000000},
- {0x067, 0x00000052},
-
+ {0x067, 0x00000056},
};
static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
@@ -21671,7 +21669,6 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x018, 0x00011124},
{0x000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x055, 0x00080000},
{0x056, 0x0008FFF0},
{0x057, 0x0000C485},
@@ -41142,7 +41139,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41162,7 +41159,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41188,7 +41185,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41208,7 +41205,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41234,7 +41231,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41254,7 +41251,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001FF},
+ {0x03F, 0x000001FB},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001FF},
{0x90340002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41268,17 +41265,17 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0xB0000000, 0x00000000},
{0x033, 0x0000002E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41290,15 +41287,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41310,21 +41307,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000002F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41336,15 +41333,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41356,21 +41353,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41382,15 +41379,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41402,21 +41399,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000031},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41428,15 +41425,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41448,21 +41445,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000032},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41474,15 +41471,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41494,21 +41491,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000033},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41520,15 +41517,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41540,21 +41537,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000034},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41566,15 +41563,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41586,21 +41583,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000035},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41612,15 +41609,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41632,21 +41629,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000036},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41658,15 +41655,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41678,21 +41675,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000037},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41704,15 +41701,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41724,21 +41721,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000038},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41750,15 +41747,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41770,21 +41767,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x00000039},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41796,15 +41793,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41816,21 +41813,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003A},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41842,15 +41839,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41862,21 +41859,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003B},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41888,15 +41885,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41908,21 +41905,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003C},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41934,15 +41931,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -41954,21 +41951,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003D},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -41980,15 +41977,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42000,21 +41997,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003E},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -42026,15 +42023,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42046,21 +42043,21 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x033, 0x0000003F},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90010001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -42072,15 +42069,15 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0x90010002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90020002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90030002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90250002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90260002, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0x90320002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003B},
{0x90330002, 0x00000000}, {0x40000000, 0x00000000},
@@ -42092,7 +42089,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x90360002, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003F},
{0xA0000000, 0x00000000},
- {0x03F, 0x0000003F},
+ {0x03F, 0x000000EB},
{0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
@@ -42243,8 +42240,7 @@ static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
{0x087, 0x00000427},
{0xB0000000, 0x00000000},
{0x002, 0x00000000},
- {0x067, 0x00000052},
-
+ {0x067, 0x00000056},
};
static const struct rtw89_reg2_def rtw89_8852a_phy_nctl_regs[] = {
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index c6da0cfb4afb..d06a2c419447 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1924,13 +1924,10 @@ static int wl12xx_remove(struct platform_device *pdev)
struct wl1271 *wl = platform_get_drvdata(pdev);
struct wl12xx_priv *priv;
- if (!wl)
- goto out;
priv = wl->priv;
kfree(priv->rx_mem_addr);
-out:
return wlcore_remove(pdev);
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index f52960d2dfbe..bff144c97e66 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -32,7 +32,7 @@ config DEBUG_PINCTRL
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
config PINCTRL_AMD
- tristate "AMD GPIO pin control"
+ bool "AMD GPIO pin control"
depends on HAS_IOMEM
depends on ACPI || COMPILE_TEST
select GPIOLIB
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index a140b6bfbfaa..bcde042d29dc 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -102,7 +102,7 @@ struct armada_37xx_pinctrl {
struct device *dev;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
- spinlock_t irq_lock;
+ raw_spinlock_t irq_lock;
struct pinctrl_desc pctl;
struct pinctrl_dev *pctl_dev;
struct armada_37xx_pin_group *groups;
@@ -523,9 +523,9 @@ static void armada_37xx_irq_ack(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
writel(d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static void armada_37xx_irq_mask(struct irq_data *d)
@@ -536,10 +536,10 @@ static void armada_37xx_irq_mask(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
writel(val & ~d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static void armada_37xx_irq_unmask(struct irq_data *d)
@@ -550,10 +550,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
writel(val | d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -564,14 +564,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
if (on)
val |= (BIT(d->hwirq % GPIO_PER_REG));
else
val &= ~(BIT(d->hwirq % GPIO_PER_REG));
writel(val, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0;
}
@@ -583,7 +583,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
u32 val, reg = IRQ_POL;
unsigned long flags;
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
armada_37xx_irq_update_reg(&reg, d);
val = readl(info->base + reg);
switch (type) {
@@ -607,11 +607,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
break;
}
default:
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return -EINVAL;
}
writel(val, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0;
}
@@ -626,7 +626,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
p = readl(info->base + IRQ_POL + 4 * reg_idx);
if ((p ^ l) & (1 << bit_num)) {
/*
@@ -647,7 +647,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
ret = -1;
}
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return ret;
}
@@ -664,11 +664,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
u32 status;
unsigned long flags;
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base + IRQ_STATUS + 4 * i);
/* Manage only the interrupt that was enabled */
status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
while (status) {
u32 hwirq = ffs(status) - 1;
u32 virq = irq_find_mapping(d, hwirq +
@@ -695,12 +695,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
update_status:
/* Update status in case a new IRQ appears */
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base +
IRQ_STATUS + 4 * i);
/* Manage only the interrupt that was enabled */
status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
}
chained_irq_exit(chip, desc);
@@ -731,7 +731,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct device *dev = &pdev->dev;
unsigned int i, nr_irq_parent;
- spin_lock_init(&info->irq_lock);
+ raw_spin_lock_init(&info->irq_lock);
nr_irq_parent = of_irq_count(np);
if (!nr_irq_parent) {
@@ -1107,25 +1107,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
{ },
};
+static const struct regmap_config armada_37xx_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .use_raw_spinlock = true,
+};
+
static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
{
struct armada_37xx_pinctrl *info;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct regmap *regmap;
+ void __iomem *base;
int ret;
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to ioremap base address: %pe\n", base);
+ return PTR_ERR(base);
+ }
+
+ regmap = devm_regmap_init_mmio(dev, base,
+ &armada_37xx_pinctrl_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to create regmap: %pe\n", regmap);
+ return PTR_ERR(regmap);
+ }
+
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
-
- regmap = syscon_node_to_regmap(np);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
info->regmap = regmap;
-
info->data = of_device_get_match_data(dev);
ret = armada_37xx_pinctrl_register(pdev, info);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 5f4a8c5c6650..dfc8ea9f3843 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -29,19 +29,12 @@
#define ocelot_clrsetbits(addr, clear, set) \
writel((readl(addr) & ~(clear)) | (set), (addr))
-/* PINCONFIG bits (sparx5 only) */
enum {
PINCONF_BIAS,
PINCONF_SCHMITT,
PINCONF_DRIVE_STRENGTH,
};
-#define BIAS_PD_BIT BIT(4)
-#define BIAS_PU_BIT BIT(3)
-#define BIAS_BITS (BIAS_PD_BIT|BIAS_PU_BIT)
-#define SCHMITT_BIT BIT(2)
-#define DRIVE_BITS GENMASK(1, 0)
-
/* GPIO standard registers */
#define OCELOT_GPIO_OUT_SET 0x0
#define OCELOT_GPIO_OUT_CLR 0x4
@@ -321,6 +314,13 @@ struct ocelot_pin_caps {
unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
};
+struct ocelot_pincfg_data {
+ u8 pd_bit;
+ u8 pu_bit;
+ u8 drive_bits;
+ u8 schmitt_bit;
+};
+
struct ocelot_pinctrl {
struct device *dev;
struct pinctrl_dev *pctl;
@@ -328,10 +328,16 @@ struct ocelot_pinctrl {
struct regmap *map;
struct regmap *pincfg;
struct pinctrl_desc *desc;
+ const struct ocelot_pincfg_data *pincfg_data;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
};
+struct ocelot_match_data {
+ struct pinctrl_desc desc;
+ struct ocelot_pincfg_data pincfg_data;
+};
+
#define LUTON_P(p, f0, f1) \
static struct ocelot_pin_caps luton_pin_##p = { \
.pin = p, \
@@ -1325,24 +1331,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
u32 regcfg;
- ret = regmap_read(info->pincfg, pin, &regcfg);
+ ret = regmap_read(info->pincfg,
+ pin * regmap_get_reg_stride(info->pincfg),
+ &regcfg);
if (ret)
return ret;
ret = 0;
switch (reg) {
case PINCONF_BIAS:
- *val = regcfg & BIAS_BITS;
+ *val = regcfg & (opd->pd_bit | opd->pu_bit);
break;
case PINCONF_SCHMITT:
- *val = regcfg & SCHMITT_BIT;
+ *val = regcfg & opd->schmitt_bit;
break;
case PINCONF_DRIVE_STRENGTH:
- *val = regcfg & DRIVE_BITS;
+ *val = regcfg & opd->drive_bits;
break;
default:
@@ -1359,14 +1368,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
u32 val;
int ret;
- ret = regmap_read(info->pincfg, regaddr, &val);
+ ret = regmap_read(info->pincfg,
+ regaddr * regmap_get_reg_stride(info->pincfg),
+ &val);
if (ret)
return ret;
val &= ~clrbits;
val |= setbits;
- ret = regmap_write(info->pincfg, regaddr, val);
+ ret = regmap_write(info->pincfg,
+ regaddr * regmap_get_reg_stride(info->pincfg),
+ val);
return ret;
}
@@ -1379,23 +1392,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
ret = 0;
switch (reg) {
case PINCONF_BIAS:
- ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS,
+ ret = ocelot_pincfg_clrsetbits(info, pin,
+ opd->pd_bit | opd->pu_bit,
val);
break;
case PINCONF_SCHMITT:
- ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT,
+ ret = ocelot_pincfg_clrsetbits(info, pin,
+ opd->schmitt_bit,
val);
break;
case PINCONF_DRIVE_STRENGTH:
if (val <= 3)
ret = ocelot_pincfg_clrsetbits(info, pin,
- DRIVE_BITS, val);
+ opd->drive_bits,
+ val);
else
ret = -EINVAL;
break;
@@ -1425,17 +1442,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
if (param == PIN_CONFIG_BIAS_DISABLE)
val = (val == 0);
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
- val = (val & BIAS_PD_BIT ? true : false);
+ val = !!(val & info->pincfg_data->pd_bit);
else /* PIN_CONFIG_BIAS_PULL_UP */
- val = (val & BIAS_PU_BIT ? true : false);
+ val = !!(val & info->pincfg_data->pu_bit);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (!info->pincfg_data->schmitt_bit)
+ return -EOPNOTSUPP;
+
err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val);
if (err)
return err;
- val = (val & SCHMITT_BIT ? true : false);
+ val = !!(val & info->pincfg_data->schmitt_bit);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
@@ -1479,6 +1499,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs)
{
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
u32 param, arg, p;
int cfg, err = 0;
@@ -1491,8 +1512,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
- (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT :
- BIAS_PD_BIT;
+ (param == PIN_CONFIG_BIAS_PULL_UP) ?
+ opd->pu_bit : opd->pd_bit;
err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg);
if (err)
@@ -1501,7 +1522,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- arg = arg ? SCHMITT_BIT : 0;
+ if (!opd->schmitt_bit)
+ return -EOPNOTSUPP;
+
+ arg = arg ? opd->schmitt_bit : 0;
err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT,
arg);
if (err)
@@ -1562,69 +1586,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-static struct pinctrl_desc luton_desc = {
- .name = "luton-pinctrl",
- .pins = luton_pins,
- .npins = ARRAY_SIZE(luton_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data luton_desc = {
+ .desc = {
+ .name = "luton-pinctrl",
+ .pins = luton_pins,
+ .npins = ARRAY_SIZE(luton_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc serval_desc = {
- .name = "serval-pinctrl",
- .pins = serval_pins,
- .npins = ARRAY_SIZE(serval_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data serval_desc = {
+ .desc = {
+ .name = "serval-pinctrl",
+ .pins = serval_pins,
+ .npins = ARRAY_SIZE(serval_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc ocelot_desc = {
- .name = "ocelot-pinctrl",
- .pins = ocelot_pins,
- .npins = ARRAY_SIZE(ocelot_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data ocelot_desc = {
+ .desc = {
+ .name = "ocelot-pinctrl",
+ .pins = ocelot_pins,
+ .npins = ARRAY_SIZE(ocelot_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc jaguar2_desc = {
- .name = "jaguar2-pinctrl",
- .pins = jaguar2_pins,
- .npins = ARRAY_SIZE(jaguar2_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data jaguar2_desc = {
+ .desc = {
+ .name = "jaguar2-pinctrl",
+ .pins = jaguar2_pins,
+ .npins = ARRAY_SIZE(jaguar2_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc servalt_desc = {
- .name = "servalt-pinctrl",
- .pins = servalt_pins,
- .npins = ARRAY_SIZE(servalt_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data servalt_desc = {
+ .desc = {
+ .name = "servalt-pinctrl",
+ .pins = servalt_pins,
+ .npins = ARRAY_SIZE(servalt_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc sparx5_desc = {
- .name = "sparx5-pinctrl",
- .pins = sparx5_pins,
- .npins = ARRAY_SIZE(sparx5_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .confops = &ocelot_confops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data sparx5_desc = {
+ .desc = {
+ .name = "sparx5-pinctrl",
+ .pins = sparx5_pins,
+ .npins = ARRAY_SIZE(sparx5_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .confops = &ocelot_confops,
+ .owner = THIS_MODULE,
+ },
+ .pincfg_data = {
+ .pd_bit = BIT(4),
+ .pu_bit = BIT(3),
+ .drive_bits = GENMASK(1, 0),
+ .schmitt_bit = BIT(2),
+ },
};
-static struct pinctrl_desc lan966x_desc = {
- .name = "lan966x-pinctrl",
- .pins = lan966x_pins,
- .npins = ARRAY_SIZE(lan966x_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &lan966x_pmx_ops,
- .confops = &ocelot_confops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data lan966x_desc = {
+ .desc = {
+ .name = "lan966x-pinctrl",
+ .pins = lan966x_pins,
+ .npins = ARRAY_SIZE(lan966x_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &lan966x_pmx_ops,
+ .confops = &ocelot_confops,
+ .owner = THIS_MODULE,
+ },
+ .pincfg_data = {
+ .pd_bit = BIT(3),
+ .pu_bit = BIT(2),
+ .drive_bits = GENMASK(1, 0),
+ },
};
static int ocelot_create_group_func_map(struct device *dev,
@@ -1890,7 +1939,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{},
};
-static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
+static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
+ const struct ocelot_pinctrl *info)
{
void __iomem *base;
@@ -1898,7 +1948,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = 32,
+ .max_register = info->desc->npins * 4,
.name = "pincfg",
};
@@ -1913,6 +1963,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
static int ocelot_pinctrl_probe(struct platform_device *pdev)
{
+ const struct ocelot_match_data *data;
struct device *dev = &pdev->dev;
struct ocelot_pinctrl *info;
struct reset_control *reset;
@@ -1929,7 +1980,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- info->desc = (struct pinctrl_desc *)device_get_match_data(dev);
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc),
+ GFP_KERNEL);
+ if (!info->desc)
+ return -ENOMEM;
+
+ info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch");
if (IS_ERR(reset))
@@ -1956,7 +2016,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
/* Pinconf registers */
if (info->desc->confops) {
- pincfg = ocelot_pinctrl_create_pincfg(pdev);
+ pincfg = ocelot_pinctrl_create_pincfg(pdev, info);
if (IS_ERR(pincfg))
dev_dbg(dev, "Failed to create pincfg regmap\n");
else
diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c
index 63429a287434..770862f45b3f 100644
--- a/drivers/pinctrl/ralink/pinctrl-ralink.c
+++ b/drivers/pinctrl/ralink/pinctrl-ralink.c
@@ -266,6 +266,8 @@ static int ralink_pinctrl_pins(struct ralink_priv *p)
p->func[i]->pin_count,
sizeof(int),
GFP_KERNEL);
+ if (!p->func[i]->pins)
+ return -ENOMEM;
for (j = 0; j < p->func[i]->pin_count; j++)
p->func[i]->pins[j] = p->func[i]->pin_first + j;
diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
index 3ba47040ac42..2b3335ab56c6 100644
--- a/drivers/pinctrl/sunplus/sppctl.c
+++ b/drivers/pinctrl/sunplus/sppctl.c
@@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
}
*map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
+ if (*map == NULL)
+ return -ENOMEM;
+
for (i = 0; i < (*num_maps); i++) {
dt_pin = be32_to_cpu(list[i]);
pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 458218f88c5e..fe4971b65c64 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -176,6 +176,7 @@ config PTP_1588_CLOCK_OCP
depends on !S390
depends on COMMON_CLK
select NET_DEVLINK
+ select CRC16
help
This driver adds support for an OpenCompute time card.
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 9e54fe76a9b2..35d4b398c197 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3565,7 +3565,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (!atomic_read(&queue->set_pci_flags_count)) {
/*
* there's no outstanding PCI any more, so we
- * have to request a PCI to be sure the the PCI
+ * have to request a PCI to be sure the PCI
* will wake at some time in the future then we
* can flush packed buffers that might still be
* hanging around, which can happen if no
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 775c0bf2f923..0933948d7df3 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
- dmaengine_terminate_sync(ctlr->dma_tx);
- bs->tx_dma_active = false;
- dmaengine_terminate_sync(ctlr->dma_rx);
- bs->rx_dma_active = false;
+ if (ctlr->dma_tx) {
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ }
+ if (ctlr->dma_rx) {
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ bs->rx_dma_active = false;
+ }
bcm2835_spi_undo_prologue(bs);
/* and reset */
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 31d778e9d255..6a7f7df1e776 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -69,7 +69,7 @@
#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
-#define CDNS_SPI_NOSS 0x3C /* No Slave select */
+#define CDNS_SPI_NOSS 0xF /* No Slave select */
/*
* SPI Interrupt Registers bit Masks
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 7a014eeec2d0..411b1307b7fd 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -613,6 +613,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
rspi->dma_callbacked, HZ);
if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
+ if (tx)
+ dmaengine_synchronize(rspi->ctlr->dma_tx);
+ if (rx)
+ dmaengine_synchronize(rspi->ctlr->dma_rx);
} else {
if (!ret) {
dev_err(&rspi->ctlr->dev, "DMA timeout\n");
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 90ce16b6e05f..f422f9c58ba7 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct snp_guest_dev *snp_dev;
struct miscdevice *misc;
+ void __iomem *mapping;
int ret;
if (!dev->platform_data)
return -ENODEV;
data = (struct sev_guest_platform_data *)dev->platform_data;
- layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
- if (!layout)
+ mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
+ if (!mapping)
return -ENODEV;
+ layout = (__force void *)mapping;
+
ret = -ENOMEM;
snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
if (!snp_dev)
@@ -706,7 +709,7 @@ e_free_response:
e_free_request:
free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
e_unmap:
- iounmap(layout);
+ iounmap(mapping);
return ret;
}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a01ea49f3017..e8e769be9ed0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1738,6 +1738,14 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
return;
/*
+ * READV uses fields in `struct io_rw` (len/addr) to stash the selected
+ * buffer data. However if that buffer is recycled the original request
+ * data stored in addr is lost. Therefore forbid recycling for now.
+ */
+ if (req->opcode == IORING_OP_READV)
+ return;
+
+ /*
* We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
* the flag and hence ensure that bl->head doesn't get incremented.
* If the tail has already been incremented, hang on to it.
@@ -12931,7 +12939,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
struct io_uring_buf_ring *br;
struct io_uring_buf_reg reg;
- struct io_buffer_list *bl;
+ struct io_buffer_list *bl, *free_bl = NULL;
struct page **pages;
int nr_pages;
@@ -12963,7 +12971,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
return -EEXIST;
} else {
- bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+ free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
if (!bl)
return -ENOMEM;
}
@@ -12972,7 +12980,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
struct_size(br, bufs, reg.ring_entries),
&nr_pages);
if (IS_ERR(pages)) {
- kfree(bl);
+ kfree(free_bl);
return PTR_ERR(pages);
}
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 4de597a83b88..52615e6090e1 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
a = (ATTR_RECORD*)((u8*)ctx->attr +
le32_to_cpu(ctx->attr->length));
for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
- if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
- le32_to_cpu(ctx->mrec->bytes_allocated))
+ u8 *mrec_end = (u8 *)ctx->mrec +
+ le32_to_cpu(ctx->mrec->bytes_allocated);
+ u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+ a->name_length * sizeof(ntfschar);
+ if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
+ name_end > mrec_end)
break;
ctx->attr = a;
if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 337527571461..740b64238312 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -277,7 +277,6 @@ enum ocfs2_mount_options
OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15, /* Journal Async Commit */
OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
- OCFS2_MOUNT_NOCLUSTER = 1 << 18, /* No cluster aware filesystem mount */
};
#define OCFS2_OSB_SOFT_RO 0x0001
@@ -673,8 +672,7 @@ static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb)
static inline int ocfs2_mount_local(struct ocfs2_super *osb)
{
- return ((osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT)
- || (osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER));
+ return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
}
static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 0b0ae3ebb0cf..da7718cef735 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -252,16 +252,14 @@ static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si,
int i, ret = -ENOSPC;
if ((preferred >= 0) && (preferred < si->si_num_slots)) {
- if (!si->si_slots[preferred].sl_valid ||
- !si->si_slots[preferred].sl_node_num) {
+ if (!si->si_slots[preferred].sl_valid) {
ret = preferred;
goto out;
}
}
for(i = 0; i < si->si_num_slots; i++) {
- if (!si->si_slots[i].sl_valid ||
- !si->si_slots[i].sl_node_num) {
+ if (!si->si_slots[i].sl_valid) {
ret = i;
break;
}
@@ -456,30 +454,24 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
spin_lock(&osb->osb_lock);
ocfs2_update_slot_info(si);
- if (ocfs2_mount_local(osb))
- /* use slot 0 directly in local mode */
- slot = 0;
- else {
- /* search for ourselves first and take the slot if it already
- * exists. Perhaps we need to mark this in a variable for our
- * own journal recovery? Possibly not, though we certainly
- * need to warn to the user */
- slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+ /* search for ourselves first and take the slot if it already
+ * exists. Perhaps we need to mark this in a variable for our
+ * own journal recovery? Possibly not, though we certainly
+ * need to warn to the user */
+ slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+ if (slot < 0) {
+ /* if no slot yet, then just take 1st available
+ * one. */
+ slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
if (slot < 0) {
- /* if no slot yet, then just take 1st available
- * one. */
- slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
- if (slot < 0) {
- spin_unlock(&osb->osb_lock);
- mlog(ML_ERROR, "no free slots available!\n");
- status = -EINVAL;
- goto bail;
- }
- } else
- printk(KERN_INFO "ocfs2: Slot %d on device (%s) was "
- "already allocated to this node!\n",
- slot, osb->dev_str);
- }
+ spin_unlock(&osb->osb_lock);
+ mlog(ML_ERROR, "no free slots available!\n");
+ status = -EINVAL;
+ goto bail;
+ }
+ } else
+ printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+ "allocated to this node!\n", slot, osb->dev_str);
ocfs2_set_slot(si, slot, osb->node_num);
osb->slot_num = slot;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index f7298816d8d9..438be028935d 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -172,7 +172,6 @@ enum {
Opt_dir_resv_level,
Opt_journal_async_commit,
Opt_err_cont,
- Opt_nocluster,
Opt_err,
};
@@ -206,7 +205,6 @@ static const match_table_t tokens = {
{Opt_dir_resv_level, "dir_resv_level=%u"},
{Opt_journal_async_commit, "journal_async_commit"},
{Opt_err_cont, "errors=continue"},
- {Opt_nocluster, "nocluster"},
{Opt_err, NULL}
};
@@ -618,13 +616,6 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
goto out;
}
- tmp = OCFS2_MOUNT_NOCLUSTER;
- if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
- ret = -EINVAL;
- mlog(ML_ERROR, "Cannot change nocluster option on remount\n");
- goto out;
- }
-
tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE;
if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
@@ -865,7 +856,6 @@ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb,
}
if (ocfs2_userspace_stack(osb) &&
- !(osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
OCFS2_STACK_LABEL_LEN)) {
mlog(ML_ERROR,
@@ -1137,11 +1127,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
"ordered");
- if ((osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
- !(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT))
- printk(KERN_NOTICE "ocfs2: The shared device (%s) is mounted "
- "without cluster aware mode.\n", osb->dev_str);
-
atomic_set(&osb->vol_state, VOLUME_MOUNTED);
wake_up(&osb->osb_mount_event);
@@ -1452,9 +1437,6 @@ static int ocfs2_parse_options(struct super_block *sb,
case Opt_journal_async_commit:
mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
break;
- case Opt_nocluster:
- mopt->mount_opt |= OCFS2_MOUNT_NOCLUSTER;
- break;
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
@@ -1566,9 +1548,6 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
seq_printf(s, ",journal_async_commit");
- if (opts & OCFS2_MOUNT_NOCLUSTER)
- seq_printf(s, ",nocluster");
-
return 0;
}
diff --git a/fs/read_write.c b/fs/read_write.c
index e0777eefd846..397da0236607 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1263,6 +1263,9 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
count, fl);
file_end_write(out.file);
} else {
+ if (out.file->f_flags & O_NONBLOCK)
+ fl |= SPLICE_F_NONBLOCK;
+
retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index e943370107d0..de86f5b2859f 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -192,17 +192,19 @@ static inline void msg_init(struct uffd_msg *msg)
}
static inline struct uffd_msg userfault_msg(unsigned long address,
+ unsigned long real_address,
unsigned int flags,
unsigned long reason,
unsigned int features)
{
struct uffd_msg msg;
+
msg_init(&msg);
msg.event = UFFD_EVENT_PAGEFAULT;
- if (!(features & UFFD_FEATURE_EXACT_ADDRESS))
- address &= PAGE_MASK;
- msg.arg.pagefault.address = address;
+ msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
+ real_address : address;
+
/*
* These flags indicate why the userfault occurred:
* - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
@@ -488,8 +490,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
uwq.wq.private = current;
- uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason,
- ctx->features);
+ uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
+ reason, ctx->features);
uwq.ctx = ctx;
uwq.waken = false;
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 7ce93aaf69f8..98954dda5734 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1125,9 +1125,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
}
#endif
-#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
extern int devmem_is_allowed(unsigned long pfn);
-#endif
#endif /* __KERNEL__ */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index cb2167c89eee..492dce43236e 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -368,9 +368,6 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
}
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
#else /* CONFIG_MMU_GATHER_NO_RANGE */
#ifndef tlb_flush
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 0fca8f38bee4..addb135eeea6 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -28,7 +28,7 @@
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
-#include <linux/irq_work.h>
+#include <linux/workqueue.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
@@ -295,7 +295,7 @@ struct drm_sched_job {
*/
union {
struct dma_fence_cb finish_cb;
- struct irq_work work;
+ struct work_struct work;
};
uint64_t id;
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index e22dc03c850e..c3e50e537e39 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -20,6 +20,7 @@
#include <linux/can/length.h>
#include <linux/can/netlink.h>
#include <linux/can/skb.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
/*
@@ -162,6 +163,9 @@ struct can_priv *safe_candev_priv(struct net_device *dev);
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
int can_change_mtu(struct net_device *dev, int new_mtu);
+int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);
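The two helpers declared in this hunk give CAN controllers with always-on hardware timestamping a ready-made ioctl handler and ethtool hook. As a rough, hypothetical sketch of how a driver would plug them in (the my_candev_* names are placeholders, not part of this patch):

#include <linux/can/dev.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical driver glue; "my_candev" is a placeholder name. */
static const struct net_device_ops my_candev_netdev_ops = {
	/* let the generic CAN helper answer the hw-timestamping ioctls */
	.ndo_eth_ioctl = can_eth_ioctl_hwts,
};

static const struct ethtool_ops my_candev_ethtool_ops = {
	/* report RX/TX hardware timestamping capabilities to ethtool */
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};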
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf3d0d673f6b..7898e29bcfb5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1130,23 +1130,27 @@ static inline bool is_zone_movable_page(const struct page *page)
#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-bool __put_devmap_managed_page(struct page *page);
-static inline bool put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs);
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
if (!static_branch_unlikely(&devmap_managed_key))
return false;
if (!is_zone_device_page(page))
return false;
- return __put_devmap_managed_page(page);
+ return __put_devmap_managed_page_refs(page, refs);
}
-
#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
return false;
}
#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
+static inline bool put_devmap_managed_page(struct page *page)
+{
+ return put_devmap_managed_page_refs(page, 1);
+}
+
/* 127: arbitrary random number, small enough to assemble well */
#define folio_ref_zero_or_close_to_overflow(folio) \
((unsigned int) folio_ref_count(folio) + 127u <= 127u)
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f7506f08e505..c04f359655b8 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -405,6 +405,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
{
const struct inet6_dev *idev = __in6_dev_get(dev);
+ if (unlikely(!idev))
+ return true;
+
return !!idev->cnf.ignore_routes_with_linkdown;
}
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 3c4f550e5a8b..2f766e3437ce 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -847,6 +847,7 @@ enum {
};
void l2cap_chan_hold(struct l2cap_chan *c);
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
void l2cap_chan_put(struct l2cap_chan *c);
static inline void l2cap_chan_lock(struct l2cap_chan *chan)
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 5bd3fac12e9e..119ed1ffb988 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -1509,6 +1509,27 @@ struct devlink_ops {
struct devlink_rate *parent,
void *priv_child, void *priv_parent,
struct netlink_ext_ack *extack);
+ /**
+ * selftest_check() - queries if selftest is supported
+ * @devlink: devlink instance
+ * @id: test index
+ * @extack: extack for reporting error messages
+ *
+ * Return: true if test is supported by the driver
+ */
+ bool (*selftest_check)(struct devlink *devlink, unsigned int id,
+ struct netlink_ext_ack *extack);
+ /**
+ * selftest_run() - Runs a selftest
+ * @devlink: devlink instance
+ * @id: test index
+ * @extack: extack for reporting error messages
+ *
+ * Return: status of the test
+ */
+ enum devlink_selftest_status
+ (*selftest_run)(struct devlink *devlink, unsigned int id,
+ struct netlink_ext_ack *extack);
};
void *devlink_priv(struct devlink *devlink);
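The selftest_check/selftest_run callbacks added above are what a driver implements to expose selftests over devlink. A minimal, hypothetical sketch of that wiring follows; the my_drv_* helpers are placeholders, and the DEVLINK_SELFTEST_STATUS_* codes are assumed to come from the uapi side of the same selftest series:

#include <net/devlink.h>

/* Hypothetical callbacks; my_drv_* names and helpers are placeholders. */
static bool my_drv_selftest_check(struct devlink *devlink, unsigned int id,
				  struct netlink_ext_ack *extack)
{
	/* claim support only for test ids this device can execute */
	return my_drv_test_supported(devlink_priv(devlink), id);
}

static enum devlink_selftest_status
my_drv_selftest_run(struct devlink *devlink, unsigned int id,
		    struct netlink_ext_ack *extack)
{
	/* run the test and map the result onto the devlink status codes */
	if (my_drv_run_test(devlink_priv(devlink), id))
		return DEVLINK_SELFTEST_STATUS_FAIL;
	return DEVLINK_SELFTEST_STATUS_PASS;
}

static const struct devlink_ops my_drv_devlink_ops = {
	.selftest_check = my_drv_selftest_check,
	.selftest_run   = my_drv_selftest_run,
};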
diff --git a/include/net/firewire.h b/include/net/firewire.h
index 2442d645e412..8fbff8d77865 100644
--- a/include/net/firewire.h
+++ b/include/net/firewire.h
@@ -13,8 +13,7 @@ union fwnet_hwaddr {
__be64 uniq_id; /* EUI-64 */
u8 max_rec; /* max packet size */
u8 sspd; /* max speed */
- __be16 fifo_hi; /* hi 16bits of FIFO addr */
- __be32 fifo_lo; /* lo 32bits of FIFO addr */
+ u8 fifo[6]; /* FIFO addr */
} __packed uc;
};
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 85cd695e7fd1..ee88f0f1350f 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -321,7 +321,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
-#define TCP_PINGPONG_THRESH 3
+#define TCP_PINGPONG_THRESH 1
static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
@@ -338,14 +338,6 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
-static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
-
- if (icsk->icsk_ack.pingpong < U8_MAX)
- icsk->icsk_ack.pingpong++;
-}
-
static inline bool inet_csk_has_ulp(struct sock *sk)
{
return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 20db95055db3..63fac94f9ace 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -54,6 +54,7 @@ struct ip_tunnel_key {
__be32 label; /* Flow Label for IPv6 */
__be16 tp_src;
__be16 tp_dst;
+ __u8 flow_flags;
};
/* Flags for ip_tunnel_info mode. */
diff --git a/include/net/sock.h b/include/net/sock.h
index f7ad1a7705e9..a7273b289188 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2823,18 +2823,18 @@ static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
{
/* Does this proto have per netns sysctl_wmem ? */
if (proto->sysctl_wmem_offset)
- return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+ return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
- return *proto->sysctl_wmem;
+ return READ_ONCE(*proto->sysctl_wmem);
}
static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
{
/* Does this proto have per netns sysctl_rmem ? */
if (proto->sysctl_rmem_offset)
- return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+ return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
- return *proto->sysctl_rmem;
+ return READ_ONCE(*proto->sysctl_rmem);
}
/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
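
The sysctl_wmem/sysctl_rmem values read here can be changed at any time by the sysctl handlers, and these readers take no lock against those writers, so the patch wraps the loads in READ_ONCE() to annotate the data race and stop the compiler from tearing or re-reading the value. For scalar types the effect is essentially one volatile load; a simplified userspace rendering of the idea (the real kernel macro adds type checking on top of this):

#include <stdio.h>

/* Simplified: force exactly one load of x, never torn or repeated.
 * This is the core idea behind the kernel's READ_ONCE(). */
#define READ_ONCE_SIMPLE(x)	(*(const volatile typeof(x) *)&(x))

static int sysctl_value = 4096;		/* imagine another thread updating this */

int main(void)
{
	int snapshot = READ_ONCE_SIMPLE(sysctl_value);

	printf("snapshot: %d\n", snapshot);
	return 0;
}
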
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b8620be32f8f..d10962b9f0d0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1425,7 +1425,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
static inline int tcp_win_from_space(const struct sock *sk, int space)
{
- int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+ int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
return tcp_adv_win_scale <= 0 ?
(space>>(-tcp_adv_win_scale)) :
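
tcp_adv_win_scale is another sysctl read locklessly on the receive fast path, hence the READ_ONCE() annotation here as well. For reference, the branch of the ternary cut off by the hunk computes space - (space >> tcp_adv_win_scale) for positive scales; a tiny standalone version of the arithmetic:

#include <stdio.h>

/* Same arithmetic as tcp_win_from_space(): non-positive scales shift the
 * buffer size down directly, positive scales reserve 1/2^scale of it for
 * overhead and advertise the rest. */
static int win_from_space(int space, int scale)
{
	return scale <= 0 ? space >> -scale : space - (space >> scale);
}

int main(void)
{
	printf("%d\n", win_from_space(65536, 1));	/* default scale 1: 32768 */
	printf("%d\n", win_from_space(65536, -2));	/* scale -2: 16384 */
	return 0;
}
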
diff --git a/include/net/tls.h b/include/net/tls.h
index abb050b0df83..b75b5727abdb 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -161,6 +161,8 @@ struct tls_offload_context_tx {
struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
void (*sk_destruct)(struct sock *sk);
+ struct work_struct destruct_work;
+ struct tls_context *ctx;
u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index f13d37b60775..1ecdb911add8 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -192,6 +192,7 @@ struct f_owner_ex {
#define F_LINUX_SPECIFIC_BASE 1024
+#ifndef HAVE_ARCH_STRUCT_FLOCK
struct flock {
short l_type;
short l_whence;
@@ -216,5 +217,6 @@ struct flock64 {
__ARCH_FLOCK64_PAD
#endif
};
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
#endif /* _ASM_GENERIC_FCNTL_H */
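
With the new guard an architecture whose flock layout differs from the generic one can define the structures itself and set HAVE_ARCH_STRUCT_FLOCK before pulling in the generic header, instead of relying on the __ARCH_FLOCK_PAD/__ARCH_FLOCK64_PAD padding hooks. A hedged sketch of such an override (hypothetical architecture and field layout, shown only to illustrate how the guard is meant to be used; a real override must also provide struct flock64, since both structs sit inside the guard):

/* arch/foo/include/uapi/asm/fcntl.h -- hypothetical example */
#ifndef _FOO_ASM_FCNTL_H
#define _FOO_ASM_FCNTL_H

#define HAVE_ARCH_STRUCT_FLOCK

struct flock {
	short		l_type;
	short		l_whence;
	__kernel_off_t	l_start;
	__kernel_off_t	l_len;
	__kernel_pid_t	l_pid;
	short		__unused;	/* arch-specific padding */
};

/* ... struct flock64 with the arch's layout goes here ... */

#include <asm-generic/fcntl.h>		/* the rest stays generic */

#endif /* _FOO_ASM_FCNTL_H */
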
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 541321695f52..2f24b53a87a5 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -136,6 +136,9 @@ enum devlink_command {
DEVLINK_CMD_LINECARD_NEW,
DEVLINK_CMD_LINECARD_DEL,
+ DEVLINK_CMD_SELFTESTS_GET, /* can dump */
+ DEVLINK_CMD_SELFTESTS_RUN,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -276,6 +279,30 @@ enum {
#define DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS \
(_BITUL(__DEVLINK_FLASH_OVERWRITE_MAX_BIT) - 1)
+enum devlink_attr_selftest_id {
+ DEVLINK_ATTR_SELFTEST_ID_UNSPEC,
+ DEVLINK_ATTR_SELFTEST_ID_FLASH, /* flag */
+
+ __DEVLINK_ATTR_SELFTEST_ID_MAX,
+ DEVLINK_ATTR_SELFTEST_ID_MAX = __DEVLINK_ATTR_SELFTEST_ID_MAX - 1
+};
+
+enum devlink_selftest_status {
+ DEVLINK_SELFTEST_STATUS_SKIP,
+ DEVLINK_SELFTEST_STATUS_PASS,
+ DEVLINK_SELFTEST_STATUS_FAIL
+};
+
+enum devlink_attr_selftest_result {
+ DEVLINK_ATTR_SELFTEST_RESULT_UNSPEC,
+ DEVLINK_ATTR_SELFTEST_RESULT, /* nested */
+ DEVLINK_ATTR_SELFTEST_RESULT_ID, /* u32, enum devlink_attr_selftest_id */
+ DEVLINK_ATTR_SELFTEST_RESULT_STATUS, /* u8, enum devlink_selftest_status */
+
+ __DEVLINK_ATTR_SELFTEST_RESULT_MAX,
+ DEVLINK_ATTR_SELFTEST_RESULT_MAX = __DEVLINK_ATTR_SELFTEST_RESULT_MAX - 1
+};
+
/**
* enum devlink_trap_action - Packet trap action.
* @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not
@@ -578,6 +605,8 @@ enum devlink_attr {
DEVLINK_ATTR_NESTED_DEVLINK, /* nested */
+ DEVLINK_ATTR_SELFTESTS, /* nested */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 811897dadcae..860f867c50c0 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -2084,7 +2084,7 @@ struct kvm_stats_header {
#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN
#define KVM_STATS_BASE_SHIFT 8
#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h
index eb815e0d0ac3..a9fa777f16de 100644
--- a/include/uapi/linux/seg6_iptunnel.h
+++ b/include/uapi/linux/seg6_iptunnel.h
@@ -35,6 +35,8 @@ enum {
SEG6_IPTUN_MODE_INLINE,
SEG6_IPTUN_MODE_ENCAP,
SEG6_IPTUN_MODE_L2ENCAP,
+ SEG6_IPTUN_MODE_ENCAP_RED,
+ SEG6_IPTUN_MODE_L2ENCAP_RED,
};
#endif
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 7ac971ea98d1..7e64447659f3 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6643,7 +6643,7 @@ static void btf_snprintf_show(struct btf_show *show, const char *fmt,
if (len < 0) {
ssnprintf->len_left = 0;
ssnprintf->len = len;
- } else if (len > ssnprintf->len_left) {
+ } else if (len >= ssnprintf->len_left) {
/* no space, drive on to get length we would have written */
ssnprintf->len_left = 0;
ssnprintf->len += len;
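
The comparison fix relies on standard snprintf()/vsnprintf() semantics: the return value is the length the output would have needed, not how much was stored, and when that length equals the remaining space the string was still truncated because the terminating NUL consumed the last byte. So ">=" is the correct truncation test, as a quick userspace check shows:

#include <stdio.h>

int main(void)
{
	char buf[6];
	int n;

	/* "hello" needs 5 chars + NUL = 6 bytes: fits exactly */
	n = snprintf(buf, sizeof(buf), "%s", "hello");
	printf("ret=%d truncated=%d buf=\"%s\"\n", n, n >= (int)sizeof(buf), buf);

	/* "hello!" would need 6 chars + NUL = 7 bytes: truncated, ret is still 6 */
	n = snprintf(buf, sizeof(buf), "%s", "hello!");
	printf("ret=%d truncated=%d buf=\"%s\"\n", n, n >= (int)sizeof(buf), buf);
	return 0;
}
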
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 1400561efb15..a0e02b009487 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -477,7 +477,7 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
if (!dev->netdev_ops->ndo_xdp_xmit)
return -EOPNOTSUPP;
- err = xdp_ok_fwd_dev(dev, xdpf->len);
+ err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
if (unlikely(err))
return err;
@@ -536,7 +536,7 @@ static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
!obj->dev->netdev_ops->ndo_xdp_xmit)
return false;
- if (xdp_ok_fwd_dev(obj->dev, xdpf->len))
+ if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
return false;
return true;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 42e387a12694..0f532e6a717f 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -98,7 +98,7 @@ static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd
default:
ret = -EINVAL;
break;
- };
+ }
mutex_unlock(&tr->mutex);
return ret;
@@ -248,14 +248,17 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
int ret;
faddr = ftrace_location((unsigned long)ip);
- if (faddr)
+ if (faddr) {
+ if (!tr->fops)
+ return -ENOTSUPP;
tr->func.ftrace_managed = true;
+ }
if (bpf_trampoline_module_get(tr))
return -ENOENT;
if (tr->func.ftrace_managed) {
- ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 0);
+ ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
ret = register_ftrace_direct_multi(tr->fops, (long)new_addr);
} else {
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 50ba70f019de..1c304fec89c0 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -511,10 +511,52 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
return sum;
}
-#define SRCU_INTERVAL 1 // Base delay if no expedited GPs pending.
-#define SRCU_MAX_INTERVAL 10 // Maximum incremental delay from slow readers.
-#define SRCU_MAX_NODELAY_PHASE 1 // Maximum per-GP-phase consecutive no-delay instances.
-#define SRCU_MAX_NODELAY 100 // Maximum consecutive no-delay instances.
+/*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited(). We spin for a fixed time period
+ * (defined below, boot time configurable) to allow SRCU readers to exit
+ * their read-side critical sections. If there are still some readers
+ * after one jiffy, we repeatedly block in one-jiffy time periods.
+ * The blocking time is increased as the grace-period age increases,
+ * with max blocking time capped at 10 jiffies.
+ */
+#define SRCU_DEFAULT_RETRY_CHECK_DELAY 5
+
+static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
+module_param(srcu_retry_check_delay, ulong, 0444);
+
+#define SRCU_INTERVAL 1 // Base delay if no expedited GPs pending.
+#define SRCU_MAX_INTERVAL 10 // Maximum incremental delay from slow readers.
+
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO 3UL // Lowmark on default per-GP-phase
+ // no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI 1000UL // Highmark on default per-GP-phase
+ // no-delay instances.
+
+#define SRCU_UL_CLAMP_LO(val, low) ((val) > (low) ? (val) : (low))
+#define SRCU_UL_CLAMP_HI(val, high) ((val) < (high) ? (val) : (high))
+#define SRCU_UL_CLAMP(val, low, high) SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
+// per-GP-phase no-delay instances adjusted to allow non-sleeping poll up to
+// one jiffy's time duration. Mult by 2 is done to factor in the srcu_get_delay()
+// called from process_srcu().
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED \
+ (2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)
+
+// Maximum per-GP-phase consecutive no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE \
+ SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED, \
+ SRCU_DEFAULT_MAX_NODELAY_PHASE_LO, \
+ SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
+
+static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
+module_param(srcu_max_nodelay_phase, ulong, 0444);
+
+// Maximum consecutive no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY (SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
+ SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)
+
+static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
+module_param(srcu_max_nodelay, ulong, 0444);
/*
* Return grace-period delay, zero if there are expedited grace
@@ -522,16 +564,22 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
*/
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
+ unsigned long gpstart;
+ unsigned long j;
unsigned long jbase = SRCU_INTERVAL;
if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
jbase = 0;
- if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)))
- jbase += jiffies - READ_ONCE(ssp->srcu_gp_start);
- if (!jbase) {
- WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
- if (READ_ONCE(ssp->srcu_n_exp_nodelay) > SRCU_MAX_NODELAY_PHASE)
- jbase = 1;
+ if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
+ j = jiffies - 1;
+ gpstart = READ_ONCE(ssp->srcu_gp_start);
+ if (time_after(j, gpstart))
+ jbase += j - gpstart;
+ if (!jbase) {
+ WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
+ if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
+ jbase = 1;
+ }
}
return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}
@@ -607,15 +655,6 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
- * We use an adaptive strategy for synchronize_srcu() and especially for
- * synchronize_srcu_expedited(). We spin for a fixed time period
- * (defined below) to allow SRCU readers to exit their read-side critical
- * sections. If there are still some readers after a few microseconds,
- * we repeatedly block for 1-millisecond time periods.
- */
-#define SRCU_RETRY_CHECK_DELAY 5
-
-/*
* Start an SRCU grace period.
*/
static void srcu_gp_start(struct srcu_struct *ssp)
@@ -700,7 +739,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
*/
static void srcu_gp_end(struct srcu_struct *ssp)
{
- unsigned long cbdelay;
+ unsigned long cbdelay = 1;
bool cbs;
bool last_lvl;
int cpu;
@@ -720,7 +759,9 @@ static void srcu_gp_end(struct srcu_struct *ssp)
spin_lock_irq_rcu_node(ssp);
idx = rcu_seq_state(ssp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
- cbdelay = !!srcu_get_delay(ssp);
+ if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
+ cbdelay = 0;
+
WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
rcu_seq_end(&ssp->srcu_gp_seq);
gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
@@ -921,12 +962,16 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
*/
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
+ unsigned long curdelay;
+
+ curdelay = !srcu_get_delay(ssp);
+
for (;;) {
if (srcu_readers_active_idx_check(ssp, idx))
return true;
- if (--trycount + !srcu_get_delay(ssp) <= 0)
+ if ((--trycount + curdelay) <= 0)
return false;
- udelay(SRCU_RETRY_CHECK_DELAY);
+ udelay(srcu_retry_check_delay);
}
}
@@ -1582,7 +1627,7 @@ static void process_srcu(struct work_struct *work)
j = jiffies;
if (READ_ONCE(ssp->reschedule_jiffies) == j) {
WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
- if (READ_ONCE(ssp->reschedule_count) > SRCU_MAX_NODELAY)
+ if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
curdelay = 1;
} else {
WRITE_ONCE(ssp->reschedule_count, 1);
@@ -1674,6 +1719,11 @@ static int __init srcu_bootup_announce(void)
pr_info("Hierarchical SRCU implementation.\n");
if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
+ if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
+ pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
+ if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
+ pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
+ pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
return 0;
}
early_initcall(srcu_bootup_announce);
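
The replacement defaults are sized so that the no-delay polling phase spends roughly one jiffy in udelay(srcu_retry_check_delay) calls, doubled because srcu_get_delay() is also invoked from process_srcu(), and then clamped to the [3, 1000] window; srcu_max_nodelay is simply that value with a floor of 100. Worked out for a few common HZ settings (standalone arithmetic mirroring the macros above):

#include <stdio.h>

#define USEC_PER_SEC		1000000UL
#define RETRY_CHECK_DELAY	5UL	/* SRCU_DEFAULT_RETRY_CHECK_DELAY, usec */
#define PHASE_LO		3UL	/* SRCU_DEFAULT_MAX_NODELAY_PHASE_LO */
#define PHASE_HI		1000UL	/* SRCU_DEFAULT_MAX_NODELAY_PHASE_HI */

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long hz_tab[] = { 100, 250, 300, 1000 };

	for (unsigned int i = 0; i < sizeof(hz_tab) / sizeof(hz_tab[0]); i++) {
		unsigned long hz = hz_tab[i];
		unsigned long adjusted = 2UL * USEC_PER_SEC / hz / RETRY_CHECK_DELAY;
		unsigned long phase = clamp_ul(adjusted, PHASE_LO, PHASE_HI);
		unsigned long max_nodelay = phase > 100 ? phase : 100;

		printf("HZ=%-4lu adjusted=%-5lu max_nodelay_phase=%-4lu max_nodelay=%lu\n",
		       hz, adjusted, phase, max_nodelay);
	}
	return 0;
}

For HZ=1000 this gives 400 no-delay retries per phase; for HZ=100 or HZ=250 the raw value exceeds the high mark and is capped at 1000.
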
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b5152961b743..7bf561262cb8 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1701,7 +1701,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
* the throttle.
*/
p->dl.dl_throttled = 0;
- BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
+ if (!(flags & ENQUEUE_REPLENISH))
+ printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
+ task_pid_nr(p));
+
return;
}
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index bb9962b33f95..59ddb00d6944 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -454,6 +454,33 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
rcu_assign_pointer(watch->queue, wqueue);
}
+static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
+{
+ const struct cred *cred;
+ struct watch *w;
+
+ hlist_for_each_entry(w, &wlist->watchers, list_node) {
+ struct watch_queue *wq = rcu_access_pointer(w->queue);
+ if (wqueue == wq && watch->id == w->id)
+ return -EBUSY;
+ }
+
+ cred = current_cred();
+ if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
+ atomic_dec(&cred->user->nr_watches);
+ return -EAGAIN;
+ }
+
+ watch->cred = get_cred(cred);
+ rcu_assign_pointer(watch->watch_list, wlist);
+
+ kref_get(&wqueue->usage);
+ kref_get(&watch->usage);
+ hlist_add_head(&watch->queue_node, &wqueue->watches);
+ hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
+ return 0;
+}
+
/**
* add_watch_to_object - Add a watch on an object to a watch list
* @watch: The watch to add
@@ -468,34 +495,21 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
*/
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
- struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
- struct watch *w;
-
- hlist_for_each_entry(w, &wlist->watchers, list_node) {
- struct watch_queue *wq = rcu_access_pointer(w->queue);
- if (wqueue == wq && watch->id == w->id)
- return -EBUSY;
- }
-
- watch->cred = get_current_cred();
- rcu_assign_pointer(watch->watch_list, wlist);
+ struct watch_queue *wqueue;
+ int ret = -ENOENT;
- if (atomic_inc_return(&watch->cred->user->nr_watches) >
- task_rlimit(current, RLIMIT_NOFILE)) {
- atomic_dec(&watch->cred->user->nr_watches);
- put_cred(watch->cred);
- return -EAGAIN;
- }
+ rcu_read_lock();
+ wqueue = rcu_access_pointer(watch->queue);
if (lock_wqueue(wqueue)) {
- kref_get(&wqueue->usage);
- kref_get(&watch->usage);
- hlist_add_head(&watch->queue_node, &wqueue->watches);
+ spin_lock(&wlist->lock);
+ ret = add_one_watch(watch, wlist, wqueue);
+ spin_unlock(&wlist->lock);
unlock_wqueue(wqueue);
}
- hlist_add_head(&watch->list_node, &wlist->watchers);
- return 0;
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(add_watch_to_object);
diff --git a/mm/gup.c b/mm/gup.c
index 551264407624..e2a39e30756d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -87,7 +87,8 @@ retry:
* belongs to this folio.
*/
if (unlikely(page_folio(page) != folio)) {
- folio_put_refs(folio, refs);
+ if (!put_devmap_managed_page_refs(&folio->page, refs))
+ folio_put_refs(folio, refs);
goto retry;
}
@@ -176,7 +177,8 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
refs *= GUP_PIN_COUNTING_BIAS;
}
- folio_put_refs(folio, refs);
+ if (!put_devmap_managed_page_refs(&folio->page, refs))
+ folio_put_refs(folio, refs);
}
/**
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a57e1be41401..a18c071c294e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4788,8 +4788,13 @@ again:
* sharing with another vma.
*/
;
- } else if (unlikely(is_hugetlb_entry_migration(entry) ||
- is_hugetlb_entry_hwpoisoned(entry))) {
+ } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
+ bool uffd_wp = huge_pte_uffd_wp(entry);
+
+ if (!userfaultfd_wp(dst_vma) && uffd_wp)
+ entry = huge_pte_clear_uffd_wp(entry);
+ set_huge_pte_at(dst, addr, dst_pte, entry);
+ } else if (unlikely(is_hugetlb_entry_migration(entry))) {
swp_entry_t swp_entry = pte_to_swp_entry(entry);
bool uffd_wp = huge_pte_uffd_wp(entry);
@@ -5947,6 +5952,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
page = alloc_huge_page(dst_vma, dst_addr, 0);
if (IS_ERR(page)) {
+ put_page(*pagep);
ret = -ENOMEM;
*pagep = NULL;
goto out;
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4b5e5a3d3a63..6aff49f6b79e 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -603,14 +603,6 @@ static unsigned long kfence_init_pool(void)
addr += 2 * PAGE_SIZE;
}
- /*
- * The pool is live and will never be deallocated from this point on.
- * Remove the pool object from the kmemleak object tree, as it would
- * otherwise overlap with allocations returned by kfence_alloc(), which
- * are registered with kmemleak through the slab post-alloc hook.
- */
- kmemleak_free(__kfence_pool);
-
return 0;
}
@@ -623,8 +615,16 @@ static bool __init kfence_init_pool_early(void)
addr = kfence_init_pool();
- if (!addr)
+ if (!addr) {
+ /*
+ * The pool is live and will never be deallocated from this point on.
+ * Ignore the pool object from the kmemleak phys object tree, as it would
+ * otherwise overlap with allocations returned by kfence_alloc(), which
+ * are registered with kmemleak through the slab post-alloc hook.
+ */
+ kmemleak_ignore_phys(__pa(__kfence_pool));
return true;
+ }
/*
* Only release unprotected pages, and do not try to go back and change
diff --git a/mm/memory.c b/mm/memory.c
index 4cf7d4b6c950..1c6027adc542 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3043,7 +3043,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
pte_t entry;
VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
- VM_BUG_ON(PageAnon(page) && !PageAnonExclusive(page));
+ VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
/*
* Clear the pages cpupid information as the existing
@@ -4369,9 +4369,12 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
return VM_FAULT_OOM;
}
- /* See comment in handle_pte_fault() */
+ /*
+ * See comment in handle_pte_fault() for how this scenario happens, we
+ * need to return NOPAGE so that we drop this page.
+ */
if (pmd_devmap_trans_unstable(vmf->pmd))
- return 0;
+ return VM_FAULT_NOPAGE;
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
diff --git a/mm/memremap.c b/mm/memremap.c
index b870a659eee6..745eea0f99c3 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -499,7 +499,7 @@ void free_zone_device_page(struct page *page)
}
#ifdef CONFIG_FS_DAX
-bool __put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs)
{
if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
return false;
@@ -509,9 +509,9 @@ bool __put_devmap_managed_page(struct page *page)
* refcount is 1, then the page is free and the refcount is
* stable because nobody holds a reference on the page.
*/
- if (page_ref_dec_return(page) == 1)
+ if (page_ref_sub_return(page, refs) == 1)
wake_up_var(&page->_refcount);
return true;
}
-EXPORT_SYMBOL(__put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page_refs);
#endif /* CONFIG_FS_DAX */
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 206ed6b40c1d..f06279d6190a 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -55,22 +55,28 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
gfp_t gfp = vmf->gfp_mask;
unsigned long addr;
struct page *page;
+ vm_fault_t ret;
int err;
if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
return vmf_error(-EINVAL);
+ filemap_invalidate_lock_shared(mapping);
+
retry:
page = find_lock_page(mapping, offset);
if (!page) {
page = alloc_page(gfp | __GFP_ZERO);
- if (!page)
- return VM_FAULT_OOM;
+ if (!page) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
err = set_direct_map_invalid_noflush(page);
if (err) {
put_page(page);
- return vmf_error(err);
+ ret = vmf_error(err);
+ goto out;
}
__SetPageUptodate(page);
@@ -86,7 +92,8 @@ retry:
if (err == -EEXIST)
goto retry;
- return vmf_error(err);
+ ret = vmf_error(err);
+ goto out;
}
addr = (unsigned long)page_address(page);
@@ -94,7 +101,11 @@ retry:
}
vmf->page = page;
- return VM_FAULT_LOCKED;
+ ret = VM_FAULT_LOCKED;
+
+out:
+ filemap_invalidate_unlock_shared(mapping);
+ return ret;
}
static const struct vm_operations_struct secretmem_vm_ops = {
@@ -162,12 +173,20 @@ static int secretmem_setattr(struct user_namespace *mnt_userns,
struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
+ struct address_space *mapping = inode->i_mapping;
unsigned int ia_valid = iattr->ia_valid;
+ int ret;
+
+ filemap_invalidate_lock(mapping);
if ((ia_valid & ATTR_SIZE) && inode->i_size)
- return -EINVAL;
+ ret = -EINVAL;
+ else
+ ret = simple_setattr(mnt_userns, dentry, iattr);
- return simple_setattr(mnt_userns, dentry, iattr);
+ filemap_invalidate_unlock(mapping);
+
+ return ret;
}
static const struct inode_operations secretmem_iops = {
diff --git a/mm/shmem.c b/mm/shmem.c
index a6f565308133..b7f2d4a56867 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3392,7 +3392,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_nr_blocks:
ctx->blocks = memparse(param->string, &rest);
- if (*rest)
+ if (*rest || ctx->blocks > S64_MAX)
goto bad_value;
ctx->seen |= SHMEM_SEEN_BLOCKS;
break;
@@ -3514,10 +3514,7 @@ static int shmem_reconfigure(struct fs_context *fc)
raw_spin_lock(&sbinfo->stat_lock);
inodes = sbinfo->max_inodes - sbinfo->free_inodes;
- if (ctx->blocks > S64_MAX) {
- err = "Number of blocks too large";
- goto out;
- }
+
if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
if (!sbinfo->max_blocks) {
err = "Cannot retroactively limit size";
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 148ce629a59f..e6d804b82b67 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -5297,6 +5297,9 @@ int hci_suspend_sync(struct hci_dev *hdev)
return err;
}
+ /* Update event mask so only the allowed event can wakeup the host */
+ hci_set_event_mask_sync(hdev);
+
/* Only configure accept list if disconnect succeeded and wake
* isn't being prevented.
*/
@@ -5308,9 +5311,6 @@ int hci_suspend_sync(struct hci_dev *hdev)
/* Unpause to take care of updating scanning params */
hdev->scanning_paused = false;
- /* Update event mask so only the allowed event can wakeup the host */
- hci_set_event_mask_sync(hdev);
-
/* Enable event filter for paired devices */
hci_update_event_filter_sync(hdev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 09ecaf556de5..77c0aac14539 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -111,7 +111,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
}
/* Find channel with given SCID.
- * Returns locked channel. */
+ * Returns a reference locked channel.
+ */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
u16 cid)
{
@@ -119,15 +120,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
- if (c)
- l2cap_chan_lock(c);
+ if (c) {
+ /* Only lock if chan reference is not 0 */
+ c = l2cap_chan_hold_unless_zero(c);
+ if (c)
+ l2cap_chan_lock(c);
+ }
mutex_unlock(&conn->chan_lock);
return c;
}
/* Find channel with given DCID.
- * Returns locked channel.
+ * Returns a reference locked channel.
*/
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
u16 cid)
@@ -136,8 +141,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_dcid(conn, cid);
- if (c)
- l2cap_chan_lock(c);
+ if (c) {
+ /* Only lock if chan reference is not 0 */
+ c = l2cap_chan_hold_unless_zero(c);
+ if (c)
+ l2cap_chan_lock(c);
+ }
mutex_unlock(&conn->chan_lock);
return c;
@@ -162,8 +171,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_ident(conn, ident);
- if (c)
- l2cap_chan_lock(c);
+ if (c) {
+ /* Only lock if chan reference is not 0 */
+ c = l2cap_chan_hold_unless_zero(c);
+ if (c)
+ l2cap_chan_lock(c);
+ }
mutex_unlock(&conn->chan_lock);
return c;
@@ -497,6 +510,16 @@ void l2cap_chan_hold(struct l2cap_chan *c)
kref_get(&c->kref);
}
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
+{
+ BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
+
+ if (!kref_get_unless_zero(&c->kref))
+ return NULL;
+
+ return c;
+}
+
void l2cap_chan_put(struct l2cap_chan *c)
{
BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
@@ -1969,7 +1992,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
src_match = !bacmp(&c->src, src);
dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
- l2cap_chan_hold(c);
+ c = l2cap_chan_hold_unless_zero(c);
+ if (!c)
+ continue;
+
read_unlock(&chan_list_lock);
return c;
}
@@ -1984,7 +2010,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
}
if (c1)
- l2cap_chan_hold(c1);
+ c1 = l2cap_chan_hold_unless_zero(c1);
read_unlock(&chan_list_lock);
@@ -4464,6 +4490,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
unlock:
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
return err;
}
@@ -4578,6 +4605,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
done:
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
return err;
}
@@ -5305,6 +5333,7 @@ send_move_response:
l2cap_send_move_chan_rsp(chan, result);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
return 0;
}
@@ -5397,6 +5426,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
}
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
@@ -5426,6 +5456,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
@@ -5489,6 +5520,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
return 0;
}
@@ -5524,6 +5556,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
}
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
return 0;
}
@@ -5896,12 +5929,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
if (credits > max_credits) {
BT_ERR("LE credits overflow");
l2cap_send_disconn_req(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
/* Return 0 so that we don't trigger an unnecessary
* command reject packet.
*/
- return 0;
+ goto unlock;
}
chan->tx_credits += credits;
@@ -5912,7 +5944,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
if (chan->tx_credits)
chan->ops->resume(chan);
+unlock:
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
return 0;
}
@@ -7598,6 +7632,7 @@ drop:
done:
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
@@ -8086,7 +8121,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
if (src_type != c->src_type)
continue;
- l2cap_chan_hold(c);
+ c = l2cap_chan_hold_unless_zero(c);
read_unlock(&chan_list_lock);
return c;
}
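
The recurring change in this file is that channel lookups now take a reference with l2cap_chan_hold_unless_zero() and every exit path pairs it with l2cap_chan_put(); a lookup racing with the final put refuses to hand out the channel instead of resurrecting it. The pattern is the classic kref_get_unless_zero() one; a userspace analogue with C11 atomics shows why a plain "hold" is not enough:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if at least one is still held, so a lookup can
 * never revive an object whose last reference is already being dropped
 * (the equivalent of kref_get_unless_zero()). */
static bool get_unless_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;
		/* on failure 'old' was reloaded; retry */
	}
	return false;
}

int main(void)
{
	atomic_int live = 2;	/* still referenced */
	atomic_int dying = 0;	/* last put already ran */

	printf("live:  %s\n", get_unless_zero(&live) ? "got ref" : "refused");
	printf("dying: %s\n", get_unless_zero(&dying) ? "got ref" : "refused");
	return 0;
}
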
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 8cfafd7a0576..646d10401b80 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -4844,7 +4844,6 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
else
status = MGMT_STATUS_FAILED;
- mgmt_pending_remove(cmd);
goto unlock;
}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1ef14a099c6b..5aeb3646e74c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -589,9 +589,13 @@ static int br_fill_ifinfo(struct sk_buff *skb,
}
done:
+ if (af) {
+ if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
+ nla_nest_end(skb, af);
+ else
+ nla_nest_cancel(skb, af);
+ }
- if (af)
- nla_nest_end(skb, af);
nlmsg_end(skb, nlh);
return 0;
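
br_fill_ifinfo() opens the IFLA_AF_SPEC nest before it knows whether any per-AF attribute will actually be emitted; ending an empty nest leaves a zero-payload attribute in the dump, so the fix only closes the nest when something follows its header (nla_attr_size(0) is the size of a bare attribute header) and cancels it otherwise. The conditional-nest pattern in isolation, as a sketch of kernel netlink code rather than a complete function:

/* Sketch of the pattern used above. */
struct nlattr *nest = nla_nest_start(skb, IFLA_AF_SPEC);

if (!nest)
	goto nla_put_failure;

/* ... zero or more nla_put_*() calls, depending on device state ... */

if (nlmsg_get_pos(skb) - (void *)nest > nla_attr_size(0))
	nla_nest_end(skb, nest);	/* something was added: close the nest */
else
	nla_nest_cancel(skb, nest);	/* nothing was added: drop the header */
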
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 251e666ba9a2..748be7253248 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -47,7 +47,7 @@ enum caif_states {
struct caifsock {
struct sock sk; /* must be first member */
struct cflayer layer;
- u32 flow_state;
+ unsigned long flow_state;
struct caif_connect_request conn_req;
struct mutex readlock;
struct dentry *debugfs_socket_dir;
@@ -56,38 +56,32 @@ struct caifsock {
static int rx_flow_is_on(struct caifsock *cf_sk)
{
- return test_bit(RX_FLOW_ON_BIT,
- (void *) &cf_sk->flow_state);
+ return test_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}
static int tx_flow_is_on(struct caifsock *cf_sk)
{
- return test_bit(TX_FLOW_ON_BIT,
- (void *) &cf_sk->flow_state);
+ return test_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
}
static void set_rx_flow_off(struct caifsock *cf_sk)
{
- clear_bit(RX_FLOW_ON_BIT,
- (void *) &cf_sk->flow_state);
+ clear_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}
static void set_rx_flow_on(struct caifsock *cf_sk)
{
- set_bit(RX_FLOW_ON_BIT,
- (void *) &cf_sk->flow_state);
+ set_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
}
static void set_tx_flow_off(struct caifsock *cf_sk)
{
- clear_bit(TX_FLOW_ON_BIT,
- (void *) &cf_sk->flow_state);
+ clear_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
}
static void set_tx_flow_on(struct caifsock *cf_sk)
{
- set_bit(TX_FLOW_ON_BIT,
- (void *) &cf_sk->flow_state);
+ set_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
}
static void caif_read_lock(struct sock *sk)
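
The flow_state conversion matters because set_bit(), clear_bit() and test_bit() operate on arrays of unsigned long; pointing them at a u32 through a cast makes them access a full native word at that address, which on 64-bit targets touches bytes beyond the field. Making the field an unsigned long gives the helpers exactly the word they expect and lets the casts go away. A minimal userspace sketch of the word-based contract (bit numbers chosen arbitrarily for the demo):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* The kernel bit helpers index into unsigned long words just like this,
 * which is why the backing field must itself be an unsigned long. */
static void set_bit_ul(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_ul(unsigned int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	enum { RX_FLOW_ON_BIT = 0, TX_FLOW_ON_BIT = 1 };
	unsigned long flow_state = 0;

	set_bit_ul(TX_FLOW_ON_BIT, &flow_state);
	printf("rx=%d tx=%d\n",
	       test_bit_ul(RX_FLOW_ON_BIT, &flow_state),
	       test_bit_ul(TX_FLOW_ON_BIT, &flow_state));
	return 0;
}
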
diff --git a/net/core/devlink.c b/net/core/devlink.c
index ca4c9939d569..889e7e3d3e8a 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -201,8 +201,13 @@ static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_
DEVLINK_PORT_FN_STATE_ACTIVE),
};
+static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
+ [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
+};
+
static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
#define DEVLINK_REGISTERED XA_MARK_1
+#define DEVLINK_UNREGISTERING XA_MARK_2
/* devlink instances are open to the access from the user space after
* devlink_register() call. Such logical barrier allows us to have certain
@@ -220,12 +225,6 @@ static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
#define ASSERT_DEVLINK_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
-/* devlink_mutex
- *
- * An overall lock guarding every operation coming from userspace.
- */
-static DEFINE_MUTEX(devlink_mutex);
-
struct net *devlink_net(const struct devlink *devlink)
{
return read_pnet(&devlink->_net);
@@ -301,6 +300,14 @@ retry:
devlink = xa_find_fn(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
if (!devlink)
goto unlock;
+
+ /* In case devlink_unregister() was already called and "unregistering"
+ * mark was set, do not allow to get a devlink reference here.
+ * This prevents live-lock of devlink_unregister() wait for completion.
+ */
+ if (xa_get_mark(&devlinks, *indexp, DEVLINK_UNREGISTERING))
+ goto retry;
+
/* For a possible retry, the xa_find_after() should be always used */
xa_find_fn = xa_find_after;
if (!devlink_try_get(devlink))
@@ -695,6 +702,10 @@ struct devlink_region {
const struct devlink_region_ops *ops;
const struct devlink_port_region_ops *port_ops;
};
+ struct mutex snapshot_lock; /* protects snapshot_list,
+ * max_snapshots and cur_snapshots
+ * consistency.
+ */
struct list_head snapshot_list;
u32 max_snapshots;
u32 cur_snapshots;
@@ -751,12 +762,6 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
#define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
#define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4)
-/* The per devlink instance lock is taken by default in the pre-doit
- * operation, yet several commands do not require this. The global
- * devlink lock is taken and protects from disruption by user-calls.
- */
-#define DEVLINK_NL_FLAG_NO_LOCK BIT(5)
-
static int devlink_nl_pre_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
@@ -765,14 +770,10 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
struct devlink *devlink;
int err;
- mutex_lock(&devlink_mutex);
devlink = devlink_get_from_attrs(genl_info_net(info), info->attrs);
- if (IS_ERR(devlink)) {
- mutex_unlock(&devlink_mutex);
+ if (IS_ERR(devlink))
return PTR_ERR(devlink);
- }
- if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
- devl_lock(devlink);
+ devl_lock(devlink);
info->user_ptr[0] = devlink;
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
devlink_port = devlink_port_get_from_info(devlink, info);
@@ -814,10 +815,8 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
return 0;
unlock:
- if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
- devl_unlock(devlink);
+ devl_unlock(devlink);
devlink_put(devlink);
- mutex_unlock(&devlink_mutex);
return err;
}
@@ -832,10 +831,8 @@ static void devlink_nl_post_doit(const struct genl_ops *ops,
linecard = info->user_ptr[1];
devlink_linecard_put(linecard);
}
- if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
- devl_unlock(devlink);
+ devl_unlock(devlink);
devlink_put(devlink);
- mutex_unlock(&devlink_mutex);
}
static struct genl_family devlink_nl_family;
@@ -1400,7 +1397,6 @@ static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err = 0;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
devl_lock(devlink);
list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
@@ -1425,7 +1421,6 @@ static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
if (err != -EMSGSIZE)
return err;
@@ -1496,7 +1491,6 @@ static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
if (idx < start) {
idx++;
@@ -1513,8 +1507,6 @@ static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
idx++;
}
out:
- mutex_unlock(&devlink_mutex);
-
cb->args[0] = idx;
return msg->len;
}
@@ -1551,7 +1543,6 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
devl_lock(devlink);
list_for_each_entry(devlink_port, &devlink->port_list, list) {
@@ -1575,8 +1566,6 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
cb->args[0] = idx;
return msg->len;
}
@@ -2230,7 +2219,6 @@ static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
mutex_lock(&devlink->linecards_lock);
list_for_each_entry(linecard, &devlink->linecard_list, list) {
@@ -2257,8 +2245,6 @@ static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
cb->args[0] = idx;
return msg->len;
}
@@ -2495,7 +2481,6 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
devl_lock(devlink);
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
@@ -2519,8 +2504,6 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
cb->args[0] = idx;
return msg->len;
}
@@ -2640,7 +2623,6 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err = 0;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
if (!devlink->ops->sb_pool_get)
goto retry;
@@ -2664,8 +2646,6 @@ retry:
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
if (err != -EMSGSIZE)
return err;
@@ -2857,7 +2837,6 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err = 0;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
if (!devlink->ops->sb_port_pool_get)
goto retry;
@@ -2881,8 +2860,6 @@ retry:
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
if (err != -EMSGSIZE)
return err;
@@ -3102,7 +3079,6 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err = 0;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
if (!devlink->ops->sb_tc_pool_bind_get)
goto retry;
@@ -3127,8 +3103,6 @@ retry:
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
if (err != -EMSGSIZE)
return err;
@@ -4826,6 +4800,204 @@ static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
return ret;
}
+static int
+devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink,
+ u32 portid, u32 seq, int flags,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *selftests;
+ void *hdr;
+ int err;
+ int i;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags,
+ DEVLINK_CMD_SELFTESTS_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = -EMSGSIZE;
+ if (devlink_nl_put_handle(msg, devlink))
+ goto err_cancel_msg;
+
+ selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
+ if (!selftests)
+ goto err_cancel_msg;
+
+ for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
+ i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
+ if (devlink->ops->selftest_check(devlink, i, extack)) {
+ err = nla_put_flag(msg, i);
+ if (err)
+ goto err_cancel_msg;
+ }
+ }
+
+ nla_nest_end(msg, selftests);
+ genlmsg_end(msg, hdr);
+ return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ if (!devlink->ops->selftest_check)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid,
+ info->snd_seq, 0, info->extack);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_selftests_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct devlink *devlink;
+ int start = cb->args[0];
+ unsigned long index;
+ int idx = 0;
+ int err = 0;
+
+ devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
+ if (idx < start || !devlink->ops->selftest_check)
+ goto inc;
+
+ devl_lock(devlink);
+ err = devlink_nl_selftests_fill(msg, devlink,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->extack);
+ devl_unlock(devlink);
+ if (err) {
+ devlink_put(devlink);
+ break;
+ }
+inc:
+ idx++;
+ devlink_put(devlink);
+ }
+
+ if (err != -EMSGSIZE)
+ return err;
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
+ enum devlink_selftest_status test_status)
+{
+ struct nlattr *result_attr;
+
+ result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT);
+ if (!result_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) ||
+ nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS,
+ test_status))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, result_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, result_attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_selftests_run(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
+ struct devlink *devlink = info->user_ptr[0];
+ struct nlattr *attrs, *selftests;
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+ int i;
+
+ if (!devlink->ops->selftest_run || !devlink->ops->selftest_check)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[DEVLINK_ATTR_SELFTESTS]) {
+ NL_SET_ERR_MSG_MOD(info->extack, "selftest required");
+ return -EINVAL;
+ }
+
+ attrs = info->attrs[DEVLINK_ATTR_SELFTESTS];
+
+ err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs,
+ devlink_selftest_nl_policy, info->extack);
+ if (err < 0)
+ return err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = -EMSGSIZE;
+ hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN);
+ if (!hdr)
+ goto free_msg;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto genlmsg_cancel;
+
+ selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
+ if (!selftests)
+ goto genlmsg_cancel;
+
+ for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
+ i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
+ enum devlink_selftest_status test_status;
+
+ if (nla_get_flag(tb[i])) {
+ if (!devlink->ops->selftest_check(devlink, i,
+ info->extack)) {
+ if (devlink_selftest_result_put(msg, i,
+ DEVLINK_SELFTEST_STATUS_SKIP))
+ goto selftests_nest_cancel;
+ continue;
+ }
+
+ test_status = devlink->ops->selftest_run(devlink, i,
+ info->extack);
+ if (devlink_selftest_result_put(msg, i, test_status))
+ goto selftests_nest_cancel;
+ }
+ }
+
+ nla_nest_end(msg, selftests);
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+selftests_nest_cancel:
+ nla_nest_cancel(msg, selftests);
+genlmsg_cancel:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return err;
+}
+
static const struct devlink_param devlink_param_generic[] = {
{
.id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
@@ -5185,7 +5357,6 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err = 0;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
devl_lock(devlink);
list_for_each_entry(param_item, &devlink->param_list, list) {
@@ -5211,8 +5382,6 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
if (err != -EMSGSIZE)
return err;
@@ -5413,7 +5582,6 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err = 0;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
devl_lock(devlink);
list_for_each_entry(devlink_port, &devlink->port_list, list) {
@@ -5444,8 +5612,6 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
if (err != -EMSGSIZE)
return err;
@@ -5690,21 +5856,28 @@ static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
{
unsigned long count;
void *p;
+ int err;
- devl_assert_locked(devlink);
-
+ xa_lock(&devlink->snapshot_ids);
p = xa_load(&devlink->snapshot_ids, id);
- if (WARN_ON(!p))
- return -EINVAL;
+ if (WARN_ON(!p)) {
+ err = -EINVAL;
+ goto unlock;
+ }
- if (WARN_ON(!xa_is_value(p)))
- return -EINVAL;
+ if (WARN_ON(!xa_is_value(p))) {
+ err = -EINVAL;
+ goto unlock;
+ }
count = xa_to_value(p);
count++;
- return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
- GFP_KERNEL));
+ err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+ GFP_ATOMIC));
+unlock:
+ xa_unlock(&devlink->snapshot_ids);
+ return err;
}
/**
@@ -5727,25 +5900,26 @@ static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
unsigned long count;
void *p;
- devl_assert_locked(devlink);
-
+ xa_lock(&devlink->snapshot_ids);
p = xa_load(&devlink->snapshot_ids, id);
if (WARN_ON(!p))
- return;
+ goto unlock;
if (WARN_ON(!xa_is_value(p)))
- return;
+ goto unlock;
count = xa_to_value(p);
if (count > 1) {
count--;
- xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
- GFP_KERNEL);
+ __xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+ GFP_ATOMIC);
} else {
/* If this was the last user, we can erase this id */
- xa_erase(&devlink->snapshot_ids, id);
+ __xa_erase(&devlink->snapshot_ids, id);
}
+unlock:
+ xa_unlock(&devlink->snapshot_ids);
}
/**
@@ -5766,13 +5940,17 @@ static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
*/
static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
{
- devl_assert_locked(devlink);
+ int err;
- if (xa_load(&devlink->snapshot_ids, id))
+ xa_lock(&devlink->snapshot_ids);
+ if (xa_load(&devlink->snapshot_ids, id)) {
+ xa_unlock(&devlink->snapshot_ids);
return -EEXIST;
-
- return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
- GFP_KERNEL));
+ }
+ err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
+ GFP_ATOMIC));
+ xa_unlock(&devlink->snapshot_ids);
+ return err;
}
/**
@@ -5793,8 +5971,6 @@ static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
*/
static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
{
- devl_assert_locked(devlink);
-
return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
xa_limit_32b, GFP_KERNEL);
}
@@ -5807,7 +5983,7 @@ static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
* Multiple snapshots can be created on a region.
* The @snapshot_id should be obtained using the getter function.
*
- * Must be called only while holding the devlink instance lock.
+ * Must be called only while holding the region snapshot lock.
*
* @region: devlink region of the snapshot
* @data: snapshot data
@@ -5821,7 +5997,7 @@ __devlink_region_snapshot_create(struct devlink_region *region,
struct devlink_snapshot *snapshot;
int err;
- devl_assert_locked(devlink);
+ lockdep_assert_held(&region->snapshot_lock);
/* check if region can hold one more snapshot */
if (region->cur_snapshots == region->max_snapshots)
@@ -5859,7 +6035,7 @@ static void devlink_region_snapshot_del(struct devlink_region *region,
{
struct devlink *devlink = region->devlink;
- devl_assert_locked(devlink);
+ lockdep_assert_held(&region->snapshot_lock);
devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
region->cur_snapshots--;
@@ -5990,7 +6166,6 @@ static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err = 0;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
err = devlink_nl_cmd_region_get_devlink_dumpit(msg, cb, devlink,
&idx, start);
@@ -5999,7 +6174,6 @@ static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
goto out;
}
out:
- mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
return msg->len;
}
@@ -6038,11 +6212,15 @@ static int devlink_nl_cmd_region_del(struct sk_buff *skb,
if (!region)
return -EINVAL;
+ mutex_lock(&region->snapshot_lock);
snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
- if (!snapshot)
+ if (!snapshot) {
+ mutex_unlock(&region->snapshot_lock);
return -EINVAL;
+ }
devlink_region_snapshot_del(region, snapshot);
+ mutex_unlock(&region->snapshot_lock);
return 0;
}
@@ -6090,9 +6268,12 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
}
+ mutex_lock(&region->snapshot_lock);
+
if (region->cur_snapshots == region->max_snapshots) {
NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots");
- return -ENOSPC;
+ err = -ENOSPC;
+ goto unlock;
}
snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
@@ -6101,17 +6282,18 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use");
- return -EEXIST;
+ err = -EEXIST;
+ goto unlock;
}
err = __devlink_snapshot_id_insert(devlink, snapshot_id);
if (err)
- return err;
+ goto unlock;
} else {
err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
if (err) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to allocate a new snapshot id");
- return err;
+ goto unlock;
}
}
@@ -6149,16 +6331,20 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
goto err_notify;
}
+ mutex_unlock(&region->snapshot_lock);
return 0;
err_snapshot_create:
region->ops->destructor(data);
err_snapshot_capture:
__devlink_snapshot_id_decrement(devlink, snapshot_id);
+ mutex_unlock(&region->snapshot_lock);
return err;
err_notify:
devlink_region_snapshot_del(region, snapshot);
+unlock:
+ mutex_unlock(&region->snapshot_lock);
return err;
}
@@ -6253,12 +6439,9 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
start_offset = *((u64 *)&cb->args[0]);
- mutex_lock(&devlink_mutex);
devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
- if (IS_ERR(devlink)) {
- err = PTR_ERR(devlink);
- goto out_dev;
- }
+ if (IS_ERR(devlink))
+ return PTR_ERR(devlink);
devl_lock(devlink);
@@ -6358,8 +6541,6 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
genlmsg_end(skb, hdr);
devl_unlock(devlink);
devlink_put(devlink);
- mutex_unlock(&devlink_mutex);
-
return skb->len;
nla_put_failure:
@@ -6367,8 +6548,6 @@ nla_put_failure:
out_unlock:
devl_unlock(devlink);
devlink_put(devlink);
-out_dev:
- mutex_unlock(&devlink_mutex);
return err;
}
@@ -6517,7 +6696,6 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err = 0;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
if (idx < start || !devlink->ops->info_get)
goto inc;
@@ -6538,7 +6716,6 @@ inc:
idx++;
devlink_put(devlink);
}
- mutex_unlock(&devlink_mutex);
if (err != -EMSGSIZE)
return err;
@@ -7527,6 +7704,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
enum devlink_health_reporter_state prev_health_state;
struct devlink *devlink = reporter->devlink;
unsigned long recover_ts_threshold;
+ int ret;
/* write a log message of the current error */
WARN_ON(!msg);
@@ -7560,11 +7738,14 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
mutex_unlock(&reporter->dump_lock);
}
- if (reporter->auto_recover)
- return devlink_health_reporter_recover(reporter,
- priv_ctx, NULL);
+ if (!reporter->auto_recover)
+ return 0;
- return 0;
+ devl_lock(devlink);
+ ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL);
+ devl_unlock(devlink);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(devlink_health_report);
@@ -7613,18 +7794,13 @@ devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
struct nlattr **attrs = info->attrs;
struct devlink *devlink;
- mutex_lock(&devlink_mutex);
devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
if (IS_ERR(devlink))
- goto unlock;
+ return NULL;
reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
devlink_put(devlink);
- mutex_unlock(&devlink_mutex);
return reporter;
-unlock:
- mutex_unlock(&devlink_mutex);
- return NULL;
}
void
@@ -7690,7 +7866,6 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
mutex_lock(&devlink->reporters_lock);
list_for_each_entry(reporter, &devlink->reporter_list,
@@ -7742,8 +7917,6 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
cb->args[0] = idx;
return msg->len;
}
@@ -8276,7 +8449,6 @@ static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
devl_lock(devlink);
list_for_each_entry(trap_item, &devlink->trap_list, list) {
@@ -8300,8 +8472,6 @@ static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
cb->args[0] = idx;
return msg->len;
}
@@ -8496,7 +8666,6 @@ static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
devl_lock(devlink);
list_for_each_entry(group_item, &devlink->trap_group_list,
@@ -8521,8 +8690,6 @@ static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
cb->args[0] = idx;
return msg->len;
}
@@ -8803,7 +8970,6 @@ static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
int idx = 0;
int err;
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
devl_lock(devlink);
list_for_each_entry(policer_item, &devlink->trap_policer_list,
@@ -8828,8 +8994,6 @@ static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
devlink_put(devlink);
}
out:
- mutex_unlock(&devlink_mutex);
-
cb->args[0] = idx;
return msg->len;
}
@@ -8969,6 +9133,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
[DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
};
static const struct genl_small_ops devlink_nl_ops[] = {
@@ -9170,7 +9335,6 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_reload,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_PARAM_GET,
@@ -9238,8 +9402,7 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_get_doit,
.dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
- DEVLINK_NL_FLAG_NO_LOCK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
/* can be retrieved by unprivileged users */
},
{
@@ -9247,24 +9410,21 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_set_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
- DEVLINK_NL_FLAG_NO_LOCK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_recover_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
- DEVLINK_NL_FLAG_NO_LOCK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_diagnose_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
- DEVLINK_NL_FLAG_NO_LOCK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
@@ -9278,16 +9438,14 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
- DEVLINK_NL_FLAG_NO_LOCK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_test_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
- DEVLINK_NL_FLAG_NO_LOCK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
},
{
.cmd = DEVLINK_CMD_FLASH_UPDATE,
@@ -9328,6 +9486,17 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.doit = devlink_nl_cmd_trap_policer_set_doit,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = DEVLINK_CMD_SELFTESTS_GET,
+ .doit = devlink_nl_cmd_selftests_get_doit,
+ .dumpit = devlink_nl_cmd_selftests_get_dumpit,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_SELFTESTS_RUN,
+ .doit = devlink_nl_cmd_selftests_run,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family devlink_nl_family __ro_after_init = {
@@ -9336,6 +9505,7 @@ static struct genl_family devlink_nl_family __ro_after_init = {
.maxattr = DEVLINK_ATTR_MAX,
.policy = devlink_nl_policy,
.netnsok = true,
+ .parallel_ops = true,
.pre_doit = devlink_nl_pre_doit,
.post_doit = devlink_nl_post_doit,
.module = THIS_MODULE,
@@ -9569,11 +9739,13 @@ void devlink_unregister(struct devlink *devlink)
ASSERT_DEVLINK_REGISTERED(devlink);
/* Make sure that we are in .remove() routine */
+ xa_set_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
devlink_put(devlink);
wait_for_completion(&devlink->comp);
devlink_notify_unregister(devlink);
xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
+ xa_clear_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
@@ -11084,6 +11256,7 @@ struct devlink_region *devl_region_create(struct devlink *devlink,
region->ops = ops;
region->size = region_size;
INIT_LIST_HEAD(&region->snapshot_list);
+ mutex_init(&region->snapshot_lock);
list_add_tail(&region->list, &devlink->region_list);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
@@ -11157,6 +11330,7 @@ devlink_port_region_create(struct devlink_port *port,
region->port_ops = ops;
region->size = region_size;
INIT_LIST_HEAD(&region->snapshot_list);
+ mutex_init(&region->snapshot_lock);
list_add_tail(&region->list, &port->region_list);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
@@ -11186,6 +11360,7 @@ void devl_region_destroy(struct devlink_region *region)
devlink_region_snapshot_del(region, snapshot);
list_del(&region->list);
+ mutex_destroy(&region->snapshot_lock);
devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
kfree(region);
@@ -11226,13 +11401,7 @@ EXPORT_SYMBOL_GPL(devlink_region_destroy);
*/
int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
{
- int err;
-
- devl_lock(devlink);
- err = __devlink_region_snapshot_id_get(devlink, id);
- devl_unlock(devlink);
-
- return err;
+ return __devlink_region_snapshot_id_get(devlink, id);
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
@@ -11248,9 +11417,7 @@ EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
*/
void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
{
- devl_lock(devlink);
__devlink_snapshot_id_decrement(devlink, id);
- devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
@@ -11269,13 +11436,11 @@ EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
int devlink_region_snapshot_create(struct devlink_region *region,
u8 *data, u32 snapshot_id)
{
- struct devlink *devlink = region->devlink;
int err;
- devl_lock(devlink);
+ mutex_lock(&region->snapshot_lock);
err = __devlink_region_snapshot_create(region, data, snapshot_id);
- devl_unlock(devlink);
-
+ mutex_unlock(&region->snapshot_lock);
return err;
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
@@ -12260,18 +12425,18 @@ static void __net_exit devlink_pernet_pre_exit(struct net *net)
/* In case network namespace is getting destroyed, reload
* all devlink instances from this namespace into init_net.
*/
- mutex_lock(&devlink_mutex);
devlinks_xa_for_each_registered_get(net, index, devlink) {
WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
+ mutex_lock(&devlink->lock);
err = devlink_reload(devlink, &init_net,
DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
DEVLINK_RELOAD_LIMIT_UNSPEC,
&actions_performed, NULL);
+ mutex_unlock(&devlink->lock);
if (err && err != -EOPNOTSUPP)
pr_warn("Failed to reload devlink instance into init_net\n");
devlink_put(devlink);
}
- mutex_unlock(&devlink_mutex);
}
static struct pernet_operations devlink_pernet_ops __net_initdata = {
diff --git a/net/core/filter.c b/net/core/filter.c
index 57c5e4c4efd2..5669248aff25 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3918,7 +3918,7 @@ static void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
offset -= frag_size;
}
out:
- return offset + len < size ? addr + offset : NULL;
+ return offset + len <= size ? addr + offset : NULL;
}
BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset,
@@ -4653,6 +4653,7 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
} else {
info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
info->key.u.ipv4.src = cpu_to_be32(from->local_ipv4);
+ info->key.flow_flags = FLOWI_FLAG_ANYSRC;
}
return 0;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index aa4f43f52499..6582dfdfb932 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -484,8 +484,8 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf
sk->sk_family = PF_DECnet;
sk->sk_protocol = 0;
sk->sk_allocation = gfp;
- sk->sk_sndbuf = sysctl_decnet_wmem[1];
- sk->sk_rcvbuf = sysctl_decnet_rmem[1];
+ sk->sk_sndbuf = READ_ONCE(sysctl_decnet_wmem[1]);
+ sk->sk_rcvbuf = READ_ONCE(sysctl_decnet_rmem[1]);
/* Initialization of DECnet Session Control Port */
scp = DN_SK(sk);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 552a53f1d5d0..ac2ee1689111 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -201,7 +201,7 @@ static void dn_dst_check_expire(struct timer_list *unused)
}
spin_unlock(&dn_rt_hash_table[i].lock);
- if ((jiffies - now) > 0)
+ if (jiffies != now)
break;
}
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 2b56218fc57c..4dfd68cf61c5 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -344,6 +344,7 @@ static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
ether_addr_copy(a->addr, addr);
a->vid = vid;
+ a->db = db;
refcount_set(&a->refcount, 1);
list_add_tail(&a->list, &lag->fdbs);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 46e8a5125853..452ff177e4da 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1042,6 +1042,7 @@ fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
{
+ u8 fib_notify_on_flag_change;
struct fib_alias *fa_match;
struct sk_buff *skb;
int err;
@@ -1063,14 +1064,16 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
WRITE_ONCE(fa_match->offload, fri->offload);
WRITE_ONCE(fa_match->trap, fri->trap);
+ fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change);
+
/* 2 means send notifications only if offload_failed was changed. */
- if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
+ if (fib_notify_on_flag_change == 2 &&
READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
- if (!net->ipv4.sysctl_fib_notify_on_flag_change)
+ if (!fib_notify_on_flag_change)
goto out;
skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dc7cc3ce6a53..970e9a2cca4a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -454,8 +454,8 @@ void tcp_init_sock(struct sock *sk)
icsk->icsk_sync_mss = tcp_sync_mss;
- WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
- WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+ WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
+ WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
sk_sockets_allocated_inc(sk);
}
@@ -688,7 +688,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
int size_goal)
{
return skb->len < size_goal &&
- sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
!tcp_rtx_queue_empty(sk) &&
refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
tcp_skb_can_collapse_to(skb);
@@ -1842,7 +1842,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
cap = sk->sk_rcvbuf >> 1;
else
- cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
+ cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
val = min(val, cap);
WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
@@ -4573,9 +4573,18 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
}
- /* check the signature */
- genhash = tp->af_specific->calc_md5_hash(newhash, hash_expected,
- NULL, skb);
+ /* Check the signature.
+ * To support dual stack listeners, we need to handle
+ * IPv4-mapped case.
+ */
+ if (family == AF_INET)
+ genhash = tcp_v4_md5_hash_skb(newhash,
+ hash_expected,
+ NULL, skb);
+ else
+ genhash = tp->af_specific->calc_md5_hash(newhash,
+ hash_expected,
+ NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ae73b34d32e9..ab5f0ea166f1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -426,7 +426,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
if (sk->sk_sndbuf < sndmem)
WRITE_ONCE(sk->sk_sndbuf,
- min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
+ min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -461,7 +461,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
/* Optimize this! */
int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
- int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
+ int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
while (tp->rcv_ssthresh <= window) {
if (truesize <= skb->len)
@@ -534,7 +534,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
*/
static void tcp_init_buffer_space(struct sock *sk)
{
- int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
+ int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
struct tcp_sock *tp = tcp_sk(sk);
int maxwin;
@@ -574,16 +574,17 @@ static void tcp_clamp_window(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct net *net = sock_net(sk);
+ int rmem2;
icsk->icsk_ack.quick = 0;
+ rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
- if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
+ if (sk->sk_rcvbuf < rmem2 &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
!tcp_under_memory_pressure(sk) &&
sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
WRITE_ONCE(sk->sk_rcvbuf,
- min(atomic_read(&sk->sk_rmem_alloc),
- net->ipv4.sysctl_tcp_rmem[2]));
+ min(atomic_read(&sk->sk_rmem_alloc), rmem2));
}
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -724,7 +725,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
* <prev RTT . ><current RTT .. ><next RTT .... >
*/
- if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
int rcvmem, rcvbuf;
u64 rcvwin, grow;
@@ -745,7 +746,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
do_div(rcvwin, tp->advmss);
rcvbuf = min_t(u64, rcvwin * rcvmem,
- sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
if (rcvbuf > sk->sk_rcvbuf) {
WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
@@ -909,9 +910,9 @@ static void tcp_update_pacing_rate(struct sock *sk)
* end of slow start and should slow down.
*/
if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
- rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
+ rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
else
- rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
+ rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);
rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
@@ -2174,7 +2175,7 @@ void tcp_enter_loss(struct sock *sk)
* loss recovery is underway except recurring timeout(s) on
* the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
*/
- tp->frto = net->ipv4.sysctl_tcp_frto &&
+ tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
(new_recovery || icsk->icsk_retransmits) &&
!inet_csk(sk)->icsk_mtup.probe_size;
}
@@ -3057,7 +3058,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
{
- u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
+ u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
struct tcp_sock *tp = tcp_sk(sk);
if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
@@ -3580,7 +3581,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
if (*last_oow_ack_time) {
s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
- if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
+ if (0 <= elapsed &&
+ elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
NET_INC_STATS(net, mib_idx);
return true; /* rate-limited: don't send yet! */
}
@@ -3628,7 +3630,7 @@ static void tcp_send_challenge_ack(struct sock *sk)
/* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
- u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
+ u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
u32 half = (ack_limit + 1) >> 1;
challenge_timestamp = now;
@@ -4425,7 +4427,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+ if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
int mib_idx;
if (before(seq, tp->rcv_nxt))
@@ -4472,7 +4474,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
- if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+ if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
tcp_rcv_spurious_retrans(sk, skb);
@@ -5516,7 +5518,7 @@ send_now:
}
if (!tcp_is_sack(tp) ||
- tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
+ tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
goto send_now;
if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
@@ -5537,11 +5539,12 @@ send_now:
if (tp->srtt_us && tp->srtt_us < rtt)
rtt = tp->srtt_us;
- delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
+ delay = min_t(unsigned long,
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
rtt * (NSEC_PER_USEC >> 3)/20);
sock_hold(sk);
hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
- sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns,
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns),
HRTIMER_MODE_REL_PINNED_SOFT);
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c7e7101647dc..0c83780dc9bf 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1008,7 +1008,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
if (skb) {
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
- tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+ tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
(inet_sk(sk)->tos & INET_ECN_MASK) :
inet_sk(sk)->tos;
@@ -1528,7 +1528,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
/* Set ToS of the new socket based upon the value of incoming SYN.
* ECT bits are set later in tcp_init_transfer().
*/
- if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
if (!dst) {
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index a501150deaa3..d58e672be31c 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
int m;
sk_dst_confirm(sk);
- if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
+ if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
return;
rcu_read_lock();
@@ -385,7 +385,7 @@ void tcp_update_metrics(struct sock *sk)
if (tcp_in_initial_slowstart(tp)) {
/* Slow start still did not finish. */
- if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
if (val && (tcp_snd_cwnd(tp) >> 1) > val)
@@ -401,7 +401,7 @@ void tcp_update_metrics(struct sock *sk)
} else if (!tcp_in_slow_start(tp) &&
icsk->icsk_ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
- if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
@@ -418,7 +418,7 @@ void tcp_update_metrics(struct sock *sk)
tcp_metric_set(tm, TCP_METRIC_CWND,
(val + tp->snd_ssthresh) >> 1);
}
- if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
if (val && tp->snd_ssthresh > val)
@@ -463,7 +463,7 @@ void tcp_init_metrics(struct sock *sk)
if (tcp_metric_locked(tm, TCP_METRIC_CWND))
tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
- val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
+ val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
if (val) {
tp->snd_ssthresh = val;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2b72ccd2e651..78b654ff421b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -167,16 +167,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
- /* If this is the first data packet sent in response to the
- * previous received data,
- * and it is a reply for ato after last received packet,
- * increase pingpong count.
- */
- if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
- (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
- inet_csk_inc_pingpong_cnt(sk);
-
tp->lsndtime = now;
+
+ /* If it is a reply for ato after last received
+ * packet, enter pingpong mode.
+ */
+ if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+ inet_csk_enter_pingpong_mode(sk);
}
/* Account for an ACK we sent. */
@@ -230,7 +227,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
* which we interpret as a sign the remote TCP is not
* misinterpreting the window field as a signed quantity.
*/
- if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
else
(*rcv_wnd) = min_t(u32, space, U16_MAX);
@@ -241,7 +238,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
*rcv_wscale = 0;
if (wscale_ok) {
/* Set window scaling on max possible window */
- space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+ space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
space = max_t(u32, space, sysctl_rmem_max);
space = min_t(u32, space, *window_clamp);
*rcv_wscale = clamp_t(int, ilog2(space) - 15,
@@ -285,7 +282,7 @@ static u16 tcp_select_window(struct sock *sk)
* scaled window.
*/
if (!tp->rx_opt.rcv_wscale &&
- sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
new_win = min(new_win, MAX_TCP_WINDOW);
else
new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
@@ -1976,7 +1973,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
- r = tcp_min_rtt(tcp_sk(sk)) >> sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log;
+ r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
bytes += sk->sk_gso_max_size >> r;
@@ -1995,7 +1992,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
min_tso = ca_ops->min_tso_segs ?
ca_ops->min_tso_segs(sk) :
- sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
return min_t(u32, tso_segs, sk->sk_gso_max_segs);
@@ -2507,7 +2504,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
if (sk->sk_pacing_status == SK_PACING_NONE)
limit = min_t(unsigned long, limit,
- sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
limit <<= factor;
if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 2516078aa03e..34eda973bbf1 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -3264,7 +3264,7 @@ u32 udp_flow_hashrnd(void)
}
EXPORT_SYMBOL(udp_flow_hashrnd);
-static void __udp_sysctl_init(struct net *net)
+static int __net_init udp_sysctl_init(struct net *net)
{
net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE;
net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE;
@@ -3272,11 +3272,7 @@ static void __udp_sysctl_init(struct net *net)
#ifdef CONFIG_NET_L3_MASTER_DEV
net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
-}
-static int __net_init udp_sysctl_init(struct net *net)
-{
- __udp_sysctl_init(net);
return 0;
}
@@ -3352,8 +3348,6 @@ void __init udp_init(void)
sysctl_udp_mem[1] = limit;
sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
- __udp_sysctl_init(&init_net);
-
/* 16 spinlocks per cpu */
udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7f695c39d9a8..87c699d57b36 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1522,7 +1522,6 @@ static void mld_query_work(struct work_struct *work)
if (++cnt >= MLD_MAX_QUEUE) {
rework = true;
- schedule_delayed_work(&idev->mc_query_work, 0);
break;
}
}
@@ -1533,8 +1532,10 @@ static void mld_query_work(struct work_struct *work)
__mld_query_work(skb);
mutex_unlock(&idev->mc_lock);
- if (!rework)
- in6_dev_put(idev);
+ if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
+ return;
+
+ in6_dev_put(idev);
}
/* called with rcu_read_lock() */
@@ -1624,7 +1625,6 @@ static void mld_report_work(struct work_struct *work)
if (++cnt >= MLD_MAX_QUEUE) {
rework = true;
- schedule_delayed_work(&idev->mc_report_work, 0);
break;
}
}
@@ -1635,8 +1635,10 @@ static void mld_report_work(struct work_struct *work)
__mld_report_work(skb);
mutex_unlock(&idev->mc_lock);
- if (!rework)
- in6_dev_put(idev);
+ if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
+ return;
+
+ in6_dev_put(idev);
}
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index b1179f62bd23..91b840514656 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -22,6 +22,11 @@
#include <linux/proc_fs.h>
#include <net/ping.h>
+static void ping_v6_destroy(struct sock *sk)
+{
+ inet6_destroy_sock(sk);
+}
+
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len)
@@ -185,6 +190,7 @@ struct proto pingv6_prot = {
.owner = THIS_MODULE,
.init = ping_init_sock,
.close = ping_close,
+ .destroy = ping_v6_destroy,
.connect = ip6_datagram_connect_v6_only,
.disconnect = __udp_disconnect,
.setsockopt = ipv6_setsockopt,
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index e756ba705fd9..34db881204d2 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -36,9 +36,11 @@ static size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
case SEG6_IPTUN_MODE_INLINE:
break;
case SEG6_IPTUN_MODE_ENCAP:
+ case SEG6_IPTUN_MODE_ENCAP_RED:
head = sizeof(struct ipv6hdr);
break;
case SEG6_IPTUN_MODE_L2ENCAP:
+ case SEG6_IPTUN_MODE_L2ENCAP_RED:
return 0;
}
@@ -197,6 +199,124 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
}
EXPORT_SYMBOL_GPL(seg6_do_srh_encap);
+/* encapsulate an IPv6 packet within an outer IPv6 header with reduced SRH */
+static int seg6_do_srh_encap_red(struct sk_buff *skb,
+ struct ipv6_sr_hdr *osrh, int proto)
+{
+ __u8 first_seg = osrh->first_segment;
+ struct dst_entry *dst = skb_dst(skb);
+ struct net *net = dev_net(dst->dev);
+ struct ipv6hdr *hdr, *inner_hdr;
+ int hdrlen = ipv6_optlen(osrh);
+ int red_tlv_offset, tlv_offset;
+ struct ipv6_sr_hdr *isrh;
+ bool skip_srh = false;
+ __be32 flowlabel;
+ int tot_len, err;
+ int red_hdrlen;
+ int tlvs_len;
+
+ if (first_seg > 0) {
+ red_hdrlen = hdrlen - sizeof(struct in6_addr);
+ } else {
+ /* NOTE: if tag/flags and/or other TLVs are introduced in the
+ * seg6_iptunnel infrastructure, they should be considered when
+ * deciding to skip the SRH.
+ */
+ skip_srh = !sr_has_hmac(osrh);
+
+ red_hdrlen = skip_srh ? 0 : hdrlen;
+ }
+
+ tot_len = red_hdrlen + sizeof(struct ipv6hdr);
+
+ err = skb_cow_head(skb, tot_len + skb->mac_len);
+ if (unlikely(err))
+ return err;
+
+ inner_hdr = ipv6_hdr(skb);
+ flowlabel = seg6_make_flowlabel(net, skb, inner_hdr);
+
+ skb_push(skb, tot_len);
+ skb_reset_network_header(skb);
+ skb_mac_header_rebuild(skb);
+ hdr = ipv6_hdr(skb);
+
+ /* based on seg6_do_srh_encap() */
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ ip6_flow_hdr(hdr, ip6_tclass(ip6_flowinfo(inner_hdr)),
+ flowlabel);
+ hdr->hop_limit = inner_hdr->hop_limit;
+ } else {
+ ip6_flow_hdr(hdr, 0, flowlabel);
+ hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ IP6CB(skb)->iif = skb->skb_iif;
+ }
+
+ /* no matter if we have to skip the SRH or not, the first segment
+ * always comes in the pushed IPv6 header.
+ */
+ hdr->daddr = osrh->segments[first_seg];
+
+ if (skip_srh) {
+ hdr->nexthdr = proto;
+
+ set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+ goto out;
+ }
+
+ /* we cannot skip the SRH, slow path */
+
+ hdr->nexthdr = NEXTHDR_ROUTING;
+ isrh = (void *)hdr + sizeof(struct ipv6hdr);
+
+ if (unlikely(!first_seg)) {
+ /* this is a very rare case; we have only one SID but
+ * we cannot skip the SRH since we are carrying some
+ * other info.
+ */
+ memcpy(isrh, osrh, hdrlen);
+ goto srcaddr;
+ }
+
+ tlv_offset = sizeof(*osrh) + (first_seg + 1) * sizeof(struct in6_addr);
+ red_tlv_offset = tlv_offset - sizeof(struct in6_addr);
+
+ memcpy(isrh, osrh, red_tlv_offset);
+
+ tlvs_len = hdrlen - tlv_offset;
+ if (unlikely(tlvs_len > 0)) {
+ const void *s = (const void *)osrh + tlv_offset;
+ void *d = (void *)isrh + red_tlv_offset;
+
+ memcpy(d, s, tlvs_len);
+ }
+
+ --isrh->first_segment;
+ isrh->hdrlen -= 2;
+
+srcaddr:
+ isrh->nexthdr = proto;
+ set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+
+#ifdef CONFIG_IPV6_SEG6_HMAC
+ if (unlikely(!skip_srh && sr_has_hmac(isrh))) {
+ err = seg6_push_hmac(net, &hdr->saddr, isrh);
+ if (unlikely(err))
+ return err;
+ }
+#endif
+
+out:
+ hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+
+ skb_postpush_rcsum(skb, hdr, tot_len);
+
+ return 0;
+}
+
/* insert an SRH within an IPv6 packet, just after the IPv6 header */
int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
{
@@ -269,6 +389,7 @@ static int seg6_do_srh(struct sk_buff *skb)
return err;
break;
case SEG6_IPTUN_MODE_ENCAP:
+ case SEG6_IPTUN_MODE_ENCAP_RED:
err = iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6);
if (err)
return err;
@@ -280,7 +401,11 @@ static int seg6_do_srh(struct sk_buff *skb)
else
return -EINVAL;
- err = seg6_do_srh_encap(skb, tinfo->srh, proto);
+ if (tinfo->mode == SEG6_IPTUN_MODE_ENCAP)
+ err = seg6_do_srh_encap(skb, tinfo->srh, proto);
+ else
+ err = seg6_do_srh_encap_red(skb, tinfo->srh, proto);
+
if (err)
return err;
@@ -289,6 +414,7 @@ static int seg6_do_srh(struct sk_buff *skb)
skb->protocol = htons(ETH_P_IPV6);
break;
case SEG6_IPTUN_MODE_L2ENCAP:
+ case SEG6_IPTUN_MODE_L2ENCAP_RED:
if (!skb_mac_header_was_set(skb))
return -EINVAL;
@@ -298,7 +424,13 @@ static int seg6_do_srh(struct sk_buff *skb)
skb_mac_header_rebuild(skb);
skb_push(skb, skb->mac_len);
- err = seg6_do_srh_encap(skb, tinfo->srh, IPPROTO_ETHERNET);
+ if (tinfo->mode == SEG6_IPTUN_MODE_L2ENCAP)
+ err = seg6_do_srh_encap(skb, tinfo->srh,
+ IPPROTO_ETHERNET);
+ else
+ err = seg6_do_srh_encap_red(skb, tinfo->srh,
+ IPPROTO_ETHERNET);
+
if (err)
return err;
@@ -517,6 +649,10 @@ static int seg6_build_state(struct net *net, struct nlattr *nla,
break;
case SEG6_IPTUN_MODE_L2ENCAP:
break;
+ case SEG6_IPTUN_MODE_ENCAP_RED:
+ break;
+ case SEG6_IPTUN_MODE_L2ENCAP_RED:
+ break;
default:
return -EINVAL;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 85b8b765dcb1..e54eee80ce5f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -546,7 +546,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
if (np->repflow && ireq->pktopts)
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
- tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+ tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
(np->tclass & INET_ECN_MASK) :
np->tclass;
@@ -1317,7 +1317,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
/* Set ToS of the new socket based upon the value of incoming SYN.
* ECT bits are set later in tcp_init_transfer().
*/
- if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
/* Clone native IPv6 options from listening socket (if any)
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index bd8f0f425be4..30d289044e71 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -1271,7 +1271,7 @@ raise_win:
if (unlikely(th->syn))
new_win = min(new_win, 65535U) << tp->rx_opt.rcv_wscale;
if (!tp->rx_opt.rcv_wscale &&
- sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows)
+ READ_ONCE(sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows))
new_win = min(new_win, MAX_TCP_WINDOW);
else
new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 57f23f4e3a7c..a3f1c1461874 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1873,7 +1873,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
if (msk->rcvq_space.copied <= msk->rcvq_space.space)
goto new_measure;
- if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
int rcvmem, rcvbuf;
u64 rcvwin, grow;
@@ -1891,7 +1891,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
do_div(rcvwin, advmss);
rcvbuf = min_t(u64, rcvwin * rcvmem,
- sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
if (rcvbuf > sk->sk_rcvbuf) {
u32 window_clamp;
@@ -2634,8 +2634,8 @@ static int mptcp_init_sock(struct sock *sk)
mptcp_ca_reset(sk);
sk_sockets_allocated_inc(sk);
- sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
- sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
+ sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+ sk->sk_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
return 0;
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index d4b16d033978..901c763dcdbb 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1533,7 +1533,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
mptcp_sock_graft(ssk, sk->sk_socket);
iput(SOCK_INODE(sf));
WRITE_ONCE(msk->allow_infinite_fallback, false);
- return err;
+ return 0;
failed_unlink:
list_del(&subflow->node);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 646d5fd53604..9f976b11d896 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3340,6 +3340,8 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
if (err < 0)
return err;
}
+
+ cond_resched();
}
return 0;
@@ -9367,9 +9369,13 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
break;
}
}
+
+ cond_resched();
}
list_for_each_entry(set, &ctx->table->sets, list) {
+ cond_resched();
+
if (!nft_is_active_next(ctx->net, set))
continue;
if (!(set->flags & NFT_SET_MAP) ||
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a364f8e5e698..87a9009d5234 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -843,11 +843,16 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
}
static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
+nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
{
struct sk_buff *nskb;
if (diff < 0) {
+ unsigned int min_len = skb_transport_offset(e->skb);
+
+ if (data_len < min_len)
+ return -EINVAL;
+
if (pskb_trim(e->skb, data_len))
return -ENOMEM;
} else if (diff > 0) {
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 15e4b7640dc0..da29e92c03e2 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -68,6 +68,31 @@ static void nft_queue_sreg_eval(const struct nft_expr *expr,
regs->verdict.code = ret;
}
+static int nft_queue_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ static const unsigned int supported_hooks = ((1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING));
+
+ switch (ctx->family) {
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ case NFPROTO_BRIDGE:
+ break;
+ case NFPROTO_NETDEV: /* lacks okfn */
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return nft_chain_validate_hooks(ctx->chain, supported_hooks);
+}
+
static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
[NFTA_QUEUE_NUM] = { .type = NLA_U16 },
[NFTA_QUEUE_TOTAL] = { .type = NLA_U16 },
@@ -164,6 +189,7 @@ static const struct nft_expr_ops nft_queue_ops = {
.eval = nft_queue_eval,
.init = nft_queue_init,
.dump = nft_queue_dump,
+ .validate = nft_queue_validate,
.reduce = NFT_REDUCE_READONLY,
};
@@ -173,6 +199,7 @@ static const struct nft_expr_ops nft_queue_sreg_ops = {
.eval = nft_queue_sreg_eval,
.init = nft_queue_sreg_init,
.dump = nft_queue_sreg_dump,
+ .validate = nft_queue_validate,
.reduce = NFT_REDUCE_READONLY,
};
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d08c4728523b..5cbe07116e04 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3037,8 +3037,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (err)
goto out_free;
- if (sock->type == SOCK_RAW &&
- !dev_validate_header(dev, skb->data, len)) {
+ if ((sock->type == SOCK_RAW &&
+ !dev_validate_header(dev, skb->data, len)) || !skb->len) {
err = -EINVAL;
goto out_free;
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index be29da09cc7a..3460abceba44 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -229,9 +229,8 @@ static struct sctp_association *sctp_association_init(
if (!sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
- if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
- 0, gfp))
- goto fail_init;
+ if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
+ goto stream_free;
/* Initialize default path MTU. */
asoc->pathmtu = sp->pathmtu;
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 6dc95dcc0ff4..ef9fceadef8d 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -137,7 +137,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
ret = sctp_stream_alloc_out(stream, outcnt, gfp);
if (ret)
- goto out_err;
+ return ret;
for (i = 0; i < stream->outcnt; i++)
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
@@ -145,22 +145,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
handle_in:
sctp_stream_interleave_init(stream);
if (!incnt)
- goto out;
-
- ret = sctp_stream_alloc_in(stream, incnt, gfp);
- if (ret)
- goto in_err;
-
- goto out;
+ return 0;
-in_err:
- sched->free(stream);
- genradix_free(&stream->in);
-out_err:
- genradix_free(&stream->out);
- stream->outcnt = 0;
-out:
- return ret;
+ return sctp_stream_alloc_in(stream, incnt, gfp);
}
int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index 518b1b9bf89d..1ad565ed5627 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -160,7 +160,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
if (!SCTP_SO(&asoc->stream, i)->ext)
continue;
- ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
+ ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
if (ret)
goto err;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 43509c7e90fc..f1c3b8eb4b3d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -517,7 +517,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
sk->sk_shutdown = 0;
sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
- sk->sk_rcvbuf = sysctl_tipc_rmem[1];
+ sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
sk->sk_data_ready = tipc_data_ready;
sk->sk_write_space = tipc_write_space;
sk->sk_destruct = tipc_sock_destruct;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index fc513c1806a0..18c7e5c6d228 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -46,10 +46,8 @@
*/
static DECLARE_RWSEM(device_offload_lock);
-static void tls_device_gc_task(struct work_struct *work);
+static struct workqueue_struct *destruct_wq __read_mostly;
-static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
-static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);
@@ -68,47 +66,44 @@ static void tls_device_free_ctx(struct tls_context *ctx)
tls_ctx_free(NULL, ctx);
}
-static void tls_device_gc_task(struct work_struct *work)
+static void tls_device_tx_del_task(struct work_struct *work)
{
- struct tls_context *ctx, *tmp;
- unsigned long flags;
- LIST_HEAD(gc_list);
-
- spin_lock_irqsave(&tls_device_lock, flags);
- list_splice_init(&tls_device_gc_list, &gc_list);
- spin_unlock_irqrestore(&tls_device_lock, flags);
-
- list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
- struct net_device *netdev = ctx->netdev;
+ struct tls_offload_context_tx *offload_ctx =
+ container_of(work, struct tls_offload_context_tx, destruct_work);
+ struct tls_context *ctx = offload_ctx->ctx;
+ struct net_device *netdev = ctx->netdev;
- if (netdev && ctx->tx_conf == TLS_HW) {
- netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
- TLS_OFFLOAD_CTX_DIR_TX);
- dev_put(netdev);
- ctx->netdev = NULL;
- }
-
- list_del(&ctx->list);
- tls_device_free_ctx(ctx);
- }
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
+ dev_put(netdev);
+ ctx->netdev = NULL;
+ tls_device_free_ctx(ctx);
}
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
unsigned long flags;
+ bool async_cleanup;
spin_lock_irqsave(&tls_device_lock, flags);
- if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
- goto unlock;
+ if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+ return;
+ }
- list_move_tail(&ctx->list, &tls_device_gc_list);
+ list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
+ async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW;
+ if (async_cleanup) {
+ struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
- /* schedule_work inside the spinlock
- * to make sure tls_device_down waits for that work.
- */
- schedule_work(&tls_device_gc_work);
-unlock:
+ /* queue_work inside the spinlock
+ * to make sure tls_device_down waits for that work.
+ */
+ queue_work(destruct_wq, &offload_ctx->destruct_work);
+ }
spin_unlock_irqrestore(&tls_device_lock, flags);
+
+ if (!async_cleanup)
+ tls_device_free_ctx(ctx);
}
/* We assume that the socket is already connected */
@@ -1150,6 +1145,9 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
start_marker_record->len = 0;
start_marker_record->num_frags = 0;
+ INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
+ offload_ctx->ctx = ctx;
+
INIT_LIST_HEAD(&offload_ctx->records_list);
list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
spin_lock_init(&offload_ctx->lock);
@@ -1383,13 +1381,18 @@ static int tls_device_down(struct net_device *netdev)
* by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
* Now release the ref taken above.
*/
- if (refcount_dec_and_test(&ctx->refcount))
+ if (refcount_dec_and_test(&ctx->refcount)) {
+ /* sk_destruct ran after tls_device_down took a ref, and
+ * it returned early. Complete the destruction here.
+ */
+ list_del(&ctx->list);
tls_device_free_ctx(ctx);
+ }
}
up_write(&device_offload_lock);
- flush_work(&tls_device_gc_work);
+ flush_workqueue(destruct_wq);
return NOTIFY_DONE;
}
@@ -1430,12 +1433,23 @@ static struct notifier_block tls_dev_notifier = {
int __init tls_device_init(void)
{
- return register_netdevice_notifier(&tls_dev_notifier);
+ int err;
+
+ destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
+ if (!destruct_wq)
+ return -ENOMEM;
+
+ err = register_netdevice_notifier(&tls_dev_notifier);
+ if (err)
+ destroy_workqueue(destruct_wq);
+
+ return err;
}
void __exit tls_device_cleanup(void)
{
unregister_netdevice_notifier(&tls_dev_notifier);
- flush_work(&tls_device_gc_work);
+ flush_workqueue(destruct_wq);
+ destroy_workqueue(destruct_wq);
clean_acked_data_flush();
}
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index b945288c312e..f0b7c9122fba 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -187,9 +187,10 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
unsigned int offset, size_t in_len)
{
struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
- size_t sz, len, chunk;
struct sk_buff *skb;
skb_frag_t *frag;
+ size_t len, chunk;
+ int sz;
if (strp->msg_ready)
return 0;
@@ -480,7 +481,7 @@ void tls_strp_done(struct tls_strparser *strp)
int __init tls_strp_dev_init(void)
{
- tls_strp_wq = create_singlethread_workqueue("kstrp");
+ tls_strp_wq = create_workqueue("tls-strp");
if (unlikely(!tls_strp_wq))
return -ENOMEM;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 0fc24a5ce208..17db8c8811fa 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1283,11 +1283,14 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
- bool released, long timeo)
+ bool released)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ long timeo;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
while (!tls_strp_msg_ready(ctx)) {
if (!sk_psock_queue_empty(psock))
@@ -1308,7 +1311,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
if (sock_flag(sk, SOCK_DONE))
return 0;
- if (nonblock || !timeo)
+ if (!timeo)
return -EAGAIN;
released = true;
@@ -1842,8 +1845,8 @@ tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
return sk_flush_backlog(sk);
}
-static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
- bool nonblock)
+static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+ bool nonblock)
{
long timeo;
int err;
@@ -1874,7 +1877,7 @@ static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
WRITE_ONCE(ctx->reader_present, 1);
- return timeo;
+ return 0;
err_unlock:
release_sock(sk);
@@ -1913,8 +1916,7 @@ int tls_sw_recvmsg(struct sock *sk,
struct tls_msg *tlm;
ssize_t copied = 0;
bool async = false;
- int target, err = 0;
- long timeo;
+ int target, err;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool is_peek = flags & MSG_PEEK;
bool released = true;
@@ -1925,9 +1927,9 @@ int tls_sw_recvmsg(struct sock *sk,
return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
psock = sk_psock_get(sk);
- timeo = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
- if (timeo < 0)
- return timeo;
+ err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
+ if (err < 0)
+ return err;
bpf_strp_enabled = sk_psock_strp_enabled(psock);
/* If crypto failed the connection is broken */
@@ -1954,8 +1956,8 @@ int tls_sw_recvmsg(struct sock *sk,
struct tls_decrypt_arg darg;
int to_decrypt, chunk;
- err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT, released,
- timeo);
+ err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
+ released);
if (err <= 0) {
if (psock) {
chunk = sk_msg_recvmsg(sk, psock, msg, len,
@@ -2024,7 +2026,7 @@ put_on_rx_list_err:
bool partially_consumed = chunk > len;
struct sk_buff *skb = darg.skb;
- DEBUG_NET_WARN_ON_ONCE(darg.skb == tls_strp_msg(ctx));
+ DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
if (async) {
/* TLS 1.2-only, to_decrypt must be text len */
@@ -2131,13 +2133,12 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct tls_msg *tlm;
struct sk_buff *skb;
ssize_t copied = 0;
- int err = 0;
- long timeo;
int chunk;
+ int err;
- timeo = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
- if (timeo < 0)
- return timeo;
+ err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
+ if (err < 0)
+ return err;
if (!skb_queue_empty(&ctx->rx_list)) {
skb = __skb_dequeue(&ctx->rx_list);
@@ -2145,7 +2146,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct tls_decrypt_arg darg;
err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
- true, timeo);
+ true);
if (err <= 0)
goto splice_read_end;
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 99a128a666fb..4ce5d2579387 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -13,7 +13,6 @@
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/clk.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
@@ -55,40 +54,8 @@ struct rk_i2s_dev {
const struct rk_i2s_pins *pins;
unsigned int bclk_ratio;
spinlock_t lock; /* tx/rx lock */
- struct pinctrl *pinctrl;
- struct pinctrl_state *bclk_on;
- struct pinctrl_state *bclk_off;
};
-static int i2s_pinctrl_select_bclk_on(struct rk_i2s_dev *i2s)
-{
- int ret = 0;
-
- if (!IS_ERR(i2s->pinctrl) && !IS_ERR_OR_NULL(i2s->bclk_on))
- ret = pinctrl_select_state(i2s->pinctrl,
- i2s->bclk_on);
-
- if (ret)
- dev_err(i2s->dev, "bclk enable failed %d\n", ret);
-
- return ret;
-}
-
-static int i2s_pinctrl_select_bclk_off(struct rk_i2s_dev *i2s)
-{
-
- int ret = 0;
-
- if (!IS_ERR(i2s->pinctrl) && !IS_ERR_OR_NULL(i2s->bclk_off))
- ret = pinctrl_select_state(i2s->pinctrl,
- i2s->bclk_off);
-
- if (ret)
- dev_err(i2s->dev, "bclk disable failed %d\n", ret);
-
- return ret;
-}
-
static int i2s_runtime_suspend(struct device *dev)
{
struct rk_i2s_dev *i2s = dev_get_drvdata(dev);
@@ -125,49 +92,38 @@ static inline struct rk_i2s_dev *to_info(struct snd_soc_dai *dai)
return snd_soc_dai_get_drvdata(dai);
}
-static int rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
+static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
{
unsigned int val = 0;
int retry = 10;
- int ret = 0;
spin_lock(&i2s->lock);
if (on) {
- ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
- I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
- if (ret < 0)
- goto end;
+ regmap_update_bits(i2s->regmap, I2S_DMACR,
+ I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
- ret = regmap_update_bits(i2s->regmap, I2S_XFER,
- I2S_XFER_TXS_START | I2S_XFER_RXS_START,
- I2S_XFER_TXS_START | I2S_XFER_RXS_START);
- if (ret < 0)
- goto end;
+ regmap_update_bits(i2s->regmap, I2S_XFER,
+ I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+ I2S_XFER_TXS_START | I2S_XFER_RXS_START);
i2s->tx_start = true;
} else {
i2s->tx_start = false;
- ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
- I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_DISABLE);
- if (ret < 0)
- goto end;
+ regmap_update_bits(i2s->regmap, I2S_DMACR,
+ I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_DISABLE);
if (!i2s->rx_start) {
- ret = regmap_update_bits(i2s->regmap, I2S_XFER,
- I2S_XFER_TXS_START |
- I2S_XFER_RXS_START,
- I2S_XFER_TXS_STOP |
- I2S_XFER_RXS_STOP);
- if (ret < 0)
- goto end;
+ regmap_update_bits(i2s->regmap, I2S_XFER,
+ I2S_XFER_TXS_START |
+ I2S_XFER_RXS_START,
+ I2S_XFER_TXS_STOP |
+ I2S_XFER_RXS_STOP);
udelay(150);
- ret = regmap_update_bits(i2s->regmap, I2S_CLR,
- I2S_CLR_TXC | I2S_CLR_RXC,
- I2S_CLR_TXC | I2S_CLR_RXC);
- if (ret < 0)
- goto end;
+ regmap_update_bits(i2s->regmap, I2S_CLR,
+ I2S_CLR_TXC | I2S_CLR_RXC,
+ I2S_CLR_TXC | I2S_CLR_RXC);
regmap_read(i2s->regmap, I2S_CLR, &val);
@@ -182,57 +138,44 @@ static int rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
}
}
}
-end:
spin_unlock(&i2s->lock);
- if (ret < 0)
- dev_err(i2s->dev, "lrclk update failed\n");
-
- return ret;
}
-static int rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
+static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
{
unsigned int val = 0;
int retry = 10;
- int ret = 0;
spin_lock(&i2s->lock);
if (on) {
- ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+ regmap_update_bits(i2s->regmap, I2S_DMACR,
I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_ENABLE);
- if (ret < 0)
- goto end;
- ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+ regmap_update_bits(i2s->regmap, I2S_XFER,
I2S_XFER_TXS_START | I2S_XFER_RXS_START,
I2S_XFER_TXS_START | I2S_XFER_RXS_START);
- if (ret < 0)
- goto end;
i2s->rx_start = true;
} else {
i2s->rx_start = false;
- ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+ regmap_update_bits(i2s->regmap, I2S_DMACR,
I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_DISABLE);
- if (ret < 0)
- goto end;
if (!i2s->tx_start) {
- ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+ regmap_update_bits(i2s->regmap, I2S_XFER,
I2S_XFER_TXS_START |
I2S_XFER_RXS_START,
I2S_XFER_TXS_STOP |
I2S_XFER_RXS_STOP);
- if (ret < 0)
- goto end;
+
udelay(150);
- ret = regmap_update_bits(i2s->regmap, I2S_CLR,
+ regmap_update_bits(i2s->regmap, I2S_CLR,
I2S_CLR_TXC | I2S_CLR_RXC,
I2S_CLR_TXC | I2S_CLR_RXC);
- if (ret < 0)
- goto end;
+
regmap_read(i2s->regmap, I2S_CLR, &val);
+
/* Should wait for clear operation to finish */
while (val) {
regmap_read(i2s->regmap, I2S_CLR, &val);
@@ -244,12 +187,7 @@ static int rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
}
}
}
-end:
spin_unlock(&i2s->lock);
- if (ret < 0)
- dev_err(i2s->dev, "lrclk update failed\n");
-
- return ret;
}
static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
@@ -487,26 +425,17 @@ static int rockchip_i2s_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- ret = rockchip_snd_rxctrl(i2s, 1);
+ rockchip_snd_rxctrl(i2s, 1);
else
- ret = rockchip_snd_txctrl(i2s, 1);
- /* Do not turn on bclk if lrclk open fails. */
- if (ret < 0)
- return ret;
- i2s_pinctrl_select_bclk_on(i2s);
+ rockchip_snd_txctrl(i2s, 1);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- if (!i2s->tx_start)
- i2s_pinctrl_select_bclk_off(i2s);
- ret = rockchip_snd_rxctrl(i2s, 0);
- } else {
- if (!i2s->rx_start)
- i2s_pinctrl_select_bclk_off(i2s);
- ret = rockchip_snd_txctrl(i2s, 0);
- }
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ rockchip_snd_rxctrl(i2s, 0);
+ else
+ rockchip_snd_txctrl(i2s, 0);
break;
default:
ret = -EINVAL;
@@ -807,33 +736,6 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
}
i2s->bclk_ratio = 64;
- i2s->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR(i2s->pinctrl))
- dev_err(&pdev->dev, "failed to find i2s pinctrl\n");
-
- i2s->bclk_on = pinctrl_lookup_state(i2s->pinctrl,
- "bclk_on");
- if (IS_ERR_OR_NULL(i2s->bclk_on))
- dev_err(&pdev->dev, "failed to find i2s default state\n");
- else
- dev_dbg(&pdev->dev, "find i2s bclk state\n");
-
- i2s->bclk_off = pinctrl_lookup_state(i2s->pinctrl,
- "bclk_off");
- if (IS_ERR_OR_NULL(i2s->bclk_off))
- dev_err(&pdev->dev, "failed to find i2s gpio state\n");
- else
- dev_dbg(&pdev->dev, "find i2s bclk_off state\n");
-
- i2s_pinctrl_select_bclk_off(i2s);
-
- i2s->playback_dma_data.addr = res->start + I2S_TXDR;
- i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- i2s->playback_dma_data.maxburst = 4;
-
- i2s->capture_dma_data.addr = res->start + I2S_RXDR;
- i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- i2s->capture_dma_data.maxburst = 4;
dev_set_drvdata(&pdev->dev, i2s);
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 1cf53bb01936..7070dcffa822 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -1175,7 +1175,7 @@ static int do_skeleton(int argc, char **argv)
static inline void \n\
%1$s__detach(struct %1$s *obj) \n\
{ \n\
- return bpf_object__detach_skeleton(obj->skeleton); \n\
+ bpf_object__detach_skeleton(obj->skeleton); \n\
} \n\
",
obj_name
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index f081de398b60..c81362a001ba 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1962,7 +1962,7 @@ static int profile_parse_metrics(int argc, char **argv)
int selected_cnt = 0;
unsigned int i;
- metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
+ metric_cnt = ARRAY_SIZE(metrics);
while (argc > 0) {
for (i = 0; i < metric_cnt; i++) {
diff --git a/tools/include/uapi/asm-generic/fcntl.h b/tools/include/uapi/asm-generic/fcntl.h
index 0197042b7dfb..1ecdb911add8 100644
--- a/tools/include/uapi/asm-generic/fcntl.h
+++ b/tools/include/uapi/asm-generic/fcntl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_GENERIC_FCNTL_H
#define _ASM_GENERIC_FCNTL_H
@@ -90,7 +91,7 @@
/* a horrid kludge trying to make sure that this will fail on old kernels */
#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
+#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
@@ -115,11 +116,13 @@
#define F_GETSIG 11 /* for sockets. */
#endif
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
#ifndef F_GETLK64
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
#ifndef F_SETOWN_EX
#define F_SETOWN_EX 15
@@ -178,6 +181,10 @@ struct f_owner_ex {
blocking */
#define LOCK_UN 8 /* remove lock */
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
@@ -185,6 +192,7 @@ struct f_owner_ex {
#define F_LINUX_SPECIFIC_BASE 1024
+#ifndef HAVE_ARCH_STRUCT_FLOCK
struct flock {
short l_type;
short l_whence;
@@ -209,5 +217,6 @@ struct flock64 {
__ARCH_FLOCK64_PAD
#endif
};
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
#endif /* _ASM_GENERIC_FCNTL_H */
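
To illustrate what the new HAVE_ARCH_STRUCT_FLOCK guard enables (a sketch, not taken from any real architecture): an arch UAPI header can provide its own struct flock, define the macro, and then include the generic header, which will skip its generic definition:

    /* hypothetical arch/foo/include/uapi/asm/fcntl.h */
    #define HAVE_ARCH_STRUCT_FLOCK
    struct flock {
            short           l_type;
            short           l_whence;
            __kernel_off_t  l_start;
            __kernel_off_t  l_len;
            __kernel_pid_t  l_pid;
            /* arch-specific padding would go here */
    };

    #include <asm-generic/fcntl.h>
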
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 811897dadcae..860f867c50c0 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -2084,7 +2084,7 @@ struct kvm_stats_header {
#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN
#define KVM_STATS_BASE_SHIFT 8
#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 5eb0df90eb2b..efcc06dafbd9 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -579,11 +579,20 @@ int bpf_obj_pin(int fd, const char *pathname)
int bpf_obj_get(const char *pathname)
{
+ return bpf_obj_get_opts(pathname, NULL);
+}
+
+int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
+{
union bpf_attr attr;
int fd;
+ if (!OPTS_VALID(opts, bpf_obj_get_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
+ attr.file_flags = OPTS_GET(opts, file_flags, 0);
fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
return libbpf_err_errno(fd);
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 88a7cc4bd76f..9c50beabdd14 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -270,8 +270,19 @@ LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values
__u32 *count,
const struct bpf_map_batch_opts *opts);
+struct bpf_obj_get_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
+ __u32 file_flags;
+
+ size_t :0;
+};
+#define bpf_obj_get_opts__last_field file_flags
+
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_get(const char *pathname);
+LIBBPF_API int bpf_obj_get_opts(const char *pathname,
+ const struct bpf_obj_get_opts *opts);
struct bpf_prog_attach_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
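
A hedged usage sketch for the new opts-based getter above: retrieving a pinned object read-only. The pin path and function name are illustrative only.

    #include <stdio.h>
    #include <bpf/bpf.h>

    int get_pinned_map_rdonly(void)
    {
            LIBBPF_OPTS(bpf_obj_get_opts, opts,
                    .file_flags = BPF_F_RDONLY,
            );
            int fd = bpf_obj_get_opts("/sys/fs/bpf/my_map", &opts);

            if (fd < 0)
                    fprintf(stderr, "bpf_obj_get_opts failed: %d\n", fd);
            return fd;
    }

Passing NULL instead of &opts keeps the old bpf_obj_get() behaviour, as the wrapper in bpf.c shows.
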
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index f4d3e1e2abe2..43ca3aff2292 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -523,10 +523,17 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
* Original struct pt_regs * context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output().
*
- * At the moment BPF_KSYSCALL does not handle all the calling convention
- * quirks for mmap(), clone() and compat syscalls transparrently. This may or
- * may not change in the future. User needs to take extra measures to handle
- * such quirks explicitly, if necessary.
+ * At the moment BPF_KSYSCALL does not transparently handle all the calling
+ * convention quirks for the following syscalls:
+ *
+ * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
+ * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
+ * CONFIG_CLONE_BACKWARDS3.
+ * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
+ * - compat syscalls.
+ *
+ * This may or may not change in the future. User needs to take extra measures
+ * to handle such quirks explicitly, if necessary.
*
* This macro relies on BPF CO-RE support and virtual __kconfig externs.
*/
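
A minimal sketch of the macro in use (illustrative, not part of the patch): tracing unlinkat(), which has none of the quirks listed above. It assumes a CO-RE build with a vmlinux.h at hand.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("ksyscall/unlinkat")
    int BPF_KSYSCALL(handle_unlinkat, int dfd, const char *pathname, int flag)
    {
            /* pathname is a user pointer; read it with
             * bpf_probe_read_user_str() if needed, do not dereference it
             * directly.
             */
            bpf_printk("unlinkat() called, dfd=%d", dfd);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
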
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b01fe01b0761..50d41815f431 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -9995,6 +9995,10 @@ static const char *arch_specific_syscall_pfx(void)
return "mips";
#elif defined(__riscv)
return "riscv";
+#elif defined(__powerpc__)
+ return "powerpc";
+#elif defined(__powerpc64__)
+ return "powerpc64";
#else
return NULL;
#endif
@@ -10127,8 +10131,13 @@ struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
return libbpf_err_ptr(-EINVAL);
if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
+ /* arch_specific_syscall_pfx() should never return NULL here
+ * because it is guarded by kernel_supports(). However, the
+ * compiler does not know that, so keep an explicit fallback to an
+ * empty string as well.
+ */
snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
- arch_specific_syscall_pfx(), syscall_name);
+ arch_specific_syscall_pfx() ? : "", syscall_name);
} else {
snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
}
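
Caller-side sketch of the API this code backs (hedged; the program is assumed to already be loaded, e.g. from a skeleton): with syscall wrappers libbpf resolves __<arch>_sys_connect, otherwise __se_sys_connect, exactly as constructed above.

    #include <errno.h>
    #include <stdio.h>
    #include <bpf/libbpf.h>

    static int attach_connect_ksyscall(struct bpf_program *prog)
    {
            struct bpf_link *link;

            /* prog is expected to carry SEC("ksyscall/connect") */
            link = bpf_program__attach_ksyscall(prog, "connect", NULL);
            if (!link) {
                    fprintf(stderr, "attach_ksyscall: %d\n", -errno);
                    return -errno;
            }
            return 0;
    }
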
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 0625adb9e888..119e6e1ea7f1 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -355,6 +355,7 @@ LIBBPF_0.8.0 {
LIBBPF_1.0.0 {
global:
+ bpf_obj_get_opts;
bpf_prog_query_opts;
bpf_program__attach_ksyscall;
btf__add_enum64;
diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST
new file mode 100644
index 000000000000..939de574fc7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST
@@ -0,0 +1,6 @@
+# TEMPORARY
+get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge
+stacktrace_build_id_nmi
+stacktrace_build_id
+task_fd_query_rawtp
+varlen
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
new file mode 100644
index 000000000000..e33cab34d22f
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -0,0 +1,67 @@
+# TEMPORARY
+atomics # attach(add): actual -524 <= expected 0 (trampoline)
+bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc)
+bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
+bpf_tcp_ca # JIT does not support calling kernel function (kfunc)
+bpf_loop # attaches to __x64_sys_nanosleep
+bpf_mod_race # BPF trampoline
+bpf_nf # JIT does not support calling kernel function
+core_read_macros # unknown func bpf_probe_read#4 (overlapping)
+d_path # failed to auto-attach program 'prog_stat': -524 (trampoline)
+dummy_st_ops # test_run unexpected error: -524 (errno 524) (trampoline)
+fentry_fexit # fentry attach failed: -524 (trampoline)
+fentry_test # fentry_first_attach unexpected error: -524 (trampoline)
+fexit_bpf2bpf # freplace_attach_trace unexpected error: -524 (trampoline)
+fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
+fexit_stress # fexit attach failed prog 0 failed: -524 (trampoline)
+fexit_test # fexit_first_attach unexpected error: -524 (trampoline)
+get_func_args_test # trampoline
+get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (trampoline)
+get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
+kfree_skb # attach fentry unexpected error: -524 (trampoline)
+kfunc_call # 'bpf_prog_active': not found in kernel BTF (?)
+ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
+ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
+ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
+modify_return # modify_return attach failed: -524 (trampoline)
+module_attach # skel_attach skeleton attach failed: -524 (trampoline)
+mptcp
+kprobe_multi_test # relies on fentry
+netcnt # failed to load BPF skeleton 'netcnt_prog': -7 (?)
+probe_user # check_kprobe_res wrong kprobe res from probe read (?)
+recursion # skel_attach unexpected error: -524 (trampoline)
+ringbuf # skel_load skeleton load failed (?)
+sk_assign # Can't read on server: Invalid argument (?)
+sk_lookup # endianness problem
+sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (trampoline)
+skc_to_unix_sock # could not attach BPF object unexpected error: -524 (trampoline)
+socket_cookie # prog_attach unexpected error: -524 (trampoline)
+stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
+tailcalls # tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls (?)
+task_local_storage # failed to auto-attach program 'trace_exit_creds': -524 (trampoline)
+test_bpffs # bpffs test failed 255 (iterator)
+test_bprm_opts # failed to auto-attach program 'secure_exec': -524 (trampoline)
+test_ima # failed to auto-attach program 'ima': -524 (trampoline)
+test_local_storage # failed to auto-attach program 'unlink_hook': -524 (trampoline)
+test_lsm # failed to find kernel BTF type ID of '__x64_sys_setdomainname': -3 (?)
+test_overhead # attach_fentry unexpected error: -524 (trampoline)
+test_profiler # unknown func bpf_probe_read_str#45 (overlapping)
+timer # failed to auto-attach program 'test1': -524 (trampoline)
+timer_crash # trampoline
+timer_mim # failed to auto-attach program 'test1': -524 (trampoline)
+trace_ext # failed to auto-attach program 'test_pkt_md_access_new': -524 (trampoline)
+trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?)
+trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?)
+trampoline_count # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
+verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
+vmlinux # failed to auto-attach program 'handle__fentry': -524 (trampoline)
+xdp_adjust_tail # case-128 err 0 errno 28 retval 1 size 128 expect-size 3520 (?)
+xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
+xdp_bpf2bpf # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
+map_kptr # failed to open_and_load program: -524 (trampoline)
+bpf_cookie # failed to open_and_load program: -524 (trampoline)
+xdp_do_redirect # prog_run_max_size unexpected error: -22 (errno 22)
+send_signal # intermittently fails to receive signal
+select_reuseport # intermittently fails on new s390x setup
+xdp_synproxy # JIT does not support calling kernel function (kfunc)
+unpriv_bpf_disabled # fentry
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index c05904d631ec..fabf0c014349 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -1,65 +1,64 @@
+CONFIG_BLK_DEV_LOOP=y
CONFIG_BPF=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_NET_CLS_BPF=m
CONFIG_BPF_EVENTS=y
-CONFIG_TEST_BPF=m
+CONFIG_BPF_JIT=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_BPF_LSM=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
-CONFIG_NETDEVSIM=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_INGRESS=y
-CONFIG_NET_IPIP=y
-CONFIG_IPV6=y
-CONFIG_NET_IPGRE_DEMUX=y
-CONFIG_NET_IPGRE=y
-CONFIG_IPV6_GRE=y
-CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_HMAC=m
CONFIG_CRYPTO_SHA256=m
-CONFIG_VXLAN=y
-CONFIG_GENEVE=y
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_LWTUNNEL=y
-CONFIG_BPF_STREAM_PARSER=y
-CONFIG_XDP_SOCKETS=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FPROBE=y
CONFIG_FTRACE_SYSCALLS=y
-CONFIG_IPV6_TUNNEL=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_GENEVE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IMA=y
+CONFIG_IMA_READ_POLICY=y
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IPV6=y
+CONFIG_IPV6_FOU=m
+CONFIG_IPV6_FOU_TUNNEL=m
CONFIG_IPV6_GRE=y
CONFIG_IPV6_SEG6_BPF=y
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_TUNNEL=y
+CONFIG_LIRC=y
+CONFIG_LWTUNNEL=y
+CONFIG_MPLS=y
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPTCP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_FOU=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_IPV6_FOU=m
-CONFIG_IPV6_FOU_TUNNEL=m
-CONFIG_MPLS=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPIP=y
CONFIG_NET_MPLS_GSO=m
-CONFIG_MPLS_ROUTING=m
-CONFIG_MPLS_IPTUNNEL=m
-CONFIG_IPV6_SIT=m
-CONFIG_BPF_JIT=y
-CONFIG_BPF_LSM=y
-CONFIG_SECURITY=y
-CONFIG_RC_CORE=y
-CONFIG_LIRC=y
-CONFIG_BPF_LIRC_MODE2=y
-CONFIG_IMA=y
-CONFIG_SECURITYFS=y
-CONFIG_IMA_WRITE_POLICY=y
-CONFIG_IMA_READ_POLICY=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_DYNAMIC_FTRACE=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_SCHED=y
+CONFIG_NETDEVSIM=m
CONFIG_NETFILTER=y
+CONFIG_NETFILTER_SYNPROXY=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NF_CONNTRACK=y
CONFIG_NF_DEFRAG_IPV4=y
CONFIG_NF_DEFRAG_IPV6=y
-CONFIG_NF_CONNTRACK=y
+CONFIG_RC_CORE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_TEST_BPF=m
CONFIG_USERFAULTFD=y
-CONFIG_FPROBE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_MPTCP=y
-CONFIG_NETFILTER_SYNPROXY=y
-CONFIG_NETFILTER_XT_TARGET_CT=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_SYNPROXY=y
-CONFIG_IP_NF_RAW=y
+CONFIG_VXLAN=y
+CONFIG_XDP_SOCKETS=y
diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x
new file mode 100644
index 000000000000..f8a7a258a718
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.s390x
@@ -0,0 +1,147 @@
+CONFIG_9P_FS=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPFILTER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_CPUSETS=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_SG=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_MARCH_Z10_FEATURES=y
+CONFIG_HAVE_MARCH_Z196_FEATURES=y
+CONFIG_HEADERS_INSTALL=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HW_RANDOM=y
+CONFIG_HZ_100=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_IKHEADERS=y
+CONFIG_INET6_ESP=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPVLAN=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KERNEL_UNCOMPRESSED=y
+CONFIG_KPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_KRETPROBES=y
+CONFIG_KSM=y
+CONFIG_LATENCYTOP=y
+CONFIG_LIVEPATCH=y
+CONFIG_LOCK_STAT=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MARCH_Z196=y
+CONFIG_MARCH_Z196_TUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_VRF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NF_TABLES=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SAMPLE_SECCOMP=y
+CONFIG_SAMPLES=y
+CONFIG_SCHED_TRACER=y
+CONFIG_SCSI=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_STACK_TRACER=y
+CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_DCTCP=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USELIB=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VLAN_8021Q=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
new file mode 100644
index 000000000000..f0859a1d37ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -0,0 +1,251 @@
+CONFIG_9P_FS=y
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_9P_FS_SECURITY=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_AGP_SIS=y
+CONFIG_AGP_VIA=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_AUDIT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BONDING=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTTIME_TRACING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPFILTER=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=7
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPUSETS=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_XXHASH=y
+CONFIG_DCB=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEFAULT_FQ_CODEL=y
+CONFIG_DEFAULT_RENO=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FAIL_FUNCTION=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_VESA=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_FONTS=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_GART_IOMMU=y
+CONFIG_GENERIC_PHY=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_KYE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HPET=y
+CONFIG_HUGETLBFS=y
+CONFIG_HWPOISON_INJECT=y
+CONFIG_HZ_1000=y
+CONFIG_INET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INTEL_POWERCLAMP=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IRQ_POLL=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_KEXEC=y
+CONFIG_KPROBES=y
+CONFIG_KSM=y
+CONFIG_LEGACY_VSYSCALL_NONE=y
+CONFIG_LOG_BUF_SHIFT=21
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=0
+CONFIG_LOGO=y
+CONFIG_LSM="selinux,bpf,integrity"
+CONFIG_MAC_PARTITION=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MCORE2=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_VRF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETLABEL=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NO_HZ=y
+CONFIG_NR_CPUS=128
+CONFIG_NUMA=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_NVMEM=y
+CONFIG_OSF_PARTITION=y
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PREEMPT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SMP=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_SYNC_FILE=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_USER_NS=y
+CONFIG_VALIDATE_FS_PARSER=y
+CONFIG_VETH=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VLAN_8021Q=y
+CONFIG_X86_ACPI_CPUFREQ=y
+CONFIG_X86_CPUID=y
+CONFIG_X86_MSR=y
+CONFIG_X86_POWERNOW_K8=y
+CONFIG_XDP_SOCKETS_DIAG=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_USER=y
+CONFIG_ZEROPLUS_FF=y
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
index abf890d066eb..34dbd2adc157 100644
--- a/tools/testing/selftests/bpf/prog_tests/probe_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -4,25 +4,35 @@
/* TODO: corrupts other tests uses connect() */
void serial_test_probe_user(void)
{
- const char *prog_name = "handle_sys_connect";
+ static const char *const prog_names[] = {
+ "handle_sys_connect",
+#if defined(__s390x__)
+ "handle_sys_socketcall",
+#endif
+ };
+ enum { prog_count = ARRAY_SIZE(prog_names) };
const char *obj_file = "./test_probe_user.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
int err, results_map_fd, sock_fd, duration = 0;
struct sockaddr curr, orig, tmp;
struct sockaddr_in *in = (struct sockaddr_in *)&curr;
- struct bpf_link *kprobe_link = NULL;
- struct bpf_program *kprobe_prog;
+ struct bpf_link *kprobe_links[prog_count] = {};
+ struct bpf_program *kprobe_progs[prog_count];
struct bpf_object *obj;
static const int zero = 0;
+ size_t i;
obj = bpf_object__open_file(obj_file, &opts);
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
- kprobe_prog = bpf_object__find_program_by_name(obj, prog_name);
- if (CHECK(!kprobe_prog, "find_probe",
- "prog '%s' not found\n", prog_name))
- goto cleanup;
+ for (i = 0; i < prog_count; i++) {
+ kprobe_progs[i] =
+ bpf_object__find_program_by_name(obj, prog_names[i]);
+ if (CHECK(!kprobe_progs[i], "find_probe",
+ "prog '%s' not found\n", prog_names[i]))
+ goto cleanup;
+ }
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
@@ -33,9 +43,11 @@ void serial_test_probe_user(void)
"err %d\n", results_map_fd))
goto cleanup;
- kprobe_link = bpf_program__attach(kprobe_prog);
- if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
- goto cleanup;
+ for (i = 0; i < prog_count; i++) {
+ kprobe_links[i] = bpf_program__attach(kprobe_progs[i]);
+ if (!ASSERT_OK_PTR(kprobe_links[i], "attach_kprobe"))
+ goto cleanup;
+ }
memset(&curr, 0, sizeof(curr));
in->sin_family = AF_INET;
@@ -69,6 +81,7 @@ void serial_test_probe_user(void)
inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
goto cleanup;
cleanup:
- bpf_link__destroy(kprobe_link);
+ for (i = 0; i < prog_count; i++)
+ bpf_link__destroy(kprobe_links[i]);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index d71226e34c34..d63a20fbed33 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -64,7 +64,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
/* wait a little for signal handler */
- for (int i = 0; i < 100000000 && !sigusr1_received; i++)
+ for (int i = 0; i < 1000000000 && !sigusr1_received; i++)
j /= i + j + 1;
buf[0] = sigusr1_received ? '2' : '0';
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
index 3bba4a2a0530..eea274110267 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
@@ -82,6 +82,7 @@
#define MAC_TUNL_DEV0 "52:54:00:d9:01:00"
#define MAC_TUNL_DEV1 "52:54:00:d9:02:00"
+#define MAC_VETH1 "52:54:00:d9:03:00"
#define VXLAN_TUNL_DEV0 "vxlan00"
#define VXLAN_TUNL_DEV1 "vxlan11"
@@ -108,10 +109,9 @@
static int config_device(void)
{
SYS("ip netns add at_ns0");
- SYS("ip link add veth0 type veth peer name veth1");
+ SYS("ip link add veth0 address " MAC_VETH1 " type veth peer name veth1");
SYS("ip link set veth0 netns at_ns0");
SYS("ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1");
- SYS("ip addr add " IP4_ADDR2_VETH1 "/24 dev veth1");
SYS("ip link set dev veth1 up mtu 1500");
SYS("ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0");
SYS("ip netns exec at_ns0 ip link set dev veth0 up mtu 1500");
@@ -140,6 +140,8 @@ static int add_vxlan_tunnel(void)
VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s",
IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0);
+ SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev veth0",
+ IP4_ADDR2_VETH1, MAC_VETH1);
/* root namespace */
SYS("ip link add dev %s type vxlan external gbp dstport 4789",
@@ -277,6 +279,17 @@ static void test_vxlan_tunnel(void)
if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd))
goto done;
+ /* load and attach bpf prog to veth dev tc hook point */
+ ifindex = if_nametoindex("veth1");
+ if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex"))
+ goto done;
+ tc_hook.ifindex = ifindex;
+ set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst);
+ if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (attach_tc_prog(&tc_hook, set_dst_prog_fd, -1))
+ goto done;
+
/* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
nstoken = open_netns("at_ns0");
if (!ASSERT_OK_PTR(nstoken, "setns src"))
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index 8e1495008e4d..a8e501af9604 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -7,8 +7,7 @@
static struct sockaddr_in old;
-SEC("ksyscall/connect")
-int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr, int addrlen)
+static int handle_sys_connect_common(struct sockaddr_in *uservaddr)
{
struct sockaddr_in new;
@@ -19,4 +18,30 @@ int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr, int
return 0;
}
+SEC("ksyscall/connect")
+int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr,
+ int addrlen)
+{
+ return handle_sys_connect_common(uservaddr);
+}
+
+#if defined(bpf_target_s390)
+#ifndef SYS_CONNECT
+#define SYS_CONNECT 3
+#endif
+
+SEC("ksyscall/socketcall")
+int BPF_KSYSCALL(handle_sys_socketcall, int call, unsigned long *args)
+{
+ if (call == SYS_CONNECT) {
+ struct sockaddr_in *uservaddr;
+
+ bpf_probe_read_user(&uservaddr, sizeof(uservaddr), &args[1]);
+ return handle_sys_connect_common(uservaddr);
+ }
+
+ return 0;
+}
+#endif
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index 17f2f325b3f3..df0673c4ecbe 100644
--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
@@ -14,15 +14,24 @@
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/icmp.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/pkt_cls.h>
#include <linux/erspan.h>
+#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define log_err(__ret) bpf_printk("ERROR line:%d ret:%d\n", __LINE__, __ret)
+#define VXLAN_UDP_PORT 4789
+
+/* The only IPv4 address assigned to veth1:
+ * 172.16.1.200
+ */
+#define ASSIGNED_ADDR_VETH1 0xac1001c8
+
struct geneve_opt {
__be16 opt_class;
__u8 type;
@@ -33,6 +42,11 @@ struct geneve_opt {
__u8 opt_data[8]; /* hard-coded to 8 byte */
};
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+} __attribute__((packed));
+
struct vxlan_metadata {
__u32 gbp;
};
@@ -369,14 +383,8 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb)
int ret;
struct bpf_tunnel_key key;
struct vxlan_metadata md;
+ __u32 orig_daddr;
__u32 index = 0;
- __u32 *local_ip = NULL;
-
- local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
- if (!local_ip) {
- log_err(ret);
- return TC_ACT_SHOT;
- }
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
@@ -390,11 +398,10 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb)
return TC_ACT_SHOT;
}
- if (key.local_ipv4 != *local_ip || md.gbp != 0x800FF) {
+ if (key.local_ipv4 != ASSIGNED_ADDR_VETH1 || md.gbp != 0x800FF) {
bpf_printk("vxlan key %d local ip 0x%x remote ip 0x%x gbp 0x%x\n",
key.tunnel_id, key.local_ipv4,
key.remote_ipv4, md.gbp);
- bpf_printk("local_ip 0x%x\n", *local_ip);
log_err(ret);
return TC_ACT_SHOT;
}
@@ -403,6 +410,61 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb)
}
SEC("tc")
+int veth_set_outer_dst(struct __sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)(long)skb->data;
+ __u32 assigned_ip = bpf_htonl(ASSIGNED_ADDR_VETH1);
+ void *data_end = (void *)(long)skb->data_end;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ __u32 index = 0;
+ int ret = 0;
+ int shrink;
+ __s64 csum;
+
+ if ((void *)eth + sizeof(*eth) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ if (eth->h_proto != bpf_htons(ETH_P_IP))
+ return TC_ACT_OK;
+
+ iph = (struct iphdr *)(eth + 1);
+ if ((void *)iph + sizeof(*iph) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (iph->protocol != IPPROTO_UDP)
+ return TC_ACT_OK;
+
+ udph = (struct udphdr *)(iph + 1);
+ if ((void *)udph + sizeof(*udph) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (udph->dest != bpf_htons(VXLAN_UDP_PORT))
+ return TC_ACT_OK;
+
+ if (iph->daddr != assigned_ip) {
+ csum = bpf_csum_diff(&iph->daddr, sizeof(__u32), &assigned_ip,
+ sizeof(__u32), 0);
+ if (bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, daddr),
+ &assigned_ip, sizeof(__u32), 0) < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
+ 0, csum, 0) < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ bpf_skb_change_type(skb, PACKET_HOST);
+ }
+ return TC_ACT_OK;
+}
+
+SEC("tc")
int ip6vxlan_set_tunnel_dst(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index c639f2e56fc5..3561c97701f2 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1604,11 +1604,8 @@ int main(int argc, char **argv)
struct prog_test_def *test = &prog_test_defs[i];
test->test_num = i + 1;
- if (should_run(&env.test_selector,
- test->test_num, test->test_name))
- test->should_run = true;
- else
- test->should_run = false;
+ test->should_run = should_run(&env.test_selector,
+ test->test_num, test->test_name);
if ((test->run_test == NULL && test->run_serial_test == NULL) ||
(test->run_test != NULL && test->run_serial_test != NULL)) {
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index e0bb04a97e10..b86ae4a2e5c5 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -30,8 +30,7 @@ DEFAULT_COMMAND="./test_progs"
MOUNT_DIR="mnt"
ROOTFS_IMAGE="root.img"
OUTPUT_DIR="$HOME/.bpf_selftests"
-KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/config-latest.${ARCH}"
-KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/config-latest.${ARCH}"
+KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config" "tools/testing/selftests/bpf/config.${ARCH}")
INDEX_URL="https://raw.githubusercontent.com/libbpf/ci/master/INDEX"
NUM_COMPILE_JOBS="$(nproc)"
LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")"
@@ -269,26 +268,42 @@ is_rel_path()
[[ ${path:0:1} != "/" ]]
}
+do_update_kconfig()
+{
+ local kernel_checkout="$1"
+ local kconfig_file="$2"
+
+ rm -f "$kconfig_file" 2> /dev/null
+
+ for config in "${KCONFIG_REL_PATHS[@]}"; do
+ local kconfig_src="${kernel_checkout}/${config}"
+ cat "$kconfig_src" >> "$kconfig_file"
+ done
+}
+
update_kconfig()
{
- local kconfig_file="$1"
- local update_command="curl -sLf ${KCONFIG_URL} -o ${kconfig_file}"
- # Github does not return the "last-modified" header when retrieving the
- # raw contents of the file. Use the API call to get the last-modified
- # time of the kernel config and only update the config if it has been
- # updated after the previously cached config was created. This avoids
- # unnecessarily compiling the kernel and selftests.
- if [[ -f "${kconfig_file}" ]]; then
- local last_modified_date="$(curl -sL -D - "${KCONFIG_API_URL}" -o /dev/null | \
- grep "last-modified" | awk -F ': ' '{print $2}')"
- local remote_modified_timestamp="$(date -d "${last_modified_date}" +"%s")"
- local local_creation_timestamp="$(stat -c %Y "${kconfig_file}")"
+ local kernel_checkout="$1"
+ local kconfig_file="$2"
- if [[ "${remote_modified_timestamp}" -gt "${local_creation_timestamp}" ]]; then
- ${update_command}
- fi
+ if [[ -f "${kconfig_file}" ]]; then
+ local local_modified="$(stat -c %Y "${kconfig_file}")"
+
+ for config in "${KCONFIG_REL_PATHS[@]}"; do
+ local kconfig_src="${kernel_checkout}/${config}"
+ local src_modified="$(stat -c %Y "${kconfig_src}")"
+ # Only update the config if it has been updated after the
+ # previously cached config was created. This avoids
+ # unnecessarily compiling the kernel and selftests.
+ if [[ "${src_modified}" -gt "${local_modified}" ]]; then
+ do_update_kconfig "$kernel_checkout" "$kconfig_file"
+ # Once we have found one outdated configuration
+ # there is no need to check other ones.
+ break
+ fi
+ done
else
- ${update_command}
+ do_update_kconfig "$kernel_checkout" "$kconfig_file"
fi
}
@@ -372,7 +387,7 @@ main()
mkdir -p "${OUTPUT_DIR}"
mkdir -p "${mount_dir}"
- update_kconfig "${kconfig_file}"
+ update_kconfig "${kernel_checkout}" "${kconfig_file}"
recompile_kernel "${kernel_checkout}" "${make_command}"
diff --git a/tools/testing/selftests/drivers/net/dsa/Makefile b/tools/testing/selftests/drivers/net/dsa/Makefile
new file mode 100644
index 000000000000..2a731d5c6d85
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+TEST_PROGS = bridge_locked_port.sh \
+ bridge_mdb.sh \
+ bridge_mld.sh \
+ bridge_vlan_aware.sh \
+ bridge_vlan_mcast.sh \
+ bridge_vlan_unaware.sh \
+ local_termination.sh \
+ no_forwarding.sh \
+ test_bridge_fdb_stress.sh
+
+TEST_PROGS_EXTENDED := lib.sh
+
+TEST_FILES := forwarding.config
+
+include ../../../lib.mk
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 71b306602368..616ed4019655 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -3,6 +3,6 @@
TEST_PROGS := gpio-mockup.sh gpio-sim.sh
TEST_FILES := gpio-mockup-sysfs.sh
TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name
-CFLAGS += -O2 -g -Wall -I../../../../usr/include/
+CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
include ../lib.mk
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index 4158da0da2bb..2237d1aac801 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -82,8 +82,9 @@ static int next_cpu(int cpu)
return cpu;
}
-static void *migration_worker(void *ign)
+static void *migration_worker(void *__rseq_tid)
{
+ pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
cpu_set_t allowed_mask;
int r, i, cpu;
@@ -106,7 +107,7 @@ static void *migration_worker(void *ign)
* stable, i.e. while changing affinity is in-progress.
*/
smp_wmb();
- r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+ r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
errno, strerror(errno));
smp_wmb();
@@ -231,7 +232,8 @@ int main(int argc, char *argv[])
vm = vm_create_default(VCPU_ID, 0, guest_code);
ucall_init(vm, NULL);
- pthread_create(&migration_thread, NULL, migration_worker, 0);
+ pthread_create(&migration_thread, NULL, migration_worker,
+ (void *)(unsigned long)gettid());
for (i = 0; !done; i++) {
vcpu_run(vm, VCPU_ID);
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 80628bf8413a..cd86d37146cc 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -35,6 +35,8 @@ TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh
TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
+TEST_PROGS += srv6_hencap_red_l3vpn_test.sh
+TEST_PROGS += srv6_hl2encap_red_l2vpn_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
TEST_PROGS += arp_ndisc_evict_nocarrier.sh
TEST_PROGS += ndisc_unsolicited_na_test.sh
diff --git a/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh b/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh
new file mode 100755
index 000000000000..28a775654b92
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh
@@ -0,0 +1,879 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+#
+# This script is designed for testing the SRv6 H.Encaps.Red behavior.
+#
+# Below is depicted the IPv6 network of an operator which offers advanced
+# IPv4/IPv6 VPN services to hosts, enabling them to communicate with each
+# other.
+# In this example, hosts hs-1 and hs-2 are connected through an IPv4/IPv6 VPN
+# service, while hs-3 and hs-4 are connected using an IPv6 only VPN.
+#
+# Routers rt-1,rt-2,rt-3 and rt-4 implement IPv4/IPv6 L3 VPN services
+# leveraging the SRv6 architecture. The key components for such VPNs are:
+#
+# i) The SRv6 H.Encaps.Red behavior applies SRv6 Policies on traffic received
+# by connected hosts, initiating the VPN tunnel. Such a behavior is an
+# optimization of the SRv6 H.Encap aiming to reduce the length of the SID
+# List carried in the pushed SRH. Specifically, the H.Encaps.Red removes
+# the first SID contained in the SID List (i.e. SRv6 Policy) by storing it
+# into the IPv6 Destination Address. When an SRv6 Policy is made of only one
+# SID, the SRv6 H.Encaps.Red behavior omits the SRH altogether and pushes that
+# SID directly into the IPv6 DA;
+#
+# ii) The SRv6 End behavior advances the active SID in the SID List carried by
+# the SRH;
+#
+# iii) The SRv6 End.DT46 behavior is used for removing the SRv6 Policy and,
+# thus, it terminates the VPN tunnel. Such a behavior is capable of
+# handling, at the same time, both tunneled IPv4 and IPv6 traffic.
+#
+#
+#              cafe::1                        cafe::2
+#              10.0.0.1                       10.0.0.2
+#             +--------+                     +--------+
+#             |        |                     |        |
+#             |  hs-1  |                     |  hs-2  |
+#             |        |                     |        |
+#             +---+----+                     +----+---+
+#   cafe::/64     |                               |     cafe::/64
+# 10.0.0.0/24     |                               |   10.0.0.0/24
+#             +---+----+                     +----+---+
+#             |        |   fcf0:0:1:2::/64   |        |
+#             |  rt-1  +---------------------+  rt-2  |
+#             |        |                     |        |
+#             +---+----+                     +----+---+
+#                 |     .                   .     |
+#                 |  fcf0:0:1:3::/64     .        |
+#                 |          .         .          |
+#                 |             .   .             |
+# fcf0:0:1:4::/64 |               .               | fcf0:0:2:3::/64
+#                 |             .   .             |
+#                 |          .         .          |
+#                 |  fcf0:0:2:4::/64     .        |
+#                 |     .                   .     |
+#             +---+----+                     +----+---+
+#             |        |                     |        |
+#             |  rt-4  +---------------------+  rt-3  |
+#             |        |   fcf0:0:3:4::/64   |        |
+#             +---+----+                     +----+---+
+#   cafe::/64     |                               |     cafe::/64
+# 10.0.0.0/24     |                               |   10.0.0.0/24
+#             +---+----+                     +----+---+
+#             |        |                     |        |
+#             |  hs-4  |                     |  hs-3  |
+#             |        |                     |        |
+#             +--------+                     +--------+
+#              cafe::4                        cafe::3
+#              10.0.0.4                       10.0.0.3
+#
+#
+# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y
+# in the IPv6 operator network.
+#
+# Local SID table
+# ===============
+#
+# Each SRv6 router is configured with a Local SID table in which SIDs are
+# stored. Considering the given SRv6 router rt-x, at least two SIDs are
+# configured in the Local SID table:
+#
+# Local SID table for SRv6 router rt-x
+# +----------------------------------------------------------+
+# |fcff:x::e is associated with the SRv6 End behavior |
+# |fcff:x::d46 is associated with the SRv6 End.DT46 behavior |
+# +----------------------------------------------------------+
+#
+# The fcff::/16 prefix is reserved by the operator for implementing SRv6 VPN
+# services. Reachability of SIDs is ensured by proper configuration of the IPv6
+# operator's network and SRv6 routers.
+#
+# SRv6 Policies
+# ===============
+#
+# An SRv6 ingress router applies SRv6 policies to the traffic received from a
+# connected host. SRv6 policy enforcement consists of encapsulating the
+# received traffic into a new IPv6 packet with a given SID List contained in
+# the SRH.
+#
+# IPv4/IPv6 VPN between hs-1 and hs-2
+# -----------------------------------
+#
+# Hosts hs-1 and hs-2 are connected using dedicated IPv4/IPv6 VPNs.
+# Specifically, packets generated from hs-1 and directed towards hs-2 are
+# handled by rt-1 which applies the following SRv6 Policies:
+#
+# i.a) IPv6 traffic, SID List=fcff:3::e,fcff:4::e,fcff:2::d46
+# ii.a) IPv4 traffic, SID List=fcff:2::d46
+#
+# Policy (i.a) steers tunneled IPv6 traffic through SRv6 routers
+# rt-3,rt-4,rt-2. Instead, Policy (ii.a) steers tunneled IPv4 traffic through
+# rt-2.
+# The H.Encaps.Red reduces the SID List (i.a) carried in SRH by removing the
+# first SID (fcff:3::e) and pushing it into the IPv6 DA. In case of IPv4
+# traffic, the H.Encaps.Red omits the SRH altogether, since the SID
+# List (ii.a) consists of only one SID that can be stored directly in the IPv6
+# DA.
+#
+# On the reverse path (i.e. from hs-2 to hs-1), rt-2 applies the following
+# policies:
+#
+# i.b) IPv6 traffic, SID List=fcff:1::d46
+# ii.b) IPv4 traffic, SID List=fcff:4::e,fcff:3::e,fcff:1::d46
+#
+# Policy (i.b) steers tunneled IPv6 traffic through the SRv6 router rt-1.
+# Conversely, Policy (ii.b) steers tunneled IPv4 traffic through SRv6 routers
+# rt-4,rt-3,rt-1.
+# The H.Encaps.Red omits the SRH altogether in case of (i.b) by pushing the
+# single SID (fcff:1::d46) into the IPv6 DA.
+# The H.Encaps.Red reduces the SID List (ii.b) in the SRH by removing the first
+# SID (fcff:4::e) and pushing it into the IPv6 DA.
+#
+# In summary:
+# hs-1->hs-2 |IPv6 DA=fcff:3::e|SRH SIDs=fcff:4::e,fcff:2::d46|IPv6|...| (i.a)
+# hs-1->hs-2 |IPv6 DA=fcff:2::d46|IPv4|...| (ii.a)
+#
+# hs-2->hs-1 |IPv6 DA=fcff:1::d46|IPv6|...| (i.b)
+# hs-2->hs-1 |IPv6 DA=fcff:4::e|SRH SIDs=fcff:3::e,fcff:1::d46|IPv4|...| (ii.b)
+#
+#
+# IPv6 VPN between hs-3 and hs-4
+# ------------------------------
+#
+# Hosts hs-3 and hs-4 are connected using a dedicated IPv6 only VPN.
+# Specifically, packets generated from hs-3 and directed towards hs-4 are
+# handled by rt-3 which applies the following SRv6 Policy:
+#
+# i.c) IPv6 traffic, SID List=fcff:2::e,fcff:4::d46
+#
+# Policy (i.c) steers tunneled IPv6 traffic through SRv6 routers rt-2,rt-4.
+# The H.Encaps.Red reduces the SID List (i.c) carried in the SRH by removing
+# the first SID (fcff:2::e) and pushing it into the IPv6 DA.
+#
+# On the reverse path (i.e. from hs-4 to hs-3) the router rt-4 applies the
+# following SRv6 Policy:
+#
+# i.d) IPv6 traffic, SID List=fcff:1::e,fcff:3::d46.
+#
+# Policy (i.d) steers tunneled IPv6 traffic through SRv6 routers rt-1,rt-3.
+# The H.Encaps.Red reduces the SID List (i.d) carried in the SRH by removing
+# the first SID (fcff:1::e) and pushing it into the IPv6 DA.
+#
+# In summary:
+# hs-3->hs-4 |IPv6 DA=fcff:2::e|SRH SIDs=fcff:4::d46|IPv6|...| (i.c)
+# hs-4->hs-3 |IPv6 DA=fcff:1::e|SRH SIDs=fcff:3::d46|IPv6|...| (i.d)
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+readonly ksft_skip=4
+
+readonly RDMSUFF="$(mktemp -u XXXXXXXX)"
+readonly VRF_TID=100
+readonly VRF_DEVNAME="vrf-${VRF_TID}"
+readonly RT2HS_DEVNAME="veth-t${VRF_TID}"
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fcf0:0
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fcff
+readonly END_FUNC=000e
+readonly DT46_FUNC=0d46
+
+PING_TIMEOUT_SEC=4
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+# IDs of routers and hosts are initialized during the setup of the testing
+# network
+ROUTERS=''
+HOSTS=''
+
+SETUP_ERR=1
+
+ret=${ksft_skip}
+nsuccess=0
+nfail=0
+
+log_test()
+{
+ local rc="$1"
+ local expected="$2"
+ local msg="$3"
+
+ if [ "${rc}" -eq "${expected}" ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ printf "\nTests passed: %3d\n" "${nsuccess}"
+ printf "Tests failed: %3d\n" "${nfail}"
+
+ # when a test fails, the value of 'ret' is set to 1 (error code).
+ # Conversely, when all tests are passed successfully, the 'ret' value
+ # is set to 0 (success code).
+ if [ "${ret}" -ne 1 ]; then
+ ret=0
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+test_command_or_ksft_skip()
+{
+ local cmd="$1"
+
+ if [ ! -x "$(command -v "${cmd}")" ]; then
+ echo "SKIP: Could not run test without \"${cmd}\" tool";
+ exit "${ksft_skip}"
+ fi
+}
+
+get_nodename()
+{
+ local name="$1"
+
+ echo "${name}-${RDMSUFF}"
+}
+
+get_rtname()
+{
+ local rtid="$1"
+
+ get_nodename "rt-${rtid}"
+}
+
+get_hsname()
+{
+ local hsid="$1"
+
+ get_nodename "hs-${hsid}"
+}
+
+__create_namespace()
+{
+ local name="$1"
+
+ ip netns add "${name}"
+}
+
+create_router()
+{
+ local rtid="$1"
+ local nsname
+
+ nsname="$(get_rtname "${rtid}")"
+
+ __create_namespace "${nsname}"
+}
+
+create_host()
+{
+ local hsid="$1"
+ local nsname
+
+ nsname="$(get_hsname "${hsid}")"
+
+ __create_namespace "${nsname}"
+}
+
+cleanup()
+{
+ local nsname
+ local i
+
+ # destroy routers
+ for i in ${ROUTERS}; do
+ nsname="$(get_rtname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # destroy hosts
+ for i in ${HOSTS}; do
+ nsname="$(get_hsname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # check whether the setup phase was completed successfully or not. In
+ # case of an error during the setup phase of the testing environment,
+ # the selftest is considered as "skipped".
+ if [ "${SETUP_ERR}" -ne 0 ]; then
+ echo "SKIP: Setting up the testing environment failed"
+ exit "${ksft_skip}"
+ fi
+
+ exit "${ret}"
+}
+
+add_link_rt_pairs()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local neigh
+ local nsname
+ local neigh_nsname
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ neigh_nsname="$(get_rtname "${neigh}")"
+
+ ip link add "veth-rt-${rt}-${neigh}" netns "${nsname}" \
+ type veth peer name "veth-rt-${neigh}-${rt}" \
+ netns "${neigh_nsname}"
+ done
+}
+
+get_network_prefix()
+{
+ local rt="$1"
+ local neigh="$2"
+ local p="${rt}"
+ local q="${neigh}"
+
+ if [ "${p}" -gt "${q}" ]; then
+ p="${q}"; q="${rt}"
+ fi
+
+ echo "${IPv6_RT_NETWORK}:${p}:${q}"
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local nsname
+ local net_prefix
+ local devname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ ip -netns "${nsname}" addr \
+ add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+
+ ip -netns "${nsname}" link set "${devname}" up
+ done
+
+ ip -netns "${nsname}" link set lo up
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.ip_forward=1
+}
+
+# Setup local SIDs for an SRv6 router
+setup_rt_local_sids()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local net_prefix
+ local devname
+ local nsname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ # set underlay network routes for SIDs reachability
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${neigh}::/32" \
+ table "${LOCALSID_TABLE_ID}" \
+ via "${net_prefix}::${neigh}" dev "${devname}"
+ done
+
+ # Local End behavior (note that "dev" is dummy and the VRF is chosen
+ # for the sake of simplicity).
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${rt}::${END_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End dev "${VRF_DEVNAME}"
+
+ # Local End.DT46 behavior
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${rt}::${DT46_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End.DT46 vrftable "${VRF_TID}" \
+ dev "${VRF_DEVNAME}"
+
+ # All SIDs for VPNs start with a common locator. Routes and SRv6
+ # Endpoint behavior instances are grouped together in the 'localsid'
+ # table.
+ ip -netns "${nsname}" -6 rule \
+ add to "${VPN_LOCATOR_SERVICE}::/16" \
+ lookup "${LOCALSID_TABLE_ID}" prio 999
+
+ # set default routes to unreachable for both ipv4 and ipv6
+ ip -netns "${nsname}" -6 route \
+ add unreachable default metric 4278198272 \
+ vrf "${VRF_DEVNAME}"
+
+ ip -netns "${nsname}" -4 route \
+ add unreachable default metric 4278198272 \
+ vrf "${VRF_DEVNAME}"
+}
+
+# build and install the SRv6 policy into the ingress SRv6 router.
+# args:
+# $1 - destination host (i.e. cafe::x host)
+# $2 - SRv6 router configured for enforcing the SRv6 Policy
+# $3 - SRv6 routers configured for steering traffic (End behaviors)
+# $4 - SRv6 router configured for removing the SRv6 Policy (router connected
+# to the destination host)
+# $5 - encap mode (full or red)
+# $6 - traffic type (IPv6 or IPv4)
+__setup_rt_policy()
+{
+ local dst="$1"
+ local encap_rt="$2"
+ local end_rts="$3"
+ local dec_rt="$4"
+ local mode="$5"
+ local traffic="$6"
+ local nsname
+ local policy=''
+ local n
+
+ nsname="$(get_rtname "${encap_rt}")"
+
+ for n in ${end_rts}; do
+ policy="${policy}${VPN_LOCATOR_SERVICE}:${n}::${END_FUNC},"
+ done
+
+ policy="${policy}${VPN_LOCATOR_SERVICE}:${dec_rt}::${DT46_FUNC}"
+
+ # add SRv6 policy to incoming traffic sent by connected hosts
+ if [ "${traffic}" -eq 6 ]; then
+ ip -netns "${nsname}" -6 route \
+ add "${IPv6_HS_NETWORK}::${dst}" vrf "${VRF_DEVNAME}" \
+ encap seg6 mode "${mode}" segs "${policy}" \
+ dev "${VRF_DEVNAME}"
+
+ ip -netns "${nsname}" -6 neigh \
+ add proxy "${IPv6_HS_NETWORK}::${dst}" \
+ dev "${RT2HS_DEVNAME}"
+ else
+ # "dev" must be different from the one where the packet is
+ # received, otherwise the proxy arp does not work.
+ ip -netns "${nsname}" -4 route \
+ add "${IPv4_HS_NETWORK}.${dst}" vrf "${VRF_DEVNAME}" \
+ encap seg6 mode "${mode}" segs "${policy}" \
+ dev "${VRF_DEVNAME}"
+ fi
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv6()
+{
+ __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 6
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv4()
+{
+ __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 4
+}
+
+setup_hs()
+{
+ local hs="$1"
+ local rt="$2"
+ local hsname
+ local rtname
+
+ hsname="$(get_hsname "${hs}")"
+ rtname="$(get_rtname "${rt}")"
+
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip -netns "${hsname}" link add veth0 type veth \
+ peer name "${RT2HS_DEVNAME}" netns "${rtname}"
+
+ ip -netns "${hsname}" addr \
+ add "${IPv6_HS_NETWORK}::${hs}/64" dev veth0 nodad
+ ip -netns "${hsname}" addr add "${IPv4_HS_NETWORK}.${hs}/24" dev veth0
+
+ ip -netns "${hsname}" link set veth0 up
+ ip -netns "${hsname}" link set lo up
+
+ # configure the VRF on the router which is directly connected to the
+ # source host.
+ ip -netns "${rtname}" link \
+ add "${VRF_DEVNAME}" type vrf table "${VRF_TID}"
+ ip -netns "${rtname}" link set "${VRF_DEVNAME}" up
+
+ # enslave the veth interface connecting the router with the host to the
+ # VRF in the access router
+ ip -netns "${rtname}" link \
+ set "${RT2HS_DEVNAME}" master "${VRF_DEVNAME}"
+
+ ip -netns "${rtname}" addr \
+ add "${IPv6_HS_NETWORK}::254/64" dev "${RT2HS_DEVNAME}" nodad
+ ip -netns "${rtname}" addr \
+ add "${IPv4_HS_NETWORK}.254/24" dev "${RT2HS_DEVNAME}"
+
+ ip -netns "${rtname}" link set "${RT2HS_DEVNAME}" up
+
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv6.conf."${RT2HS_DEVNAME}".proxy_ndp=1
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".proxy_arp=1
+
+ # disable rp_filter, otherwise the kernel gets confused about how
+ # to route decapsulated IPv4 packets.
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".rp_filter=0
+
+ ip netns exec "${rtname}" sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+}
+
+setup()
+{
+ local i
+
+ # create routers
+ ROUTERS="1 2 3 4"; readonly ROUTERS
+ for i in ${ROUTERS}; do
+ create_router "${i}"
+ done
+
+ # create hosts
+ HOSTS="1 2 3 4"; readonly HOSTS
+ for i in ${HOSTS}; do
+ create_host "${i}"
+ done
+
+ # set up the links for connecting routers
+ add_link_rt_pairs 1 "2 3 4"
+ add_link_rt_pairs 2 "3 4"
+ add_link_rt_pairs 3 "4"
+
+ # set up the basic connectivity of routers and routes required for
+ # reachability of SIDs.
+ setup_rt_networking 1 "2 3 4"
+ setup_rt_networking 2 "1 3 4"
+ setup_rt_networking 3 "1 2 4"
+ setup_rt_networking 4 "1 2 3"
+
+ # set up the hosts connected to routers
+ setup_hs 1 1
+ setup_hs 2 2
+ setup_hs 3 3
+ setup_hs 4 4
+
+ # set up default SRv6 Endpoints (i.e. SRv6 End and SRv6 End.DT46)
+ setup_rt_local_sids 1 "2 3 4"
+ setup_rt_local_sids 2 "1 3 4"
+ setup_rt_local_sids 3 "1 2 4"
+ setup_rt_local_sids 4 "1 2 3"
+
+ # set up SRv6 policies
+
+ # create an IPv6 VPN between hosts hs-1 and hs-2.
+ # the network path between hs-1 and hs-2 traverses several routers
+ # depending on the direction of traffic.
+ #
+ # Direction hs-1 -> hs-2 (H.Encaps.Red)
+ # - rt-3,rt-4 (SRv6 End behaviors)
+ # - rt-2 (SRv6 End.DT46 behavior)
+ #
+ # Direction hs-2 -> hs-1 (H.Encaps.Red)
+ # - rt-1 (SRv6 End.DT46 behavior)
+ setup_rt_policy_ipv6 2 1 "3 4" 2 encap.red
+ setup_rt_policy_ipv6 1 2 "" 1 encap.red
+
+ # create an IPv4 VPN between hosts hs-1 and hs-2
+ # the network path between hs-1 and hs-2 traverses several routers
+ # depending on the direction of traffic.
+ #
+ # Direction hs-1 -> hs-2 (H.Encaps.Red)
+ # - rt-2 (SRv6 End.DT46 behavior)
+ #
+ # Direction hs-2 -> hs-1 (H.Encaps.Red)
+ # - rt-4,rt-3 (SRv6 End behaviors)
+ # - rt-1 (SRv6 End.DT46 behavior)
+ setup_rt_policy_ipv4 2 1 "" 2 encap.red
+ setup_rt_policy_ipv4 1 2 "4 3" 1 encap.red
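+
+ # As a rough sketch (the actual command is built by __setup_rt_policy
+ # above), the call 'setup_rt_policy_ipv4 2 1 "" 2 encap.red' makes rt-1
+ # install, inside its VRF, a route equivalent to:
+ #
+ #  ip -4 route add ${IPv4_HS_NETWORK}.2 vrf ${VRF_DEVNAME} \
+ #    encap seg6 mode encap.red \
+ #    segs ${VPN_LOCATOR_SERVICE}:2::${DT46_FUNC} dev ${VRF_DEVNAME}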
+
+ # create an IPv6 VPN between hosts hs-3 and hs-4
+ # the network path between hs-3 and hs-4 traverses several routers
+ # depending on the direction of traffic.
+ #
+ # Direction hs-3 -> hs-4 (H.Encaps.Red)
+ # - rt-2 (SRv6 End Behavior)
+ # - rt-4 (SRv6 End.DT46 behavior)
+ #
+ # Direction hs-4 -> hs-3 (H.Encaps.Red)
+ # - rt-1 (SRv6 End behavior)
+ # - rt-3 (SRv6 End.DT46 behavior)
+ setup_rt_policy_ipv6 4 3 "2" 4 encap.red
+ setup_rt_policy_ipv6 3 4 "1" 3 encap.red
+
+ # testing environment was set up successfully
+ SETUP_ERR=0
+}
+
+check_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+ local prefix
+ local rtsrc_nsname
+
+ rtsrc_nsname="$(get_rtname "${rtsrc}")"
+
+ prefix="$(get_network_prefix "${rtsrc}" "${rtdst}")"
+
+ ip netns exec "${rtsrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${prefix}::${rtdst}" >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+
+ check_rt_connectivity "${rtsrc}" "${rtdst}"
+ log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv6_HS_NETWORK}::${hsdst}" >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv4_HS_NETWORK}.${hsdst}" >/dev/null 2>&1
+}
+
+check_and_log_hs2gw_connectivity()
+{
+ local hssrc="$1"
+
+ check_hs_ipv6_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> gw"
+
+ check_hs_ipv4_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> gw"
+}
+
+check_and_log_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_and_log_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ check_and_log_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+}
+
+check_and_log_hs_ipv6_isolation()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ # in this case, the connectivity test must fail
+ check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 1 "IPv6 Hosts isolation: hs-${hssrc} -X-> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_isolation()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ # in this case, the connectivity test must fail
+ check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 1 "IPv4 Hosts isolation: hs-${hssrc} -X-> hs-${hsdst}"
+}
+
+check_and_log_hs_isolation()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_and_log_hs_ipv6_isolation "${hssrc}" "${hsdst}"
+ check_and_log_hs_ipv4_isolation "${hssrc}" "${hsdst}"
+}
+
+router_tests()
+{
+ local i
+ local j
+
+ log_section "IPv6 routers connectivity test"
+
+ for i in ${ROUTERS}; do
+ for j in ${ROUTERS}; do
+ if [ "${i}" -eq "${j}" ]; then
+ continue
+ fi
+
+ check_and_log_rt_connectivity "${i}" "${j}"
+ done
+ done
+}
+
+host2gateway_tests()
+{
+ local hs
+
+ log_section "IPv4/IPv6 connectivity test among hosts and gateways"
+
+ for hs in ${HOSTS}; do
+ check_and_log_hs2gw_connectivity "${hs}"
+ done
+}
+
+host_vpn_tests()
+{
+ log_section "SRv6 VPN connectivity test hosts (h1 <-> h2, IPv4/IPv6)"
+
+ check_and_log_hs_connectivity 1 2
+ check_and_log_hs_connectivity 2 1
+
+ log_section "SRv6 VPN connectivity test hosts (h3 <-> h4, IPv6 only)"
+
+ check_and_log_hs_ipv6_connectivity 3 4
+ check_and_log_hs_ipv6_connectivity 4 3
+}
+
+host_vpn_isolation_tests()
+{
+ local l1="1 2"
+ local l2="3 4"
+ local tmp
+ local i
+ local j
+ local k
+
+ log_section "SRv6 VPN isolation test among hosts"
+
+ for k in 0 1; do
+ for i in ${l1}; do
+ for j in ${l2}; do
+ check_and_log_hs_isolation "${i}" "${j}"
+ done
+ done
+
+ # let us test the reverse path
+ tmp="${l1}"; l1="${l2}"; l2="${tmp}"
+ done
+
+ log_section "SRv6 VPN isolation test among hosts (h2 <-> h4, IPv4 only)"
+
+ check_and_log_hs_ipv4_isolation 2 4
+ check_and_log_hs_ipv4_isolation 4 2
+}
+
+test_iproute2_supp_or_ksft_skip()
+{
+ if ! ip route help 2>&1 | grep -qo "encap.red"; then
+ echo "SKIP: Missing SRv6 encap.red support in iproute2"
+ exit "${ksft_skip}"
+ fi
+}
+
+test_vrf_or_ksft_skip()
+{
+ modprobe vrf &>/dev/null || true
+ if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
+ echo "SKIP: vrf sysctl does not exist"
+ exit "${ksft_skip}"
+ fi
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "SKIP: Need root privileges"
+ exit "${ksft_skip}"
+fi
+
+# required programs to carry out this selftest
+test_command_or_ksft_skip ip
+test_command_or_ksft_skip ping
+test_command_or_ksft_skip sysctl
+test_command_or_ksft_skip grep
+
+test_iproute2_supp_or_ksft_skip
+test_vrf_or_ksft_skip
+
+set -e
+trap cleanup EXIT
+
+setup
+set +e
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+host_vpn_isolation_tests
+
+print_log_test_results
diff --git a/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh b/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh
new file mode 100755
index 000000000000..cb4177d41b21
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh
@@ -0,0 +1,821 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+#
+# This script is designed for testing the SRv6 H.L2Encaps.Red behavior.
+#
+# Below is depicted the IPv6 network of an operator which offers L2 VPN
+# services to hosts, enabling them to communicate with each other.
+# In this example, hosts hs-1 and hs-2 are connected through an L2 VPN service.
+# Currently, the SRv6 subsystem in Linux allows hosts hs-1 and hs-2 to exchange
+# full L2 frames as long as they carry IPv4/IPv6.
+#
+# Routers rt-1,rt-2,rt-3 and rt-4 implement L2 VPN services
+# leveraging the SRv6 architecture. The key components for such VPNs are:
+#
+# i) The SRv6 H.L2Encaps.Red behavior applies SRv6 Policies to traffic
+# received from connected hosts, initiating the VPN tunnel. This behavior
+# is an optimization of SRv6 H.L2Encap, aiming to reduce the length of the
+# SID List carried in the pushed SRH. Specifically, H.L2Encaps.Red removes
+# the first SID contained in the SID List (i.e. the SRv6 Policy) by storing
+# it in the IPv6 Destination Address. When an SRv6 Policy is made of only
+# one SID, the SRv6 H.L2Encaps.Red behavior omits the SRH entirely and
+# pushes that SID directly into the IPv6 DA;
+#
+# ii) The SRv6 End behavior advances the active SID in the SID List
+# carried by the SRH;
+#
+# iii) The SRv6 End.DX2 behavior removes the SRv6 Policy and thus
+# terminates the VPN tunnel. The decapsulated L2 frame is sent over the
+# interface connected to the destination host.
+#
+#               cafe::1                      cafe::2
+#              10.0.0.1                     10.0.0.2
+#             +--------+                   +--------+
+#             |        |                   |        |
+#             |  hs-1  |                   |  hs-2  |
+#             |        |                   |        |
+#             +---+----+                   +----+---+
+#    cafe::/64    |                             |      cafe::/64
+#  10.0.0.0/24    |                             |    10.0.0.0/24
+#             +---+----+                   +----+---+
+#             |        |  fcf0:0:1:2::/64  |        |
+#             |  rt-1  +-------------------+  rt-2  |
+#             |        |                   |        |
+#             +---+----+                   +----+---+
+#                 |      .                 .    |
+#                 |  fcf0:0:1:3::/64   .        |
+#                 |          .       .          |
+#                 |            .   .            |
+# fcf0:0:1:4::/64 |              .              | fcf0:0:2:3::/64
+#                 |            .   .            |
+#                 |          .       .          |
+#                 |  fcf0:0:2:4::/64   .        |
+#                 |      .                 .    |
+#             +---+----+                   +----+---+
+#             |        |                   |        |
+#             |  rt-4  +-------------------+  rt-3  |
+#             |        |  fcf0:0:3:4::/64  |        |
+#             +---+----+                   +----+---+
+#
+#
+# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y
+# in the IPv6 operator network.
+#
+# Local SID table
+# ===============
+#
+# Each SRv6 router is configured with a Local SID table in which SIDs are
+# stored. Considering the given SRv6 router rt-x, at least two SIDs are
+# configured in the Local SID table:
+#
+# Local SID table for SRv6 router rt-x
+# +----------------------------------------------------------+
+# |fcff:x::e is associated with the SRv6 End behavior |
+# |fcff:x::d2 is associated with the SRv6 End.DX2 behavior |
+# +----------------------------------------------------------+
+#
+# The fcff::/16 prefix is reserved by the operator for implementing SRv6 VPN
+# services. Reachability of SIDs is ensured by proper configuration of the IPv6
+# operator's network and SRv6 routers.
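+#
+# As a sketch, on rt-1 these two SIDs are realized later in this script (by
+# setup_rt_local_sids() and setup_decap(), inside the rt-1 netns) roughly as:
+#
+#   ip -6 route add fcff:1::e table 90 encap seg6local action End dev dum0
+#   ip -6 route add fcff:1::d2 table 90 \
+#      encap seg6local action End.DX2 oif veth-hs dev veth-hs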
+#
+# SRv6 Policies
+# =============
+#
+# An SRv6 ingress router applies SRv6 policies to the traffic received from a
+# connected host. SRv6 policy enforcement consists of encapsulating the
+# received traffic into a new IPv6 packet with a given SID List contained in
+# the SRH.
+#
+# L2 VPN between hs-1 and hs-2
+# ----------------------------
+#
+# Hosts hs-1 and hs-2 are connected using a dedicated L2 VPN.
+# Specifically, packets generated from hs-1 and directed towards hs-2 are
+# handled by rt-1 which applies the following SRv6 Policies:
+#
+# i.a) L2 traffic, SID List=fcff:2::d2
+#
+# Policy (i.a) steers tunneled L2 traffic through SRv6 router rt-2.
+# The H.L2Encaps.Red behavior omits the SRH entirely, since the SID List
+# consists of only one SID (fcff:2::d2), which can be stored directly in the
+# IPv6 DA.
+#
+# On the reverse path (i.e. from hs-2 to hs-1), rt-2 applies the following
+# policies:
+#
+# i.b) L2 traffic, SID List=fcff:4::e,fcff:3::e,fcff:1::d2
+#
+# Policy (i.b) steers tunneled L2 traffic through the SRv6 routers
+# rt-4, rt-3 and rt-1. The H.L2Encaps.Red behavior reduces the SID List in the
+# SRH by removing the first SID (fcff:4::e) and pushing it into the IPv6 DA.
+#
+# In summary:
+# hs-1->hs-2 |IPv6 DA=fcff:2::d2|eth|...| (i.a)
+# hs-2->hs-1 |IPv6 DA=fcff:4::e|SRH SIDs=fcff:3::e,fcff:1::d2|eth|...| (i.b)
+#
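+# As a concrete sketch, the policies above correspond to routes of the
+# following form (built by __setup_rt_policy() below, inside each netns):
+#
+# rt-1, policy (i.a):
+#   ip -6 route add cafe::2 encap seg6 mode l2encap.red segs fcff:2::d2 dev dum0
+#   ip -4 route add 10.0.0.2 encap seg6 mode l2encap.red segs fcff:2::d2 dev dum0
+#
+# rt-2, policy (i.b):
+#   ip -6 route add cafe::1 encap seg6 mode l2encap.red \
+#      segs fcff:4::e,fcff:3::e,fcff:1::d2 dev dum0
+#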
+
+# Kselftest framework requirement - SKIP code is 4.
+readonly ksft_skip=4
+
+readonly RDMSUFF="$(mktemp -u XXXXXXXX)"
+readonly DUMMY_DEVNAME="dum0"
+readonly RT2HS_DEVNAME="veth-hs"
+readonly HS_VETH_NAME="veth0"
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fcf0:0
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fcff
+readonly MAC_PREFIX=00:00:00:c0:01
+readonly END_FUNC=000e
+readonly DX2_FUNC=00d2
+
+PING_TIMEOUT_SEC=4
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+# IDs of routers and hosts are initialized during the setup of the testing
+# network
+ROUTERS=''
+HOSTS=''
+
+SETUP_ERR=1
+
+ret=${ksft_skip}
+nsuccess=0
+nfail=0
+
+log_test()
+{
+ local rc="$1"
+ local expected="$2"
+ local msg="$3"
+
+ if [ "${rc}" -eq "${expected}" ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ printf "\nTests passed: %3d\n" "${nsuccess}"
+ printf "Tests failed: %3d\n" "${nfail}"
+
+ # when a test fails, 'ret' is set to 1 (error code). Conversely, when
+ # all tests pass, 'ret' is set to 0 (success code).
+ if [ "${ret}" -ne 1 ]; then
+ ret=0
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+test_command_or_ksft_skip()
+{
+ local cmd="$1"
+
+ if [ ! -x "$(command -v "${cmd}")" ]; then
+ echo "SKIP: Could not run test without \"${cmd}\" tool";
+ exit "${ksft_skip}"
+ fi
+}
+
+get_nodename()
+{
+ local name="$1"
+
+ echo "${name}-${RDMSUFF}"
+}
+
+get_rtname()
+{
+ local rtid="$1"
+
+ get_nodename "rt-${rtid}"
+}
+
+get_hsname()
+{
+ local hsid="$1"
+
+ get_nodename "hs-${hsid}"
+}
+
+__create_namespace()
+{
+ local name="$1"
+
+ ip netns add "${name}"
+}
+
+create_router()
+{
+ local rtid="$1"
+ local nsname
+
+ nsname="$(get_rtname "${rtid}")"
+
+ __create_namespace "${nsname}"
+}
+
+create_host()
+{
+ local hsid="$1"
+ local nsname
+
+ nsname="$(get_hsname "${hsid}")"
+
+ __create_namespace "${nsname}"
+}
+
+cleanup()
+{
+ local nsname
+ local i
+
+ # destroy routers
+ for i in ${ROUTERS}; do
+ nsname="$(get_rtname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # destroy hosts
+ for i in ${HOSTS}; do
+ nsname="$(get_hsname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # check whether the setup phase completed successfully. If an error
+ # occurred while setting up the testing environment, the selftest is
+ # considered "skipped".
+ if [ "${SETUP_ERR}" -ne 0 ]; then
+ echo "SKIP: Setting up the testing environment failed"
+ exit "${ksft_skip}"
+ fi
+
+ exit "${ret}"
+}
+
+add_link_rt_pairs()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local neigh
+ local nsname
+ local neigh_nsname
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ neigh_nsname="$(get_rtname "${neigh}")"
+
+ ip link add "veth-rt-${rt}-${neigh}" netns "${nsname}" \
+ type veth peer name "veth-rt-${neigh}-${rt}" \
+ netns "${neigh_nsname}"
+ done
+}
+
+get_network_prefix()
+{
+ local rt="$1"
+ local neigh="$2"
+ local p="${rt}"
+ local q="${neigh}"
+
+ if [ "${p}" -gt "${q}" ]; then
+ p="${q}"; q="${rt}"
+ fi
+
+ echo "${IPv6_RT_NETWORK}:${p}:${q}"
+}
+
+# Set up the basic networking for the routers
+setup_rt_networking()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local nsname
+ local net_prefix
+ local devname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ ip -netns "${nsname}" addr \
+ add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+
+ ip -netns "${nsname}" link set "${devname}" up
+ done
+
+ ip -netns "${nsname}" link add "${DUMMY_DEVNAME}" type dummy
+
+ ip -netns "${nsname}" link set "${DUMMY_DEVNAME}" up
+ ip -netns "${nsname}" link set lo up
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.ip_forward=1
+}
+
+# Set up local SIDs for an SRv6 router
+setup_rt_local_sids()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local net_prefix
+ local devname
+ local nsname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ # set underlay network routes for SIDs reachability
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${neigh}::/32" \
+ table "${LOCALSID_TABLE_ID}" \
+ via "${net_prefix}::${neigh}" dev "${devname}"
+ done
+
+ # Local End behavior (note that dev "${DUMMY_DEVNAME}" is a dummy
+ # interface)
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${rt}::${END_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End dev "${DUMMY_DEVNAME}"
+
+ # all SIDs for VPNs start with a common locator. Routes and SRv6
+ # Endpoint behavior instances are grouped together in the 'localsid'
+ # table.
+ ip -netns "${nsname}" -6 rule add \
+ to "${VPN_LOCATOR_SERVICE}::/16" \
+ lookup "${LOCALSID_TABLE_ID}" prio 999
+}
+
+# build and install the SRv6 policy into the ingress SRv6 router.
+# args:
+# $1 - destination host (i.e. cafe::x host)
+# $2 - SRv6 router configured for enforcing the SRv6 Policy
+# $3 - SRv6 routers configured for steering traffic (End behaviors)
+# $4 - SRv6 router configured for removing the SRv6 Policy (router connected
+# to the destination host)
+# $5 - encap mode (full or red)
+# $6 - traffic type (IPv6 or IPv4)
+__setup_rt_policy()
+{
+ local dst="$1"
+ local encap_rt="$2"
+ local end_rts="$3"
+ local dec_rt="$4"
+ local mode="$5"
+ local traffic="$6"
+ local nsname
+ local policy=''
+ local n
+
+ nsname="$(get_rtname "${encap_rt}")"
+
+ for n in ${end_rts}; do
+ policy="${policy}${VPN_LOCATOR_SERVICE}:${n}::${END_FUNC},"
+ done
+
+ policy="${policy}${VPN_LOCATOR_SERVICE}:${dec_rt}::${DX2_FUNC}"
+
+ # add SRv6 policy to incoming traffic sent by connected hosts
+ if [ "${traffic}" -eq 6 ]; then
+ ip -netns "${nsname}" -6 route \
+ add "${IPv6_HS_NETWORK}::${dst}" \
+ encap seg6 mode "${mode}" segs "${policy}" \
+ dev dum0
+ else
+ ip -netns "${nsname}" -4 route \
+ add "${IPv4_HS_NETWORK}.${dst}" \
+ encap seg6 mode "${mode}" segs "${policy}" \
+ dev dum0
+ fi
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv6()
+{
+ __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 6
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv4()
+{
+ __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 4
+}
+
+setup_decap()
+{
+ local rt="$1"
+ local nsname
+
+ nsname="$(get_rtname "${rt}")"
+
+ # Local End.DX2 behavior
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${rt}::${DX2_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End.DX2 oif "${RT2HS_DEVNAME}" \
+ dev "${RT2HS_DEVNAME}"
+}
+
+setup_hs()
+{
+ local hs="$1"
+ local rt="$2"
+ local hsname
+ local rtname
+
+ hsname="$(get_hsname "${hs}")"
+ rtname="$(get_rtname "${rt}")"
+
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip -netns "${hsname}" link add "${HS_VETH_NAME}" type veth \
+ peer name "${RT2HS_DEVNAME}" netns "${rtname}"
+
+ ip -netns "${hsname}" addr add "${IPv6_HS_NETWORK}::${hs}/64" \
+ dev "${HS_VETH_NAME}" nodad
+ ip -netns "${hsname}" addr add "${IPv4_HS_NETWORK}.${hs}/24" \
+ dev "${HS_VETH_NAME}"
+
+ ip -netns "${hsname}" link set "${HS_VETH_NAME}" up
+ ip -netns "${hsname}" link set lo up
+
+ ip -netns "${rtname}" addr add "${IPv6_HS_NETWORK}::254/64" \
+ dev "${RT2HS_DEVNAME}" nodad
+ ip -netns "${rtname}" addr \
+ add "${IPv4_HS_NETWORK}.254/24" dev "${RT2HS_DEVNAME}"
+
+ ip -netns "${rtname}" link set "${RT2HS_DEVNAME}" up
+
+ # disable rp_filter, otherwise the kernel gets confused about how to
+ # route decapsulated IPv4 packets.
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".rp_filter=0
+}
+
+# set an auto-generated MAC address
+# args:
+# $1 - name of the node (e.g.: hs-1, rt-3, etc)
+# $2 - id of the node (e.g.: 1 for hs-1, 3 for rt-3, etc)
+# $3 - host part of the IPv6 network address
+# $4 - name of the network interface to which the generated MAC address must
+# be set.
+set_mac_address()
+{
+ local nodename="$1"
+ local nodeid="$2"
+ local host="$3"
+ local ifname="$4"
+ local nsname
+
+ nsname=$(get_nodename "${nodename}")
+
+ ip -netns "${nsname}" link set dev "${ifname}" down
+
+ ip -netns "${nsname}" link set address "${MAC_PREFIX}:${nodeid}" \
+ dev "${ifname}"
+
+ # the IPv6 address must be set once again after the MAC address has
+ # been changed.
+ ip -netns "${nsname}" addr add "${IPv6_HS_NETWORK}::${host}/64" \
+ dev "${ifname}" nodad
+
+ ip -netns "${nsname}" link set dev "${ifname}" up
+}
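+
+# For example, set_mac_address hs-1 1 1 veth0 (as invoked from setup_l2vpn()
+# below) assigns the MAC address 00:00:00:c0:01:1 to veth0 in the hs-1 netns
+# and then re-adds cafe::1/64 to it.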
+
+set_host_l2peer()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local ipprefix="$3"
+ local proto="$4"
+ local hssrc_name
+ local ipaddr
+
+ hssrc_name="$(get_hsname "${hssrc}")"
+
+ if [ "${proto}" -eq 6 ]; then
+ ipaddr="${ipprefix}::${hsdst}"
+ else
+ ipaddr="${ipprefix}.${hsdst}"
+ fi
+
+ ip -netns "${hssrc_name}" route add "${ipaddr}" dev "${HS_VETH_NAME}"
+
+ ip -netns "${hssrc_name}" neigh \
+ add "${ipaddr}" lladdr "${MAC_PREFIX}:${hsdst}" \
+ dev "${HS_VETH_NAME}"
+}
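+
+# For example, set_host_l2peer 1 2 cafe 6 (as invoked from setup_l2vpn()
+# below) installs on hs-1 the route "cafe::2 dev veth0" and a static neighbor
+# entry mapping cafe::2 to the MAC address 00:00:00:c0:01:2.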
+
+# set up an SRv6 L2 VPN between hosts hs-x and hs-y (currently, the SRv6
+# subsystem only supports L2 frames whose layer-3 is IPv4/IPv6).
+# args:
+# $1 - source host
+# $2 - SRv6 routers configured for steering tunneled traffic
+# $3 - destination host
+setup_l2vpn()
+{
+ local hssrc="$1"
+ local end_rts="$2"
+ local hsdst="$3"
+ local rtsrc="${hssrc}"
+ local rtdst="${hsdst}"
+
+ # set a fixed MAC address on the source host and add static L2 peer
+ # entries (route and neighbor) for the destination host
+ set_mac_address "hs-${hssrc}" "${hssrc}" "${hssrc}" "${HS_VETH_NAME}"
+ set_host_l2peer "${hssrc}" "${hsdst}" "${IPv6_HS_NETWORK}" 6
+ set_host_l2peer "${hssrc}" "${hsdst}" "${IPv4_HS_NETWORK}" 4
+
+ # we have to set the MAC address of the veth-host (on the ingress
+ # router) to the MAC address of the remote peer (the L2 VPN destination
+ # host). Otherwise, traffic coming from the source host is dropped at
+ # the ingress router.
+ set_mac_address "rt-${rtsrc}" "${hsdst}" 254 "${RT2HS_DEVNAME}"
+
+ # set the SRv6 Policies at the ingress router
+ setup_rt_policy_ipv6 "${hsdst}" "${rtsrc}" "${end_rts}" "${rtdst}" \
+ l2encap.red 6
+ setup_rt_policy_ipv4 "${hsdst}" "${rtsrc}" "${end_rts}" "${rtdst}" \
+ l2encap.red 4
+
+ # set the decap behavior
+ setup_decap "${rtsrc}"
+}
+
+setup()
+{
+ local i
+
+ # create routers
+ ROUTERS="1 2 3 4"; readonly ROUTERS
+ for i in ${ROUTERS}; do
+ create_router "${i}"
+ done
+
+ # create hosts
+ HOSTS="1 2"; readonly HOSTS
+ for i in ${HOSTS}; do
+ create_host "${i}"
+ done
+
+ # set up the links for connecting routers
+ add_link_rt_pairs 1 "2 3 4"
+ add_link_rt_pairs 2 "3 4"
+ add_link_rt_pairs 3 "4"
+
+ # set up the basic connectivity of routers and routes required for
+ # reachability of SIDs.
+ setup_rt_networking 1 "2 3 4"
+ setup_rt_networking 2 "1 3 4"
+ setup_rt_networking 3 "1 2 4"
+ setup_rt_networking 4 "1 2 3"
+
+ # set up the hosts connected to routers
+ setup_hs 1 1
+ setup_hs 2 2
+
+ # set up default SRv6 Endpoints (i.e. SRv6 End). The SRv6 End.DX2
+ # behaviors are configured later by setup_l2vpn() through setup_decap().
+ setup_rt_local_sids 1 "2 3 4"
+ setup_rt_local_sids 2 "1 3 4"
+ setup_rt_local_sids 3 "1 2 4"
+ setup_rt_local_sids 4 "1 2 3"
+
+ # create an L2 VPN between hs-1 and hs-2.
+ # NB: currently, H.L2Encap* enables tunneling of L2 frames whose
+ # layer-3 is IPv4/IPv6.
+ #
+ # the network path between hs-1 and hs-2 traverses several routers
+ # depending on the direction of traffic.
+ #
+ # Direction hs-1 -> hs-2 (H.L2Encaps.Red)
+ # - rt-2 (SRv6 End.DX2 behavior)
+ #
+ # Direction hs-2 -> hs-1 (H.L2Encaps.Red)
+ # - rt-4,rt-3 (SRv6 End behaviors)
+ # - rt-1 (SRv6 End.DX2 behavior)
+ setup_l2vpn 1 "" 2
+ setup_l2vpn 2 "4 3" 1
+
+ # testing environment was set up successfully
+ SETUP_ERR=0
+}
+
+check_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+ local prefix
+ local rtsrc_nsname
+
+ rtsrc_nsname="$(get_rtname "${rtsrc}")"
+
+ prefix="$(get_network_prefix "${rtsrc}" "${rtdst}")"
+
+ ip netns exec "${rtsrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${prefix}::${rtdst}" >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+
+ check_rt_connectivity "${rtsrc}" "${rtdst}"
+ log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv6_HS_NETWORK}::${hsdst}" >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv4_HS_NETWORK}.${hsdst}" >/dev/null 2>&1
+}
+
+check_and_log_hs2gw_connectivity()
+{
+ local hssrc="$1"
+
+ check_hs_ipv6_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> gw"
+
+ check_hs_ipv4_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> gw"
+}
+
+check_and_log_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_and_log_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ check_and_log_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+}
+
+router_tests()
+{
+ local i
+ local j
+
+ log_section "IPv6 routers connectivity test"
+
+ for i in ${ROUTERS}; do
+ for j in ${ROUTERS}; do
+ if [ "${i}" -eq "${j}" ]; then
+ continue
+ fi
+
+ check_and_log_rt_connectivity "${i}" "${j}"
+ done
+ done
+}
+
+host2gateway_tests()
+{
+ local hs
+
+ log_section "IPv4/IPv6 connectivity test among hosts and gateways"
+
+ for hs in ${HOSTS}; do
+ check_and_log_hs2gw_connectivity "${hs}"
+ done
+}
+
+host_vpn_tests()
+{
+ log_section "SRv6 L2 VPN connectivity test hosts (h1 <-> h2)"
+
+ check_and_log_hs_connectivity 1 2
+ check_and_log_hs_connectivity 2 1
+}
+
+test_dummy_dev_or_ksft_skip()
+{
+ local test_netns
+
+ test_netns="dummy-$(mktemp -u XXXXXXXX)"
+
+ if ! ip netns add "${test_netns}"; then
+ echo "SKIP: Cannot set up netns for testing dummy dev support"
+ exit "${ksft_skip}"
+ fi
+
+ modprobe dummy &>/dev/null || true
+ if ! ip -netns "${test_netns}" link \
+ add "${DUMMY_DEVNAME}" type dummy; then
+ echo "SKIP: dummy dev not supported"
+
+ ip netns del "${test_netns}"
+ exit "${ksft_skip}"
+ fi
+
+ ip netns del "${test_netns}"
+}
+
+test_iproute2_supp_or_ksft_skip()
+{
+ if ! ip route help 2>&1 | grep -qo "l2encap.red"; then
+ echo "SKIP: Missing SRv6 l2encap.red support in iproute2"
+ exit "${ksft_skip}"
+ fi
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "SKIP: Need root privileges"
+ exit "${ksft_skip}"
+fi
+
+# required programs to carry out this selftest
+test_command_or_ksft_skip ip
+test_command_or_ksft_skip ping
+test_command_or_ksft_skip sysctl
+test_command_or_ksft_skip grep
+
+test_iproute2_supp_or_ksft_skip
+test_dummy_dev_or_ksft_skip
+
+set -e
+trap cleanup EXIT
+
+setup
+set +e
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+
+print_log_test_results
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 4ecbac197c46..2cbb12736596 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -644,12 +644,14 @@ TEST_F(tls, splice_from_pipe2)
int p2[2];
int p[2];
+ memrnd(mem_send, sizeof(mem_send));
+
ASSERT_GE(pipe(p), 0);
ASSERT_GE(pipe(p2), 0);
- EXPECT_GE(write(p[1], mem_send, 8000), 0);
- EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0);
- EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0);
- EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0);
+ EXPECT_EQ(write(p[1], mem_send, 8000), 8000);
+ EXPECT_EQ(splice(p[0], NULL, self->fd, NULL, 8000, 0), 8000);
+ EXPECT_EQ(write(p2[1], mem_send + 8000, 8000), 8000);
+ EXPECT_EQ(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 8000);
EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
@@ -683,10 +685,12 @@ TEST_F(tls, splice_to_pipe)
char mem_recv[TLS_PAYLOAD_MAX_LEN];
int p[2];
+ memrnd(mem_send, sizeof(mem_send));
+
ASSERT_GE(pipe(p), 0);
- EXPECT_GE(send(self->fd, mem_send, send_len, 0), 0);
- EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0);
- EXPECT_GE(read(p[0], mem_recv, send_len), 0);
+ EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
+ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), send_len);
+ EXPECT_EQ(read(p[0], mem_recv, send_len), send_len);
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
@@ -875,6 +879,8 @@ TEST_F(tls, multiple_send_single_recv)
char recv_mem[2 * 10];
char send_mem[10];
+ memrnd(send_mem, sizeof(send_mem));
+
EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
memset(recv_mem, 0, total_len);
@@ -891,6 +897,8 @@ TEST_F(tls, single_send_multiple_recv_non_align)
char recv_mem[recv_len * 2];
char send_mem[total_len];
+ memrnd(send_mem, sizeof(send_mem));
+
EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
memset(recv_mem, 0, total_len);
@@ -936,10 +944,10 @@ TEST_F(tls, recv_peek)
char buf[15];
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_PEEK), send_len);
EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
memset(buf, 0, sizeof(buf));
- EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
}