211 files changed, 6664 insertions, 4338 deletions
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt index 93eac7ce1446..cccd945fc45b 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt @@ -3,7 +3,8 @@ Rockchip SoC RK3288 10/100/1000 Ethernet driver(GMAC) The device node has following properties. Required properties: - - compatible: Can be one of "rockchip,rk3288-gmac", "rockchip,rk3368-gmac" + - compatible: Can be one of "rockchip,rk3228-gmac", "rockchip,rk3288-gmac", + "rockchip,rk3368-gmac" - reg: addresses and length of the register sets for the device. - interrupts: Should contain the GMAC interrupts. - interrupt-names: Should contain the interrupt names "macirq". diff --git a/MAINTAINERS b/MAINTAINERS index 50f69ba7499a..f5ddaa901133 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2578,12 +2578,11 @@ S: Supported F: drivers/net/ethernet/broadcom/tg3.* BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER -M: Brett Rudley <brudley@broadcom.com> -M: Arend van Spriel <arend@broadcom.com> -M: Franky (Zhenhui) Lin <frankyl@broadcom.com> -M: Hante Meuleman <meuleman@broadcom.com> +M: Arend van Spriel <arend.vanspriel@broadcom.com> +M: Franky Lin <franky.lin@broadcom.com> +M: Hante Meuleman <hante.meuleman@broadcom.com> L: linux-wireless@vger.kernel.org -L: brcm80211-dev-list@broadcom.com +L: brcm80211-dev-list.pdl@broadcom.com S: Supported F: drivers/net/wireless/broadcom/brcm80211/ @@ -7172,6 +7171,12 @@ W: http://www.kernel.org/doc/man-pages L: linux-man@vger.kernel.org S: Maintained +MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER +M: Andrew Lunn <andrew@lunn.ch> +M: Vivien Didelot <vivien.didelot@savoirfairelinux.com> +S: Maintained +F: drivers/net/dsa/mv88e6xxx/ + MARVELL ARMADA DRM SUPPORT M: Russell King <rmk+kernel@armlinux.org.uk> S: Maintained @@ -7179,11 +7184,6 @@ F: drivers/gpu/drm/armada/ F: include/uapi/drm/armada_drm.h F: Documentation/devicetree/bindings/display/armada/ -MARVELL 88E6352 DSA support -M: Guenter Roeck <linux@roeck-us.net> -S: Maintained -F: drivers/net/dsa/mv88e6352.c - MARVELL CRYPTO DRIVER M: Boris Brezillon <boris.brezillon@free-electrons.com> M: Arnaud Ebalard <arno@natisbad.org> @@ -10280,10 +10280,9 @@ W: http://www.avagotech.com S: Supported F: drivers/scsi/be2iscsi/ -Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER +Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net) M: Sathya Perla <sathya.perla@broadcom.com> M: Ajit Khaparde <ajit.khaparde@broadcom.com> -M: Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com> M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com> M: Somnath Kotur <somnath.kotur@broadcom.com> L: netdev@vger.kernel.org diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index be481e15ec9b..8f4544394f44 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -9,14 +9,6 @@ config NET_DSA_MV88E6060 This enables support for the Marvell 88E6060 ethernet switch chip. -config NET_DSA_MV88E6XXX - tristate "Marvell 88E6xxx Ethernet switch chip support" - depends on NET_DSA - select NET_DSA_TAG_EDSA - ---help--- - This enables support for most of the Marvell 88E6xxx models of - Ethernet switch chips, except 88E6060. 
- config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" depends on HAS_IOMEM && NET_DSA @@ -30,4 +22,6 @@ config NET_DSA_BCM_SF2 source "drivers/net/dsa/b53/Kconfig" +source "drivers/net/dsa/mv88e6xxx/Kconfig" + endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 97bc70a7f3c7..ca1e71b853a6 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o obj-y += b53/ +obj-y += mv88e6xxx/ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 5321083379c7..444de66667b9 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -679,7 +679,7 @@ static void b53_get_ethtool_stats(struct dsa_switch *ds, int port, for (i = 0; i < mib_size; i++) { s = &mibs[i]; - if (mibs->size == 8) { + if (s->size == 8) { b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); } else { u32 val32; diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig new file mode 100644 index 000000000000..490bc06f993e --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -0,0 +1,7 @@ +config NET_DSA_MV88E6XXX + tristate "Marvell 88E6xxx Ethernet switch fabric support" + depends on NET_DSA + select NET_DSA_TAG_EDSA + help + This driver adds support for most of the Marvell 88E6xxx models of + Ethernet switch chips, except 88E6060. diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile new file mode 100644 index 000000000000..6e29a75ee2f7 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_NET_DSA_MV88E6XXX) += chip.o diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx/chip.c index 9b116d8d4e23..5cb06f7673af 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1,5 +1,6 @@ /* - * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support + * Marvell 88e6xxx Ethernet switch single-chip support + * * Copyright (c) 2008 Marvell Semiconductor * * Copyright (c) 2015 CMC Electronics, Inc. @@ -30,10 +31,10 @@ #include <net/switchdev.h> #include "mv88e6xxx.h" -static void assert_reg_lock(struct mv88e6xxx_priv_state *ps) +static void assert_reg_lock(struct mv88e6xxx_chip *chip) { - if (unlikely(!mutex_is_locked(&ps->reg_lock))) { - dev_err(ps->dev, "Switch registers lock not held!\n"); + if (unlikely(!mutex_is_locked(&chip->reg_lock))) { + dev_err(chip->dev, "Switch registers lock not held!\n"); dump_stack(); } } @@ -50,30 +51,30 @@ static void assert_reg_lock(struct mv88e6xxx_priv_state *ps) * 2 registers, used to indirectly access the internal SMI devices. 
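The comment above describes the chip's two SMI access modes: when the switch answers on a non-zero SMI address, its internal registers are only reachable indirectly through the SMI command/data register pair. Below is a minimal stand-alone sketch of that indirect sequence (poll the busy bit, issue the command, poll again, fetch the data); bus_read()/bus_write() and the SMI_* constants are illustrative stand-ins for the mdiobus helpers and register layout used by the driver, not its actual definitions.

/* Toy user-space model of the multi-chip (indirect) SMI access performed
 * via mdiobus_{read,write}_nested() in the driver; names and bit values
 * here are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define SMI_CMD          0
#define SMI_DATA         1
#define SMI_CMD_BUSY     (1 << 15)
#define SMI_CMD_OP_READ  ((1 << 15) | (1 << 12) | (2 << 10))

static uint16_t regs[2];                  /* fake SMI_CMD/SMI_DATA pair */

static int bus_read(int sw_addr, int reg)
{
        return regs[reg];
}

static int bus_write(int sw_addr, int reg, uint16_t val)
{
        regs[reg] = val & ~SMI_CMD_BUSY;  /* pretend the op completes at once */
        return 0;
}

/* Bounded poll of the busy bit, like mv88e6xxx_smi_multi_chip_wait() */
static int smi_wait(int sw_addr)
{
        for (int i = 0; i < 16; i++) {
                int ret = bus_read(sw_addr, SMI_CMD);

                if (ret < 0)
                        return ret;
                if (!(ret & SMI_CMD_BUSY))
                        return 0;
        }
        return -1;                        /* -ETIMEDOUT */
}

/* Indirect read: wait, issue the read command, wait, then fetch SMI_DATA */
static int smi_indirect_read(int sw_addr, int addr, int reg, uint16_t *val)
{
        if (smi_wait(sw_addr) < 0)
                return -1;
        if (bus_write(sw_addr, SMI_CMD, SMI_CMD_OP_READ | (addr << 5) | reg) < 0)
                return -1;
        if (smi_wait(sw_addr) < 0)
                return -1;
        *val = (uint16_t)bus_read(sw_addr, SMI_DATA);
        return 0;
}

int main(void)
{
        uint16_t val;

        regs[SMI_DATA] = 0x1a52;          /* pretend this is a port register */
        if (!smi_indirect_read(0x10, 0x11, 0x03, &val))
                printf("indirect read returned 0x%04x\n", val);
        return 0;
}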
*/ -static int mv88e6xxx_smi_read(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val) { - if (!ps->smi_ops) + if (!chip->smi_ops) return -EOPNOTSUPP; - return ps->smi_ops->read(ps, addr, reg, val); + return chip->smi_ops->read(chip, addr, reg, val); } -static int mv88e6xxx_smi_write(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) { - if (!ps->smi_ops) + if (!chip->smi_ops) return -EOPNOTSUPP; - return ps->smi_ops->write(ps, addr, reg, val); + return chip->smi_ops->write(chip, addr, reg, val); } -static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val) { int ret; - ret = mdiobus_read_nested(ps->bus, addr, reg); + ret = mdiobus_read_nested(chip->bus, addr, reg); if (ret < 0) return ret; @@ -82,12 +83,12 @@ static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_priv_state *ps, return 0; } -static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) { int ret; - ret = mdiobus_write_nested(ps->bus, addr, reg, val); + ret = mdiobus_write_nested(chip->bus, addr, reg, val); if (ret < 0) return ret; @@ -99,13 +100,13 @@ static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = { .write = mv88e6xxx_smi_single_chip_write, }; -static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip) { int ret; int i; for (i = 0; i < 16; i++) { - ret = mdiobus_read_nested(ps->bus, ps->sw_addr, SMI_CMD); + ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD); if (ret < 0) return ret; @@ -116,29 +117,29 @@ static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_priv_state *ps) return -ETIMEDOUT; } -static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val) { int ret; /* Wait for the bus to become free. */ - ret = mv88e6xxx_smi_multi_chip_wait(ps); + ret = mv88e6xxx_smi_multi_chip_wait(chip); if (ret < 0) return ret; /* Transmit the read command. */ - ret = mdiobus_write_nested(ps->bus, ps->sw_addr, SMI_CMD, + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD, SMI_CMD_OP_22_READ | (addr << 5) | reg); if (ret < 0) return ret; /* Wait for the read command to complete. */ - ret = mv88e6xxx_smi_multi_chip_wait(ps); + ret = mv88e6xxx_smi_multi_chip_wait(chip); if (ret < 0) return ret; /* Read the data. */ - ret = mdiobus_read_nested(ps->bus, ps->sw_addr, SMI_DATA); + ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA); if (ret < 0) return ret; @@ -147,29 +148,29 @@ static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_priv_state *ps, return 0; } -static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) { int ret; /* Wait for the bus to become free. */ - ret = mv88e6xxx_smi_multi_chip_wait(ps); + ret = mv88e6xxx_smi_multi_chip_wait(chip); if (ret < 0) return ret; /* Transmit the data to write. */ - ret = mdiobus_write_nested(ps->bus, ps->sw_addr, SMI_DATA, val); + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val); if (ret < 0) return ret; /* Transmit the write command. 
*/ - ret = mdiobus_write_nested(ps->bus, ps->sw_addr, SMI_CMD, + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD, SMI_CMD_OP_22_WRITE | (addr << 5) | reg); if (ret < 0) return ret; /* Wait for the write command to complete. */ - ret = mv88e6xxx_smi_multi_chip_wait(ps); + ret = mv88e6xxx_smi_multi_chip_wait(chip); if (ret < 0) return ret; @@ -181,105 +182,103 @@ static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = { .write = mv88e6xxx_smi_multi_chip_write, }; -static int mv88e6xxx_read(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val) { int err; - assert_reg_lock(ps); + assert_reg_lock(chip); - err = mv88e6xxx_smi_read(ps, addr, reg, val); + err = mv88e6xxx_smi_read(chip, addr, reg, val); if (err) return err; - dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", + dev_dbg(chip->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", addr, reg, *val); return 0; } -static int mv88e6xxx_write(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) { int err; - assert_reg_lock(ps); + assert_reg_lock(chip); - err = mv88e6xxx_smi_write(ps, addr, reg, val); + err = mv88e6xxx_smi_write(chip, addr, reg, val); if (err) return err; - dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", + dev_dbg(chip->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", addr, reg, val); return 0; } -static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, - int addr, int reg) +static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg) { u16 val; int err; - err = mv88e6xxx_read(ps, addr, reg, &val); + err = mv88e6xxx_read(chip, addr, reg, &val); if (err) return err; return val; } -static int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, - int reg) +static int mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg) { int ret; - mutex_lock(&ps->reg_lock); - ret = _mv88e6xxx_reg_read(ps, addr, reg); - mutex_unlock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_reg_read(chip, addr, reg); + mutex_unlock(&chip->reg_lock); return ret; } -static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, +static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) { - return mv88e6xxx_write(ps, addr, reg, val); + return mv88e6xxx_write(chip, addr, reg, val); } -static int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr, +static int mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val) { int ret; - mutex_lock(&ps->reg_lock); - ret = _mv88e6xxx_reg_write(ps, addr, reg, val); - mutex_unlock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_reg_write(chip, addr, reg, val); + mutex_unlock(&chip->reg_lock); return ret; } static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int err; - err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01, + err = mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]); if (err) return err; - err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23, + err = mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); if (err) return err; - return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45, + return mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); } static int 
mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int ret; int i; @@ -287,7 +286,7 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) int j; /* Write the MAC address byte. */ - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, + ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]); if (ret) @@ -295,7 +294,7 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) /* Wait for the write to complete. */ for (j = 0; j < 16; j++) { - ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, + ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC); if (ret < 0) return ret; @@ -312,47 +311,47 @@ static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC)) + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SWITCH_MAC)) return mv88e6xxx_set_addr_indirect(ds, addr); else return mv88e6xxx_set_addr_direct(ds, addr); } -static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip, int addr, int regnum) { if (addr >= 0) - return _mv88e6xxx_reg_read(ps, addr, regnum); + return _mv88e6xxx_reg_read(chip, addr, regnum); return 0xffff; } -static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip, int addr, int regnum, u16 val) { if (addr >= 0) - return _mv88e6xxx_reg_write(ps, addr, regnum, val); + return _mv88e6xxx_reg_write(chip, addr, regnum, val); return 0; } -static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) { int ret; unsigned long timeout; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, ret & ~GLOBAL_CONTROL_PPU_ENABLE); if (ret) return ret; timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -365,23 +364,23 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps) return -ETIMEDOUT; } -static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) { int ret, err; unsigned long timeout; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) return ret; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE); if (err) return err; timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -396,148 +395,148 @@ static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps) static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) { - struct 
mv88e6xxx_priv_state *ps; + struct mv88e6xxx_chip *chip; - ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work); + chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work); - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - if (mutex_trylock(&ps->ppu_mutex)) { - if (mv88e6xxx_ppu_enable(ps) == 0) - ps->ppu_disabled = 0; - mutex_unlock(&ps->ppu_mutex); + if (mutex_trylock(&chip->ppu_mutex)) { + if (mv88e6xxx_ppu_enable(chip) == 0) + chip->ppu_disabled = 0; + mutex_unlock(&chip->ppu_mutex); } - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); } static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps) { - struct mv88e6xxx_priv_state *ps = (void *)_ps; + struct mv88e6xxx_chip *chip = (void *)_ps; - schedule_work(&ps->ppu_work); + schedule_work(&chip->ppu_work); } -static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip) { int ret; - mutex_lock(&ps->ppu_mutex); + mutex_lock(&chip->ppu_mutex); /* If the PHY polling unit is enabled, disable it so that * we can access the PHY registers. If it was already * disabled, cancel the timer that is going to re-enable * it. */ - if (!ps->ppu_disabled) { - ret = mv88e6xxx_ppu_disable(ps); + if (!chip->ppu_disabled) { + ret = mv88e6xxx_ppu_disable(chip); if (ret < 0) { - mutex_unlock(&ps->ppu_mutex); + mutex_unlock(&chip->ppu_mutex); return ret; } - ps->ppu_disabled = 1; + chip->ppu_disabled = 1; } else { - del_timer(&ps->ppu_timer); + del_timer(&chip->ppu_timer); ret = 0; } return ret; } -static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps) +static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip) { /* Schedule a timer to re-enable the PHY polling unit. */ - mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10)); - mutex_unlock(&ps->ppu_mutex); + mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10)); + mutex_unlock(&chip->ppu_mutex); } -static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps) +static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip) { - mutex_init(&ps->ppu_mutex); - INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work); - init_timer(&ps->ppu_timer); - ps->ppu_timer.data = (unsigned long)ps; - ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; + mutex_init(&chip->ppu_mutex); + INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work); + init_timer(&chip->ppu_timer); + chip->ppu_timer.data = (unsigned long)chip; + chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; } -static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_priv_state *ps, int addr, +static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr, int regnum) { int ret; - ret = mv88e6xxx_ppu_access_get(ps); + ret = mv88e6xxx_ppu_access_get(chip); if (ret >= 0) { - ret = _mv88e6xxx_reg_read(ps, addr, regnum); - mv88e6xxx_ppu_access_put(ps); + ret = _mv88e6xxx_reg_read(chip, addr, regnum); + mv88e6xxx_ppu_access_put(chip); } return ret; } -static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_priv_state *ps, int addr, +static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr, int regnum, u16 val) { int ret; - ret = mv88e6xxx_ppu_access_get(ps); + ret = mv88e6xxx_ppu_access_get(chip); if (ret >= 0) { - ret = _mv88e6xxx_reg_write(ps, addr, regnum, val); - mv88e6xxx_ppu_access_put(ps); + ret = _mv88e6xxx_reg_write(chip, addr, regnum, val); + mv88e6xxx_ppu_access_put(chip); } return ret; } -static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps) +static bool 
mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip) { - return ps->info->family == MV88E6XXX_FAMILY_6065; + return chip->info->family == MV88E6XXX_FAMILY_6065; } -static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps) +static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip) { - return ps->info->family == MV88E6XXX_FAMILY_6095; + return chip->info->family == MV88E6XXX_FAMILY_6095; } -static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps) +static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip) { - return ps->info->family == MV88E6XXX_FAMILY_6097; + return chip->info->family == MV88E6XXX_FAMILY_6097; } -static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps) +static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip) { - return ps->info->family == MV88E6XXX_FAMILY_6165; + return chip->info->family == MV88E6XXX_FAMILY_6165; } -static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps) +static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip) { - return ps->info->family == MV88E6XXX_FAMILY_6185; + return chip->info->family == MV88E6XXX_FAMILY_6185; } -static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps) +static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip) { - return ps->info->family == MV88E6XXX_FAMILY_6320; + return chip->info->family == MV88E6XXX_FAMILY_6320; } -static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps) +static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip) { - return ps->info->family == MV88E6XXX_FAMILY_6351; + return chip->info->family == MV88E6XXX_FAMILY_6351; } -static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps) +static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip) { - return ps->info->family == MV88E6XXX_FAMILY_6352; + return chip->info->family == MV88E6XXX_FAMILY_6352; } -static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps) +static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip) { - return ps->info->num_databases; + return chip->info->num_databases; } -static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps) +static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip) { /* Does the device have dedicated FID registers for ATU and VTU ops? 
*/ - if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) || - mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) + if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) return true; return false; @@ -550,16 +549,16 @@ static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps) static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); u32 reg; int ret; if (!phy_is_pseudo_fixed_link(phydev)) return; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL); + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL); if (ret < 0) goto out; @@ -573,7 +572,7 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, if (phydev->link) reg |= PORT_PCS_CTRL_LINK_UP; - if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100) + if (mv88e6xxx_6065_family(chip) && phydev->speed > SPEED_100) goto out; switch (phydev->speed) { @@ -595,8 +594,8 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, if (phydev->duplex == DUPLEX_FULL) reg |= PORT_PCS_CTRL_DUPLEX_FULL; - if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) && - (port >= ps->info->num_ports - 2)) { + if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) && + (port >= chip->info->num_ports - 2)) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) @@ -605,19 +604,19 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK | PORT_PCS_CTRL_RGMII_DELAY_TXCLK); } - _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg); + _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg); out: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps) +static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip) { int ret; int i; for (i = 0; i < 10; i++) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP); if ((ret & GLOBAL_STATS_OP_BUSY) == 0) return 0; } @@ -625,30 +624,29 @@ static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps) return -ETIMEDOUT; } -static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps, - int port) +static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port) { int ret; - if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps)) + if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) port = (port + 1) << 5; /* Snapshot the hardware statistics counters for this port. */ - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_CAPTURE_PORT | GLOBAL_STATS_OP_HIST_RX_TX | port); if (ret < 0) return ret; /* Wait for the snapshotting to complete. 
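Every statistics operation above follows the same handshake: write GLOBAL_STATS_OP, then poll its busy flag a bounded number of times before touching the captured counters. A small self-contained model of that capture-and-read flow is sketched below; reg_read()/reg_write() and the STATS_* offsets and opcodes are illustrative stand-ins, not the driver's register map.

/* Toy model of the stats capture/read handshake; constants and helpers are
 * illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define STATS_OP             0x1d
#define STATS_COUNTER_32     0x1e        /* upper 16 bits of captured counter */
#define STATS_COUNTER_01     0x1f        /* lower 16 bits */
#define STATS_OP_BUSY        (1 << 15)
#define STATS_OP_CAPTURE     (5 << 12)
#define STATS_OP_READ_CAPT   (4 << 12)

static uint16_t stats_op;
static uint16_t counter_hi = 0x0001, counter_lo = 0x86a0;  /* 100000 */

static int reg_read(int reg)
{
        if (reg == STATS_COUNTER_32)
                return counter_hi;
        if (reg == STATS_COUNTER_01)
                return counter_lo;
        stats_op &= ~STATS_OP_BUSY;      /* the op "completes" immediately */
        return stats_op;
}

static void reg_write(int reg, uint16_t val)
{
        if (reg == STATS_OP)
                stats_op = val | STATS_OP_BUSY;
}

/* Bounded poll of the busy flag, like _mv88e6xxx_stats_wait() */
static int stats_wait(void)
{
        for (int i = 0; i < 10; i++)
                if (!(reg_read(STATS_OP) & STATS_OP_BUSY))
                        return 0;
        return -1;                       /* -ETIMEDOUT */
}

/* Snapshot a port's counters, then read back one captured 32-bit statistic */
static int read_counter(int port, int stat, uint32_t *val)
{
        reg_write(STATS_OP, STATS_OP_CAPTURE | port);
        if (stats_wait() < 0)
                return -1;

        reg_write(STATS_OP, STATS_OP_READ_CAPT | stat);
        if (stats_wait() < 0)
                return -1;

        *val = (uint32_t)reg_read(STATS_COUNTER_32) << 16;
        *val |= reg_read(STATS_COUNTER_01);
        return 0;
}

int main(void)
{
        uint32_t frames;

        if (!read_counter(2, 0x00, &frames))
                printf("captured counter: %u\n", frames);
        return 0;
}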
*/ - ret = _mv88e6xxx_stats_wait(ps); + ret = _mv88e6xxx_stats_wait(chip); if (ret < 0) return ret; return 0; } -static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps, +static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val) { u32 _val; @@ -656,23 +654,23 @@ static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps, *val = 0; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_READ_CAPTURED | GLOBAL_STATS_OP_HIST_RX_TX | stat); if (ret < 0) return; - ret = _mv88e6xxx_stats_wait(ps); + ret = _mv88e6xxx_stats_wait(chip); if (ret < 0) return; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32); if (ret < 0) return; _val = ret << 16; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01); if (ret < 0) return; @@ -741,26 +739,26 @@ static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = { { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, }, }; -static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps, +static bool mv88e6xxx_has_stat(struct mv88e6xxx_chip *chip, struct mv88e6xxx_hw_stat *stat) { switch (stat->type) { case BANK0: return true; case BANK1: - return mv88e6xxx_6320_family(ps); + return mv88e6xxx_6320_family(chip); case PORT: - return mv88e6xxx_6095_family(ps) || - mv88e6xxx_6185_family(ps) || - mv88e6xxx_6097_family(ps) || - mv88e6xxx_6165_family(ps) || - mv88e6xxx_6351_family(ps) || - mv88e6xxx_6352_family(ps); + return mv88e6xxx_6095_family(chip) || + mv88e6xxx_6185_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6352_family(chip); } return false; } -static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps, +static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, struct mv88e6xxx_hw_stat *s, int port) { @@ -771,13 +769,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps, switch (s->type) { case PORT: - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg); + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg); if (ret < 0) return UINT64_MAX; low = ret; if (s->sizeof_stat == 4) { - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg + 1); if (ret < 0) return UINT64_MAX; @@ -786,9 +784,9 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps, break; case BANK0: case BANK1: - _mv88e6xxx_stats_read(ps, s->reg, &low); + _mv88e6xxx_stats_read(chip, s->reg, &low); if (s->sizeof_stat == 8) - _mv88e6xxx_stats_read(ps, s->reg + 1, &high); + _mv88e6xxx_stats_read(chip, s->reg + 1, &high); } value = (((u64)high) << 16) | low; return value; @@ -797,13 +795,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps, static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); struct mv88e6xxx_hw_stat *stat; int i, j; for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { stat = &mv88e6xxx_hw_stats[i]; - if (mv88e6xxx_has_stat(ps, stat)) { + if (mv88e6xxx_has_stat(chip, stat)) { memcpy(data + j * ETH_GSTRING_LEN, stat->string, ETH_GSTRING_LEN); j++; @@ -813,13 +811,13 @@ static void 
mv88e6xxx_get_strings(struct dsa_switch *ds, int port, static int mv88e6xxx_get_sset_count(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); struct mv88e6xxx_hw_stat *stat; int i, j; for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { stat = &mv88e6xxx_hw_stats[i]; - if (mv88e6xxx_has_stat(ps, stat)) + if (mv88e6xxx_has_stat(chip, stat)) j++; } return j; @@ -828,27 +826,27 @@ static int mv88e6xxx_get_sset_count(struct dsa_switch *ds) static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); struct mv88e6xxx_hw_stat *stat; int ret; int i, j; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - ret = _mv88e6xxx_stats_snapshot(ps, port); + ret = _mv88e6xxx_stats_snapshot(chip, port); if (ret < 0) { - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return; } for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { stat = &mv88e6xxx_hw_stats[i]; - if (mv88e6xxx_has_stat(ps, stat)) { - data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port); + if (mv88e6xxx_has_stat(chip, stat)) { + data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port); j++; } } - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); } static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) @@ -859,7 +857,7 @@ static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *_p) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); u16 *p = _p; int i; @@ -867,20 +865,20 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, memset(p, 0xff, 32 * sizeof(u16)); - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); for (i = 0; i < 32; i++) { int ret; - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i); + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i); if (ret >= 0) p[i] = ret; } - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset, +static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset, u16 mask) { unsigned long timeout = jiffies + HZ / 10; @@ -888,7 +886,7 @@ static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset, while (time_before(jiffies, timeout)) { int ret; - ret = _mv88e6xxx_reg_read(ps, reg, offset); + ret = _mv88e6xxx_reg_read(chip, reg, offset); if (ret < 0) return ret; if (!(ret & mask)) @@ -899,48 +897,48 @@ static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset, return -ETIMEDOUT; } -static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, +static int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset, u16 mask) { int ret; - mutex_lock(&ps->reg_lock); - ret = _mv88e6xxx_wait(ps, reg, offset, mask); - mutex_unlock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_wait(chip, reg, offset, mask); + mutex_unlock(&chip->reg_lock); return ret; } -static int mv88e6xxx_mdio_wait(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, + return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, GLOBAL2_SMI_OP_BUSY); } static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state 
*ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); - return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP, GLOBAL2_EEPROM_OP_LOAD); } static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); - return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP, GLOBAL2_EEPROM_OP_BUSY); } static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int ret; - mutex_lock(&ps->eeprom_mutex); + mutex_lock(&chip->eeprom_mutex); - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP, GLOBAL2_EEPROM_OP_READ | (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); if (ret < 0) @@ -950,18 +948,18 @@ static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr) if (ret < 0) goto error; - ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); + ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); error: - mutex_unlock(&ps->eeprom_mutex); + mutex_unlock(&chip->eeprom_mutex); return ret; } static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) - return ps->eeprom_len; + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM)) + return chip->eeprom_len; return 0; } @@ -969,12 +967,12 @@ static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) static int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, u8 *data) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int offset; int len; int ret; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM)) return -EOPNOTSUPP; offset = eeprom->offset; @@ -1035,10 +1033,10 @@ static int mv88e6xxx_get_eeprom(struct dsa_switch *ds, static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int ret; - ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP); + ret = mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP); if (ret < 0) return ret; @@ -1051,16 +1049,16 @@ static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds) static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr, u16 data) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int ret; - mutex_lock(&ps->eeprom_mutex); + mutex_lock(&chip->eeprom_mutex); - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); + ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); if (ret < 0) goto error; - ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + ret = mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_OP, GLOBAL2_EEPROM_OP_WRITE | (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); if (ret < 0) @@ -1068,19 +1066,19 @@ static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr, ret = mv88e6xxx_eeprom_busy_wait(ds); error: - mutex_unlock(&ps->eeprom_mutex); + mutex_unlock(&chip->eeprom_mutex); return ret; } static int mv88e6xxx_set_eeprom(struct dsa_switch *ds, struct ethtool_eeprom 
*eeprom, u8 *data) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int offset; int ret; int len; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM)) return -EOPNOTSUPP; if (eeprom->magic != 0xc3ec4951) @@ -1152,67 +1150,67 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds, return 0; } -static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps) +static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP, + return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY); } -static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip, int addr, int regnum) { int ret; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum); if (ret < 0) return ret; - ret = mv88e6xxx_mdio_wait(ps); + ret = mv88e6xxx_mdio_wait(chip); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA); return ret; } -static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip, int addr, int regnum, u16 val) { int ret; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP, + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum); - return mv88e6xxx_mdio_wait(ps); + return mv88e6xxx_mdio_wait(chip); } static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int reg; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - reg = mv88e6xxx_mdio_read_indirect(ps, port, 16); + reg = mv88e6xxx_mdio_read_indirect(chip, port, 16); if (reg < 0) goto out; e->eee_enabled = !!(reg & 0x0200); e->tx_lpi_enabled = !!(reg & 0x0100); - reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS); + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); if (reg < 0) goto out; @@ -1220,23 +1218,23 @@ static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, reg = 0; out: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return reg; } static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int reg; int ret; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_mdio_read_indirect(ps, port, 16); + ret = mv88e6xxx_mdio_read_indirect(chip, port, 16); if (ret < 0) goto out; @@ -1246,28 +1244,29 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, if (e->tx_lpi_enabled) reg |= 0x0100; - ret = mv88e6xxx_mdio_write_indirect(ps, port, 16, reg); + ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg); 
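mv88e6xxx_set_eee() above is a plain read-modify-write of PHY register 16 through the indirect MDIO path: bit 9 (0x0200) enables EEE and bit 8 (0x0100) enables Tx LPI. A stand-alone sketch of that pattern follows; phy_read16()/phy_write16() are hypothetical stand-ins for the driver's mv88e6xxx_mdio_{read,write}_indirect() helpers.

/* Toy model of the EEE read-modify-write; helpers are illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EEE_CTRL_REG       16
#define EEE_ENABLE         0x0200        /* bit 9 */
#define EEE_TX_LPI_ENABLE  0x0100        /* bit 8 */

static uint16_t phy_regs[32] = { [EEE_CTRL_REG] = EEE_ENABLE };

static int phy_read16(int port, int reg)
{
        return phy_regs[reg];
}

static int phy_write16(int port, int reg, uint16_t val)
{
        phy_regs[reg] = val;
        return 0;
}

static int set_eee(int port, bool eee_enabled, bool tx_lpi_enabled)
{
        int reg = phy_read16(port, EEE_CTRL_REG);

        if (reg < 0)
                return reg;

        reg &= ~(EEE_ENABLE | EEE_TX_LPI_ENABLE);   /* clear both flags... */
        if (eee_enabled)
                reg |= EEE_ENABLE;                  /* ...then set what was requested */
        if (tx_lpi_enabled)
                reg |= EEE_TX_LPI_ENABLE;

        return phy_write16(port, EEE_CTRL_REG, (uint16_t)reg);
}

int main(void)
{
        set_eee(3, true, true);
        printf("port 3 EEE control: 0x%04x\n", phy_regs[EEE_CTRL_REG]);
        return 0;
}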
out: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return ret; } -static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd) +static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd) { int ret; - if (mv88e6xxx_has_fid_reg(ps)) { - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid); + if (mv88e6xxx_has_fid_reg(chip)) { + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID, + fid); if (ret < 0) return ret; - } else if (mv88e6xxx_num_databases(ps) == 256) { + } else if (mv88e6xxx_num_databases(chip) == 256) { /* ATU DBNum[7:4] are located in ATU Control 15:12 */ - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL, + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, (ret & 0xfff) | ((fid << 8) & 0xf000)); if (ret < 0) @@ -1277,14 +1276,14 @@ static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd) cmd |= fid & 0xf; } - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd); if (ret < 0) return ret; - return _mv88e6xxx_atu_wait(ps); + return _mv88e6xxx_atu_wait(chip); } -static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip, struct mv88e6xxx_atu_entry *entry) { u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK; @@ -1304,21 +1303,21 @@ static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps, data |= (entry->portv_trunkid << shift) & mask; } - return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data); + return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data); } -static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip, struct mv88e6xxx_atu_entry *entry, bool static_too) { int op; int err; - err = _mv88e6xxx_atu_wait(ps); + err = _mv88e6xxx_atu_wait(chip); if (err) return err; - err = _mv88e6xxx_atu_data_write(ps, entry); + err = _mv88e6xxx_atu_data_write(chip, entry); if (err) return err; @@ -1330,10 +1329,10 @@ static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps, GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; } - return _mv88e6xxx_atu_cmd(ps, entry->fid, op); + return _mv88e6xxx_atu_cmd(chip, entry->fid, op); } -static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool static_too) { struct mv88e6xxx_atu_entry entry = { @@ -1341,10 +1340,10 @@ static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps, .state = 0, /* EntryState bits must be 0 */ }; - return _mv88e6xxx_atu_flush_move(ps, &entry, static_too); + return _mv88e6xxx_atu_flush_move(chip, &entry, static_too); } -static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid, +static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid, int from_port, int to_port, bool static_too) { struct mv88e6xxx_atu_entry entry = { @@ -1359,14 +1358,14 @@ static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid, entry.portv_trunkid = (to_port & 0x0f) << 4; entry.portv_trunkid |= from_port & 0x0f; - return _mv88e6xxx_atu_flush_move(ps, &entry, static_too); + return _mv88e6xxx_atu_flush_move(chip, &entry, static_too); } -static int _mv88e6xxx_atu_remove(struct 
mv88e6xxx_priv_state *ps, u16 fid, +static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port, bool static_too) { /* Destination port 0xF means remove the entries */ - return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too); + return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too); } static const char * const mv88e6xxx_port_state_names[] = { @@ -1376,14 +1375,14 @@ static const char * const mv88e6xxx_port_state_names[] = { [PORT_CONTROL_STATE_FORWARDING] = "Forwarding", }; -static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port, +static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port, u8 state) { - struct dsa_switch *ds = ps->ds; + struct dsa_switch *ds = chip->ds; int reg, ret = 0; u8 oldstate; - reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL); + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL); if (reg < 0) return reg; @@ -1398,13 +1397,13 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port, oldstate == PORT_CONTROL_STATE_FORWARDING) && (state == PORT_CONTROL_STATE_DISABLED || state == PORT_CONTROL_STATE_BLOCKING)) { - ret = _mv88e6xxx_atu_remove(ps, 0, port, false); + ret = _mv88e6xxx_atu_remove(chip, 0, port, false); if (ret) return ret; } reg = (reg & ~PORT_CONTROL_STATE_MASK) | state; - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL, + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL, reg); if (ret) return ret; @@ -1417,12 +1416,11 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port, return ret; } -static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps, - int port) +static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) { - struct net_device *bridge = ps->ports[port].bridge_dev; - const u16 mask = (1 << ps->info->num_ports) - 1; - struct dsa_switch *ds = ps->ds; + struct net_device *bridge = chip->ports[port].bridge_dev; + const u16 mask = (1 << chip->info->num_ports) - 1; + struct dsa_switch *ds = chip->ds; u16 output_ports = 0; int reg; int i; @@ -1431,9 +1429,9 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps, if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { output_ports = mask; } else { - for (i = 0; i < ps->info->num_ports; ++i) { + for (i = 0; i < chip->info->num_ports; ++i) { /* allow sending frames to every group member */ - if (bridge && ps->ports[i].bridge_dev == bridge) + if (bridge && chip->ports[i].bridge_dev == bridge) output_ports |= BIT(i); /* allow sending frames to CPU port and DSA link(s) */ @@ -1445,24 +1443,24 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps, /* prevent frames from going back out of the port they came in on */ output_ports &= ~BIT(port); - reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN); + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN); if (reg < 0) return reg; reg &= ~mask; reg |= output_ports & mask; - return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg); + return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg); } static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int stp_state; int err; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_PORTSTATE)) return; switch (state) { @@ -1482,9 +1480,9 @@ static void 
mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, break; } - mutex_lock(&ps->reg_lock); - err = _mv88e6xxx_port_state(ps, port, stp_state); - mutex_unlock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); + err = _mv88e6xxx_port_state(chip, port, stp_state); + mutex_unlock(&chip->reg_lock); if (err) netdev_err(ds->ports[port].netdev, @@ -1492,14 +1490,14 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, mv88e6xxx_port_state_names[stp_state]); } -static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port, +static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port, u16 *new, u16 *old) { - struct dsa_switch *ds = ps->ds; + struct dsa_switch *ds = chip->ds; u16 pvid; int ret; - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN); + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN); if (ret < 0) return ret; @@ -1509,7 +1507,7 @@ static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port, ret &= ~PORT_DEFAULT_VLAN_MASK; ret |= *new & PORT_DEFAULT_VLAN_MASK; - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN, ret); if (ret < 0) return ret; @@ -1524,47 +1522,47 @@ static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port, return 0; } -static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_chip *chip, int port, u16 *pvid) { - return _mv88e6xxx_port_pvid(ps, port, NULL, pvid); + return _mv88e6xxx_port_pvid(chip, port, NULL, pvid); } -static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip, int port, u16 pvid) { - return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL); + return _mv88e6xxx_port_pvid(chip, port, &pvid, NULL); } -static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps) +static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP, + return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY); } -static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op) +static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op) { int ret; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op); if (ret < 0) return ret; - return _mv88e6xxx_vtu_wait(ps); + return _mv88e6xxx_vtu_wait(chip); } -static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps) +static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip) { int ret; - ret = _mv88e6xxx_vtu_wait(ps); + ret = _mv88e6xxx_vtu_wait(chip); if (ret < 0) return ret; - return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL); + return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_FLUSH_ALL); } -static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry, unsigned int nibble_offset) { @@ -1573,7 +1571,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps, int ret; for (i = 0; i < 3; ++i) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_DATA_0_3 + i); if (ret < 0) return ret; @@ -1581,7 +1579,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps, regs[i] = ret; } - for (i = 0; i < ps->info->num_ports; ++i) { + for (i = 0; i < 
chip->info->num_ports; ++i) { unsigned int shift = (i % 4) * 4 + nibble_offset; u16 reg = regs[i / 4]; @@ -1591,19 +1589,19 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps, return 0; } -static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry) { - return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0); + return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0); } -static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry) { - return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2); + return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2); } -static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry, unsigned int nibble_offset) { @@ -1611,7 +1609,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps, int i; int ret; - for (i = 0; i < ps->info->num_ports; ++i) { + for (i = 0; i < chip->info->num_ports; ++i) { unsigned int shift = (i % 4) * 4 + nibble_offset; u8 data = entry->data[i]; @@ -1619,7 +1617,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps, } for (i = 0; i < 3; ++i) { - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_DATA_0_3 + i, regs[i]); if (ret < 0) return ret; @@ -1628,39 +1626,39 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps, return 0; } -static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry) { - return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0); + return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0); } -static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry) { - return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2); + return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2); } -static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid) +static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid) { - return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, + return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, vid & GLOBAL_VTU_VID_MASK); } -static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry) { struct mv88e6xxx_vtu_stu_entry next = { 0 }; int ret; - ret = _mv88e6xxx_vtu_wait(ps); + ret = _mv88e6xxx_vtu_wait(chip); if (ret < 0) return ret; - ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT); + ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID); if (ret < 0) return ret; @@ -1668,22 +1666,22 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps, next.valid = !!(ret & GLOBAL_VTU_VID_VALID); if (next.valid) { - ret = mv88e6xxx_vtu_data_read(ps, &next); + ret = mv88e6xxx_vtu_data_read(chip, &next); if (ret < 0) return ret; - if (mv88e6xxx_has_fid_reg(ps)) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, + if (mv88e6xxx_has_fid_reg(chip)) 
{ + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_FID); if (ret < 0) return ret; next.fid = ret & GLOBAL_VTU_FID_MASK; - } else if (mv88e6xxx_num_databases(ps) == 256) { + } else if (mv88e6xxx_num_databases(chip) == 256) { /* VTU DBNum[7:4] are located in VTU Operation 11:8, and * VTU DBNum[3:0] are located in VTU Operation 3:0 */ - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_OP); if (ret < 0) return ret; @@ -1692,8 +1690,8 @@ static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps, next.fid |= ret & 0xf; } - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID); if (ret < 0) return ret; @@ -1710,26 +1708,26 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, int (*cb)(struct switchdev_obj *obj)) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); struct mv88e6xxx_vtu_stu_entry next; u16 pvid; int err; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - err = _mv88e6xxx_port_pvid_get(ps, port, &pvid); + err = _mv88e6xxx_port_pvid_get(chip, port, &pvid); if (err) goto unlock; - err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK); + err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK); if (err) goto unlock; do { - err = _mv88e6xxx_vtu_getnext(ps, &next); + err = _mv88e6xxx_vtu_getnext(chip, &next); if (err) break; @@ -1756,19 +1754,19 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, } while (next.vid < GLOBAL_VTU_VID_MASK); unlock: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return err; } -static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry) { u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; u16 reg = 0; int ret; - ret = _mv88e6xxx_vtu_wait(ps); + ret = _mv88e6xxx_vtu_wait(chip); if (ret < 0) return ret; @@ -1776,23 +1774,25 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps, goto loadpurge; /* Write port member tags */ - ret = mv88e6xxx_vtu_data_write(ps, entry); + ret = mv88e6xxx_vtu_data_write(chip, entry); if (ret < 0) return ret; - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) { + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) { reg = entry->sid & GLOBAL_VTU_SID_MASK; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, + reg); if (ret < 0) return ret; } - if (mv88e6xxx_has_fid_reg(ps)) { + if (mv88e6xxx_has_fid_reg(chip)) { reg = entry->fid & GLOBAL_VTU_FID_MASK; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID, + reg); if (ret < 0) return ret; - } else if (mv88e6xxx_num_databases(ps) == 256) { + } else if (mv88e6xxx_num_databases(chip) == 256) { /* VTU DBNum[7:4] are located in VTU Operation 11:8, and * VTU DBNum[3:0] are located in VTU Operation 3:0 */ @@ -1803,46 +1803,46 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps, reg = GLOBAL_VTU_VID_VALID; loadpurge: reg |= entry->vid & GLOBAL_VTU_VID_MASK; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg); + ret = 
_mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg); if (ret < 0) return ret; - return _mv88e6xxx_vtu_cmd(ps, op); + return _mv88e6xxx_vtu_cmd(chip, op); } -static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid, +static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid, struct mv88e6xxx_vtu_stu_entry *entry) { struct mv88e6xxx_vtu_stu_entry next = { 0 }; int ret; - ret = _mv88e6xxx_vtu_wait(ps); + ret = _mv88e6xxx_vtu_wait(chip); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, sid & GLOBAL_VTU_SID_MASK); if (ret < 0) return ret; - ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT); + ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID); if (ret < 0) return ret; next.sid = ret & GLOBAL_VTU_SID_MASK; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID); if (ret < 0) return ret; next.valid = !!(ret & GLOBAL_VTU_VID_VALID); if (next.valid) { - ret = mv88e6xxx_stu_data_read(ps, &next); + ret = mv88e6xxx_stu_data_read(chip, &next); if (ret < 0) return ret; } @@ -1851,13 +1851,13 @@ static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid, return 0; } -static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_stu_entry *entry) { u16 reg = 0; int ret; - ret = _mv88e6xxx_vtu_wait(ps); + ret = _mv88e6xxx_vtu_wait(chip); if (ret < 0) return ret; @@ -1865,41 +1865,41 @@ static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps, goto loadpurge; /* Write port states */ - ret = mv88e6xxx_stu_data_write(ps, entry); + ret = mv88e6xxx_stu_data_write(chip, entry); if (ret < 0) return ret; reg = GLOBAL_VTU_VID_VALID; loadpurge: - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg); if (ret < 0) return ret; reg = entry->sid & GLOBAL_VTU_SID_MASK; - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg); if (ret < 0) return ret; - return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE); + return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE); } -static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port, +static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port, u16 *new, u16 *old) { - struct dsa_switch *ds = ps->ds; + struct dsa_switch *ds = chip->ds; u16 upper_mask; u16 fid; int ret; - if (mv88e6xxx_num_databases(ps) == 4096) + if (mv88e6xxx_num_databases(chip) == 4096) upper_mask = 0xff; - else if (mv88e6xxx_num_databases(ps) == 256) + else if (mv88e6xxx_num_databases(chip) == 256) upper_mask = 0xf; else return -EOPNOTSUPP; /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */ - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN); + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN); if (ret < 0) return ret; @@ -1909,14 +1909,14 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port, ret &= ~PORT_BASE_VLAN_FID_3_0_MASK; ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK; - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, + ret = _mv88e6xxx_reg_write(chip, 
REG_PORT(port), PORT_BASE_VLAN, ret); if (ret < 0) return ret; } /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */ - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1); + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1); if (ret < 0) return ret; @@ -1926,7 +1926,7 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port, ret &= ~upper_mask; ret |= (*new >> 4) & upper_mask; - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1, ret); if (ret < 0) return ret; @@ -1941,19 +1941,19 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port, return 0; } -static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_chip *chip, int port, u16 *fid) { - return _mv88e6xxx_port_fid(ps, port, NULL, fid); + return _mv88e6xxx_port_fid(chip, port, NULL, fid); } -static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip, int port, u16 fid) { - return _mv88e6xxx_port_fid(ps, port, &fid, NULL); + return _mv88e6xxx_port_fid(chip, port, &fid, NULL); } -static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid) +static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid) { DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); struct mv88e6xxx_vtu_stu_entry vlan; @@ -1962,8 +1962,8 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid) bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); /* Set every FID bit used by the (un)bridged ports */ - for (i = 0; i < ps->info->num_ports; ++i) { - err = _mv88e6xxx_port_fid_get(ps, i, fid); + for (i = 0; i < chip->info->num_ports; ++i) { + err = _mv88e6xxx_port_fid_get(chip, i, fid); if (err) return err; @@ -1971,12 +1971,12 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid) } /* Set every FID bit used by the VLAN entries */ - err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK); + err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK); if (err) return err; do { - err = _mv88e6xxx_vtu_getnext(ps, &vlan); + err = _mv88e6xxx_vtu_getnext(chip, &vlan); if (err) return err; @@ -1990,35 +1990,35 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid) * databases are not needed. Return the next positive available. */ *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1); - if (unlikely(*fid >= mv88e6xxx_num_databases(ps))) + if (unlikely(*fid >= mv88e6xxx_num_databases(chip))) return -ENOSPC; /* Clear the database */ - return _mv88e6xxx_atu_flush(ps, *fid, true); + return _mv88e6xxx_atu_flush(chip, *fid, true); } -static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid, +static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid, struct mv88e6xxx_vtu_stu_entry *entry) { - struct dsa_switch *ds = ps->ds; + struct dsa_switch *ds = chip->ds; struct mv88e6xxx_vtu_stu_entry vlan = { .valid = true, .vid = vid, }; int i, err; - err = _mv88e6xxx_fid_new(ps, &vlan.fid); + err = _mv88e6xxx_fid_new(chip, &vlan.fid); if (err) return err; /* exclude all ports except the CPU and DSA ports */ - for (i = 0; i < ps->info->num_ports; ++i) + for (i = 0; i < chip->info->num_ports; ++i) vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i) ? 
GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; - if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) || - mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) { + if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) { struct mv88e6xxx_vtu_stu_entry vstp; /* Adding a VTU entry requires a valid STU entry. As VSTP is not @@ -2026,7 +2026,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid, * entries. Thus, validate the SID 0. */ vlan.sid = 0; - err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp); + err = _mv88e6xxx_stu_getnext(chip, GLOBAL_VTU_SID_MASK, &vstp); if (err) return err; @@ -2035,7 +2035,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid, vstp.valid = true; vstp.sid = vlan.sid; - err = _mv88e6xxx_stu_loadpurge(ps, &vstp); + err = _mv88e6xxx_stu_loadpurge(chip, &vstp); if (err) return err; } @@ -2045,7 +2045,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid, return 0; } -static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid, +static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, struct mv88e6xxx_vtu_stu_entry *entry, bool creat) { int err; @@ -2053,11 +2053,11 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid, if (!vid) return -EINVAL; - err = _mv88e6xxx_vtu_vid_write(ps, vid - 1); + err = _mv88e6xxx_vtu_vid_write(chip, vid - 1); if (err) return err; - err = _mv88e6xxx_vtu_getnext(ps, entry); + err = _mv88e6xxx_vtu_getnext(chip, entry); if (err) return err; @@ -2068,7 +2068,7 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid, * -EOPNOTSUPP to inform bridge about an eventual software VLAN. 
*/ - err = _mv88e6xxx_vtu_new(ps, vid, entry); + err = _mv88e6xxx_vtu_new(chip, vid, entry); } return err; @@ -2077,21 +2077,21 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid, static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, u16 vid_begin, u16 vid_end) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); struct mv88e6xxx_vtu_stu_entry vlan; int i, err; if (!vid_begin) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1); + err = _mv88e6xxx_vtu_vid_write(chip, vid_begin - 1); if (err) goto unlock; do { - err = _mv88e6xxx_vtu_getnext(ps, &vlan); + err = _mv88e6xxx_vtu_getnext(chip, &vlan); if (err) goto unlock; @@ -2101,7 +2101,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (vlan.vid > vid_end) break; - for (i = 0; i < ps->info->num_ports; ++i) { + for (i = 0; i < chip->info->num_ports; ++i) { if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) continue; @@ -2109,21 +2109,21 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) continue; - if (ps->ports[i].bridge_dev == - ps->ports[port].bridge_dev) + if (chip->ports[i].bridge_dev == + chip->ports[port].bridge_dev) break; /* same bridge, check next VLAN */ netdev_warn(ds->ports[port].netdev, "hardware VLAN %d already used by %s\n", vlan.vid, - netdev_name(ps->ports[i].bridge_dev)); + netdev_name(chip->ports[i].bridge_dev)); err = -EOPNOTSUPP; goto unlock; } } while (vlan.vid < vid_end); unlock: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return err; } @@ -2138,17 +2138,17 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = { static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); u16 old, new = vlan_filtering ? 
PORT_CONTROL_2_8021Q_SECURE : PORT_CONTROL_2_8021Q_DISABLED; int ret; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2); + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2); if (ret < 0) goto unlock; @@ -2158,7 +2158,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, ret &= ~PORT_CONTROL_2_8021Q_MASK; ret |= new & PORT_CONTROL_2_8021Q_MASK; - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2, + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2, ret); if (ret < 0) goto unlock; @@ -2170,7 +2170,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, ret = 0; unlock: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return ret; } @@ -2180,10 +2180,10 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int err; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) return -EOPNOTSUPP; /* If the requested port doesn't belong to the same bridge as the VLAN @@ -2200,13 +2200,13 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, return 0; } -static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port, +static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port, u16 vid, bool untagged) { struct mv88e6xxx_vtu_stu_entry vlan; int err; - err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true); + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true); if (err) return err; @@ -2214,44 +2214,44 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port, GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; - return _mv88e6xxx_vtu_loadpurge(ps, &vlan); + return _mv88e6xxx_vtu_loadpurge(chip, &vlan); } static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; u16 vid; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) return; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) - if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged)) + if (_mv88e6xxx_port_vlan_add(chip, port, vid, untagged)) netdev_err(ds->ports[port].netdev, "failed to add VLAN %d%c\n", vid, untagged ? 
'u' : 't'); - if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end)) + if (pvid && _mv88e6xxx_port_pvid_set(chip, port, vlan->vid_end)) netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n", vlan->vid_end); - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip, int port, u16 vid) { - struct dsa_switch *ds = ps->ds; + struct dsa_switch *ds = chip->ds; struct mv88e6xxx_vtu_stu_entry vlan; int i, err; - err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false); + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false); if (err) return err; @@ -2263,7 +2263,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps, /* keep the VLAN unless all ports are excluded */ vlan.valid = false; - for (i = 0; i < ps->info->num_ports; ++i) { + for (i = 0; i < chip->info->num_ports; ++i) { if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) continue; @@ -2273,55 +2273,55 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps, } } - err = _mv88e6xxx_vtu_loadpurge(ps, &vlan); + err = _mv88e6xxx_vtu_loadpurge(chip, &vlan); if (err) return err; - return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false); + return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false); } static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); u16 pvid, vid; int err = 0; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - err = _mv88e6xxx_port_pvid_get(ps, port, &pvid); + err = _mv88e6xxx_port_pvid_get(chip, port, &pvid); if (err) goto unlock; for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - err = _mv88e6xxx_port_vlan_del(ps, port, vid); + err = _mv88e6xxx_port_vlan_del(chip, port, vid); if (err) goto unlock; if (vid == pvid) { - err = _mv88e6xxx_port_pvid_set(ps, port, 0); + err = _mv88e6xxx_port_pvid_set(chip, port, 0); if (err) goto unlock; } } unlock: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return err; } -static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip, const unsigned char *addr) { int i, ret; for (i = 0; i < 3; i++) { ret = _mv88e6xxx_reg_write( - ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, + chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, (addr[i * 2] << 8) | addr[i * 2 + 1]); if (ret < 0) return ret; @@ -2330,13 +2330,13 @@ static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps, return 0; } -static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip, unsigned char *addr) { int i, ret; for (i = 0; i < 3; i++) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i); if (ret < 0) return ret; @@ -2347,27 +2347,27 @@ static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps, return 0; } -static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip, struct mv88e6xxx_atu_entry *entry) { int ret; - ret = _mv88e6xxx_atu_wait(ps); + ret = _mv88e6xxx_atu_wait(chip); if (ret < 0) return ret; - ret = _mv88e6xxx_atu_mac_write(ps, entry->mac); + ret = 
_mv88e6xxx_atu_mac_write(chip, entry->mac); if (ret < 0) return ret; - ret = _mv88e6xxx_atu_data_write(ps, entry); + ret = _mv88e6xxx_atu_data_write(chip, entry); if (ret < 0) return ret; - return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB); + return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB); } -static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port, +static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_chip *chip, int port, const unsigned char *addr, u16 vid, u8 state) { @@ -2377,9 +2377,9 @@ static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port, /* Null VLAN ID corresponds to the port private database */ if (vid == 0) - err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid); + err = _mv88e6xxx_port_fid_get(chip, port, &vlan.fid); else - err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false); + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false); if (err) return err; @@ -2391,16 +2391,16 @@ static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port, entry.portv_trunkid = BIT(port); } - return _mv88e6xxx_atu_load(ps, &entry); + return _mv88e6xxx_atu_load(chip, &entry); } static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU)) return -EOPNOTSUPP; /* We don't need any dynamic resource from the kernel (yet), @@ -2416,36 +2416,36 @@ static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, int state = is_multicast_ether_addr(fdb->addr) ? GLOBAL_ATU_DATA_STATE_MC_STATIC : GLOBAL_ATU_DATA_STATE_UC_STATIC; - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU)) return; - mutex_lock(&ps->reg_lock); - if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state)) + mutex_lock(&chip->reg_lock); + if (_mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, state)) netdev_err(ds->ports[port].netdev, "failed to load MAC address\n"); - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); } static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int ret; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU)) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); - ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, GLOBAL_ATU_DATA_STATE_UNUSED); - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return ret; } -static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid, +static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid, struct mv88e6xxx_atu_entry *entry) { struct mv88e6xxx_atu_entry next = { 0 }; @@ -2453,19 +2453,19 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid, next.fid = fid; - ret = _mv88e6xxx_atu_wait(ps); + ret = _mv88e6xxx_atu_wait(chip); if (ret < 0) return ret; - ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB); + ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB); if (ret < 0) 
return ret; - ret = _mv88e6xxx_atu_mac_read(ps, next.mac); + ret = _mv88e6xxx_atu_mac_read(chip, next.mac); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA); if (ret < 0) return ret; @@ -2490,7 +2490,7 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid, return 0; } -static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_chip *chip, u16 fid, u16 vid, int port, struct switchdev_obj_port_fdb *fdb, int (*cb)(struct switchdev_obj *obj)) @@ -2500,12 +2500,12 @@ static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps, }; int err; - err = _mv88e6xxx_atu_mac_write(ps, addr.mac); + err = _mv88e6xxx_atu_mac_write(chip, addr.mac); if (err) return err; do { - err = _mv88e6xxx_atu_getnext(ps, fid, &addr); + err = _mv88e6xxx_atu_getnext(chip, fid, &addr); if (err) break; @@ -2535,48 +2535,48 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, int (*cb)(struct switchdev_obj *obj)) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); struct mv88e6xxx_vtu_stu_entry vlan = { .vid = GLOBAL_VTU_VID_MASK, /* all ones */ }; u16 fid; int err; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_ATU)) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); /* Dump port's default Filtering Information Database (VLAN ID 0) */ - err = _mv88e6xxx_port_fid_get(ps, port, &fid); + err = _mv88e6xxx_port_fid_get(chip, port, &fid); if (err) goto unlock; - err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb); + err = _mv88e6xxx_port_fdb_dump_one(chip, fid, 0, port, fdb, cb); if (err) goto unlock; /* Dump VLANs' Filtering Information Databases */ - err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid); + err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid); if (err) goto unlock; do { - err = _mv88e6xxx_vtu_getnext(ps, &vlan); + err = _mv88e6xxx_vtu_getnext(chip, &vlan); if (err) break; if (!vlan.valid) break; - err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port, - fdb, cb); + err = _mv88e6xxx_port_fdb_dump_one(chip, vlan.fid, vlan.vid, + port, fdb, cb); if (err) break; } while (vlan.vid < GLOBAL_VTU_VID_MASK); unlock: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return err; } @@ -2584,101 +2584,101 @@ unlock: static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int i, err = 0; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VLANTABLE)) return -EOPNOTSUPP; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); /* Assign the bridge and remap each port's VLANTable */ - ps->ports[port].bridge_dev = bridge; + chip->ports[port].bridge_dev = bridge; - for (i = 0; i < ps->info->num_ports; ++i) { - if (ps->ports[i].bridge_dev == bridge) { - err = _mv88e6xxx_port_based_vlan_map(ps, i); + for (i = 0; i < chip->info->num_ports; ++i) { + if (chip->ports[i].bridge_dev == bridge) { + err = _mv88e6xxx_port_based_vlan_map(chip, i); if (err) break; } } - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return err; } static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) { - struct mv88e6xxx_priv_state *ps = 
ds_to_priv(ds); - struct net_device *bridge = ps->ports[port].bridge_dev; + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct net_device *bridge = chip->ports[port].bridge_dev; int i; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VLANTABLE)) return; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); /* Unassign the bridge and remap each port's VLANTable */ - ps->ports[port].bridge_dev = NULL; + chip->ports[port].bridge_dev = NULL; - for (i = 0; i < ps->info->num_ports; ++i) - if (i == port || ps->ports[i].bridge_dev == bridge) - if (_mv88e6xxx_port_based_vlan_map(ps, i)) + for (i = 0; i < chip->info->num_ports; ++i) + if (i == port || chip->ports[i].bridge_dev == bridge) + if (_mv88e6xxx_port_based_vlan_map(chip, i)) netdev_warn(ds->ports[i].netdev, "failed to remap\n"); - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip, int port, int page, int reg, int val) { int ret; - ret = mv88e6xxx_mdio_write_indirect(ps, port, 0x16, page); + ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); if (ret < 0) goto restore_page_0; - ret = mv88e6xxx_mdio_write_indirect(ps, port, reg, val); + ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, val); restore_page_0: - mv88e6xxx_mdio_write_indirect(ps, port, 0x16, 0x0); + mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); return ret; } -static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_priv_state *ps, +static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip, int port, int page, int reg) { int ret; - ret = mv88e6xxx_mdio_write_indirect(ps, port, 0x16, page); + ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); if (ret < 0) goto restore_page_0; - ret = mv88e6xxx_mdio_read_indirect(ps, port, reg); + ret = mv88e6xxx_mdio_read_indirect(chip, port, reg); restore_page_0: - mv88e6xxx_mdio_write_indirect(ps, port, 0x16, 0x0); + mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); return ret; } -static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip) { - bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE); + bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE); u16 is_reset = (ppu_active ? 0x8800 : 0xc800); - struct gpio_desc *gpiod = ps->reset; + struct gpio_desc *gpiod = chip->reset; unsigned long timeout; int ret; int i; /* Set all ports to the disabled state. */ - for (i = 0; i < ps->info->num_ports; i++) { - ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL); + for (i = 0; i < chip->info->num_ports; i++) { + ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL, + ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL, ret & 0xfffc); if (ret) return ret; @@ -2700,16 +2700,16 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps) * through global registers 0x18 and 0x19. */ if (ppu_active) - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000); else - ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400); + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400); if (ret) return ret; /* Wait up to one second for reset to complete. 
*/ timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00); + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00); if (ret < 0) return ret; @@ -2725,18 +2725,18 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps) return ret; } -static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip) { int ret; - ret = _mv88e6xxx_mdio_page_read(ps, REG_FIBER_SERDES, + ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES, PAGE_FIBER_SERDES, MII_BMCR); if (ret < 0) return ret; if (ret & BMCR_PDOWN) { ret &= ~BMCR_PDOWN; - ret = _mv88e6xxx_mdio_page_write(ps, REG_FIBER_SERDES, + ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES, PAGE_FIBER_SERDES, MII_BMCR, ret); } @@ -2744,30 +2744,30 @@ static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps) return ret; } -static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) +static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) { - struct dsa_switch *ds = ps->ds; + struct dsa_switch *ds = chip->ds; int ret; u16 reg; - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || - mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) { + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) || + mv88e6xxx_6065_family(chip) || mv88e6xxx_6320_family(chip)) { /* MAC Forcing register: don't force link, speed, * duplex or flow control state to any particular * values on physical ports, but force the CPU port * and all DSA ports to their maximum bandwidth and * full duplex. */ - reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL); + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL); if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { reg &= ~PORT_PCS_CTRL_UNFORCED; reg |= PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP | PORT_PCS_CTRL_DUPLEX_FULL | PORT_PCS_CTRL_FORCE_DUPLEX; - if (mv88e6xxx_6065_family(ps)) + if (mv88e6xxx_6065_family(chip)) reg |= PORT_PCS_CTRL_100; else reg |= PORT_PCS_CTRL_1000; @@ -2775,7 +2775,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) reg |= PORT_PCS_CTRL_UNFORCED; } - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg); if (ret) return ret; @@ -2796,37 +2796,46 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) * forwarding of unknown unicasts and multicasts. 
*/ reg = 0; - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) || - mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || mv88e6xxx_6065_family(chip) || + mv88e6xxx_6185_family(chip) || mv88e6xxx_6320_family(chip)) reg = PORT_CONTROL_IGMP_MLD_SNOOP | PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | PORT_CONTROL_STATE_FORWARDING; if (dsa_is_cpu_port(ds, port)) { - if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) + if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) reg |= PORT_CONTROL_DSA_TAG; - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6320_family(ps)) { + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA | PORT_CONTROL_FORWARD_UNKNOWN | PORT_CONTROL_FORWARD_UNKNOWN_MC; } - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) || - mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) { + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || + mv88e6xxx_6065_family(chip) || + mv88e6xxx_6185_family(chip) || + mv88e6xxx_6320_family(chip)) { reg |= PORT_CONTROL_EGRESS_ADD_TAG; } } if (dsa_is_dsa_port(ds, port)) { - if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) + if (mv88e6xxx_6095_family(chip) || + mv88e6xxx_6185_family(chip)) reg |= PORT_CONTROL_DSA_TAG; - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6320_family(ps)) { + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { reg |= PORT_CONTROL_FRAME_MODE_DSA; } @@ -2835,7 +2844,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) PORT_CONTROL_FORWARD_UNKNOWN_MC; } if (reg) { - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL, reg); if (ret) return ret; @@ -2844,15 +2853,15 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) /* If this port is connected to a SerDes, make sure the SerDes is not * powered down. */ - if (mv88e6xxx_6352_family(ps)) { - ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS); + if (mv88e6xxx_6352_family(chip)) { + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); if (ret < 0) return ret; ret &= PORT_STATUS_CMODE_MASK; if ((ret == PORT_STATUS_CMODE_100BASE_X) || (ret == PORT_STATUS_CMODE_1000BASE_X) || (ret == PORT_STATUS_CMODE_SGMII)) { - ret = mv88e6xxx_power_on_serdes(ps); + ret = mv88e6xxx_power_on_serdes(chip); if (ret < 0) return ret; } @@ -2865,17 +2874,17 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) * copy of all transmitted/received frames on this port to the CPU. 
*/ reg = 0; - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) || - mv88e6xxx_6185_family(ps)) + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) || + mv88e6xxx_6185_family(chip)) reg = PORT_CONTROL_2_MAP_DA; - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps)) + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6320_family(chip)) reg |= PORT_CONTROL_2_JUMBO_10240; - if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) { + if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) { /* Set the upstream port this port should use */ reg |= dsa_upstream_port(ds); /* enable forwarding of unknown multicast addresses to @@ -2888,7 +2897,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) reg |= PORT_CONTROL_2_8021Q_DISABLED; if (reg) { - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2, reg); if (ret) return ret; @@ -2904,24 +2913,25 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) if (dsa_is_cpu_port(ds, port)) reg = 0; - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg); + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR, + reg); if (ret) return ret; /* Egress rate control 2: disable egress rate control. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2, + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2, 0x0000); if (ret) return ret; - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6320_family(ps)) { + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { /* Do not limit the period of time that this port can * be paused for by the remote end or the period of * time that this port can pause the remote end. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PAUSE_CTRL, 0x0000); if (ret) return ret; @@ -2930,12 +2940,12 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) * address database entries that this port is allowed * to use. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ATU_CONTROL, 0x0000); /* Priority Override: disable DA, SA and VTU priority * override. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PRI_OVERRIDE, 0x0000); if (ret) return ret; @@ -2943,14 +2953,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) /* Port Ethertype: use the Ethertype DSA Ethertype * value. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ETH_TYPE, ETH_P_EDSA); if (ret) return ret; /* Tag Remap: use an identity 802.1p prio -> switch * prio mapping. 
*/ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_TAG_REGMAP_0123, 0x3210); if (ret) return ret; @@ -2958,18 +2968,18 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) /* Tag Remap 2: use an identity 802.1p prio -> switch * prio mapping. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_TAG_REGMAP_4567, 0x7654); if (ret) return ret; } - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || - mv88e6xxx_6320_family(ps)) { + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) || + mv88e6xxx_6320_family(chip)) { /* Rate Control: disable ingress rate limiting. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL, 0x0001); if (ret) return ret; @@ -2978,7 +2988,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) /* Port Control 1: disable trunking, disable sending * learning messages to this port. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000); + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1, + 0x0000); if (ret) return ret; @@ -2986,18 +2997,18 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) * database, and allow bidirectional communication between the * CPU and DSA port(s), and the other ports. */ - ret = _mv88e6xxx_port_fid_set(ps, port, 0); + ret = _mv88e6xxx_port_fid_set(chip, port, 0); if (ret) return ret; - ret = _mv88e6xxx_port_based_vlan_map(ps, port); + ret = _mv88e6xxx_port_based_vlan_map(chip, port); if (ret) return ret; /* Default VLAN ID and priority: don't set a default VLAN * ID, and set the default packet priority to zero. */ - ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN, + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN, 0x0000); if (ret) return ret; @@ -3005,9 +3016,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port) return 0; } -static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_setup_global(struct mv88e6xxx_chip *chip) { - struct dsa_switch *ds = ps->ds; + struct dsa_switch *ds = chip->ds; u32 upstream_port = dsa_upstream_port(ds); u16 reg; int err; @@ -3017,11 +3028,11 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) * and mask all interrupt sources. 
*/ reg = 0; - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) || - mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE)) + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU) || + mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE)) reg |= GLOBAL_CONTROL_PPU_ENABLE; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg); if (err) return err; @@ -3031,12 +3042,13 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, + reg); if (err) return err; /* Disable remote management, and set the switch's DSA device number. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2, GLOBAL_CONTROL_2_MULTIPLE_CASCADE | (ds->index & 0x1f)); if (err) @@ -3046,46 +3058,47 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) * enable address learn messages to be sent to all message * ports. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL); if (err) return err; /* Configure the IP ToS mapping registers. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); if (err) return err; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); if (err) return err; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); if (err) return err; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); if (err) return err; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); if (err) return err; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); if (err) return err; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); if (err) return err; - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); if (err) return err; /* Configure the IEEE 802.1p priority mapping register. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); if (err) return err; /* Send all frames with destination addresses matching * 01:80:c2:00:00:0x to the CPU port. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff); + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, + 0xffff); if (err) return err; @@ -3094,7 +3107,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) * highest, and send all special multicast frames to the CPU * port at the highest priority. 
*/ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 | GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI); if (err) @@ -3108,7 +3121,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) nexthop = ds->rtable[i] & 0x1f; err = _mv88e6xxx_reg_write( - ps, REG_GLOBAL2, + chip, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, GLOBAL2_DEVICE_MAPPING_UPDATE | (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop); @@ -3118,10 +3131,11 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) /* Clear all trunk masks. */ for (i = 0; i < 8; i++) { - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, + GLOBAL2_TRUNK_MASK, 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) | - ((1 << ps->info->num_ports) - 1)); + ((1 << chip->info->num_ports) - 1)); if (err) return err; } @@ -3129,7 +3143,7 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) /* Clear all trunk mappings. */ for (i = 0; i < 16; i++) { err = _mv88e6xxx_reg_write( - ps, REG_GLOBAL2, + chip, REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, GLOBAL2_TRUNK_MAPPING_UPDATE | (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); @@ -3137,13 +3151,13 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) return err; } - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6320_family(ps)) { + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { /* Send all frames with destination addresses matching * 01:80:c2:00:00:2x to the CPU port. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff); if (err) return err; @@ -3151,14 +3165,14 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) /* Initialise cross-chip port VLAN table to reset * defaults. */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000); if (err) return err; /* Clear the priority override table. */ for (i = 0; i < 16; i++) { - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, 0x8000 | (i << 8)); if (err) @@ -3166,16 +3180,16 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) } } - if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) || - mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) || - mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) || - mv88e6xxx_6320_family(ps)) { + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) || + mv88e6xxx_6320_family(chip)) { /* Disable ingress rate limiting by resetting all * ingress rate limit registers to their initial * state. 
*/ - for (i = 0; i < ps->info->num_ports; i++) { - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, + for (i = 0; i < chip->info->num_ports; i++) { + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_INGRESS_OP, 0x9000 | (i << 8)); if (err) @@ -3184,23 +3198,23 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) } /* Clear the statistics counters for all ports */ - err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP, + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL); if (err) return err; /* Wait for the flush to complete. */ - err = _mv88e6xxx_stats_wait(ps); + err = _mv88e6xxx_stats_wait(chip); if (err) return err; /* Clear all ATU entries */ - err = _mv88e6xxx_atu_flush(ps, 0, true); + err = _mv88e6xxx_atu_flush(chip, 0, true); if (err) return err; /* Clear all the VTU and STU entries */ - err = _mv88e6xxx_vtu_stu_flush(ps); + err = _mv88e6xxx_vtu_stu_flush(chip); if (err < 0) return err; @@ -3209,34 +3223,34 @@ static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps) static int mv88e6xxx_setup(struct dsa_switch *ds) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int err; int i; - ps->ds = ds; - ds->slave_mii_bus = ps->mdio_bus; + chip->ds = ds; + ds->slave_mii_bus = chip->mdio_bus; - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM)) - mutex_init(&ps->eeprom_mutex); + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM)) + mutex_init(&chip->eeprom_mutex); - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - err = mv88e6xxx_switch_reset(ps); + err = mv88e6xxx_switch_reset(chip); if (err) goto unlock; - err = mv88e6xxx_setup_global(ps); + err = mv88e6xxx_setup_global(chip); if (err) goto unlock; - for (i = 0; i < ps->info->num_ports; i++) { - err = mv88e6xxx_setup_port(ps, i); + for (i = 0; i < chip->info->num_ports; i++) { + err = mv88e6xxx_setup_port(chip, i); if (err) goto unlock; } unlock: - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return err; } @@ -3244,12 +3258,12 @@ unlock: static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, int reg) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int ret; - mutex_lock(&ps->reg_lock); - ret = _mv88e6xxx_mdio_page_read(ps, port, page, reg); - mutex_unlock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg); + mutex_unlock(&chip->reg_lock); return ret; } @@ -3257,87 +3271,86 @@ static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, int reg, int val) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int ret; - mutex_lock(&ps->reg_lock); - ret = _mv88e6xxx_mdio_page_write(ps, port, page, reg, val); - mutex_unlock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val); + mutex_unlock(&chip->reg_lock); return ret; } -static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_priv_state *ps, - int port) +static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) { - if (port >= 0 && port < ps->info->num_ports) + if (port >= 0 && port < chip->info->num_ports) return port; return -EINVAL; } static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum) { - struct mv88e6xxx_priv_state *ps = bus->priv; - int addr = mv88e6xxx_port_to_mdio_addr(ps, 
port); + struct mv88e6xxx_chip *chip = bus->priv; + int addr = mv88e6xxx_port_to_mdio_addr(chip, port); int ret; if (addr < 0) return 0xffff; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) - ret = mv88e6xxx_mdio_read_ppu(ps, addr, regnum); - else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY)) - ret = mv88e6xxx_mdio_read_indirect(ps, addr, regnum); + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum); + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) + ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum); else - ret = mv88e6xxx_mdio_read_direct(ps, addr, regnum); + ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum); - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return ret; } static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum, u16 val) { - struct mv88e6xxx_priv_state *ps = bus->priv; - int addr = mv88e6xxx_port_to_mdio_addr(ps, port); + struct mv88e6xxx_chip *chip = bus->priv; + int addr = mv88e6xxx_port_to_mdio_addr(chip, port); int ret; if (addr < 0) return 0xffff; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) - ret = mv88e6xxx_mdio_write_ppu(ps, addr, regnum, val); - else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY)) - ret = mv88e6xxx_mdio_write_indirect(ps, addr, regnum, val); + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val); + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) + ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val); else - ret = mv88e6xxx_mdio_write_direct(ps, addr, regnum, val); + ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val); - mutex_unlock(&ps->reg_lock); + mutex_unlock(&chip->reg_lock); return ret; } -static int mv88e6xxx_mdio_register(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, struct device_node *np) { static int index; struct mii_bus *bus; int err; - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU)) - mv88e6xxx_ppu_state_init(ps); + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + mv88e6xxx_ppu_state_init(chip); if (np) - ps->mdio_np = of_get_child_by_name(np, "mdio"); + chip->mdio_np = of_get_child_by_name(np, "mdio"); - bus = devm_mdiobus_alloc(ps->dev); + bus = devm_mdiobus_alloc(chip->dev); if (!bus) return -ENOMEM; - bus->priv = (void *)ps; + bus->priv = (void *)chip; if (np) { bus->name = np->full_name; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); @@ -3348,89 +3361,89 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_priv_state *ps, bus->read = mv88e6xxx_mdio_read; bus->write = mv88e6xxx_mdio_write; - bus->parent = ps->dev; + bus->parent = chip->dev; - if (ps->mdio_np) - err = of_mdiobus_register(bus, ps->mdio_np); + if (chip->mdio_np) + err = of_mdiobus_register(bus, chip->mdio_np); else err = mdiobus_register(bus); if (err) { - dev_err(ps->dev, "Cannot register MDIO bus (%d)\n", err); + dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); goto out; } - ps->mdio_bus = bus; + chip->mdio_bus = bus; return 0; out: - if (ps->mdio_np) - of_node_put(ps->mdio_np); + if (chip->mdio_np) + of_node_put(chip->mdio_np); return err; } -static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_priv_state *ps) +static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) { - struct mii_bus *bus = ps->mdio_bus; + struct mii_bus *bus = chip->mdio_bus; mdiobus_unregister(bus); - if (ps->mdio_np) - 
of_node_put(ps->mdio_np); + if (chip->mdio_np) + of_node_put(chip->mdio_np); } #ifdef CONFIG_NET_DSA_HWMON static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); int ret; int val; *temp = 0; - mutex_lock(&ps->reg_lock); + mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x16, 0x6); + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6); if (ret < 0) goto error; /* Enable temperature sensor */ - ret = mv88e6xxx_mdio_read_direct(ps, 0x0, 0x1a); + ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); if (ret < 0) goto error; - ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x1a, ret | (1 << 5)); + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5)); if (ret < 0) goto error; /* Wait for temperature to stabilize */ usleep_range(10000, 12000); - val = mv88e6xxx_mdio_read_direct(ps, 0x0, 0x1a); + val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); if (val < 0) { ret = val; goto error; } /* Disable temperature sensor */ - ret = mv88e6xxx_mdio_write_direct(ps, 0x0, 0x1a, ret & ~(1 << 5)); + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret & ~(1 << 5)); if (ret < 0) goto error; *temp = ((val & 0x1f) - 5) * 5; error: - mv88e6xxx_mdio_write_direct(ps, 0x0, 0x16, 0x0); - mutex_unlock(&ps->reg_lock); + mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0); + mutex_unlock(&chip->reg_lock); return ret; } static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; int ret; *temp = 0; @@ -3446,12 +3459,12 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP)) return -EOPNOTSUPP; - if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps)) + if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) return mv88e63xx_get_temp(ds, temp); return mv88e61xx_get_temp(ds, temp); @@ -3459,11 +3472,11 @@ static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; int ret; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) return -EOPNOTSUPP; *temp = 0; @@ -3479,11 +3492,11 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; int ret; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) return -EOPNOTSUPP; ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); @@ -3496,11 +3509,11 @@ static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int phy = mv88e6xxx_6320_family(ps) ? 3 : 0; + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; int ret; - if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT)) + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) return -EOPNOTSUPP; *alarm = false; @@ -3698,12 +3711,13 @@ static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num) return NULL; } -static int mv88e6xxx_detect(struct mv88e6xxx_priv_state *ps) +static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip) { const struct mv88e6xxx_info *info; int id, prod_num, rev; - id = mv88e6xxx_reg_read(ps, ps->info->port_base_addr, PORT_SWITCH_ID); + id = mv88e6xxx_reg_read(chip, chip->info->port_base_addr, + PORT_SWITCH_ID); if (id < 0) return id; @@ -3715,30 +3729,30 @@ static int mv88e6xxx_detect(struct mv88e6xxx_priv_state *ps) return -ENODEV; /* Update the compatible info with the probed one */ - ps->info = info; + chip->info = info; - dev_info(ps->dev, "switch 0x%x detected: %s, revision %u\n", - ps->info->prod_num, ps->info->name, rev); + dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n", + chip->info->prod_num, chip->info->name, rev); return 0; } -static struct mv88e6xxx_priv_state *mv88e6xxx_alloc_chip(struct device *dev) +static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) { - struct mv88e6xxx_priv_state *ps; + struct mv88e6xxx_chip *chip; - ps = devm_kzalloc(dev, sizeof(*ps), GFP_KERNEL); - if (!ps) + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) return NULL; - ps->dev = dev; + chip->dev = dev; - mutex_init(&ps->reg_lock); + mutex_init(&chip->reg_lock); - return ps; + return chip; } -static int mv88e6xxx_smi_init(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, struct mii_bus *bus, int sw_addr) { /* ADDR[0] pin is unavailable externally and considered zero */ @@ -3746,14 +3760,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_priv_state *ps, return -EINVAL; if (sw_addr == 0) - ps->smi_ops = &mv88e6xxx_smi_single_chip_ops; - else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_MULTI_CHIP)) - ps->smi_ops = &mv88e6xxx_smi_multi_chip_ops; + chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP)) + chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; else return -EINVAL; - ps->bus = bus; - ps->sw_addr = sw_addr; + chip->bus = bus; + chip->sw_addr = sw_addr; return 0; } @@ -3762,7 +3776,7 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv) { - struct mv88e6xxx_priv_state *ps; + struct mv88e6xxx_chip *chip; struct mii_bus *bus; int err; @@ -3770,30 +3784,30 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, if (!bus) return NULL; - ps = mv88e6xxx_alloc_chip(dsa_dev); - if (!ps) + chip = mv88e6xxx_alloc_chip(dsa_dev); + if (!chip) return NULL; /* Legacy SMI probing will only support chips similar to 88E6085 */ - ps->info = &mv88e6xxx_table[MV88E6085]; + chip->info = &mv88e6xxx_table[MV88E6085]; - err = mv88e6xxx_smi_init(ps, bus, sw_addr); + err = 
mv88e6xxx_smi_init(chip, bus, sw_addr); if (err) goto free; - err = mv88e6xxx_detect(ps); + err = mv88e6xxx_detect(chip); if (err) goto free; - err = mv88e6xxx_mdio_register(ps, NULL); + err = mv88e6xxx_mdio_register(chip, NULL); if (err) goto free; - *priv = ps; + *priv = chip; - return ps->info->name; + return chip->info->name; free: - devm_kfree(dsa_dev, ps); + devm_kfree(dsa_dev, chip); return NULL; } @@ -3834,10 +3848,10 @@ static struct dsa_switch_driver mv88e6xxx_switch_driver = { .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; -static int mv88e6xxx_register_switch(struct mv88e6xxx_priv_state *ps, +static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, struct device_node *np) { - struct device *dev = ps->dev; + struct device *dev = chip->dev; struct dsa_switch *ds; ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); @@ -3845,7 +3859,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_priv_state *ps, return -ENOMEM; ds->dev = dev; - ds->priv = ps; + ds->priv = chip; ds->drv = &mv88e6xxx_switch_driver; dev_set_drvdata(dev, ds); @@ -3853,9 +3867,9 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_priv_state *ps, return dsa_register_switch(ds, np); } -static void mv88e6xxx_unregister_switch(struct mv88e6xxx_priv_state *ps) +static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) { - dsa_unregister_switch(ps->ds); + dsa_unregister_switch(chip->ds); } static int mv88e6xxx_probe(struct mdio_device *mdiodev) @@ -3863,7 +3877,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) struct device *dev = &mdiodev->dev; struct device_node *np = dev->of_node; const struct mv88e6xxx_info *compat_info; - struct mv88e6xxx_priv_state *ps; + struct mv88e6xxx_chip *chip; u32 eeprom_len; int err; @@ -3871,35 +3885,35 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (!compat_info) return -EINVAL; - ps = mv88e6xxx_alloc_chip(dev); - if (!ps) + chip = mv88e6xxx_alloc_chip(dev); + if (!chip) return -ENOMEM; - ps->info = compat_info; + chip->info = compat_info; - err = mv88e6xxx_smi_init(ps, mdiodev->bus, mdiodev->addr); + err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); if (err) return err; - err = mv88e6xxx_detect(ps); + err = mv88e6xxx_detect(chip); if (err) return err; - ps->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); - if (IS_ERR(ps->reset)) - return PTR_ERR(ps->reset); + chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); + if (IS_ERR(chip->reset)) + return PTR_ERR(chip->reset); - if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) && + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEPROM) && !of_property_read_u32(np, "eeprom-length", &eeprom_len)) - ps->eeprom_len = eeprom_len; + chip->eeprom_len = eeprom_len; - err = mv88e6xxx_mdio_register(ps, np); + err = mv88e6xxx_mdio_register(chip, np); if (err) return err; - err = mv88e6xxx_register_switch(ps, np); + err = mv88e6xxx_register_switch(chip, np); if (err) { - mv88e6xxx_mdio_unregister(ps); + mv88e6xxx_mdio_unregister(chip); return err; } @@ -3909,10 +3923,10 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) static void mv88e6xxx_remove(struct mdio_device *mdiodev) { struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); - mv88e6xxx_unregister_switch(ps); - mv88e6xxx_mdio_unregister(ps); + mv88e6xxx_unregister_switch(chip); + mv88e6xxx_mdio_unregister(chip); } static const struct of_device_id mv88e6xxx_of_match[] = { diff --git a/drivers/net/dsa/mv88e6xxx.h 
b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index a94acd887929..83f06620133d 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -1,5 +1,6 @@ /* - * net/dsa/mv88e6xxx.h - Marvell 88e6xxx switch chip support + * Marvell 88e6xxx common definitions + * * Copyright (c) 2008 Marvell Semiconductor * * This program is free software; you can redistribute it and/or modify @@ -562,7 +563,7 @@ struct mv88e6xxx_priv_port { struct net_device *bridge_dev; }; -struct mv88e6xxx_priv_state { +struct mv88e6xxx_chip { const struct mv88e6xxx_info *info; /* The dsa_switch this private structure is related to */ @@ -624,10 +625,8 @@ struct mv88e6xxx_priv_state { }; struct mv88e6xxx_ops { - int (*read)(struct mv88e6xxx_priv_state *ps, - int addr, int reg, u16 *val); - int (*write)(struct mv88e6xxx_priv_state *ps, - int addr, int reg, u16 val); + int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); + int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); }; enum stat_type { @@ -643,10 +642,10 @@ struct mv88e6xxx_hw_stat { enum stat_type type; }; -static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps, +static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip, unsigned long flags) { - return (ps->info->flags & flags) == flags; + return (chip->info->flags & flags) == flags; } #endif diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index cb07d95e3dd9..89c0cfa9719f 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -304,7 +304,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) static void macb_handle_link_change(struct net_device *dev) { struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int status_change = 0; @@ -414,7 +414,6 @@ static int macb_mii_probe(struct net_device *dev) bp->link = 0; bp->speed = 0; bp->duplex = -1; - bp->phy_dev = phydev; return 0; } @@ -1886,7 +1885,7 @@ static int macb_open(struct net_device *dev) netif_carrier_off(dev); /* if the phy is not yet register, retry later*/ - if (!bp->phy_dev) + if (!dev->phydev) return -EAGAIN; /* RX buffers initialization */ @@ -1905,7 +1904,7 @@ static int macb_open(struct net_device *dev) macb_init_hw(bp); /* schedule a link state check */ - phy_start(bp->phy_dev); + phy_start(dev->phydev); netif_tx_start_all_queues(dev); @@ -1920,8 +1919,8 @@ static int macb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); napi_disable(&bp->napi); - if (bp->phy_dev) - phy_stop(bp->phy_dev); + if (dev->phydev) + phy_stop(dev->phydev); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); @@ -2092,28 +2091,6 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) return nstat; } -static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - static int macb_get_regs_len(struct net_device *netdev) { return MACB_GREGS_NBR * sizeof(u32); @@ -2186,19 +2163,17 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) } 
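The macb hunks above and below replace the driver's hand-rolled get_settings/set_settings handlers (which called phy_ethtool_gset()/phy_ethtool_sset() on a private bp->phy_dev pointer) with the generic phylib ksettings helpers operating on dev->phydev. A minimal sketch of the resulting pattern, assuming a driver whose PHY is attached through phylib; the example_* names are illustrative and not taken from the patch:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* With the attached PHY tracked in net_device->phydev, the generic
 * phylib helpers can be wired into ethtool_ops directly:
 */
static const struct ethtool_ops example_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/* Equivalent explicit handler for a driver that wants its own checks
 * before delegating to phylib (sketch only):
 */
static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_get_link_ksettings(dev, cmd);
}

The macb and gem ethtool_ops in the hunk below wire the helpers up verbatim, which is why the per-driver macb_get_settings()/macb_set_settings() wrappers can be deleted outright.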
static const struct ethtool_ops macb_ethtool_ops = { - .get_settings = macb_get_settings, - .set_settings = macb_set_settings, .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_wol = macb_get_wol, .set_wol = macb_set_wol, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct ethtool_ops gem_ethtool_ops = { - .get_settings = macb_get_settings, - .set_settings = macb_set_settings, .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, @@ -2206,12 +2181,13 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -2570,7 +2546,7 @@ static int at91ether_open(struct net_device *dev) MACB_BIT(HRESP)); /* schedule a link state check */ - phy_start(lp->phy_dev); + phy_start(dev->phydev); netif_start_queue(dev); @@ -3010,7 +2986,7 @@ static int macb_probe(struct platform_device *pdev) if (err) goto err_out_free_netdev; - phydev = bp->phy_dev; + phydev = dev->phydev; netif_carrier_off(dev); @@ -3029,7 +3005,7 @@ static int macb_probe(struct platform_device *pdev) return 0; err_out_unregister_mdio: - phy_disconnect(bp->phy_dev); + phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); @@ -3057,8 +3033,8 @@ static int macb_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); - if (bp->phy_dev) - phy_disconnect(bp->phy_dev); + if (dev->phydev) + phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 8a13824ef802..36893d8958d4 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -823,7 +823,6 @@ struct macb { struct macb_or_gem_ops macbgem_ops; struct mii_bus *mii_bus; - struct phy_device *phy_dev; int link; int speed; int duplex; diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c index 8ad7425f89bf..d35864ada9a3 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -367,7 +367,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct) void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) { - u32 mask, i, loop = HZ; + int i; + u32 mask, loop = HZ; u32 d32; /* Reset the Enable bits for Input Queues. */ @@ -376,7 +377,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); /* Wait until hardware indicates that the queues are out of reset. */ - mask = oct->io_qmask.iq; + mask = (u32)oct->io_qmask.iq; d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); while (((d32 & mask) != mask) && loop--) { d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); @@ -384,8 +385,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) } /* Reset the doorbell register for each Input queue. 
*/ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF); d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i)); @@ -398,7 +399,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) /* Wait until hardware indicates that the queues are out of reset. */ loop = HZ; - mask = oct->io_qmask.oq; + mask = (u32)oct->io_qmask.oq; d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); while (((d32 & mask) != mask) && loop--) { d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); @@ -408,8 +409,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) /* Reset the doorbell register for each Output queue. */ /* for (i = 0; i < oct->num_oqs; i++) { */ - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF); d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i)); @@ -429,16 +430,16 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) void lio_cn6xxx_reinit_regs(struct octeon_device *oct) { - u32 i; + int i; - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; oct->fn_list.setup_iq_regs(oct, i); } - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; oct->fn_list.setup_oq_regs(oct, i); } @@ -450,8 +451,8 @@ void lio_cn6xxx_reinit_regs(struct octeon_device *oct) oct->fn_list.enable_io_queues(oct); /* for (i = 0; i < oct->num_oqs; i++) { */ - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg); } @@ -495,8 +496,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx) } u32 -lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), - struct octeon_instr_queue *iq) +lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq) { u32 new_idx = readl(iq->inst_cnt_reg); @@ -557,7 +557,8 @@ lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64) int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) { struct octeon_droq *droq; - u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb; + int oq_no; + u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb; u32 droq_cnt_enb, droq_cnt_mask; droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); @@ -573,8 +574,8 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) oct->droq_intr = 0; /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */ - for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { - if (!(droq_mask & (1 << oq_no))) + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) { + if (!(droq_mask & (1ULL << oq_no))) continue; droq = oct->droq[oq_no]; diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h index f77918779355..fe2932cb7ed8 100644 --- 
a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h @@ -91,8 +91,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask); u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx); u32 -lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), - struct octeon_instr_queue *iq); +lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq); void lio_cn6xxx_enable_interrupt(void *chip); void lio_cn6xxx_disable_interrupt(void *chip); void cn6xxx_get_pcie_qlmport(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 4523c8662ed2..03bfa9771e4d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -40,6 +40,8 @@ #include "cn68xx_device.h" #include "liquidio_image.h" +static int octnet_get_link_stats(struct net_device *netdev); + struct oct_mdio_cmd_context { int octeon_id; wait_queue_head_t wc; @@ -71,34 +73,120 @@ enum { INTERFACE_MODE_RXAUI, INTERFACE_MODE_QSGMII, INTERFACE_MODE_AGL, + INTERFACE_MODE_XLAUI, + INTERFACE_MODE_XFI, + INTERFACE_MODE_10G_KR, + INTERFACE_MODE_40G_KR4, + INTERFACE_MODE_MIXED, }; #define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0])) #define OCT_ETHTOOL_REGDUMP_LEN 4096 #define OCT_ETHTOOL_REGSVER 1 +/* statistics of PF */ +static const char oct_stats_strings[][ETH_GSTRING_LEN] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", /*jabber_err+l2_err+frame_err */ + "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */ + "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd + *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop + */ + "tx_dropped", + + "tx_total_sent", + "tx_total_fwd", + "tx_err_pko", + "tx_err_link", + "tx_err_drop", + + "tx_tso", + "tx_tso_packets", + "tx_tso_err", + + "mac_tx_total_pkts", + "mac_tx_total_bytes", + "mac_tx_mcast_pkts", + "mac_tx_bcast_pkts", + "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */ + "mac_tx_total_collisions", + "mac_tx_one_collision", + "mac_tx_multi_collison", + "mac_tx_max_collision_fail", + "mac_tx_max_deferal_fail", + "mac_tx_fifo_err", + "mac_tx_runts", + + "rx_total_rcvd", + "rx_total_fwd", + "rx_jabber_err", + "rx_l2_err", + "rx_frame_err", + "rx_err_pko", + "rx_err_link", + "rx_err_drop", + + "rx_lro_pkts", + "rx_lro_bytes", + "rx_total_lro", + + "rx_lro_aborts", + "rx_lro_aborts_port", + "rx_lro_aborts_seq", + "rx_lro_aborts_tsval", + "rx_lro_aborts_timer", + "rx_fwd_rate", + + "mac_rx_total_rcvd", + "mac_rx_bytes", + "mac_rx_total_bcst", + "mac_rx_total_mcst", + "mac_rx_runts", + "mac_rx_ctl_packets", + "mac_rx_fifo_err", + "mac_rx_dma_drop", + "mac_rx_fcs_err", + + "link_state_changes", +}; + +/* statistics of host tx queue */ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { - "Instr posted", - "Instr processed", - "Instr dropped", - "Bytes Sent", - "Sgentry_sent", - "Inst cntreg", - "Tx done", - "Tx Iq busy", - "Tx dropped", - "Tx bytes", + "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/ + "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/ + "dropped", + "iq_busy", + "sgentry_sent", + + "fw_instr_posted", + "fw_instr_processed", + "fw_instr_dropped", + "fw_bytes_sent", + + "tso", + "txq_restart", }; +/* statistics of host rx queue */ static const char 
oct_droq_stats_strings[][ETH_GSTRING_LEN] = { - "OQ Pkts Received", - "OQ Bytes Received", - "Dropped no dispatch", - "Dropped nomem", - "Dropped toomany", - "Stack RX cnt", - "Stack RX Bytes", - "RX dropped", + "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */ + "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */ + "dropped", /*oct->droq[oq_no]->stats.rx_dropped+ + *oct->droq[oq_no]->stats.dropped_nodispatch+ + *oct->droq[oq_no]->stats.dropped_toomany+ + *oct->droq[oq_no]->stats.dropped_nomem + */ + "dropped_nomem", + "dropped_toomany", + "fw_dropped", + "fw_pkts_received", + "fw_bytes_received", + "fw_dropped_nodispatch", + + "buffer_alloc_failure", }; #define OCTNIC_NCMD_AUTONEG_ON 0x1 @@ -112,8 +200,9 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) linfo = &lio->linfo; - if (linfo->link.s.interface == INTERFACE_MODE_XAUI || - linfo->link.s.interface == INTERFACE_MODE_RXAUI) { + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { ecmd->port = PORT_FIBRE; ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | @@ -124,7 +213,8 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->autoneg = AUTONEG_DISABLE; } else { - dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n"); + dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", + linfo->link.s.if_mode); } if (linfo->link.s.link_up) { @@ -516,8 +606,13 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) /* Notes: Not supporting any auto negotiation in these * drivers. Just report pause frame support. */ - pause->tx_pause = 1; - pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + pause->autoneg = 0; + + pause->tx_pause = oct->tx_pause; + pause->rx_pause = oct->rx_pause; } static void @@ -526,51 +621,245 @@ lio_get_ethtool_stats(struct net_device *netdev, { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; + struct net_device_stats *netstats = &netdev->stats; int i = 0, j; - for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) { - if (!(oct_dev->io_qmask.iq & (1UL << j))) + netdev->netdev_ops->ndo_get_stats(netdev); + octnet_get_link_stats(netdev); + + /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ + data[i++] = CVM_CAST64(netstats->rx_packets); + /*sum of oct->instr_queue[iq_no]->stats.tx_done */ + data[i++] = CVM_CAST64(netstats->tx_packets); + /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ + data[i++] = CVM_CAST64(netstats->rx_bytes); + /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ + data[i++] = CVM_CAST64(netstats->tx_bytes); + data[i++] = CVM_CAST64(netstats->rx_errors); + data[i++] = CVM_CAST64(netstats->tx_errors); + /*sum of oct->droq[oq_no]->stats->rx_dropped + + *oct->droq[oq_no]->stats->dropped_nodispatch + + *oct->droq[oq_no]->stats->dropped_toomany + + *oct->droq[oq_no]->stats->dropped_nomem + */ + data[i++] = CVM_CAST64(netstats->rx_dropped); + /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ + data[i++] = CVM_CAST64(netstats->tx_dropped); + + /*data[i++] = CVM_CAST64(stats->multicast); */ + /*data[i++] = CVM_CAST64(stats->collisions); */ + + /* firmware tx stats */ + /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 
+ *fromhost.fw_total_sent + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); + /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_err_drop + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); + + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_tso_fwd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_err_tso + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); + + /* mac tx statistics */ + /*CVMX_BGXX_CMRX_TX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT4 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); + /*CVMX_BGXX_CMRX_TX_STAT15 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT14 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT17 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); + /*CVMX_BGXX_CMRX_TX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); + /*CVMX_BGXX_CMRX_TX_STAT3 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); + /*CVMX_BGXX_CMRX_TX_STAT2 */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); + /*CVMX_BGXX_CMRX_TX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); + /*CVMX_BGXX_CMRX_TX_STAT1 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); + /*CVMX_BGXX_CMRX_TX_STAT16 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); + /*CVMX_BGXX_CMRX_TX_STAT6 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); + + /* RX firmware stats */ + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_total_rcvd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_total_fwd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_err_pko + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. 
+ *fromwire.fw_err_drop + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); + + /* LRO */ + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_pkts + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_octs + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); + /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); + /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_port + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_seq + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_tsval + */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_timer + */ + /* intrmod: packet forward rate */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer); + /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate); + + /* mac: link-level stats */ + /*CVMX_BGXX_CMRX_RX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd); + /*CVMX_BGXX_CMRX_RX_STAT1 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd); + /*CVMX_PKI_STATX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst); + /*CVMX_PKI_STATX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst); + /*wqe->word2.err_code or wqe->word2.err_level */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts); + /*CVMX_BGXX_CMRX_RX_STAT2 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd); + /*CVMX_BGXX_CMRX_RX_STAT6 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err); + /*CVMX_BGXX_CMRX_RX_STAT4 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop); + /*wqe->word2.err_code or wqe->word2.err_level */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err); + /*lio->link_changes*/ + data[i++] = CVM_CAST64(lio->link_changes); + + /* TX -- lio_update_stats(lio); */ + for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { + if (!(oct_dev->io_qmask.iq & (1ULL << j))) continue; + /*packets to network port*/ + /*# of packets tx to network */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); + /*# of bytes tx to network */ data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); - data[i++] = - CVM_CAST64( - oct_dev->instr_queue[j]->stats.instr_processed); + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); + /*# of packets dropped */ data[i++] = - CVM_CAST64( - oct_dev->instr_queue[j]->stats.instr_dropped); + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); + /*# of tx fails due to queue full */ data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); + /*XXX gather entries sent */ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); + + /*instruction to firmware: data and control */ + /*# of 
instructions to the queue */ data[i++] = - readl(oct_dev->instr_queue[j]->inst_cnt_reg); - data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); - data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); + CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); + /*# of instructions processed */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]-> + stats.instr_processed); + /*# of instructions could not be processed */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]-> + stats.instr_dropped); + /*bytes sent through the queue */ data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); + CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); + + /*tso request*/ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); + /*txq restart*/ data[i++] = - CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); } - /* for (j = 0; j < oct_dev->num_oqs; j++){ */ - for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) { - if (!(oct_dev->io_qmask.oq & (1UL << j))) + /* RX */ + /* for (j = 0; j < oct_dev->num_oqs; j++) { */ + for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) { + if (!(oct_dev->io_qmask.oq & (1ULL << j))) continue; - data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); - data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); - data[i++] = - CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); - data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); - data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); + + /*packets send to TCP/IP network stack */ + /*# of packets to network stack */ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); + /*# of bytes to network stack */ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); + /*# of packets dropped */ + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + + oct_dev->droq[j]->stats.dropped_toomany + + oct_dev->droq[j]->stats.rx_dropped); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); + + /*control and data path*/ + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); } } @@ -579,26 +868,43 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; int num_iq_stats, num_oq_stats, i, j; + int num_stats; - num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct_dev->io_qmask.iq & (1UL << i))) - continue; - for (j = 0; j < num_iq_stats; j++) { - sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]); + switch (stringset) { + case ETH_SS_STATS: + num_stats = ARRAY_SIZE(oct_stats_strings); + for (j = 0; j < num_stats; j++) { + sprintf(data, "%s", oct_stats_strings[j]); data += ETH_GSTRING_LEN; } - } - num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); - /* for (i = 0; i < oct_dev->num_oqs; i++) { */ - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct_dev->io_qmask.oq & (1UL << i))) - continue; - for (j = 0; j < num_oq_stats; j++) { - sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]); - data += ETH_GSTRING_LEN; + num_iq_stats = 
ARRAY_SIZE(oct_iq_stats_strings); + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.iq & (1ULL << i))) + continue; + for (j = 0; j < num_iq_stats; j++) { + sprintf(data, "tx-%d-%s", i, + oct_iq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } } + + num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); + /* for (i = 0; i < oct_dev->num_oqs; i++) { */ + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.oq & (1ULL << i))) + continue; + for (j = 0; j < num_oq_stats; j++) { + sprintf(data, "rx-%d-%s", i, + oct_droq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + break; + + default: + netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); + break; } } @@ -607,8 +913,14 @@ static int lio_get_sset_count(struct net_device *netdev, int sset) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; - return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) + - (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + switch (sset) { + case ETH_SS_STATS: + return (ARRAY_SIZE(oct_stats_strings) + + ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + + ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + default: + return -EOPNOTSUPP; + } } static int lio_get_intr_coalesce(struct net_device *netdev, @@ -616,50 +928,50 @@ static int lio_get_intr_coalesce(struct net_device *netdev, { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; struct octeon_instr_queue *iq; struct oct_intrmod_cfg *intrmod_cfg; intrmod_cfg = &oct->intrmod; switch (oct->chip_id) { - /* case OCTEON_CN73XX: Todo */ - /* break; */ case OCTEON_CN68XX: - case OCTEON_CN66XX: - if (!intrmod_cfg->intrmod_enable) { + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + if (!intrmod_cfg->rx_enable) { intr_coal->rx_coalesce_usecs = CFG_GET_OQ_INTR_TIME(cn6xxx->conf); intr_coal->rx_max_coalesced_frames = CFG_GET_OQ_INTR_PKT(cn6xxx->conf); - } else { - intr_coal->use_adaptive_rx_coalesce = - intrmod_cfg->intrmod_enable; - intr_coal->rate_sample_interval = - intrmod_cfg->intrmod_check_intrvl; - intr_coal->pkt_rate_high = - intrmod_cfg->intrmod_maxpkt_ratethr; - intr_coal->pkt_rate_low = - intrmod_cfg->intrmod_minpkt_ratethr; - intr_coal->rx_max_coalesced_frames_high = - intrmod_cfg->intrmod_maxcnt_trigger; - intr_coal->rx_coalesce_usecs_high = - intrmod_cfg->intrmod_maxtmr_trigger; - intr_coal->rx_coalesce_usecs_low = - intrmod_cfg->intrmod_mintmr_trigger; - intr_coal->rx_max_coalesced_frames_low = - intrmod_cfg->intrmod_mincnt_trigger; } iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; intr_coal->tx_max_coalesced_frames = iq->fill_threshold; break; - + } default: netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); return -EINVAL; } - + if (intrmod_cfg->rx_enable) { + intr_coal->use_adaptive_rx_coalesce = + intrmod_cfg->rx_enable; + intr_coal->rate_sample_interval = + intrmod_cfg->check_intrvl; + intr_coal->pkt_rate_high = + intrmod_cfg->maxpkt_ratethr; + intr_coal->pkt_rate_low = + intrmod_cfg->minpkt_ratethr; + intr_coal->rx_max_coalesced_frames_high = + intrmod_cfg->rx_maxcnt_trigger; + intr_coal->rx_coalesce_usecs_high = + intrmod_cfg->rx_maxtmr_trigger; + intr_coal->rx_coalesce_usecs_low = + intrmod_cfg->rx_mintmr_trigger; + intr_coal->rx_max_coalesced_frames_low = + intrmod_cfg->rx_mincnt_trigger; + } return 0; } @@ -679,19 +991,20 @@ static void octnet_intrmod_callback(struct 
octeon_device *oct_dev, else dev_info(&oct_dev->pci_dev->dev, "Rx-Adaptive Interrupt moderation enabled:%llx\n", - oct_dev->intrmod.intrmod_enable); + oct_dev->intrmod.rx_enable); octeon_free_soft_command(oct_dev, sc); } /* Configure interrupt moderation parameters */ -static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) +static int octnet_set_intrmod_cfg(struct lio *lio, + struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; struct oct_intrmod_cmd *cmd; struct oct_intrmod_cfg *cfg; int retval; - struct octeon_device *oct_dev = (struct octeon_device *)oct; + struct octeon_device *oct_dev = lio->oct_dev; /* Alloc soft command */ sc = (struct octeon_soft_command *) @@ -712,6 +1025,8 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) cmd->cfg = cfg; cmd->oct_dev = oct_dev; + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); @@ -728,9 +1043,158 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) return 0; } +void +octnet_nic_stats_callback(struct octeon_device *oct_dev, + u32 status, void *ptr) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; + struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *) + sc->virtrptr; + struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *) + sc->ctxptr; + struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; + struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; + + struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; + struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost; + + if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->stats, + (sizeof(struct oct_link_stats)) >> 3); + + /* RX link-level stats */ + rstats->total_rcvd = rsp_rstats->total_rcvd; + rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; + rstats->total_bcst = rsp_rstats->total_bcst; + rstats->total_mcst = rsp_rstats->total_mcst; + rstats->runts = rsp_rstats->runts; + rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; + /* Accounts for over/under-run of buffers */ + rstats->fifo_err = rsp_rstats->fifo_err; + rstats->dmac_drop = rsp_rstats->dmac_drop; + rstats->fcs_err = rsp_rstats->fcs_err; + rstats->jabber_err = rsp_rstats->jabber_err; + rstats->l2_err = rsp_rstats->l2_err; + rstats->frame_err = rsp_rstats->frame_err; + + /* RX firmware stats */ + rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; + rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; + rstats->fw_err_pko = rsp_rstats->fw_err_pko; + rstats->fw_err_link = rsp_rstats->fw_err_link; + rstats->fw_err_drop = rsp_rstats->fw_err_drop; + /* Number of packets that are LROed */ + rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; + /* Number of octets that are LROed */ + rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; + /* Number of LRO packets formed */ + rstats->fw_total_lro = rsp_rstats->fw_total_lro; + /* Number of times lRO of packet aborted */ + rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; + rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; + rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; + rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; + rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; + /* intrmod: packet forward rate */ + rstats->fwd_rate = rsp_rstats->fwd_rate; + + /* TX link-level stats */ + tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; + tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; + 
tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; + tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; + tstats->ctl_sent = rsp_tstats->ctl_sent; + /* Packets sent after one collision*/ + tstats->one_collision_sent = rsp_tstats->one_collision_sent; + /* Packets sent after multiple collision*/ + tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; + /* Packets not sent due to max collisions */ + tstats->max_collision_fail = rsp_tstats->max_collision_fail; + /* Packets not sent due to max deferrals */ + tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; + /* Accounts for over/under-run of buffers */ + tstats->fifo_err = rsp_tstats->fifo_err; + tstats->runts = rsp_tstats->runts; + /* Total number of collisions detected */ + tstats->total_collisions = rsp_tstats->total_collisions; + + /* firmware stats */ + tstats->fw_total_sent = rsp_tstats->fw_total_sent; + tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; + tstats->fw_err_pko = rsp_tstats->fw_err_pko; + tstats->fw_err_link = rsp_tstats->fw_err_link; + tstats->fw_err_drop = rsp_tstats->fw_err_drop; + tstats->fw_tso = rsp_tstats->fw_tso; + tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; + tstats->fw_err_tso = rsp_tstats->fw_err_tso; + resp->status = 1; + } else { + resp->status = -1; + } + complete(&ctrl->complete); +} + +/* Configure interrupt moderation parameters */ +static int octnet_get_link_stats(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + + struct octeon_soft_command *sc; + struct oct_nic_stats_ctrl *ctrl; + struct oct_nic_stats_resp *resp; + + int retval; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_stats_resp), + sizeof(struct octnic_ctrl_pkt)); + + if (!sc) + return -ENOMEM; + + resp = (struct oct_nic_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + + ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; + memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); + ctrl->netdev = netdev; + init_completion(&ctrl->complete); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_PORT_STATS, 0, 0, 0); + + sc->callback = octnet_nic_stats_callback; + sc->callback_arg = sc; + sc->wait_time = 500; /*in milli seconds*/ + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + return -EINVAL; + } + + wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); + + if (resp->status != 1) { + octeon_free_soft_command(oct_dev, sc); + + return -EINVAL; + } + + octeon_free_soft_command(oct_dev, sc); + + return 0; +} + /* Enable/Disable auto interrupt Moderation */ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce - *intr_coal, int adaptive) + *intr_coal) { int ret = 0; struct octeon_device *oct = lio->oct_dev; @@ -738,59 +1202,73 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce intrmod_cfg = &oct->intrmod; - if (adaptive) { + if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) { if (intr_coal->rate_sample_interval) - intrmod_cfg->intrmod_check_intrvl = + intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; else - intrmod_cfg->intrmod_check_intrvl = + intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; if (intr_coal->pkt_rate_high) - intrmod_cfg->intrmod_maxpkt_ratethr = + intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; else - 
intrmod_cfg->intrmod_maxpkt_ratethr = + intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; if (intr_coal->pkt_rate_low) - intrmod_cfg->intrmod_minpkt_ratethr = + intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; else - intrmod_cfg->intrmod_minpkt_ratethr = + intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; - + } + if (oct->intrmod.rx_enable) { if (intr_coal->rx_max_coalesced_frames_high) - intrmod_cfg->intrmod_maxcnt_trigger = + intrmod_cfg->rx_maxcnt_trigger = intr_coal->rx_max_coalesced_frames_high; else - intrmod_cfg->intrmod_maxcnt_trigger = - LIO_INTRMOD_MAXCNT_TRIGGER; + intrmod_cfg->rx_maxcnt_trigger = + LIO_INTRMOD_RXMAXCNT_TRIGGER; if (intr_coal->rx_coalesce_usecs_high) - intrmod_cfg->intrmod_maxtmr_trigger = + intrmod_cfg->rx_maxtmr_trigger = intr_coal->rx_coalesce_usecs_high; else - intrmod_cfg->intrmod_maxtmr_trigger = - LIO_INTRMOD_MAXTMR_TRIGGER; + intrmod_cfg->rx_maxtmr_trigger = + LIO_INTRMOD_RXMAXTMR_TRIGGER; if (intr_coal->rx_coalesce_usecs_low) - intrmod_cfg->intrmod_mintmr_trigger = + intrmod_cfg->rx_mintmr_trigger = intr_coal->rx_coalesce_usecs_low; else - intrmod_cfg->intrmod_mintmr_trigger = - LIO_INTRMOD_MINTMR_TRIGGER; + intrmod_cfg->rx_mintmr_trigger = + LIO_INTRMOD_RXMINTMR_TRIGGER; if (intr_coal->rx_max_coalesced_frames_low) - intrmod_cfg->intrmod_mincnt_trigger = + intrmod_cfg->rx_mincnt_trigger = intr_coal->rx_max_coalesced_frames_low; else - intrmod_cfg->intrmod_mincnt_trigger = - LIO_INTRMOD_MINCNT_TRIGGER; + intrmod_cfg->rx_mincnt_trigger = + LIO_INTRMOD_RXMINCNT_TRIGGER; + } + if (oct->intrmod.tx_enable) { + if (intr_coal->tx_max_coalesced_frames_high) + intrmod_cfg->tx_maxcnt_trigger = + intr_coal->tx_max_coalesced_frames_high; + else + intrmod_cfg->tx_maxcnt_trigger = + LIO_INTRMOD_TXMAXCNT_TRIGGER; + if (intr_coal->tx_max_coalesced_frames_low) + intrmod_cfg->tx_mincnt_trigger = + intr_coal->tx_max_coalesced_frames_low; + else + intrmod_cfg->tx_mincnt_trigger = + LIO_INTRMOD_TXMINCNT_TRIGGER; } - intrmod_cfg->intrmod_enable = adaptive; - ret = octnet_set_intrmod_cfg(oct, intrmod_cfg); + ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); return ret; } @@ -798,54 +1276,82 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce static int oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) { - int ret; struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; u32 rx_max_coalesced_frames; - if (!intr_coal->rx_max_coalesced_frames) - rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; - else - rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames; - - /* Disable adaptive interrupt modulation */ - ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); - if (ret) - return ret; - /* Config Cnt based interrupt values */ - octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, - rx_max_coalesced_frames); - CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + if (!intr_coal->rx_max_coalesced_frames) + rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; + else + rx_max_coalesced_frames = + intr_coal->rx_max_coalesced_frames; + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, + rx_max_coalesced_frames); + CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); + break; + } + default: + return -EINVAL; + } return 0; } static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce *intr_coal) { - int ret; 
struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; u32 time_threshold, rx_coalesce_usecs; - if (!intr_coal->rx_coalesce_usecs) - rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; - else - rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; + /* Config Time based interrupt values */ + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + if (!intr_coal->rx_coalesce_usecs) + rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; + else + rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; - /* Disable adaptive interrupt modulation */ - ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); - if (ret) - return ret; + time_threshold = lio_cn6xxx_get_oq_ticks(oct, + rx_coalesce_usecs); + octeon_write_csr(oct, + CN6XXX_SLI_OQ_INT_LEVEL_TIME, + time_threshold); - /* Config Time based interrupt values */ - time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs); - octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold); - CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); + CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); + break; + } + default: + return -EINVAL; + } return 0; } +static int +oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal + __attribute__((unused))) +{ + struct octeon_device *oct = lio->oct_dev; + + /* Config Cnt based interrupt values */ + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + break; + default: + return -EINVAL; + } + return 0; +} + static int lio_set_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *intr_coal) { @@ -853,59 +1359,48 @@ static int lio_set_intr_coalesce(struct net_device *netdev, int ret; struct octeon_device *oct = lio->oct_dev; u32 j, q_no; + int db_max, db_min; - if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) && - (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) { - for (j = 0; j < lio->linfo.num_txpciq; j++) { - q_no = lio->linfo.txpciq[j].s.q_no; - oct->instr_queue[q_no]->fill_threshold = - intr_coal->tx_max_coalesced_frames; + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + db_min = CN6XXX_DB_MIN; + db_max = CN6XXX_DB_MAX; + if ((intr_coal->tx_max_coalesced_frames >= db_min) && + (intr_coal->tx_max_coalesced_frames <= db_max)) { + for (j = 0; j < lio->linfo.num_txpciq; j++) { + q_no = lio->linfo.txpciq[j].s.q_no; + oct->instr_queue[q_no]->fill_threshold = + intr_coal->tx_max_coalesced_frames; + } + } else { + dev_err(&oct->pci_dev->dev, + "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", + intr_coal->tx_max_coalesced_frames, db_min, + db_max); + return -EINVAL; } - } else { - dev_err(&oct->pci_dev->dev, - "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", - intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN, - CN6XXX_DB_MAX); + break; + default: return -EINVAL; } - /* User requested adaptive-rx on */ - if (intr_coal->use_adaptive_rx_coalesce) { - ret = oct_cfg_adaptive_intr(lio, intr_coal, 1); - if (ret) - goto ret_intrmod; - } + oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; + oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 
1 : 0; + + ret = oct_cfg_adaptive_intr(lio, intr_coal); - /* User requested adaptive-rx off and rx coalesce */ - if ((intr_coal->rx_coalesce_usecs) && - (!intr_coal->use_adaptive_rx_coalesce)) { + if (!intr_coal->use_adaptive_rx_coalesce) { ret = oct_cfg_rx_intrtime(lio, intr_coal); if (ret) goto ret_intrmod; - } - /* User requested adaptive-rx off and rx coalesce */ - if ((intr_coal->rx_max_coalesced_frames) && - (!intr_coal->use_adaptive_rx_coalesce)) { ret = oct_cfg_rx_intrcnt(lio, intr_coal); if (ret) goto ret_intrmod; } - - /* User requested adaptive-rx off, so use default coalesce params */ - if ((!intr_coal->rx_max_coalesced_frames) && - (!intr_coal->use_adaptive_rx_coalesce) && - (!intr_coal->rx_coalesce_usecs)) { - dev_info(&oct->pci_dev->dev, - "Turning off adaptive-rx interrupt moderation\n"); - dev_info(&oct->pci_dev->dev, - "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n", - CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT); - ret = oct_cfg_rx_intrtime(lio, intr_coal); - if (ret) - goto ret_intrmod; - - ret = oct_cfg_rx_intrcnt(lio, intr_coal); + if (!intr_coal->use_adaptive_tx_coalesce) { + ret = oct_cfg_tx_intrcnt(lio, intr_coal); if (ret) goto ret_intrmod; } @@ -921,23 +1416,28 @@ static int lio_get_ts_info(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); info->so_timestamping = +#ifdef PTP_HARDWARE_TIMESTAMPING SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | +#endif SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_SOFTWARE; if (lio->ptp_clock) info->phc_index = ptp_clock_index(lio->ptp_clock); else info->phc_index = -1; +#ifdef PTP_HARDWARE_TIMESTAMPING info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); +#endif return 0; } @@ -962,12 +1462,14 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->duplex != DUPLEX_FULL))) return -EINVAL; - /* Ethtool Support is not provided for XAUI and RXAUI Interfaces + /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces * as they operate at fixed Speed and Duplex settings */ - if (linfo->link.s.interface == INTERFACE_MODE_XAUI || - linfo->link.s.interface == INTERFACE_MODE_RXAUI) { - dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n"); + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { + dev_info(&oct->pci_dev->dev, + "Autonegotiation, duplex and speed settings cannot be modified.\n"); return -EINVAL; } @@ -1181,6 +1683,23 @@ static void lio_get_regs(struct net_device *dev, } } +static u32 lio_get_priv_flags(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + return lio->oct_dev->priv_flags; +} + +static int lio_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct lio *lio = GET_LIO(netdev); + bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); + + lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, + intr_by_tx_bytes); + return 0; +} + static const struct ethtool_ops lio_ethtool_ops = { .get_settings = lio_get_settings, .get_link = ethtool_op_get_link, @@ -1202,6 +1721,8 @@ static const struct ethtool_ops 
lio_ethtool_ops = { .set_settings = lio_set_settings, .get_coalesce = lio_get_intr_coalesce, .set_coalesce = lio_set_intr_coalesce, + .get_priv_flags = lio_get_priv_flags, + .set_priv_flags = lio_set_priv_flags, .get_ts_info = lio_get_ts_info, }; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index d0ab97c15f4a..1a584ebde42c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -72,6 +72,9 @@ MODULE_PARM_DESC(console_bitmask, #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ + (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count) + static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); @@ -224,8 +227,8 @@ static void octeon_droq_bh(unsigned long pdev) (struct octeon_device_priv *)oct->priv; /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ - for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) { - if (!(oct->io_qmask.oq & (1UL << q_no))) + for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { + if (!(oct->io_qmask.oq & (1ULL << q_no))) continue; reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], MAX_PACKET_BUDGET); @@ -245,8 +248,8 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct) do { pending_pkts = 0; - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; pkt_cnt += octeon_droq_check_hw_for_pkts(oct, oct->droq[i]); @@ -365,7 +368,7 @@ static int wait_for_pending_requests(struct octeon_device *oct) [OCTEON_ORDERED_SC_LIST].pending_req_count); if (pcount) schedule_timeout_uninterruptible(HZ / 10); - else + else break; } @@ -396,10 +399,10 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) dev_err(&oct->pci_dev->dev, "There were pending requests\n"); /* Force all requests waiting to be fetched by OCTEON to complete. 
*/ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { struct octeon_instr_queue *iq; - if (!(oct->io_qmask.iq & (1UL << i))) + if (!(oct->io_qmask.iq & (1ULL << i))) continue; iq = oct->instr_queue[i]; @@ -409,7 +412,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) iq->octeon_read_index = iq->host_write_index; iq->stats.instr_processed += atomic_read(&iq->instr_pending); - lio_process_iq_request_list(oct, iq); + lio_process_iq_request_list(oct, iq, 0); spin_unlock_bh(&iq->lock); } } @@ -682,13 +685,24 @@ static inline void txqs_start(struct net_device *netdev) */ static inline void txqs_wake(struct net_device *netdev) { + struct lio *lio = GET_LIO(netdev); + if (netif_is_multiqueue(netdev)) { int i; - for (i = 0; i < netdev->num_tx_queues; i++) - if (__netif_subqueue_stopped(netdev, i)) + for (i = 0; i < netdev->num_tx_queues; i++) { + int qno = lio->linfo.txpciq[i % + (lio->linfo.num_txpciq)].s.q_no; + + if (__netif_subqueue_stopped(netdev, i)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, + tx_restart, 1); netif_wake_subqueue(netdev, i); + } + } } else { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); netif_wake_queue(netdev); } } @@ -763,6 +777,8 @@ static inline int check_txq_status(struct lio *lio) continue; if (__netif_subqueue_stopped(lio->netdev, q)) { wake_q(lio->netdev, q); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, + tx_restart, 1); ret_val++; } } @@ -770,6 +786,8 @@ static inline int check_txq_status(struct lio *lio) if (octnet_iq_is_full(lio->oct_dev, lio->txq)) return 0; wake_q(lio->netdev, lio->txq); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); ret_val = 1; } return ret_val; @@ -959,6 +977,42 @@ static inline void update_link_status(struct net_device *netdev, } } +/* Runs in interrupt context. */ +static void update_txq_status(struct octeon_device *oct, int iq_num) +{ + struct net_device *netdev; + struct lio *lio; + struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; + + /*octeon_update_iq_read_idx(oct, iq);*/ + + netdev = oct->props[iq->ifidx].netdev; + + /* This is needed because the first IQ does not have + * a netdev associated with it. 
+ */ + if (!netdev) + return; + + lio = GET_LIO(netdev); + if (netif_is_multiqueue(netdev)) { + if (__netif_subqueue_stopped(netdev, iq->q_index) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, iq_num))) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, + tx_restart, 1); + netif_wake_subqueue(netdev, iq->q_index); + } else { + if (!octnet_iq_is_full(oct, lio->txq)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, + lio->txq, + tx_restart, 1); + wake_q(netdev, lio->txq); + } + } + } +} + /** * \brief Droq packet processor sceduler * @param oct octeon device @@ -972,8 +1026,9 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) struct octeon_droq *droq; if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { - for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { - if (!(oct->droq_intr & (1 << oq_no))) + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); + oq_no++) { + if (!(oct->droq_intr & (1ULL << oq_no))) continue; droq = oct->droq[oq_no]; @@ -1084,6 +1139,9 @@ static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; } + oct_dev->rx_pause = 1; + oct_dev->tx_pause = 1; + dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); return 0; @@ -1149,19 +1207,13 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->flags & LIO_FLAG_MSI_ENABLED) pci_disable_msi(oct->pci_dev); - /* Soft reset the octeon device before exiting */ - oct->fn_list.soft_reset(oct); - - /* Disable the device, releasing the PCI INT */ - pci_disable_device(oct->pci_dev); - /* fallthrough */ case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/ mdelay(100); - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; octeon_delete_droq(oct, i); } @@ -1188,8 +1240,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_INSTR_QUEUE_INIT_DONE: - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; octeon_delete_instr_queue(oct, i); } @@ -1201,11 +1253,18 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_PCI_MAP_DONE: + + /* Soft reset the octeon device before exiting */ + oct->fn_list.soft_reset(oct); + octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); /* fallthrough */ case OCT_DEV_BEGIN_STATE: + /* Disable the device, releasing the PCI INT */ + pci_disable_device(oct->pci_dev); + /* Nothing to be done here either */ break; } /* end switch(oct->status) */ @@ -1245,6 +1304,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; struct lio *lio; + struct napi_struct *napi, *n; if (!netdev) { dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", @@ -1261,6 +1321,13 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) txqs_stop(netdev); + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + } + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -1288,6 +1355,10 @@ static int 
liquidio_stop_nic_module(struct octeon_device *oct) return 1; } + spin_lock_bh(&oct->cmd_resp_wqlock); + oct->cmd_resp_state = OCT_DRV_OFFLINE; + spin_unlock_bh(&oct->cmd_resp_wqlock); + for (i = 0; i < oct->ifcount; i++) { lio = GET_LIO(oct->props[i].netdev); for (j = 0; j < lio->linfo.num_rxpciq; j++) @@ -1336,6 +1407,7 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) { u32 dev_id, rev_id; int ret = 1; + char *s; pci_read_config_dword(oct->pci_dev, 0, &dev_id); pci_read_config_dword(oct->pci_dev, 8, &rev_id); @@ -1345,22 +1417,27 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) case OCTEON_CN68XX_PCIID: oct->chip_id = OCTEON_CN68XX; ret = lio_setup_cn68xx_octeon_device(oct); + s = "CN68XX"; break; case OCTEON_CN66XX_PCIID: oct->chip_id = OCTEON_CN66XX; ret = lio_setup_cn66xx_octeon_device(oct); + s = "CN66XX"; break; + default: + s = "?"; dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", dev_id); } if (!ret) - dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n", + dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s, OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct), - octeon_get_conf(oct)->card_name); + octeon_get_conf(oct)->card_name, + LIQUIDIO_VERSION); return ret; } @@ -1418,8 +1495,10 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) if (octnet_iq_is_full(lio->oct_dev, iq)) return 0; - if (__netif_subqueue_stopped(lio->netdev, q)) + if (__netif_subqueue_stopped(lio->netdev, q)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); wake_q(lio->netdev, q); + } return 1; } @@ -1733,6 +1812,7 @@ static int load_firmware(struct octeon_device *oct) if (ret) { dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.", fw_name); + release_firmware(fw); return ret; } @@ -1802,6 +1882,9 @@ static void if_cfg_callback(struct octeon_device *oct, CVM_CAST64(resp->status)); ACCESS_ONCE(ctx->cond) = 1; + snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", + resp->cfg_info.liquidio_firmware_version); + /* This barrier is required to be sure that the response has been * written fully before waking up the handler */ @@ -1848,6 +1931,7 @@ liquidio_push_packet(u32 octeon_id, struct sk_buff *skb = (struct sk_buff *)skbuff; struct skb_shared_hwtstamps *shhwtstamps; u64 ns; + u16 vtag = 0; struct net_device *netdev = (struct net_device *)arg; struct octeon_droq *droq = container_of(param, struct octeon_droq, napi); @@ -1924,6 +2008,16 @@ liquidio_push_packet(u32 octeon_id, else skb->ip_summed = CHECKSUM_NONE; + /* inbound VLAN tag */ + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (rh->r_dh.vlan != 0)) { + u16 vid = rh->r_dh.vlan; + u16 priority = rh->r_dh.priority; + + vtag = priority << 13 | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; if (packet_was_received) { @@ -1978,39 +2072,6 @@ static void liquidio_napi_drv_callback(void *arg) } /** - * \brief Main NAPI poll function - * @param droq octeon output queue - * @param budget maximum number of items to process - */ -static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget) -{ - int work_done; - struct lio *lio = GET_LIO(droq->napi.dev); - struct octeon_device *oct = lio->oct_dev; - - work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, - POLL_EVENT_PROCESS_PKTS, - budget); - if (work_done < 0) { - netif_info(lio, rx_err, lio->netdev, - "Receive work_done < 0, rxq:%d\n", droq->q_no); - goto octnet_napi_finish; - } - - 
if (work_done > budget) - dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n", - __func__, work_done, budget); - - return work_done; - -octnet_napi_finish: - napi_complete(&droq->napi); - octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR, - 0); - return 0; -} - -/** * \brief Entry point for NAPI polling * @param napi NAPI structure * @param budget maximum number of items to process @@ -2019,19 +2080,41 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) { struct octeon_droq *droq; int work_done; + int tx_done = 0, iq_no; + struct octeon_instr_queue *iq; + struct octeon_device *oct; droq = container_of(napi, struct octeon_droq, napi); + oct = droq->oct_dev; + iq_no = droq->q_no; + /* Handle Droq descriptors */ + work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, + POLL_EVENT_PROCESS_PKTS, + budget); - work_done = liquidio_napi_do_rx(droq, budget); + /* Flush the instruction queue */ + iq = oct->instr_queue[iq_no]; + if (iq) { + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, 1, budget); + /* Update iq read-index rather than waiting for next interrupt. + * Return back if tx_done is false. + */ + update_txq_status(oct, iq_no); + /*tx_done = (iq->flush_index == iq->octeon_read_index);*/ + } else { + dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", + __func__, iq_no); + } - if (work_done < budget) { + if ((work_done < budget) && (tx_done)) { napi_complete(napi); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; } - return work_done; + return (!tx_done) ? (budget) : (work_done); } /** @@ -2165,6 +2248,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev) &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); } +static inline void cleanup_tx_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); + destroy_workqueue(lio->txq_status_wq.wq); +} + /** * \brief Net device open for LiquidIO * @param netdev network device @@ -2175,17 +2266,22 @@ static int liquidio_open(struct net_device *netdev) struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; - list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) - napi_enable(napi); + if (oct->props[lio->ifidx].napi_enabled == 0) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_enable(napi); + + oct->props[lio->ifidx].napi_enabled = 1; + } oct_ptp_open(netdev); ifstate_set(lio, LIO_IFSTATE_RUNNING); + setup_tx_poll_fn(netdev); + start_txq(netdev); netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); - try_module_get(THIS_MODULE); /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -2205,39 +2301,35 @@ static int liquidio_open(struct net_device *netdev) */ static int liquidio_stop(struct net_device *netdev) { - struct napi_struct *napi, *n; struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); + ifstate_reset(lio, LIO_IFSTATE_RUNNING); + + netif_tx_disable(netdev); + /* Inform that netif carrier is down */ + netif_carrier_off(netdev); lio->intf_open = 0; lio->linfo.link.s.link_up = 0; lio->link_changes++; - netif_carrier_off(netdev); + /* Pause for a moment and wait for Octeon to flush out (to the wire) any + * egress packets that are in-flight. 
+ */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(100)); - /* tell Octeon to stop forwarding packets to host */ + /* Now it should be safe to tell Octeon that nic interface is down. */ send_rx_ctrl_cmd(lio, 0); - cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); - destroy_workqueue(lio->txq_status_wq.wq); + cleanup_tx_poll_fn(netdev); if (lio->ptp_clock) { ptp_clock_unregister(lio->ptp_clock); lio->ptp_clock = NULL; } - ifstate_reset(lio, LIO_IFSTATE_RUNNING); - - /* This is a hack that allows DHCP to continue working. */ - set_bit(__LINK_STATE_START, &lio->netdev->state); - - list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) - napi_disable(napi); - - txqs_stop(netdev); - dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); module_put(THIS_MODULE); @@ -2298,12 +2390,31 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) netdev->name); break; + case OCTNET_CMD_ENABLE_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n", + netdev->name); + break; + + case OCTNET_CMD_ADD_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n", + netdev->name, nctrl->ncmd.s.param1); + break; + + case OCTNET_CMD_DEL_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n", + netdev->name, nctrl->ncmd.s.param1); + break; + case OCTNET_CMD_SET_SETTINGS: dev_info(&oct->pci_dev->dev, "%s settings changed\n", netdev->name); break; + case OCTNET_CMD_SET_FLOW_CTL: + netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n"); + break; + default: dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__, nctrl->ncmd.s.cmd); @@ -2898,6 +3009,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (skb_shinfo(skb)->gso_size) { tx_info->s.gso_size = skb_shinfo(skb)->gso_size; tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; + stats->tx_gso++; + } + + /* HW insert VLAN tag */ + if (skb_vlan_tag_present(skb)) { + irh->priority = skb_vlan_tag_get(skb) >> 13; + irh->vlan = skb_vlan_tag_get(skb) & 0xfff; } xmit_more = skb->xmit_more; @@ -2916,7 +3034,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - stats->tx_done++; + if (skb_shinfo(skb)->gso_size) + stats->tx_done += skb_shinfo(skb)->gso_segs; + else + stats->tx_done++; stats->tx_tot_bytes += skb->len; return NETDEV_TX_OK; @@ -2948,6 +3069,61 @@ static void liquidio_tx_timeout(struct net_device *netdev) txqs_wake(netdev); } +static int liquidio_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), + u16 vid) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + ret); + } + + return ret; +} + +static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), + u16 vid) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 
+ + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", + ret); + } + return ret; +} + int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) { struct lio *lio = GET_LIO(netdev); @@ -3039,6 +3215,9 @@ static struct net_device_ops lionetdevops = { .ndo_set_mac_address = liquidio_set_mac, .ndo_set_rx_mode = liquidio_set_mcast_list, .ndo_tx_timeout = liquidio_tx_timeout, + + .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, .ndo_do_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, @@ -3300,11 +3479,18 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) | NETIF_F_LRO; netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); - netdev->features = (lio->dev_capability & ~NETIF_F_LRO); - netdev->vlan_features = lio->dev_capability; + /* Add any unchangeable hw features */ + lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->features = (lio->dev_capability & ~NETIF_F_LRO); netdev->hw_features = lio->dev_capability; + /*HW_VLAN_RX and HW_VLAN_FILTER are always on*/ + netdev->hw_features = netdev->hw_features & + ~NETIF_F_HW_VLAN_CTAG_RX; /* Point to the properties for octeon device to which this * interface belongs. @@ -3349,14 +3535,17 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) /* Register ethtool support */ liquidio_set_ethtool_ops(netdev); + octeon_dev->priv_flags = 0x0; if (netdev->features & NETIF_F_LRO) liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0); + if ((debug != -1) && (debug & NETIF_MSG_HW)) - liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE, - 0); + liquidio_set_feature(netdev, + OCTNET_CMD_VERBOSE_ENABLE, 0); /* Register the network device with the OS */ if (register_netdev(netdev)) { @@ -3429,15 +3618,19 @@ static int liquidio_init_nic_module(struct octeon_device *oct) /* Initialize interrupt moderation params */ intrmod_cfg = &((struct octeon_device *)oct)->intrmod; - intrmod_cfg->intrmod_enable = 1; - intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; - intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; - intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; - intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER; - intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER; - intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER; - intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER; - + intrmod_cfg->rx_enable = 1; + intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; + intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; + intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; + intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER; + intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER; + intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER; + intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER; + intrmod_cfg->tx_enable = 1; + intrmod_cfg->tx_maxcnt_trigger = 
LIO_INTRMOD_TXMAXCNT_TRIGGER; + intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER; + intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); + intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); return retval; @@ -3500,6 +3693,7 @@ static void nic_starter(struct work_struct *work) static int octeon_device_init(struct octeon_device *octeon_dev) { int j, ret; + char bootcmd[] = "\n"; struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)octeon_dev->priv; atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); @@ -3611,14 +3805,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); - if (ddr_timeout == 0) { - dev_info(&octeon_dev->pci_dev->dev, - "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); - } + if (ddr_timeout == 0) + dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); /* Wait for the octeon to initialize DDR after the soft-reset. */ + while (ddr_timeout == 0) { + set_current_state(TASK_INTERRUPTIBLE); + if (schedule_timeout(HZ / 10)) { + /* user probably pressed Control-C */ + return 1; + } + } ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); if (ret) { dev_err(&octeon_dev->pci_dev->dev, @@ -3632,6 +3831,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev) return 1; } + /* Divert uboot to take commands from host instead. */ + ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); + dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); ret = octeon_init_consoles(octeon_dev); if (ret) { diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 2179691efebc..5aa01f427d4a 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -30,11 +30,10 @@ #include "octeon_config.h" -#define LIQUIDIO_VERSION "1.1.9" -#define LIQUIDIO_MAJOR_VERSION 1 -#define LIQUIDIO_MINOR_VERSION 1 -#define LIQUIDIO_MICRO_VERSION 9 - +#define LIQUIDIO_BASE_VERSION "1.4" +#define LIQUIDIO_MICRO_VERSION ".1" +#define LIQUIDIO_PACKAGE "" +#define LIQUIDIO_VERSION "1.4.1" #define CONTROL_IQ 0 /** Tag types used by Octeon cores in its work. 
*/ enum octeon_tag_type { @@ -214,6 +213,10 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VERBOSE_ENABLE 0x14 #define OCTNET_CMD_VERBOSE_DISABLE 0x15 +#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16 +#define OCTNET_CMD_ADD_VLAN_FILTER 0x17 +#define OCTNET_CMD_DEL_VLAN_FILTER 0x18 + /* RX(packets coming from wire) Checksum verification flags */ /* TCP/UDP csum */ #define CNNIC_L4SUM_VERIFIED 0x1 @@ -482,15 +485,15 @@ struct octeon_instr_irh { u64 opcode:4; u64 rflag:1; u64 subcode:7; - u64 len:3; - u64 rid:13; - u64 reserved:4; + u64 vlan:12; + u64 priority:3; + u64 reserved:5; u64 ossp:32; /* opcode/subcode specific parameters */ #else u64 ossp:32; /* opcode/subcode specific parameters */ - u64 reserved:4; - u64 rid:13; - u64 len:3; + u64 reserved:5; + u64 priority:3; + u64 vlan:12; u64 subcode:7; u64 rflag:1; u64 opcode:4; @@ -517,28 +520,27 @@ union octeon_rh { struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ - u64 reserved:4; - u64 ossp:32; /** opcode/subcode specific parameters */ + u64 len:3; /** additional 64-bit words */ + u64 reserved:17; + u64 ossp:32; /** opcode/subcode specific parameters */ } r; struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ - u64 extra:24; - u64 link:8; + u64 len:3; /** additional 64-bit words */ + u64 extra:28; + u64 vlan:12; + u64 priority:3; u64 csum_verified:3; /** checksum verified. */ u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */ } r_dh; struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ + u64 len:3; /** additional 64-bit words */ + u64 reserved:11; u64 num_gmx_ports:8; - u64 max_nic_ports:8; + u64 max_nic_ports:10; u64 app_cap_flags:4; u64 app_mode:16; } r_core_drv_init; @@ -554,8 +556,7 @@ union octeon_rh { u64 u64; struct { u64 ossp:32; /** opcode/subcode specific parameters */ - u64 reserved:4; - u64 rid:13; /** req id in response to pkt sent by host */ + u64 reserved:17; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; @@ -563,9 +564,9 @@ union octeon_rh { struct { u64 has_hwtstamp:1; /** 1 = has hwtstamp */ u64 csum_verified:3; /** checksum verified. */ - u64 link:8; - u64 extra:24; - u64 rid:13; /** req id in response to pkt sent by host */ + u64 priority:3; + u64 vlan:12; + u64 extra:28; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; @@ -573,9 +574,9 @@ union octeon_rh { struct { u64 app_mode:16; u64 app_cap_flags:4; - u64 max_nic_ports:8; + u64 max_nic_ports:10; u64 num_gmx_ports:8; - u64 rid:13; + u64 reserved:11; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; @@ -627,13 +628,13 @@ union oct_link_status { u64 speed:16; u64 link_up:1; u64 autoneg:1; - u64 interface:4; + u64 if_mode:5; u64 pause:1; - u64 reserved:17; + u64 reserved:16; #else - u64 reserved:17; + u64 reserved:16; u64 pause:1; - u64 interface:4; + u64 if_mode:5; u64 autoneg:1; u64 link_up:1; u64 speed:16; @@ -710,6 +711,7 @@ struct liquidio_if_cfg_info { u64 iqmask; /** mask for IQs enabled for the port */ u64 oqmask; /** mask for OQs enabled for the port */ struct oct_link_info linfo; /** initial link information */ + char liquidio_firmware_version[32]; }; /** Stats for each NIC port in RX direction. 
*/ @@ -734,10 +736,16 @@ struct nic_rx_stats { u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; + + /* LRO */ u64 fw_lro_pkts; /* Number of packets that are LROed */ u64 fw_lro_octs; /* Number of octets that are LROed */ u64 fw_total_lro; /* Number of LRO packets formed */ u64 fw_lro_aborts; /* Number of times lRO of packet aborted */ + u64 fw_lro_aborts_port; + u64 fw_lro_aborts_seq; + u64 fw_lro_aborts_tsval; + u64 fw_lro_aborts_timer; /* intrmod: packet forward rate */ u64 fwd_rate; }; @@ -761,9 +769,13 @@ struct nic_tx_stats { /* firmware stats */ u64 fw_total_sent; u64 fw_total_fwd; + u64 fw_total_fwd_bytes; u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; + u64 fw_err_tso; + u64 fw_tso; /* number of tso requests */ + u64 fw_tso_fwd; /* number of packets segmented in tso */ }; struct oct_link_stats { @@ -794,23 +806,44 @@ struct oct_mdio_cmd { #define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats)) +/* intrmod: max. packet rate threshold */ +#define LIO_INTRMOD_MAXPKT_RATETHR 196608 +/* intrmod: min. packet rate threshold */ +#define LIO_INTRMOD_MINPKT_RATETHR 9216 +/* intrmod: max. packets to trigger interrupt */ +#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384 +/* intrmod: min. packets to trigger interrupt */ +#define LIO_INTRMOD_RXMINCNT_TRIGGER 1 +/* intrmod: max. time to trigger interrupt */ +#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128 +/* 66xx:intrmod: min. time to trigger interrupt + * (value of 1 is optimum for TCP_RR) + */ +#define LIO_INTRMOD_RXMINTMR_TRIGGER 1 + +/* intrmod: max. packets to trigger interrupt */ +#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64 +/* intrmod: min. packets to trigger interrupt */ +#define LIO_INTRMOD_TXMINCNT_TRIGGER 0 + +/* intrmod: poll interval in seconds */ #define LIO_INTRMOD_CHECK_INTERVAL 1 -#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */ -#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */ -#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */ -#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */ -#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */ -#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */ struct oct_intrmod_cfg { - u64 intrmod_enable; - u64 intrmod_check_intrvl; - u64 intrmod_maxpkt_ratethr; - u64 intrmod_minpkt_ratethr; - u64 intrmod_maxcnt_trigger; - u64 intrmod_maxtmr_trigger; - u64 intrmod_mincnt_trigger; - u64 intrmod_mintmr_trigger; + u64 rx_enable; + u64 tx_enable; + u64 check_intrvl; + u64 maxpkt_ratethr; + u64 minpkt_ratethr; + u64 rx_maxcnt_trigger; + u64 rx_mincnt_trigger; + u64 rx_maxtmr_trigger; + u64 rx_mintmr_trigger; + u64 tx_mincnt_trigger; + u64 tx_maxcnt_trigger; + u64 rx_frames; + u64 tx_frames; + u64 rx_usecs; }; #define BASE_QUEUE_NOT_REQUESTED 65535 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index 62a8dd5cd3dc..4b8c948400be 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -37,7 +37,7 @@ /* Maximum octeon devices defined as MAX_OCTEON_NICIF to support * multiple(<= MAX_OCTEON_NICIF) Miniports */ -#define MAX_OCTEON_NICIF 32 +#define MAX_OCTEON_NICIF 128 #define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF #define MAX_OCTEON_LINKS MAX_OCTEON_NICIF #define MAX_OCTEON_MULTICAST_ADDR 32 @@ -135,7 +135,7 @@ #define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp) /* Max IOQs per OCTEON Link */ -#define MAX_IOQS_PER_NICIF 32 +#define 
MAX_IOQS_PER_NICIF 64 enum lio_card_type { LIO_210SV = 0, /* Two port, 66xx */ @@ -416,9 +416,11 @@ struct octeon_config { #define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS) /* Maximum number of Octeon Instruction (command) queues */ -#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES +#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES +/* Maximum number of Octeon Output queues */ +#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES -/* Maximum number of Octeon Instruction (command) queues */ -#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES +#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES +#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES #endif /* __OCTEON_CONFIG_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 7b44b5c50e63..337220721632 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -549,17 +549,19 @@ static char *get_oct_app_string(u32 app_mode) return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START]; } +u8 fbuf[4 * 1024 * 1024]; + int octeon_download_firmware(struct octeon_device *oct, const u8 *data, size_t size) { int ret = 0; - u8 *p; - u8 *buffer; + u8 *p = fbuf; u32 crc32_result; u64 load_addr; u32 image_len; struct octeon_firmware_file_header *h; - u32 i; + u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION); + char *base; if (size < sizeof(struct octeon_firmware_file_header)) { dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n", @@ -575,19 +577,26 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, return -EINVAL; } - crc32_result = - crc32(~0, data, - sizeof(struct octeon_firmware_file_header) - - sizeof(u32)) ^ ~0U; + crc32_result = crc32((unsigned int)~0, data, + sizeof(struct octeon_firmware_file_header) - + sizeof(u32)) ^ ~0U; if (crc32_result != be32_to_cpu(h->crc32)) { dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n", crc32_result, be32_to_cpu(h->crc32)); return -EINVAL; } - if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) { - dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n", - LIQUIDIO_VERSION, h->version); + if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) { + dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n", + LIQUIDIO_PACKAGE, h->version); + return -EINVAL; + } + + base = h->version + strlen(LIQUIDIO_PACKAGE); + ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len); + if (ret) { + dev_err(&oct->pci_dev->dev, "Unmatched firmware version. 
Expected %s.x, got %s.\n", + LIQUIDIO_BASE_VERSION, base); return -EINVAL; } @@ -601,56 +610,56 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s", h->version); - buffer = kmemdup(data, size, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - p = buffer + sizeof(struct octeon_firmware_file_header); + data += sizeof(struct octeon_firmware_file_header); + dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__, + be32_to_cpu(h->num_images)); /* load all images */ for (i = 0; i < be32_to_cpu(h->num_images); i++) { load_addr = be64_to_cpu(h->desc[i].addr); image_len = be32_to_cpu(h->desc[i].len); - /* validate the image */ - crc32_result = crc32(~0, p, image_len) ^ ~0U; - if (crc32_result != be32_to_cpu(h->desc[i].crc32)) { - dev_err(&oct->pci_dev->dev, - "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n", - i, crc32_result, - be32_to_cpu(h->desc[i].crc32)); - ret = -EINVAL; - goto done_downloading; - } + dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n", + image_len, load_addr); - /* download the image */ - octeon_pci_write_core_mem(oct, load_addr, p, image_len); + /* Write in 4MB chunks*/ + rem = image_len; - p += image_len; - dev_dbg(&oct->pci_dev->dev, - "Downloaded image %d (%d bytes) to address 0x%016llx\n", - i, image_len, load_addr); + while (rem) { + if (rem < (4 * 1024 * 1024)) + size = rem; + else + size = 4 * 1024 * 1024; + + memcpy(p, data, size); + + /* download the image */ + octeon_pci_write_core_mem(oct, load_addr, p, (u32)size); + + data += size; + rem -= (u32)size; + load_addr += size; + } } + dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n", + h->bootcmd); /* Invoke the bootcmd */ ret = octeon_console_send_cmd(oct, h->bootcmd, 50); -done_downloading: - kfree(buffer); - - return ret; + return 0; } void octeon_free_device_mem(struct octeon_device *oct) { u32 i; - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { /* could check mask as well */ vfree(oct->droq[i]); } - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { /* could check mask as well */ vfree(oct->instr_queue[i]); } @@ -734,7 +743,7 @@ struct octeon_device *octeon_allocate_device(u32 pci_id, octeon_device[oct_idx] = oct; oct->octeon_id = oct_idx; - snprintf((oct->device_name), sizeof(oct->device_name), + snprintf(oct->device_name, sizeof(oct->device_name), "LiquidIO%d", (oct->octeon_id)); return oct; @@ -1157,8 +1166,8 @@ core_drv_init_err: int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) { - if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) && - (oct->io_qmask.iq & (1UL << q_no))) + if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) && + (oct->io_qmask.iq & (1ULL << q_no))) return oct->instr_queue[q_no]->max_count; return -1; @@ -1166,8 +1175,8 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no) { - if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) && - (oct->io_qmask.oq & (1UL << q_no))) + if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) && + (oct->io_qmask.oq & (1ULL << q_no))) return oct->droq[q_no]->max_count; return -1; } @@ -1258,10 +1267,10 @@ void lio_pci_writeq(struct octeon_device *oct, int octeon_mem_access_ok(struct octeon_device *oct) { u64 access_okay = 0; + u64 lmc0_reset_ctl; /* Check to make sure a DDR interface is enabled */ - u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); - + 
lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK); return access_okay ? 0 : 1; @@ -1275,9 +1284,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout) if (!timeout) return ret; - while (*timeout == 0) - schedule_timeout_uninterruptible(HZ / 10); - for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout)); ms += HZ / 10) { ret = octeon_mem_access_ok(oct); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 0950b94f8805..b4e566dea008 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -152,9 +152,9 @@ struct octeon_mmio { #define MAX_OCTEON_MAPS 32 struct octeon_io_enable { - u32 iq; - u32 oq; - u32 iq64B; + u64 iq; + u64 oq; + u64 iq64B; }; struct octeon_reg_list { @@ -204,8 +204,7 @@ struct octeon_fn_list { void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int); void (*bar1_idx_write)(struct octeon_device *, u32, u32); u32 (*bar1_idx_read)(struct octeon_device *, u32); - u32 (*update_iq_read_idx)(struct octeon_device *, - struct octeon_instr_queue *); + u32 (*update_iq_read_idx)(struct octeon_instr_queue *); void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32); void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32); @@ -267,6 +266,7 @@ struct octdev_props { /* Each interface in the Octeon device has a network * device pointer (used for OS specific calls). */ + int napi_enabled; int gmxport; struct net_device *netdev; }; @@ -325,7 +325,8 @@ struct octeon_device { struct octeon_sc_buffer_pool sc_buf_pool; /** The input instruction queues */ - struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES]; + struct octeon_instr_queue *instr_queue + [MAX_POSSIBLE_OCTEON_INSTR_QUEUES]; /** The doubly-linked list of instruction response */ struct octeon_response_list response_list[MAX_RESPONSE_LISTS]; @@ -333,7 +334,7 @@ struct octeon_device { u32 num_oqs; /** The DROQ output queues */ - struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES]; + struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; struct octeon_io_enable io_qmask; @@ -382,15 +383,29 @@ struct octeon_device { struct cavium_wq dma_comp_wq; - struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES]; + /** Lock for dma response list */ + spinlock_t cmd_resp_wqlock; + u32 cmd_resp_state; + + struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES]; struct cavium_wk nic_poll_work; struct cavium_wk console_poll_work[MAX_OCTEON_MAPS]; void *priv; + + int rx_pause; + int tx_pause; + + struct oct_link_stats link_stats; /*statistics from firmware*/ + + /* private flags to control driver-specific features through ethtool */ + u32 priv_flags; }; +#define OCT_DRV_ONLINE 1 +#define OCT_DRV_OFFLINE 2 #define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \ (oct->chip_id == OCTEON_CN68XX)) #define CHIP_FIELD(oct, TYPE, field) \ @@ -647,4 +662,17 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type); */ struct octeon_config *octeon_get_conf(struct octeon_device *oct); +/* LiquidIO driver private flags */ +enum { + OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */ +}; + +static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag, + u32 val) +{ + if (val) + octdev->priv_flags |= (0x1 << flag); + else + octdev->priv_flags &= ~(0x1 << flag); +} #endif diff --git 
a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 59a529353f6d..d9bb2f7e0836 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -337,7 +337,7 @@ int octeon_init_droq(struct octeon_device *oct, /* For 56xx Pass1, this function won't be called, so no checks. */ oct->fn_list.setup_oq_regs(oct, q_no); - oct->io_qmask.oq |= (1 << q_no); + oct->io_qmask.oq |= (1ULL << q_no); return 0; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 513f8a068179..caa2b4f30717 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -65,6 +65,10 @@ struct oct_iq_stats { u64 tx_iq_busy;/**< Numof times this iq was found to be full. */ u64 tx_dropped;/**< Numof pkts dropped dueto xmitpath errors. */ u64 tx_tot_bytes;/**< Total count of bytes sento to network. */ + u64 tx_gso; /* count of tso */ + u64 tx_dmamap_fail; + u64 tx_restart; + /*u64 tx_timeout_count;*/ }; #define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats)) @@ -80,6 +84,12 @@ struct octeon_instr_queue { /** A spinlock to protect access to the input ring. */ spinlock_t lock; + /** A spinlock to protect while posting on the ring. */ + spinlock_t post_lock; + + /** A spinlock to protect access to the input ring.*/ + spinlock_t iq_flush_running_lock; + /** Flag that indicates if the queue uses 64 byte commands. */ u32 iqcmd_64B:1; @@ -244,7 +254,7 @@ union octeon_instr_64B { /** The size of each buffer in soft command buffer pool */ -#define SOFT_COMMAND_BUFFER_SIZE 1024 +#define SOFT_COMMAND_BUFFER_SIZE 1536 struct octeon_soft_command { /** Soft command buffer info. */ @@ -282,7 +292,7 @@ struct octeon_soft_command { /** Maximum number of buffers to allocate into soft command buffer pool */ -#define MAX_SOFT_COMMAND_BUFFERS 16 +#define MAX_SOFT_COMMAND_BUFFERS 256 /** Head of a soft command buffer pool. */ @@ -339,7 +349,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype, int lio_process_iq_request_list(struct octeon_device *oct, - struct octeon_instr_queue *iq); + struct octeon_instr_queue *iq, u32 napi_budget); int octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db, void *cmd, void *buf, @@ -357,5 +367,7 @@ int octeon_send_soft_command(struct octeon_device *oct, int octeon_setup_iq(struct octeon_device *oct, int ifidx, int q_index, union oct_txpciq iq_no, u32 num_descs, void *app_ctx); - +int +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 pending_thresh, u32 napi_budget); #endif /* __OCTEON_IQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 9c14484bfca0..b481edc56c6e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -30,6 +30,17 @@ #include <linux/dma-mapping.h> #include <linux/ptp_clock_kernel.h> +struct oct_nic_stats_resp { + u64 rh; + struct oct_link_stats stats; + u64 status; +}; + +struct oct_nic_stats_ctrl { + struct completion complete; + struct net_device *netdev; +}; + /** LiquidIO per-interface network private data */ struct lio { /** State of the interface. Rx/Tx happens only in the RUNNING state. 
*/ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index 7843b8a05dcf..36f1970a860e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -171,20 +171,36 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct, int retval; struct octeon_soft_command *sc = NULL; + spin_lock_bh(&oct->cmd_resp_wqlock); + /* Allow only rx ctrl command to stop traffic on the chip + * during offline operations + */ + if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) && + (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) { + spin_unlock_bh(&oct->cmd_resp_wqlock); + dev_err(&oct->pci_dev->dev, + "%s cmd:%d not processed since driver offline\n", + __func__, nctrl->ncmd.s.cmd); + return -1; + } + sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl); if (!sc) { dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n", __func__); + spin_unlock_bh(&oct->cmd_resp_wqlock); return -1; } retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { octeon_free_soft_command(oct, sc); - dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n", - __func__, retval); + dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n", + __func__, nctrl->ncmd.s.cmd, retval); + spin_unlock_bh(&oct->cmd_resp_wqlock); return -1; } + spin_unlock_bh(&oct->cmd_resp_wqlock); return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 8649677b2411..7eafa75ac095 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -51,7 +51,7 @@ struct iq_post_status { }; static void check_db_timeout(struct work_struct *work); -static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no); +static void __check_db_timeout(struct octeon_device *oct, u64 iq_no); static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *); @@ -149,8 +149,11 @@ int octeon_init_instr_queue(struct octeon_device *oct, /* Initialize the spinlock for this instruction queue */ spin_lock_init(&iq->lock); + spin_lock_init(&iq->post_lock); - oct->io_qmask.iq |= (1 << iq_no); + spin_lock_init(&iq->iq_flush_running_lock); + + oct->io_qmask.iq |= (1ULL << iq_no); /* Set the 32B/64B mode for each input queue */ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); @@ -253,8 +256,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct) instr_cnt = 0; /*for (i = 0; i < oct->num_iqs; i++) {*/ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; pending = atomic_read(&oct-> @@ -391,13 +394,13 @@ __add_to_request_list(struct octeon_instr_queue *iq, int lio_process_iq_request_list(struct octeon_device *oct, - struct octeon_instr_queue *iq) + struct octeon_instr_queue *iq, u32 napi_budget) { int reqtype; void *buf; u32 old = iq->flush_index; u32 inst_count = 0; - unsigned pkts_compl = 0, bytes_compl = 0; + unsigned int pkts_compl = 0, bytes_compl = 0; struct octeon_soft_command *sc; struct octeon_instr_irh *irh; @@ -457,6 +460,9 @@ lio_process_iq_request_list(struct octeon_device *oct, skip_this: inst_count++; INCR_INDEX_BY1(old, iq->max_count); + + if ((napi_budget) && (inst_count >= napi_budget)) + break; } if (bytes_compl) octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl, @@ -466,38 
+472,63 @@ lio_process_iq_request_list(struct octeon_device *oct, return inst_count; } -static inline void -update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq) +/* Can only be called from process context */ +int +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 pending_thresh, u32 napi_budget) { u32 inst_processed = 0; + u32 tot_inst_processed = 0; + int tx_done = 1; - /* Calculate how many commands Octeon has read and move the read index - * accordingly. - */ - iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq); + if (!spin_trylock(&iq->iq_flush_running_lock)) + return tx_done; - /* Move the NORESPONSE requests to the per-device completion list. */ - if (iq->flush_index != iq->octeon_read_index) - inst_processed = lio_process_iq_request_list(oct, iq); + spin_lock_bh(&iq->lock); - if (inst_processed) { - atomic_sub(inst_processed, &iq->instr_pending); - iq->stats.instr_processed += inst_processed; - } -} + iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); -static void -octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh) -{ if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { - spin_lock_bh(&iq->lock); - update_iq_indices(oct, iq); - spin_unlock_bh(&iq->lock); + do { + /* Process any outstanding IQ packets. */ + if (iq->flush_index == iq->octeon_read_index) + break; + + if (napi_budget) + inst_processed = lio_process_iq_request_list + (oct, iq, + napi_budget - tot_inst_processed); + else + inst_processed = + lio_process_iq_request_list(oct, iq, 0); + + if (inst_processed) { + atomic_sub(inst_processed, &iq->instr_pending); + iq->stats.instr_processed += inst_processed; + } + + tot_inst_processed += inst_processed; + inst_processed = 0; + + } while (tot_inst_processed < napi_budget); + + if (napi_budget && (tot_inst_processed >= napi_budget)) + tx_done = 0; } + + iq->last_db_time = jiffies; + + spin_unlock_bh(&iq->lock); + + spin_unlock(&iq->iq_flush_running_lock); + + return tx_done; } -static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no) +/* Process instruction queue after timeout. + * This routine gets called from a workqueue or when removing the module. + */ +static void __check_db_timeout(struct octeon_device *oct, u64 iq_no) { struct octeon_instr_queue *iq; u64 next_time; @@ -508,24 +539,17 @@ static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no) if (!iq) return; + /* return immediately, if no work pending */ + if (!atomic_read(&iq->instr_pending)) + return; /* If jiffies - last_db_time < db_timeout do nothing */ next_time = iq->last_db_time + iq->db_timeout; if (!time_after(jiffies, (unsigned long)next_time)) return; iq->last_db_time = jiffies; - /* Get the lock and prevent tasklets. This routine gets called from - * the poll thread. Instructions can now be posted in tasklet context - */ - spin_lock_bh(&iq->lock); - if (iq->fill_cnt != 0) - ring_doorbell(oct, iq); - - spin_unlock_bh(&iq->lock); - /* Flush the instruction queue */ - if (iq->do_auto_flush) - octeon_flush_iq(oct, iq, 1); + octeon_flush_iq(oct, iq, 1, 0); } /* Called by the Poll thread at regular intervals to check the instruction @@ -550,7 +574,10 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, struct iq_post_status st; struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; - spin_lock_bh(&iq->lock); + /* Get the lock and prevent other tasks and tx interrupt handler from + * running. 
+ */ + spin_lock_bh(&iq->post_lock); st = __post_command2(oct, iq, force_db, cmd); @@ -566,10 +593,13 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); } - spin_unlock_bh(&iq->lock); + spin_unlock_bh(&iq->post_lock); - if (iq->do_auto_flush) - octeon_flush_iq(oct, iq, 2); + /* This is only done here to expedite packets being flushed + * for cases where there are no IQ completion interrupts. + */ + /*if (iq->do_auto_flush)*/ + /* octeon_flush_iq(oct, iq, 2, 0);*/ return st.status; } diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index e2e9103e6ebd..c93210f99dda 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -54,6 +54,7 @@ int octeon_setup_response_list(struct octeon_device *oct) spin_lock_init(&oct->response_list[i].lock); atomic_set(&oct->response_list[i].pending_req_count, 0); } + spin_lock_init(&oct->cmd_resp_wqlock); oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0); if (!oct->dma_comp_wq.wq) { @@ -64,6 +65,7 @@ int octeon_setup_response_list(struct octeon_device *oct) cwq = &oct->dma_comp_wq; INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion); cwq->wk.ctxptr = oct; + oct->cmd_resp_state = OCT_DRV_ONLINE; queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 734dd776c22f..109bc630408b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -353,6 +353,10 @@ struct hash_mac_addr { u8 addr[ETH_ALEN]; }; +struct mbox_list { + struct list_head list; +}; + /* * Per-"adapter" (Virtual Function) information. */ @@ -387,6 +391,10 @@ struct adapter { /* various locks */ spinlock_t stats_lock; + /* lock for mailbox cmd list */ + spinlock_t mbox_lock; + struct mbox_list mlist; + /* support for mailbox command/reply logging */ #define T4VF_OS_LOG_MBOX_CMDS 256 struct mbox_cmd_log *mbox_log; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 8d9b2cb74aa2..9f5526478d2f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2774,6 +2774,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Initialize SMP data synchronization resources. */ spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->mbox_lock); + INIT_LIST_HEAD(&adapter->mlist.list); /* * Map our I/O registers in BAR0. diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 955ff7c61f1b..61bfe86da86d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -139,6 +139,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi)); __be64 cmd_rpl[MBOX_LEN / 8]; + struct mbox_list entry; /* In T6, mailbox size is changed to 128 bytes to avoid * invalidating the entire prefetch buffer. 
@@ -156,6 +157,51 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) return -EINVAL; + /* Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... + */ + spin_lock(&adapter->mbox_lock); + list_add_tail(&entry.list, &adapter->mlist.list); + spin_unlock(&adapter->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rearely + * contend on access to the mailbox ... + */ + if (i > FW_CMD_MAX_TIMEOUT) { + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); + ret = -EBUSY; + t4vf_record_mbox(adapter, cmd, size, access, ret); + return ret; + } + + /* If we're at the head, break out and start the mailbox + * protocol. + */ + if (list_first_entry(&adapter->mlist.list, struct mbox_list, + list) == &entry) + break; + + /* Delay for a bit before checking again ... */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + mdelay(ms); + } + } + /* * Loop trying to get ownership of the mailbox. Return an error * if we can't gain ownership. @@ -164,6 +210,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); if (v != MBOX_OWNER_DRV) { + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; t4vf_record_mbox(adapter, cmd, size, access, ret); return ret; @@ -248,6 +297,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, if (cmd_op != FW_VI_STATS_CMD) t4vf_record_mbox(adapter, cmd_rpl, size, access, execute); + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); return -FW_CMD_RETVAL_G(v); } } @@ -255,6 +307,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, /* We timed out. Return the error ... */ ret = -ETIMEDOUT; t4vf_record_mbox(adapter, cmd, size, access, ret); + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); return ret; } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index e1e6c40a5f8b..4555e041ef69 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -443,6 +443,7 @@ struct be_resources { u16 max_iface_count; u16 max_mcc_count; u16 max_evt_qs; + u16 max_nic_evt_qs; /* NIC's share of evt qs */ u32 if_cap_flags; u32 vf_if_cap_flags; /* VF if capability flags */ u32 flags; @@ -525,7 +526,8 @@ struct be_adapter { spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_cq_lock; - u16 cfg_num_qs; /* configured via set-channels */ + u16 cfg_num_rx_irqs; /* configured via set-channels */ + u16 cfg_num_tx_irqs; /* configured via set-channels */ u16 num_evt_qs; u16 num_msix_vec; struct be_eq_obj eq_obj[MAX_EVT_QS]; @@ -644,18 +646,42 @@ struct be_adapter { #define be_max_txqs(adapter) (adapter->res.max_tx_qs) #define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs) #define be_max_rxqs(adapter) (adapter->res.max_rx_qs) -#define be_max_eqs(adapter) (adapter->res.max_evt_qs) +/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */ +#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs) +/* Max number of EQs available only for NIC */ +#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs) #define be_if_cap_flags(adapter) (adapter->res.if_cap_flags) #define be_max_pf_pool_rss_tables(adapter) \ (adapter->pool_res.max_rss_tables) +/* Max irqs available for NIC */ +#define be_max_irqs(adapter) \ + (min_t(u16, be_max_nic_eqs(adapter), num_online_cpus())) -static inline u16 be_max_qs(struct be_adapter *adapter) +/* Max irqs *needed* for RX queues */ +static inline u16 be_max_rx_irqs(struct be_adapter *adapter) { - /* If no RSS, need atleast the one def RXQ */ + /* If no RSS, need atleast one irq for def-RXQ */ u16 num = max_t(u16, be_max_rss(adapter), 1); - num = min(num, be_max_eqs(adapter)); - return min_t(u16, num, num_online_cpus()); + return min_t(u16, num, be_max_irqs(adapter)); +} + +/* Max irqs *needed* for TX queues */ +static inline u16 be_max_tx_irqs(struct be_adapter *adapter) +{ + return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter)); +} + +/* Max irqs *needed* for combined queues */ +static inline u16 be_max_qp_irqs(struct be_adapter *adapter) +{ + return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter)); +} + +/* Max irqs *needed* for RX and TX queues together */ +static inline u16 be_max_any_irqs(struct be_adapter *adapter) +{ + return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter)); } /* Is BE in pvid_tagging mode */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 29aeb91cba49..2cc11756859f 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -87,6 +87,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = { CMD_SUBSYSTEM_LOWLEVEL, BE_PRIV_DEVCFG | BE_PRIV_DEVSEC }, + { + OPCODE_COMMON_SET_HSW_CONFIG, + CMD_SUBSYSTEM_COMMON, + BE_PRIV_DEVCFG | BE_PRIV_VHADM + }, }; static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) @@ -3850,6 +3855,10 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, void *ctxt; int status; + if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG, + CMD_SUBSYSTEM_COMMON)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3871,7 +3880,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); } - if (!BEx_chip(adapter) && hsw_mode) { + if (hsw_mode) { AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, adapter->hba_port_num); AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index cb96ddd90b6e..0d6be224a787 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index c569cd703c80..50e7be5da50c 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -1196,9 +1196,17 @@ static void be_get_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct be_adapter *adapter = netdev_priv(netdev); + u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1); - ch->combined_count = adapter->num_evt_qs; - ch->max_combined = be_max_qs(adapter); + /* num_tx_qs is always same as the number of irqs used for TX */ + ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs); + ch->rx_count = num_rx_irqs - ch->combined_count; + ch->tx_count = adapter->num_tx_qs - ch->combined_count; + + ch->max_combined = be_max_qp_irqs(adapter); + /* The user must create atleast one combined channel */ + ch->max_rx = be_max_rx_irqs(adapter) - 1; + ch->max_tx = be_max_tx_irqs(adapter) - 1; } static int be_set_channels(struct net_device *netdev, @@ -1207,11 +1215,22 @@ static int be_set_channels(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); int status; - if (ch->rx_count || ch->tx_count || ch->other_count || - !ch->combined_count || ch->combined_count > be_max_qs(adapter)) + /* we support either only combined channels or a combination of + * combined and either RX-only or TX-only channels. 
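In the be_ethtool.c hunk, get_channels() now reports the overlap of RX and TX IRQs as combined channels and the excess on either side as RX-only or TX-only channels; for example, with hypothetical counts of num_tx_qs = 4 and 6 RX IRQs, the report is 4 combined, 2 RX-only, 0 TX-only. A tiny model of that reporting split:

/* Model of the combined/RX-only/TX-only split reported by get_channels().
 * The queue counts below are invented for the example.
 */
#include <stdio.h>

struct channels { unsigned int combined, rx, tx; };

static struct channels report(unsigned int num_tx_qs, unsigned int num_rx_irqs)
{
	struct channels ch;

	ch.combined = num_tx_qs < num_rx_irqs ? num_tx_qs : num_rx_irqs;
	ch.rx = num_rx_irqs - ch.combined;	/* RX rings beyond the overlap */
	ch.tx = num_tx_qs - ch.combined;	/* TX queues beyond the overlap */
	return ch;
}

int main(void)
{
	struct channels ch = report(4, 6);

	printf("combined=%u rx=%u tx=%u\n", ch.combined, ch.rx, ch.tx);
	return 0;
}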
+ */ + if (ch->other_count || !ch->combined_count || + (ch->rx_count && ch->tx_count)) + return -EINVAL; + + if (ch->combined_count > be_max_qp_irqs(adapter) || + (ch->rx_count && + (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) || + (ch->tx_count && + (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter))) return -EINVAL; - adapter->cfg_num_qs = ch->combined_count; + adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count; + adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count; status = be_update_queues(adapter); return be_cmd_status(status); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3d947897bb6a..1873c74638cd 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -2620,8 +2620,10 @@ static int be_evt_queues_create(struct be_adapter *adapter) struct be_aic_obj *aic; int i, rc; + /* need enough EQs to service both RX and TX queues */ adapter->num_evt_qs = min_t(u16, num_irqs(adapter), - adapter->cfg_num_qs); + max(adapter->cfg_num_rx_irqs, + adapter->cfg_num_tx_irqs)); for_all_evt_queues(adapter, eqo, i) { int numa_node = dev_to_node(&adapter->pdev->dev); @@ -2726,7 +2728,7 @@ static int be_tx_qs_create(struct be_adapter *adapter) struct be_eq_obj *eqo; int status, i; - adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); + adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs); for_all_tx_queues(adapter, txo, i) { cq = &txo->cq; @@ -2784,11 +2786,11 @@ static int be_rx_cqs_create(struct be_adapter *adapter) struct be_rx_obj *rxo; int rc, i; - /* We can create as many RSS rings as there are EQs. */ - adapter->num_rss_qs = adapter->num_evt_qs; + adapter->num_rss_qs = + min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs); /* We'll use RSS only if atleast 2 RSS rings are supported. */ - if (adapter->num_rss_qs <= 1) + if (adapter->num_rss_qs < 2) adapter->num_rss_qs = 0; adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq; @@ -3249,18 +3251,22 @@ static void be_msix_disable(struct be_adapter *adapter) static int be_msix_enable(struct be_adapter *adapter) { - int i, num_vec; + unsigned int i, num_vec, max_roce_eqs; struct device *dev = &adapter->pdev->dev; - /* If RoCE is supported, program the max number of NIC vectors that - * may be configured via set-channels, along with vectors needed for - * RoCe. Else, just program the number we'll use initially. + /* If RoCE is supported, program the max number of vectors that + * could be used for NIC and RoCE, else, just program the number + * we'll use initially. 
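The set_channels() path above then feeds cfg_num_rx_irqs/cfg_num_tx_irqs into queue creation: event queues cover the larger of the two shares, TX queues and RSS rings are each clamped to their own IRQ share, and RSS is dropped entirely when fewer than two rings remain. A worked model with invented numbers (6 RX IRQs, 4 TX IRQs, 5 MSI-X vectors actually granted):

/* Model of how the per-open queue counts are derived from the configured
 * RX/TX IRQ counts and the MSI-X vectors actually granted.
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int cfg_num_rx_irqs = 6, cfg_num_tx_irqs = 4;
	unsigned int granted_irqs = 5;		/* MSI-X vectors we got */
	unsigned int need_def_rxq = 1;		/* non-RSS default RXQ */
	unsigned int num_evt_qs, num_tx_qs, num_rss_qs, num_rx_qs;

	/* enough EQs to service both the RX and the TX side */
	num_evt_qs = min_u(granted_irqs, max_u(cfg_num_rx_irqs, cfg_num_tx_irqs));
	num_tx_qs  = min_u(num_evt_qs, cfg_num_tx_irqs);
	num_rss_qs = min_u(num_evt_qs, cfg_num_rx_irqs);
	if (num_rss_qs < 2)			/* RSS needs at least two rings */
		num_rss_qs = 0;
	num_rx_qs = num_rss_qs + need_def_rxq;

	printf("eqs=%u txqs=%u rssqs=%u rxqs=%u\n",
	       num_evt_qs, num_tx_qs, num_rss_qs, num_rx_qs);
	return 0;
}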
*/ - if (be_roce_supported(adapter)) - num_vec = min_t(int, 2 * be_max_eqs(adapter), - 2 * num_online_cpus()); - else - num_vec = adapter->cfg_num_qs; + if (be_roce_supported(adapter)) { + max_roce_eqs = + be_max_func_eqs(adapter) - be_max_nic_eqs(adapter); + max_roce_eqs = min(max_roce_eqs, num_online_cpus()); + num_vec = be_max_any_irqs(adapter) + max_roce_eqs; + } else { + num_vec = max(adapter->cfg_num_rx_irqs, + adapter->cfg_num_tx_irqs); + } for (i = 0; i < num_vec; i++) adapter->msix_entries[i].entry = i; @@ -3723,6 +3729,11 @@ static void be_vf_clear(struct be_adapter *adapter) be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } + + if (BE3_chip(adapter)) + be_cmd_set_hsw_config(adapter, 0, 0, + adapter->if_handle, + PORT_FWD_TYPE_PASSTHRU, 0); done: kfree(adapter->vf_cfg); adapter->num_vfs = 0; @@ -4013,6 +4024,15 @@ static int be_vf_setup(struct be_adapter *adapter) } } + if (BE3_chip(adapter)) { + /* On BE3, enable VEB only when SRIOV is enabled */ + status = be_cmd_set_hsw_config(adapter, 0, 0, + adapter->if_handle, + PORT_FWD_TYPE_VEB, 0); + if (status) + goto err; + } + adapter->flags |= BE_FLAGS_SRIOV_ENABLED; return 0; err: @@ -4219,16 +4239,13 @@ static int be_get_resources(struct be_adapter *adapter) struct be_resources res = {0}; int status; - if (BEx_chip(adapter)) { - BEx_get_resources(adapter, &res); - adapter->res = res; - } - /* For Lancer, SH etc read per-function resource limits from FW. * GET_FUNC_CONFIG returns per function guaranteed limits. * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits */ - if (!BEx_chip(adapter)) { + if (BEx_chip(adapter)) { + BEx_get_resources(adapter, &res); + } else { status = be_cmd_get_func_config(adapter, &res); if (status) return status; @@ -4237,13 +4254,13 @@ static int be_get_resources(struct be_adapter *adapter) if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs && !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)) res.max_rss_qs -= 1; - - /* If RoCE may be enabled stash away half the EQs for RoCE */ - if (be_roce_supported(adapter)) - res.max_evt_qs /= 2; - adapter->res = res; } + /* If RoCE is supported stash away half the EQs for RoCE */ + res.max_nic_evt_qs = be_roce_supported(adapter) ? + res.max_evt_qs / 2 : res.max_evt_qs; + adapter->res = res; + /* If FW supports RSS default queue, then skip creating non-RSS * queue for non-IP traffic. 
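be_get_resources() now stashes half of the function's event queues for RoCE up front, and be_msix_enable() sizes the MSI-X request from the NIC share plus the RoCE share. Roughly, with 32 function EQs, RoCE supported and 8 online CPUs, the NIC keeps 16 EQs and the driver requests about 16 vectors. A simplified model follows; the NIC side is approximated here by min(NIC EQs, CPUs), whereas the driver actually uses be_max_any_irqs(), and all inputs are illustrative.

/* Model of the NIC/RoCE event-queue split and the MSI-X vector request. */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int max_evt_qs = 32;		/* whole-function EQ limit */
	unsigned int online_cpus = 8;
	int roce_supported = 1;
	unsigned int max_nic_evt_qs, max_roce_eqs, nic_irqs, num_vec;

	/* RoCE gets half of the function's EQs when it is supported */
	max_nic_evt_qs = roce_supported ? max_evt_qs / 2 : max_evt_qs;

	nic_irqs = min_u(max_nic_evt_qs, online_cpus);	/* be_max_irqs()-like */
	max_roce_eqs = min_u(max_evt_qs - max_nic_evt_qs, online_cpus);

	num_vec = roce_supported ? nic_irqs + max_roce_eqs : nic_irqs;
	printf("nic_eqs=%u roce_eqs=%u msix_vectors=%u\n",
	       max_nic_evt_qs, max_roce_eqs, num_vec);
	return 0;
}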
*/ @@ -4252,15 +4269,17 @@ static int be_get_resources(struct be_adapter *adapter) dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", be_max_txqs(adapter), be_max_rxqs(adapter), - be_max_rss(adapter), be_max_eqs(adapter), + be_max_rss(adapter), be_max_nic_eqs(adapter), be_max_vfs(adapter)); dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", be_max_uc(adapter), be_max_mc(adapter), be_max_vlans(adapter)); - /* Sanitize cfg_num_qs based on HW and platform limits */ - adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(), - be_max_qs(adapter)); + /* Ensure RX and TX queues are created in pairs at init time */ + adapter->cfg_num_rx_irqs = + min_t(u16, netif_get_num_default_rss_queues(), + be_max_qp_irqs(adapter)); + adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs; return 0; } @@ -4373,7 +4392,7 @@ static int be_if_create(struct be_adapter *adapter) u32 cap_flags = be_if_cap_flags(adapter); int status; - if (adapter->cfg_num_qs == 1) + if (adapter->cfg_num_rx_irqs == 1) cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS); en_flags &= cap_flags; @@ -4559,6 +4578,15 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_logical_link_config(adapter, IFLA_VF_LINK_STATE_AUTO, 0); + /* BE3 EVB echoes broadcast/multicast packets back to PF's vport + * confusing a linux bridge or OVS that it might be connected to. + * Set the EVB to PASSTHRU mode which effectively disables the EVB + * when SRIOV is not enabled. + */ + if (BE3_chip(adapter)) + be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle, + PORT_FWD_TYPE_PASSTHRU, 0); + if (adapter->num_vfs) be_vf_setup(adapter); diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 4089156a7f5e..2b62841c4c63 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index fde609789483..e51719a7307f 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. 
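The BE3 hunks implement a small policy for the embedded switch: keep it in pass-through mode whenever SR-IOV is off, so broadcast/multicast traffic is not echoed back to the PF and does not confuse a Linux bridge or OVS above it, and switch to VEB only while VFs are provisioned. A compact model of that toggling; the PORT_FWD_TYPE_* names come from the patch, while the stub function and the enum values are illustrative.

/* Model of the BE3 embedded-switch (EVB) mode policy from the hunks above:
 * PASSTHRU while SR-IOV is disabled, VEB only while VFs exist.
 */
#include <stdio.h>

enum port_fwd_type { PORT_FWD_TYPE_PASSTHRU, PORT_FWD_TYPE_VEB };

static void set_hsw_config(enum port_fwd_type mode)
{
	/* stands in for be_cmd_set_hsw_config(); error handling omitted */
	printf("EVB mode -> %s\n",
	       mode == PORT_FWD_TYPE_VEB ? "VEB" : "PASSTHRU");
}

static void be_setup_model(void)    { set_hsw_config(PORT_FWD_TYPE_PASSTHRU); }
static void be_vf_setup_model(void) { set_hsw_config(PORT_FWD_TYPE_VEB); }
static void be_vf_clear_model(void) { set_hsw_config(PORT_FWD_TYPE_PASSTHRU); }

int main(void)
{
	be_setup_model();	/* probe: no VFs, avoid echoing bcast/mcast to the PF */
	be_vf_setup_model();	/* SR-IOV enabled: VEB forwards between PF and VFs */
	be_vf_clear_model();	/* SR-IOV disabled again: back to pass-through */
	return 0;
}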
* * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index f58f9ea51639..92fd5c0bf4df 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -442,6 +442,8 @@ struct bufdesc_ex { #define FEC_QUIRK_SINGLE_MDIO (1 << 11) /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +/* Controller supports interrupt coalesc */ +#define FEC_QUIRK_HAS_COALESCE (1 << 13) struct bufdesc_prop { int qid; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c36401e59905..4040003a74f9 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -111,7 +111,13 @@ static struct platform_device_id fec_devtype[] = { FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + }, { + .name = "imx6ul-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, }, { /* sentinel */ } @@ -125,6 +131,7 @@ enum imx_fec_type { IMX6Q_FEC, MVF600_FEC, IMX6SX_FEC, + IMX6UL_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -134,6 +141,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -2358,9 +2366,6 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int rx_itr, tx_itr; - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) - return; - /* Must be greater than zero to avoid unpredictable behavior */ if (!fep->rx_time_itr || !fep->rx_pkts_itr || !fep->tx_time_itr || !fep->tx_pkts_itr) @@ -2383,10 +2388,12 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) writel(tx_itr, fep->hwp + FEC_TXIC0); writel(rx_itr, fep->hwp + FEC_RXIC0); - writel(tx_itr, fep->hwp + FEC_TXIC1); - writel(rx_itr, fep->hwp + FEC_RXIC1); - writel(tx_itr, fep->hwp + FEC_TXIC2); - writel(rx_itr, fep->hwp + FEC_RXIC2); + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); + } } static int @@ -2394,7 +2401,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) { struct fec_enet_private *fep = netdev_priv(ndev); - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP; ec->rx_coalesce_usecs = fep->rx_time_itr; @@ -2412,7 +2419,7 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) struct fec_enet_private *fep = netdev_priv(ndev); unsigned int cycle; - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP; if (ec->rx_max_coalesced_frames > 255) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index f01918c63f28..99c6bbdff501 100644 --- 
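The FEC change decouples interrupt coalescing from the AVB quirk: a new FEC_QUIRK_HAS_COALESCE bit gates the ethtool coalesce ops, while only the extra per-queue ITR registers stay behind FEC_QUIRK_HAS_AVB, which is what allows the single-queue i.MX6UL entry to enable coalescing. A small model of that gating; the COALESCE bit value matches the header hunk, the AVB bit position and the register "writes" are illustrative.

/* Model of quirk-bit gating for interrupt coalescing, as in the FEC hunks. */
#include <stdio.h>

#define FEC_QUIRK_HAS_AVB	(1 << 8)	/* illustrative position */
#define FEC_QUIRK_HAS_COALESCE	(1 << 13)	/* as added in fec.h */

static int set_coalesce(unsigned int quirks, unsigned int usecs)
{
	if (!(quirks & FEC_QUIRK_HAS_COALESCE))
		return -1;			/* -EOPNOTSUPP in the driver */

	printf("program TXIC0/RXIC0 for %u us\n", usecs);
	if (quirks & FEC_QUIRK_HAS_AVB)
		printf("program TXIC1/2 and RXIC1/2 as well\n");
	return 0;
}

int main(void)
{
	/* i.MX6UL-like: coalescing without the AVB queues */
	set_coalesce(FEC_QUIRK_HAS_COALESCE, 100);
	/* i.MX6SX-like: both quirks set */
	set_coalesce(FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_HAS_AVB, 100);
	return 0;
}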
a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -37,6 +37,11 @@ #include "mlx4_en.h" #include "fw_qos.h" +enum { + MLX4_CEE_STATE_DOWN = 0, + MLX4_CEE_STATE_UP = 1, +}; + /* Definitions for QCN */ @@ -80,13 +85,202 @@ struct mlx4_congestion_control_mb_prio_802_1_qau_statistics { __be32 reserved3[4]; }; +static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + switch (capid) { + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->cee_params.dcbx_cap; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 1 << mlx4_max_tc(priv->mdev->dev); + break; + default: + *cap = false; + break; + } + + return 0; +} + +static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + return priv->cee_params.dcb_cfg.pfc_state; +} + +static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + priv->cee_params.dcb_cfg.pfc_state = state; +} + +static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, + u8 *setting) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc; +} + +static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, + u8 setting) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting; + priv->cee_params.dcb_cfg.pfc_state = true; +} + +static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) + return -EINVAL; + + if (tcid == DCB_NUMTCS_ATTR_PFC) + *num = mlx4_max_tc(priv->mdev->dev); + else + *num = 0; + + return 0; +} + +static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg; + int err = 0; + + if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return -EINVAL; + + if (dcb_cfg->pfc_state) { + int tc; + + priv->prof->rx_pause = 0; + priv->prof->tx_pause = 0; + for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { + u8 tc_mask = 1 << tc; + + switch (dcb_cfg->tc_config[tc].dcb_pfc) { + case pfc_disabled: + priv->prof->tx_ppp &= ~tc_mask; + priv->prof->rx_ppp &= ~tc_mask; + break; + case pfc_enabled_full: + priv->prof->tx_ppp |= tc_mask; + priv->prof->rx_ppp |= tc_mask; + break; + case pfc_enabled_tx: + priv->prof->tx_ppp |= tc_mask; + priv->prof->rx_ppp &= ~tc_mask; + break; + case pfc_enabled_rx: + priv->prof->tx_ppp &= ~tc_mask; + priv->prof->rx_ppp |= tc_mask; + break; + default: + break; + } + } + en_dbg(DRV, priv, "Set pfc on\n"); + } else { + priv->prof->rx_pause = 1; + priv->prof->tx_pause = 1; + en_dbg(DRV, priv, "Set pfc off\n"); + } + + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + priv->prof->tx_pause, + priv->prof->tx_ppp, + priv->prof->rx_pause, + priv->prof->rx_ppp); + if (err) + en_err(priv, "Failed setting pause params\n"); + return err; +} + +static u8 mlx4_en_dcbnl_get_state(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED) + return MLX4_CEE_STATE_UP; + + return MLX4_CEE_STATE_DOWN; +} + +static u8 
mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int num_tcs = 0; + + if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) + return 0; + + if (state) { + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + num_tcs = IEEE_8021QAZ_MAX_TCS; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + } + + return mlx4_en_setup_tc(dev, num_tcs); +} + +/* On success returns a non-zero 802.1p user priority bitmap + * otherwise returns 0 as the invalid user priority bitmap to + * indicate an error. + */ +static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 0; + + return dcb_getapp(netdev, &app); +} + +static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype, + u16 id, u8 up) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct dcb_app app; + + if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return -EINVAL; + + memset(&app, 0, sizeof(struct dcb_app)); + app.selector = idtype; + app.protocol = id; + app.priority = up; + + return dcb_setapp(netdev, &app); +} + static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct mlx4_en_priv *priv = netdev_priv(dev); struct ieee_ets *my_ets = &priv->ets; - /* No IEEE PFC settings available */ if (!my_ets) return -EINVAL; @@ -237,18 +431,51 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) { - return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->cee_params.dcbx_cap; } static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + + if (mode == priv->cee_params.dcbx_cap) + return 0; + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || - (mode & DCB_CAP_DCBX_VER_CEE) || - !(mode & DCB_CAP_DCBX_VER_IEEE) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && + (mode & DCB_CAP_DCBX_VER_CEE)) || !(mode & DCB_CAP_DCBX_HOST)) - return 1; + goto err; + + priv->cee_params.dcbx_cap = mode; + + ets.ets_cap = IEEE_8021QAZ_MAX_TCS; + pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + if (mlx4_en_dcbnl_ieee_setets(dev, &ets)) + goto err; + if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc)) + goto err; + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + if (mlx4_en_dcbnl_set_all(dev)) + goto err; + } else { + if (mlx4_en_dcbnl_ieee_setets(dev, &ets)) + goto err; + if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc)) + goto err; + if (mlx4_en_setup_tc(dev, 0)) + goto err; + } return 0; +err: + return 1; } #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */ @@ -463,24 +690,46 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev, } const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { - .ieee_getets = mlx4_en_dcbnl_ieee_getets, - .ieee_setets = mlx4_en_dcbnl_ieee_setets, - .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, - .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, - .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, - .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + .ieee_getets = mlx4_en_dcbnl_ieee_getets, + .ieee_setets = mlx4_en_dcbnl_ieee_setets, + .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = 
mlx4_en_dcbnl_ieee_setmaxrate, + .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, + .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, + .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getstate = mlx4_en_dcbnl_get_state, + .setstate = mlx4_en_dcbnl_set_state, + .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg, + .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg, + .setall = mlx4_en_dcbnl_set_all, + .getcap = mlx4_en_dcbnl_getcap, + .getnumtcs = mlx4_en_dcbnl_getnumtcs, + .getpfcstate = mlx4_en_dcbnl_getpfcstate, + .setpfcstate = mlx4_en_dcbnl_setpfcstate, + .getapp = mlx4_en_dcbnl_getapp, + .setapp = mlx4_en_dcbnl_setapp, .getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, - .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, - .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, - .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, }; const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + .setstate = mlx4_en_dcbnl_set_state, + .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg, + .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg, + .setall = mlx4_en_dcbnl_set_all, + .getnumtcs = mlx4_en_dcbnl_getnumtcs, + .getpfcstate = mlx4_en_dcbnl_getpfcstate, + .setpfcstate = mlx4_en_dcbnl_setpfcstate, + .getapp = mlx4_en_dcbnl_getapp, + .setapp = mlx4_en_dcbnl_setapp, + .getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8e318d21321f..d42083a8a104 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -67,6 +67,17 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) offset += priv->num_tx_rings_p_up; } +#ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (up) { + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + priv->cee_params.dcb_cfg.pfc_state = false; + } + } +#endif /* CONFIG_MLX4_EN_DCB */ + return 0; } @@ -2815,6 +2826,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_priv *priv; int i; int err; +#ifdef CONFIG_MLX4_EN_DCB + struct tc_configuration *tc; +#endif dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), MAX_TX_RINGS, MAX_RX_RINGS); @@ -2881,6 +2895,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; + priv->flags |= MLX4_EN_DCB_ENABLED; + priv->cee_params.dcb_cfg.pfc_state = false; + + for (i = 0; i < MLX4_EN_NUM_UP; i++) { + tc = &priv->cee_params.dcb_cfg.tc_config[i]; + tc->dcb_pfc = pfc_disabled; + } + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { dev->dcbnl_ops = &mlx4_en_dcbnl_ops; } else { diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index e97094598b2d..f4497cf4d06d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1128,6 +1128,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c port_cap->max_pkeys = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); port_cap->max_vl = field & 0xf; + port_cap->max_tc_eth = field >> 4; MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); 
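For the CEE path added earlier in this mlx4 change, mlx4_en_dcbnl_set_all() folds the per-priority dcb_pfc settings into the tx_ppp/rx_ppp priority bitmaps and turns global pause off whenever any PFC is enabled. As a worked example, priority 3 set to pfc_enabled_full and priority 5 set to pfc_enabled_tx end up as tx_ppp = 0x28 and rx_ppp = 0x08. A standalone model follows; the enum mirrors the new mlx4_en.h definitions, while the priority count of 8 is an assumption.

/* Model of folding CEE per-priority PFC settings into the tx_ppp/rx_ppp
 * priority bitmaps, as done in mlx4_en_dcbnl_set_all().
 */
#include <stdio.h>

#define CEE_DCBX_MAX_PRIO 8	/* assumed number of 802.1p priorities */

enum dcb_pfc_type { pfc_disabled = 0, pfc_enabled_full, pfc_enabled_tx, pfc_enabled_rx };

int main(void)
{
	enum dcb_pfc_type cfg[CEE_DCBX_MAX_PRIO] = { pfc_disabled };
	unsigned char tx_ppp = 0, rx_ppp = 0;
	int tc;

	cfg[3] = pfc_enabled_full;	/* example configuration */
	cfg[5] = pfc_enabled_tx;

	for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
		unsigned char tc_mask = 1 << tc;

		switch (cfg[tc]) {
		case pfc_enabled_full:
			tx_ppp |= tc_mask;
			rx_ppp |= tc_mask;
			break;
		case pfc_enabled_tx:
			tx_ppp |= tc_mask;
			break;
		case pfc_enabled_rx:
			rx_ppp |= tc_mask;
			break;
		default:
			break;		/* pfc_disabled: leave both bits clear */
		}
	}

	/* with any PFC enabled, global pause (rx_pause/tx_pause) is disabled */
	printf("tx_ppp=0x%02x rx_ppp=0x%02x\n", tx_ppp, rx_ppp);
	return 0;
}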
port_cap->log_max_macs = field & 0xf; port_cap->log_max_vlans = field >> 4; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 7ea258af636a..cdbd76f10ced 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -53,6 +53,7 @@ struct mlx4_port_cap { int ib_mtu; int max_port_width; int max_vl; + int max_tc_eth; int max_gids; int max_pkeys; u64 def_mac; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 12c77a70abdb..3564aad778a3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -292,6 +292,7 @@ static int _mlx4_dev_port(struct mlx4_dev *dev, int port, dev->caps.pkey_table_len[port] = port_cap->max_pkeys; dev->caps.port_width_cap[port] = port_cap->max_port_width; dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; + dev->caps.max_tc_eth = port_cap->max_tc_eth; dev->caps.def_mac[port] = port_cap->def_mac; dev->caps.supported_type[port] = port_cap->supported_port_types; dev->caps.suggested_type[port] = port_cap->suggested_type; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 6b3b0fefabad..d39bf594abe4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -448,6 +448,27 @@ struct mlx4_en_frag_info { #define MLX4_EN_TC_ETS 7 +enum dcb_pfc_type { + pfc_disabled = 0, + pfc_enabled_full, + pfc_enabled_tx, + pfc_enabled_rx +}; + +struct tc_configuration { + enum dcb_pfc_type dcb_pfc; +}; + +struct mlx4_en_cee_config { + bool pfc_state; + struct tc_configuration tc_config[MLX4_EN_NUM_UP]; +}; + +struct mlx4_en_cee_params { + u8 dcbx_cap; + struct mlx4_en_cee_config dcb_cfg; +}; + #endif struct ethtool_flow_id { @@ -467,6 +488,9 @@ enum { MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4), MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), +#ifdef CONFIG_MLX4_EN_DCB + MLX4_EN_FLAG_DCB_ENABLED = (1 << 6), +#endif }; #define PORT_BEACON_MAX_LIMIT (65535) @@ -568,9 +592,11 @@ struct mlx4_en_priv { u32 counter_index; #ifdef CONFIG_MLX4_EN_DCB +#define MLX4_EN_DCB_ENABLED 0x3 struct ieee_ets ets; u16 maxrate[IEEE_8021QAZ_MAX_TCS]; enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; + struct mlx4_en_cee_params cee_params; #endif #ifdef CONFIG_RFS_ACCEL spinlock_t filters_lock; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 087b23b320cb..3d2095e5c61c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -52,6 +52,7 @@ #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 #define MLX4_IGNORE_FCS_MASK 0x1 +#define MLNX4_TX_MAX_NUMBER 8 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) { @@ -2015,3 +2016,14 @@ out: return ret; } EXPORT_SYMBOL(mlx4_get_module_info); + +int mlx4_max_tc(struct mlx4_dev *dev) +{ + u8 num_tc = dev->caps.max_tc_eth; + + if (!num_tc) + num_tc = MLNX4_TX_MAX_NUMBER; + + return num_tc; +} +EXPORT_SYMBOL(mlx4_max_tc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9ea7b583096a..c4f450f1c658 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -1,11 +1,13 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ - health.o mcg.o cq.o 
srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o + health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ + mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ + fs_counters.o rl.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o + en_rx_am.o en_txrx.o en_clock.o vxlan.o en_tc.o \ + en_arfs.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e8a6c3325b39..da885c0dfebe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -79,6 +79,7 @@ #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 @@ -88,6 +89,7 @@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) +#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_BF_BUDGET 16 @@ -143,11 +145,32 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_data_seg data; }; +static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { + "rx_cqe_moder", +}; + +enum mlx5e_priv_flag { + MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), +}; + +#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \ + do { \ + if (enable) \ + priv->pflags |= pflag; \ + else \ + priv->pflags &= ~pflag; \ + } while (0) + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ #endif +struct mlx5e_cq_moder { + u16 usec; + u16 pkts; +}; + struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@ -156,12 +179,11 @@ struct mlx5e_params { u8 log_rq_size; u16 num_channels; u8 num_tc; + u8 rx_cq_period_mode; bool rx_cqe_compress_admin; bool rx_cqe_compress; - u16 rx_cq_moderation_usec; - u16 rx_cq_moderation_pkts; - u16 tx_cq_moderation_usec; - u16 tx_cq_moderation_pkts; + struct mlx5e_cq_moder rx_cq_moderation; + struct mlx5e_cq_moder tx_cq_moderation; u16 min_rx_wqes; bool lro_en; u32 lro_wqe_sz; @@ -173,6 +195,7 @@ struct mlx5e_params { #ifdef CONFIG_MLX5_CORE_EN_DCB struct ieee_ets ets; #endif + bool rx_am_enabled; }; struct mlx5e_tstamp { @@ -191,6 +214,7 @@ struct mlx5e_tstamp { enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, + MLX5E_RQ_STATE_AM, }; struct mlx5e_cq { @@ -198,6 +222,7 @@ struct mlx5e_cq { struct mlx5_cqwq wq; /* data path - accessed per napi poll */ + u16 event_ctr; struct napi_struct *napi; struct mlx5_core_cq mcq; struct mlx5e_channel *channel; @@ -225,6 +250,30 @@ struct mlx5e_dma_info { dma_addr_t addr; }; +struct mlx5e_rx_am_stats { + int ppms; /* packets per msec */ + int epms; /* events per msec */ +}; + +struct mlx5e_rx_am_sample { + ktime_t time; + unsigned int pkt_ctr; + u16 event_ctr; +}; + +struct mlx5e_rx_am { /* Adaptive Moderation */ + u8 state; + struct mlx5e_rx_am_stats prev_stats; + struct mlx5e_rx_am_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 
steps_right; + u8 steps_left; + u8 tired; +}; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@ -245,6 +294,8 @@ struct mlx5e_rq { unsigned long state; int ix; + struct mlx5e_rx_am am; /* Adaptive Moderation */ + /* control */ struct mlx5_wq_ctrl wq_ctrl; u8 wq_type; @@ -354,6 +405,7 @@ struct mlx5e_sq { struct mlx5e_channel *channel; int tc; struct mlx5e_ico_wqe_info *ico_wqe_info; + u32 rate_limit; } ____cacheline_aligned_in_smp; static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) @@ -530,6 +582,7 @@ struct mlx5e_priv { u32 indir_rqtn; u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; + u32 tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; @@ -540,6 +593,7 @@ struct mlx5e_priv { struct work_struct set_rx_mode_work; struct delayed_work update_stats_work; + u32 pflags; struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; @@ -562,6 +616,7 @@ enum mlx5e_link_mode { MLX5E_10GBASE_ER = 14, MLX5E_40GBASE_SR4 = 15, MLX5E_40GBASE_LR4 = 16, + MLX5E_50GBASE_SR2 = 18, MLX5E_100GBASE_CR4 = 20, MLX5E_100GBASE_SR4 = 21, MLX5E_100GBASE_KR4 = 22, @@ -579,6 +634,9 @@ enum mlx5e_link_mode { #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) + +void mlx5e_build_ptys2ethtool_map(void); + void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); @@ -612,6 +670,10 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); +void mlx5e_rx_am(struct mlx5e_rq *rq); +void mlx5e_rx_am_work(struct work_struct *work); +struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode); + void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); @@ -647,6 +709,9 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, int num_channels); int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, + u8 cq_period_mode); + static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index b2db180ae2a5..e6883132b555 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -191,7 +191,6 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - enum mlx5_port_status ps; u8 curr_pfc_en; int ret; @@ -200,14 +199,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, if (pfc->pfc_en == curr_pfc_en) return 0; - mlx5_query_port_admin_status(mdev, &ps); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); - - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + mlx5_toggle_port_link(mdev); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index fc7dcc03b1de..39a4d961a58e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -48,123 +48,85 @@ static void 
mlx5e_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); } -static const struct { - u32 supported; - u32 advertised; +struct ptys2ethtool_config { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); u32 speed; -} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = { - [MLX5E_1000BASE_CX_SGMII] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_1000BASE_KX] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_CX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_20GBASE_KR2] = { - .supported = SUPPORTED_20000baseKR2_Full, - .advertised = ADVERTISED_20000baseKR2_Full, - .speed = 20000, - }, - [MLX5E_40GBASE_CR4] = { - .supported = SUPPORTED_40000baseCR4_Full, - .advertised = ADVERTISED_40000baseCR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_KR4] = { - .supported = SUPPORTED_40000baseKR4_Full, - .advertised = ADVERTISED_40000baseKR4_Full, - .speed = 40000, - }, - [MLX5E_56GBASE_R4] = { - .supported = SUPPORTED_56000baseKR4_Full, - .advertised = ADVERTISED_56000baseKR4_Full, - .speed = 56000, - }, - [MLX5E_10GBASE_CR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_SR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_ER] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_40GBASE_SR4] = { - .supported = SUPPORTED_40000baseSR4_Full, - .advertised = ADVERTISED_40000baseSR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_LR4] = { - .supported = SUPPORTED_40000baseLR4_Full, - .advertised = ADVERTISED_40000baseLR4_Full, - .speed = 40000, - }, - [MLX5E_100GBASE_CR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_SR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_KR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_LR4] = { - .speed = 100000, - }, - [MLX5E_100BASE_TX] = { - .speed = 100, - }, - [MLX5E_1000BASE_T] = { - .supported = SUPPORTED_1000baseT_Full, - .advertised = ADVERTISED_1000baseT_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_T] = { - .supported = SUPPORTED_10000baseT_Full, - .advertised = ADVERTISED_10000baseT_Full, - .speed = 1000, - }, - [MLX5E_25GBASE_CR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_KR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_SR] = { - .speed = 25000, - }, - [MLX5E_50GBASE_CR2] = { - .speed = 50000, - }, - [MLX5E_50GBASE_KR2] = { - .speed = 50000, - }, }; +static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; + +#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) 
\ + ({ \ + struct ptys2ethtool_config *cfg; \ + const unsigned int modes[] = { __VA_ARGS__ }; \ + unsigned int i; \ + cfg = &ptys2ethtool_table[reg_]; \ + cfg->speed = speed_; \ + bitmap_zero(cfg->supported, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + bitmap_zero(cfg->advertised, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ + __set_bit(modes[i], cfg->supported); \ + __set_bit(modes[i], cfg->advertised); \ + } \ + }) + +void mlx5e_build_ptys2ethtool_map(void) +{ + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); +} + static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -198,6 +160,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv); + case ETH_SS_PRIV_FLAGS: + return 
ARRAY_SIZE(mlx5e_priv_flags); /* fallthrough */ default: return -EOPNOTSUPP; @@ -272,9 +236,12 @@ static void mlx5e_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct mlx5e_priv *priv = netdev_priv(dev); + int i; switch (stringset) { case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]); break; case ETH_SS_TEST: @@ -519,10 +486,11 @@ static int mlx5e_get_coalesce(struct net_device *netdev, if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) return -ENOTSUPP; - coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; - coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; - coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; - coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; + coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; + coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec; + coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts; + coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled; return 0; } @@ -533,6 +501,10 @@ static int mlx5e_set_coalesce(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channel *c; + bool restart = + !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled; + bool was_opened; + int err = 0; int tc; int i; @@ -540,12 +512,19 @@ static int mlx5e_set_coalesce(struct net_device *netdev, return -ENOTSUPP; mutex_lock(&priv->state_lock); - priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; - priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; - priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; - priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened && restart) { + mlx5e_close_locked(netdev); + priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; + } + + priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; + priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; + priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; + priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; + + if (!was_opened || restart) goto out; for (i = 0; i < priv->params.num_channels; ++i) { @@ -564,35 +543,37 @@ static int mlx5e_set_coalesce(struct net_device *netdev, } out: + if (was_opened && restart) + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); - return 0; + return err; } -static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) +static void ptys2ethtool_supported_link(unsigned long *supported_modes, + u32 eth_proto_cap) { - int i; - u32 supported_modes = 0; + int proto; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - supported_modes |= ptys2ethtool_table[i].supported; - } - return supported_modes; + for_each_set_bit(proto, (unsigned long *)ð_proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(supported_modes, supported_modes, + ptys2ethtool_table[proto].supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); } -static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) +static void ptys2ethtool_adver_link(unsigned long *advertising_modes, + u32 eth_proto_cap) { - int i; - u32 advertising_modes = 0; + int proto; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if 
(eth_proto_cap & MLX5E_PROT_MASK(i)) - advertising_modes |= ptys2ethtool_table[i].advertised; - } - return advertising_modes; + for_each_set_bit(proto, (unsigned long *)ð_proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(advertising_modes, advertising_modes, + ptys2ethtool_table[proto].advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS); } -static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) +static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings, + u32 eth_proto_cap) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) @@ -600,7 +581,7 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { - return SUPPORTED_FIBRE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE); } if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) @@ -608,9 +589,8 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { - return SUPPORTED_Backplane; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane); } - return 0; } int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) @@ -634,7 +614,7 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, - struct ethtool_cmd *cmd) + struct ethtool_link_ksettings *link_ksettings) { int i; u32 speed = SPEED_UNKNOWN; @@ -651,23 +631,32 @@ static void get_speed_duplex(struct net_device *netdev, } } out: - ethtool_cmd_speed_set(cmd, speed); - cmd->duplex = duplex; + link_ksettings->base.speed = speed; + link_ksettings->base.duplex = duplex; } -static void get_supported(u32 eth_proto_cap, u32 *supported) +static void get_supported(u32 eth_proto_cap, + struct ethtool_link_ksettings *link_ksettings) { - *supported |= ptys2ethtool_supported_port(eth_proto_cap); - *supported |= ptys2ethtool_supported_link(eth_proto_cap); - *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + unsigned long *supported = link_ksettings->link_modes.supported; + + ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); + ptys2ethtool_supported_link(supported, eth_proto_cap); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause); } static void get_advertising(u32 eth_proto_cap, u8 tx_pause, - u8 rx_pause, u32 *advertising) + u8 rx_pause, + struct ethtool_link_ksettings *link_ksettings) { - *advertising |= ptys2ethtool_adver_link(eth_proto_cap); - *advertising |= tx_pause ? ADVERTISED_Pause : 0; - *advertising |= (tx_pause ^ rx_pause) ? 
ADVERTISED_Asym_Pause : 0; + unsigned long *advertising = link_ksettings->link_modes.advertising; + + ptys2ethtool_adver_link(advertising, eth_proto_cap); + if (tx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); + if (tx_pause ^ rx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); } static u8 get_connector_port(u32 eth_proto) @@ -695,13 +684,16 @@ static u8 get_connector_port(u32 eth_proto) return PORT_OTHER; } -static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) +static void get_lp_advertising(u32 eth_proto_lp, + struct ethtool_link_ksettings *link_ksettings) { - *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); + unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; + + ptys2ethtool_adver_link(lp_advertising, eth_proto_lp); } -static int mlx5e_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int mlx5e_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@ -710,6 +702,8 @@ static int mlx5e_get_settings(struct net_device *netdev, u32 eth_proto_admin; u32 eth_proto_lp; u32 eth_proto_oper; + u8 an_disable_admin; + u8 an_status; int err; err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); @@ -720,35 +714,49 @@ static int mlx5e_get_settings(struct net_device *netdev, goto err_query_ptys; } - eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); + an_status = MLX5_GET(ptys_reg, out, an_status); - cmd->supported = 0; - cmd->advertising = 0; + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - get_supported(eth_proto_cap, &cmd->supported); - get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); - get_speed_duplex(netdev, eth_proto_oper, cmd); + get_supported(eth_proto_cap, link_ksettings); + get_advertising(eth_proto_admin, 0, 0, link_ksettings); + get_speed_duplex(netdev, eth_proto_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; - cmd->port = get_connector_port(eth_proto_oper); - get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); + link_ksettings->base.port = get_connector_port(eth_proto_oper); + get_lp_advertising(eth_proto_lp, link_ksettings); + + if (an_status == MLX5_AN_COMPLETE) + ethtool_link_ksettings_add_link_mode(link_ksettings, + lp_advertising, Autoneg); - cmd->transceiver = XCVR_INTERNAL; + link_ksettings->base.autoneg = an_disable_admin ? 
AUTONEG_DISABLE : + AUTONEG_ENABLE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Autoneg); + if (!an_disable_admin) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); err_query_ptys: return err; } -static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) +static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) { u32 i, ptys_modes = 0; for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (ptys2ethtool_table[i].advertised & link_modes) + if (bitmap_intersects(ptys2ethtool_table[i].advertised, + link_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= MLX5E_PROT_MASK(i); } @@ -767,21 +775,25 @@ static u32 mlx5e_ethtool2ptys_speed_link(u32 speed) return speed_links; } -static int mlx5e_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int mlx5e_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + u32 eth_proto_cap, eth_proto_admin; + bool an_changes = false; + u8 an_disable_admin; + u8 an_disable_cap; + bool an_disable; u32 link_modes; + u8 an_status; u32 speed; - u32 eth_proto_cap, eth_proto_admin; - enum mlx5_port_status ps; int err; - speed = ethtool_cmd_speed(cmd); + speed = link_ksettings->base.speed; - link_modes = cmd->autoneg == AUTONEG_ENABLE ? - mlx5e_ethtool2ptys_adver_link(cmd->advertising) : + link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? + mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : mlx5e_ethtool2ptys_speed_link(speed); err = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN); @@ -806,15 +818,18 @@ static int mlx5e_set_settings(struct net_device *netdev, goto out; } - if (link_modes == eth_proto_admin) + mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status, + &an_disable_cap, &an_disable_admin); + + an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; + an_changes = ((!an_disable && an_disable_admin) || + (an_disable && !an_disable_admin)); + + if (!an_changes && link_modes == eth_proto_admin) goto out; - mlx5_query_port_admin_status(mdev, &ps); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN); + mlx5_toggle_port_link(mdev); out: return err; @@ -1272,6 +1287,87 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, return 0; } +typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); + +static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + bool rx_mode_changed; + u8 rx_cq_period_mode; + int err = 0; + bool reset; + + rx_cq_period_mode = enable ? 
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode; + + if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && + !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) + return -ENOTSUPP; + + if (!rx_mode_changed) + return 0; + + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (reset) + mlx5e_close_locked(netdev); + + mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode); + + if (reset) + err = mlx5e_open_locked(netdev); + + return err; +} + +static int mlx5e_handle_pflag(struct net_device *netdev, + u32 wanted_flags, + enum mlx5e_priv_flag flag, + mlx5e_pflag_handler pflag_handler) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + bool enable = !!(wanted_flags & flag); + u32 changes = wanted_flags ^ priv->pflags; + int err; + + if (!(changes & flag)) + return 0; + + err = pflag_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s private flag 0x%x failed err %d\n", + enable ? "Enable" : "Disable", flag, err); + return err; + } + + MLX5E_SET_PRIV_FLAG(priv, flag, enable); + return 0; +} + +static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_CQE_BASED_MODER, + set_pflag_rx_cqe_based_moder); + + mutex_unlock(&priv->state_lock); + return err ? -EINVAL : 0; +} + +static u32 mlx5e_get_priv_flags(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return priv->pflags; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1284,8 +1380,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_channels = mlx5e_set_channels, .get_coalesce = mlx5e_get_coalesce, .set_coalesce = mlx5e_set_coalesce, - .get_settings = mlx5e_get_settings, - .set_settings = mlx5e_set_settings, + .get_link_ksettings = mlx5e_get_link_ksettings, + .set_link_ksettings = mlx5e_set_link_ksettings, .get_rxfh_key_size = mlx5e_get_rxfh_key_size, .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, @@ -1301,4 +1397,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_wol = mlx5e_set_wol, .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, + .get_priv_flags = mlx5e_get_priv_flags, + .set_priv_flags = mlx5e_set_priv_flags }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8b7c6f381706..02a0f1796f7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -40,8 +40,9 @@ #include "vxlan.h" struct mlx5e_rq_param { - u32 rqc[MLX5_ST_SZ_DW(rqc)]; - struct mlx5_wq_param wq; + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; + bool am_enabled; }; struct mlx5e_sq_param { @@ -55,6 +56,7 @@ struct mlx5e_cq_param { u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; u16 eq_ix; + u8 cq_period_mode; }; struct mlx5e_channel_param { @@ -336,6 +338,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, wqe->data.byte_count = cpu_to_be32(byte_count); } + INIT_WORK(&rq->am.work, mlx5e_rx_am_work); + rq->am.mode = priv->params.rx_cq_period_mode; + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@ -508,6 +513,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (err) goto err_disable_rq; + if 
(param->am_enabled) + set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; @@ -536,6 +544,8 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ napi_synchronize(&rq->channel->napi); + cancel_work_sync(&rq->am.work); + mlx5e_disable_rq(rq); mlx5e_destroy_rq(rq); } @@ -702,7 +712,8 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) return err; } -static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) +static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, + int next_state, bool update_rl, int rl_index) { struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; @@ -722,6 +733,10 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); + if (update_rl && next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); + } err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen); @@ -737,6 +752,8 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq) struct mlx5_core_dev *mdev = priv->mdev; mlx5_core_destroy_sq(mdev, sq->sqn); + if (sq->rate_limit) + mlx5_rl_remove_rate(mdev, sq->rate_limit); } static int mlx5e_open_sq(struct mlx5e_channel *c, @@ -754,7 +771,8 @@ static int mlx5e_open_sq(struct mlx5e_channel *c, if (err) goto err_destroy_sq; - err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + false, 0); if (err) goto err_disable_sq; @@ -793,7 +811,8 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq) if (mlx5e_sq_has_room_for(sq, 1)) mlx5e_send_nop(sq, true); - mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); + mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR, + false, 0); } while (sq->cc != sq->pc) /* wait till sq is empty */ @@ -887,6 +906,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@ -916,8 +936,7 @@ static void mlx5e_disable_cq(struct mlx5e_cq *cq) static int mlx5e_open_cq(struct mlx5e_channel *c, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, - u16 moderation_usecs, - u16 moderation_frames) + struct mlx5e_cq_moder moderation) { int err; struct mlx5e_priv *priv = c->priv; @@ -933,8 +952,8 @@ static int mlx5e_open_cq(struct mlx5e_channel *c, if (MLX5_CAP_GEN(mdev, cq_moderation)) mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); + moderation.usec, + moderation.pkts); return 0; err_destroy_cq: @@ -963,8 +982,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, - priv->params.tx_cq_moderation_usec, - priv->params.tx_cq_moderation_pkts); + priv->params.tx_cq_moderation); if (err) goto err_close_tx_cqs; } @@ -1024,14 +1042,91 @@ static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix) ix + i * priv->params.num_channels; } +static int mlx5e_set_sq_maxrate(struct net_device *dev, + struct mlx5e_sq *sq, u32 rate) 
+{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 rl_index = 0; + int err; + + if (rate == sq->rate_limit) + /* nothing to do */ + return 0; + + if (sq->rate_limit) + /* remove current rl index to free space for the next ones */ + mlx5_rl_remove_rate(mdev, sq->rate_limit); + + sq->rate_limit = 0; + + if (rate) { + err = mlx5_rl_add_rate(mdev, rate, &rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + return err; + } + } + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RDY, true, rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + /* remove the rate from the table */ + if (rate) + mlx5_rl_remove_rate(mdev, rate); + return err; + } + + sq->rate_limit = rate; + return 0; +} + +static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_sq *sq = priv->txq_to_sq_map[index]; + int err = 0; + + if (!mlx5_rl_is_supported(mdev)) { + netdev_err(dev, "Rate limiting is not supported on this device\n"); + return -EINVAL; + } + + /* rate is given in Mb/sec, HW config is in Kb/sec */ + rate = rate << 10; + + /* Check whether rate is in the valid range, 0 is always valid */ + if (rate && !mlx5_rl_is_in_range(mdev, rate)) { + netdev_err(dev, "TX rate %u is not in range\n", rate); + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_set_sq_maxrate(dev, sq, rate); + if (!err) + priv->tx_rates[index] = rate; + mutex_unlock(&priv->state_lock); + + return err; +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { + struct mlx5e_cq_moder icosq_cq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + struct mlx5e_cq_moder rx_cq_profile; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; + struct mlx5e_sq *sq; int err; + int i; c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) @@ -1045,11 +1140,16 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->mkey_be = cpu_to_be32(priv->mkey.key); c->num_tc = priv->params.num_tc; + if (priv->params.rx_am_enabled) + rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); + else + rx_cq_profile = priv->params.rx_cq_moderation; + + mlx5e_build_channeltc_to_txq_map(priv, ix); netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); - err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder); if (err) goto err_napi_del; @@ -1058,8 +1158,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, goto err_close_icosq_cq; err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, - priv->params.rx_cq_moderation_usec, - priv->params.rx_cq_moderation_pkts); + rx_cq_profile); if (err) goto err_close_tx_cqs; @@ -1073,6 +1172,16 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_icosq; + for (i = 0; i < priv->params.num_tc; i++) { + u32 txq_ix = priv->channeltc_to_txq_map[ix][i]; + + if (priv->tx_rates[txq_ix]) { + sq = priv->txq_to_sq_map[txq_ix]; + mlx5e_set_sq_maxrate(priv->netdev, sq, + priv->tx_rates[txq_ix]); + } + } + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@ -1149,6 +1258,8 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, param->wq.buf_numa_node =
dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; + + param->am_enabled = priv->params.rx_am_enabled; } static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) @@ -1214,6 +1325,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, } mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = priv->params.rx_cq_period_mode; } static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, @@ -1224,6 +1337,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; } static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, @@ -1235,6 +1350,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; } static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@ -2611,6 +2728,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -2632,6 +2750,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_do_ioctl = mlx5e_ioctl, .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, @@ -2760,6 +2879,20 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) (pci_bw < 40000) && (pci_bw < link_speed)); } +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + params->rx_cq_period_mode = cq_period_mode; + + params->rx_cq_moderation.pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) @@ -2767,6 +2900,9 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv = netdev_priv(netdev); u32 link_speed = 0; u32 pci_bw = 0; + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -2812,13 +2948,13 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); - priv->params.rx_cq_moderation_usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - priv->params.rx_cq_moderation_pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - priv->params.tx_cq_moderation_usec = + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - priv->params.tx_cq_moderation_pkts = + priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.num_tc = 1; @@ -2833,6 +2969,10 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + /* Initialize pflags */ + MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + priv->mdev = mdev; priv->netdev = netdev; priv->params.num_channels = num_channels; @@ -3240,6 +3380,7 @@ static struct mlx5_interface mlx5e_interface = { void mlx5e_init(void) { + mlx5e_build_ptys2ethtool_map(); mlx5_register_interface(&mlx5e_interface); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c new file mode 100644 index 000000000000..1fffe48a93cc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "en.h" + +/* Adaptive moderation profiles */ +#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define MLX5E_RX_AM_DEF_PROFILE_CQE 1 +#define MLX5E_RX_AM_DEF_PROFILE_EQE 1 +#define MLX5E_PARAMS_AM_NUM_PROFILES 5 + +/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */ +#define MLX5_AM_EQE_PROFILES { \ + {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define MLX5_AM_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +static const struct mlx5e_cq_moder +profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = { + MLX5_AM_EQE_PROFILES, + MLX5_AM_CQE_PROFILES, +}; + +static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix) +{ + return profile[cq_period_mode][ix]; +} + +struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode) +{ + int default_profile_ix; + + if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE; + else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */ + default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE; + + return profile[rx_cq_period_mode][default_profile_ix]; +} + +/* Adaptive moderation logic */ +enum { + MLX5E_AM_START_MEASURE, + MLX5E_AM_MEASURE_IN_PROGRESS, + MLX5E_AM_APPLY_NEW_PROFILE, +}; + +enum { + MLX5E_AM_PARKING_ON_TOP, + MLX5E_AM_PARKING_TIRED, + MLX5E_AM_GOING_RIGHT, + MLX5E_AM_GOING_LEFT, +}; + +enum { + MLX5E_AM_STATS_WORSE, + MLX5E_AM_STATS_SAME, + MLX5E_AM_STATS_BETTER, +}; + +enum { + MLX5E_AM_STEPPED, + MLX5E_AM_TOO_TIRED, + MLX5E_AM_ON_EDGE, +}; + +static bool mlx5e_am_on_top(struct mlx5e_rx_am *am) +{ + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n"); + return true; + case MLX5E_AM_GOING_RIGHT: + return (am->steps_left > 1) && (am->steps_right == 1); + default: /* MLX5E_AM_GOING_LEFT */ + return (am->steps_right > 1) && (am->steps_left == 1); + } +} + +static void mlx5e_am_turn(struct mlx5e_rx_am *am) +{ + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_turn: PARKING\n"); + break; + case MLX5E_AM_GOING_RIGHT: + am->tune_state = MLX5E_AM_GOING_LEFT; + am->steps_left = 0; + break; + case MLX5E_AM_GOING_LEFT: + am->tune_state = MLX5E_AM_GOING_RIGHT; + am->steps_right = 0; + break; + } +} + +static int mlx5e_am_step(struct mlx5e_rx_am *am) +{ + if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2)) + return MLX5E_AM_TOO_TIRED; + + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_step: PARKING\n"); + break; + case MLX5E_AM_GOING_RIGHT: + if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1)) + return MLX5E_AM_ON_EDGE; + am->profile_ix++; + am->steps_right++; + break; + case MLX5E_AM_GOING_LEFT: + if (am->profile_ix == 0) + return MLX5E_AM_ON_EDGE; + am->profile_ix--; + am->steps_left++; + break; + } + + am->tired++; + return MLX5E_AM_STEPPED; +} + +static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am) +{ + am->steps_right = 0; + am->steps_left = 0; + am->tired = 0; + am->tune_state = MLX5E_AM_PARKING_ON_TOP; +} + +static void mlx5e_am_park_tired(struct mlx5e_rx_am *am) +{ + am->steps_right = 0; + am->steps_left = 0; + 
am->tune_state = MLX5E_AM_PARKING_TIRED; +} + +static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) +{ + am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT : + MLX5E_AM_GOING_RIGHT; + mlx5e_am_step(am); +} + +static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, + struct mlx5e_rx_am_stats *prev) +{ + int diff; + + if (!prev->ppms) + return curr->ppms ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_SAME; + + diff = curr->ppms - prev->ppms; + if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ + return (diff > 0) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; + + if (!prev->epms) + return curr->epms ? MLX5E_AM_STATS_WORSE : + MLX5E_AM_STATS_SAME; + + diff = curr->epms - prev->epms; + if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ + return (diff < 0) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; + + return MLX5E_AM_STATS_SAME; +} + +static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats, + struct mlx5e_rx_am *am) +{ + int prev_state = am->tune_state; + int prev_ix = am->profile_ix; + int stats_res; + int step_res; + + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); + if (stats_res != MLX5E_AM_STATS_SAME) + mlx5e_am_exit_parking(am); + break; + + case MLX5E_AM_PARKING_TIRED: + am->tired--; + if (!am->tired) + mlx5e_am_exit_parking(am); + break; + + case MLX5E_AM_GOING_RIGHT: + case MLX5E_AM_GOING_LEFT: + stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); + if (stats_res != MLX5E_AM_STATS_BETTER) + mlx5e_am_turn(am); + + if (mlx5e_am_on_top(am)) { + mlx5e_am_park_on_top(am); + break; + } + + step_res = mlx5e_am_step(am); + switch (step_res) { + case MLX5E_AM_ON_EDGE: + mlx5e_am_park_on_top(am); + break; + case MLX5E_AM_TOO_TIRED: + mlx5e_am_park_tired(am); + break; + } + + break; + } + + if ((prev_state != MLX5E_AM_PARKING_ON_TOP) || + (am->tune_state != MLX5E_AM_PARKING_ON_TOP)) + am->prev_stats = *curr_stats; + + return am->profile_ix != prev_ix; +} + +static void mlx5e_am_sample(struct mlx5e_rq *rq, + struct mlx5e_rx_am_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = rq->stats.packets; + s->event_ctr = rq->cq.event_ctr; +} + +#define MLX5E_AM_NEVENTS 64 + +static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, + struct mlx5e_rx_am_sample *end, + struct mlx5e_rx_am_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + unsigned int npkts = end->pkt_ctr - start->pkt_ctr; + + if (!delta_us) { + WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n"); + return; + } + + curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; + curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; +} + +void mlx5e_rx_am_work(struct work_struct *work) +{ + struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am, + work); + struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am); + struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix]; + + mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq, + cur_profile.usec, cur_profile.pkts); + + am->state = MLX5E_AM_START_MEASURE; +} + +void mlx5e_rx_am(struct mlx5e_rq *rq) +{ + struct mlx5e_rx_am *am = &rq->am; + struct mlx5e_rx_am_sample end_sample; + struct mlx5e_rx_am_stats curr_stats; + u16 nevents; + + switch (am->state) { + case MLX5E_AM_MEASURE_IN_PROGRESS: + nevents = rq->cq.event_ctr - am->start_sample.event_ctr; + if (nevents < MLX5E_AM_NEVENTS) + break; + 
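/* a full window of MLX5E_AM_NEVENTS CQ events has elapsed: take the end sample, compute packet and event rates for the window, and let mlx5e_am_decision() choose whether to move to a new moderation profile */ +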
mlx5e_am_sample(rq, &end_sample); + mlx5e_am_calc_stats(&am->start_sample, &end_sample, + &curr_stats); + if (mlx5e_am_decision(&curr_stats, am)) { + am->state = MLX5E_AM_APPLY_NEW_PROFILE; + schedule_work(&am->work); + break; + } + /* fall through */ + case MLX5E_AM_START_MEASURE: + mlx5e_am_sample(rq, &am->start_sample); + am->state = MLX5E_AM_MEASURE_IN_PROGRESS; + break; + case MLX5E_AM_APPLY_NEW_PROFILE: + break; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index c38781fa567d..64ae2e800daa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -136,6 +136,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); + + if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state)) + mlx5e_rx_am(&c->rq); + mlx5e_cq_arm(&c->rq.cq); mlx5e_cq_arm(&c->icosq.cq); @@ -146,6 +150,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + cq->event_ctr++; set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); napi_schedule(cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 75c7ae6a5cc4..77fc1aa26114 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -151,6 +151,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, qos)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_QOS); + if (err) + return err; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a19b59348dd6..08cae3485960 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1144,6 +1144,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) dev_err(&pdev->dev, "Failed to init flow steering\n"); goto err_fs; } + + err = mlx5_init_rl_table(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init rate limiting\n"); + goto err_rl; + } + #ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_init(dev); if (err) { @@ -1183,6 +1190,8 @@ err_sriov: mlx5_eswitch_cleanup(dev->priv.eswitch); #endif err_reg_dev: + mlx5_cleanup_rl_table(dev); +err_rl: mlx5_cleanup_fs(dev); err_fs: mlx5_cleanup_mkey_table(dev); @@ -1253,6 +1262,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_eswitch_cleanup(dev->priv.eswitch); #endif + mlx5_cleanup_rl_table(dev); mlx5_cleanup_fs(dev); mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 3e35611b19c3..752c08127138 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -202,15 +202,24 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask) +int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, int proto_mask) { - u32 in[MLX5_ST_SZ_DW(ptys_reg)]; u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u8 an_disable_admin; + u8 an_disable_cap; + u8 an_status; + + mlx5_query_port_autoneg(dev, proto_mask, &an_status, + &an_disable_cap, 
&an_disable_admin); + if (!an_disable_cap && an_disable) + return -EPERM; memset(in, 0, sizeof(in)); MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, an_disable_admin, an_disable); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); if (proto_mask == MLX5_PTYS_EN) MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); @@ -220,7 +229,19 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PTYS, 0, 1); } -EXPORT_SYMBOL_GPL(mlx5_set_port_proto); +EXPORT_SYMBOL_GPL(mlx5_set_port_ptys); + +/* This function should be used after setting a port register only */ +void mlx5_toggle_port_link(struct mlx5_core_dev *dev) +{ + enum mlx5_port_status ps; + + mlx5_query_port_admin_status(dev, &ps); + mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(dev, MLX5_PORT_UP); +} +EXPORT_SYMBOL_GPL(mlx5_toggle_port_link); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status) @@ -518,6 +539,25 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) } EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); +void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, + u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + + *an_status = 0; + *an_disable_cap = 0; + *an_disable_admin = 0; + + if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1)) + return; + + *an_status = MLX5_GET(ptys_reg, out, an_status); + *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap); + *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); +} +EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg); + int mlx5_max_tc(struct mlx5_core_dev *mdev) { u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c new file mode 100644 index 000000000000..c07c28bd3d55 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +/* Finds an entry where we can register the given rate + * If the rate already exists, return the entry where it is registered, + * otherwise return the first available entry. + * If the table is full, return NULL + */ +static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, + u32 rate) +{ + struct mlx5_rl_entry *ret_entry = NULL; + bool empty_found = false; + int i; + + for (i = 0; i < table->max_size; i++) { + if (table->rl_entry[i].rate == rate) + return &table->rl_entry[i]; + if (!empty_found && !table->rl_entry[i].rate) { + empty_found = true; + ret_entry = &table->rl_entry[i]; + } + } + + return ret_entry; +} + +static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, + u32 rate, u16 index) +{ + u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]; + u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(set_rate_limit_in, in, opcode, + MLX5_CMD_OP_SET_RATE_LIMIT); + MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); + MLX5_SET(set_rate_limit_in, in, rate_limit, rate); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, sizeof(out)); +} + +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + + return (rate <= table->max_rate && rate >= table->min_rate); +} +EXPORT_SYMBOL(mlx5_rl_is_in_range); + +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + struct mlx5_rl_entry *entry; + int err = 0; + + mutex_lock(&table->rl_lock); + + if (!rate || !mlx5_rl_is_in_range(dev, rate)) { + mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n", + rate, table->min_rate, table->max_rate); + err = -EINVAL; + goto out; + } + + entry = find_rl_entry(table, rate); + if (!entry) { + mlx5_core_err(dev, "Max number of %u rates reached\n", + table->max_size); + err = -ENOSPC; + goto out; + } + if (entry->refcount) { + /* rate already configured */ + entry->refcount++; + } else { + /* new rate limit */ + err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); + if (err) { + mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", + rate, err); + goto out; + } + entry->rate = rate; + entry->refcount = 1; + } + *index = entry->index; + +out: + mutex_unlock(&table->rl_lock); + return err; +} +EXPORT_SYMBOL(mlx5_rl_add_rate); + +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + struct mlx5_rl_entry *entry = NULL; + + /* 0 is a reserved value for unlimited rate */ + if (rate == 0) + return; + + mutex_lock(&table->rl_lock); + entry = find_rl_entry(table, rate); + if (!entry || !entry->refcount) { + mlx5_core_warn(dev, "Rate %u is not configured\n", rate); + goto out; + } + + entry->refcount--; + if (!entry->refcount) { + /* need to remove rate */ + mlx5_set_rate_limit_cmd(dev, 0, entry->index); + entry->rate = 0; + } + +out: + mutex_unlock(&table->rl_lock); +} +EXPORT_SYMBOL(mlx5_rl_remove_rate); + +int mlx5_init_rl_table(struct mlx5_core_dev *dev) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + int i; + + mutex_init(&table->rl_lock); + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) { + table->max_size = 0; + return 0; + } + + /* First entry is reserved for unlimited rate */ + table->max_size = MLX5_CAP_QOS(dev, 
packet_pacing_rate_table_size) - 1; + table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate); + table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate); + + table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry), + GFP_KERNEL); + if (!table->rl_entry) + return -ENOMEM; + + /* The index represents the index in HW rate limit table + * Index 0 is reserved for unlimited rate + */ + for (i = 0; i < table->max_size; i++) + table->rl_entry[i].index = i + 1; + + /* Index 0 is reserved */ + mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n", + table->max_size, + table->min_rate >> 10, + table->max_rate >> 10); + + return 0; +} + +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + int i; + + /* Clear all configured rates */ + for (i = 0; i < table->max_size; i++) + if (table->rl_entry[i].rate) + mlx5_set_rate_limit_cmd(dev, 0, + table->rl_entry[i].index); + + kfree(dev->priv.rl_table.rl_entry); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e45cff4df280..b26fe267a150 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2222,6 +2222,110 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, return 0; } +static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 hw_addr, void *p_eth_qzone, + size_t eth_qzone_size, u8 timeset) +{ + struct coalescing_timeset *p_coal_timeset; + + if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { + DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n"); + return -EINVAL; + } + + p_coal_timeset = p_eth_qzone; + memset(p_coal_timeset, 0, eth_qzone_size); + SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); + SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); + qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); + + return 0; +} + +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id) +{ + struct ustorm_eth_queue_zone eth_qzone; + u8 timeset, timer_res; + u16 fw_qid = 0; + u32 address; + int rc; + + /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ + if (coalesce <= 0x7F) { + timer_res = 0; + } else if (coalesce <= 0xFF) { + timer_res = 1; + } else if (coalesce <= 0x1FF) { + timer_res = 2; + } else { + DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); + return -EINVAL; + } + timeset = (u8)(coalesce >> timer_res); + + rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid); + if (rc) + return rc; + + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false); + if (rc) + goto out; + + address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + + rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, + sizeof(struct ustorm_eth_queue_zone), timeset); + if (rc) + goto out; + + p_hwfn->cdev->rx_coalesce_usecs = coalesce; +out: + return rc; +} + +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id) +{ + struct xstorm_eth_queue_zone eth_qzone; + u8 timeset, timer_res; + u16 fw_qid = 0; + u32 address; + int rc; + + /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ + if (coalesce <= 0x7F) { + timer_res = 0; + } else if (coalesce <= 0xFF) { + timer_res = 1; + } else if (coalesce <= 0x1FF) { + timer_res = 2; + } else { + DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); + return -EINVAL; + } +
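/* e.g. a requested coalesce of 200 usec falls in the 0x80-0xFF range, so timer_res = 1 and timeset = 200 >> 1 = 100; at the top of the 0x100-0x1FF range the coarser granularity rounds the value down by up to 3 usec */ +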
timeset = (u8)(coalesce >> timer_res); + + rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid); + if (rc) + return rc; + + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true); + if (rc) + goto out; + + address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + + rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, + sizeof(struct xstorm_eth_queue_zone), timeset); + if (rc) + goto out; + + p_hwfn->cdev->tx_coalesce_usecs = coalesce; +out: + return rc; +} + /* Calculate final WFQ values for all vports and configure them. * After this configuration each vport will have * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index f810ce45d463..343bb0344f62 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -212,6 +212,20 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, u32 size_in_dwords, u32 flags); + /** + * @brief qed_dmae_grc2host - Read data from dmae data offset + * to the destination address using the given ptt + * + * @param p_ptt + * @param grc_addr (dmae_data_offset) + * @param dest_addr + * @param size_in_dwords + * @param flags - one of the flags defined above + */ +int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, + u32 flags); + /** + * @brief qed_dmae_host2host - copy data from a source address + * to a destination address (for SRIOV) using the given ptt @@ -308,4 +322,37 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); +/** + * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue + * Coalescing can be configured up to 511 microseconds, but with decreasing + * accuracy [the bigger the value the less accurate], up to an error of 3 usec + * for the highest values. + * + * @param p_hwfn + * @param p_ptt + * @param coalesce - Coalesce value in microseconds. + * @param qid - Queue index. + * @param sb_id - SB Id + * + * @return int + */ +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id); + +/** + * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue + * While the API allows setting coalescing per-qid, all tx queues sharing a + * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] + * otherwise configuration would break. + * + * @param p_hwfn + * @param p_ptt + * @param coalesce - Coalesce value in microseconds. + * @param qid - Queue index.
+ * @param sb_id - SB Id + * + * @return int + */ +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 2693c30981eb..e17885321faf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -769,6 +769,29 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, } int +qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, + dma_addr_t dest_addr, u32 size_in_dwords, u32 flags) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + struct qed_dmae_params params; + int rc; + + memset(&params, 0, sizeof(struct qed_dmae_params)); + params.flags = flags; + + mutex_lock(&p_hwfn->dmae_info.mutex); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, + dest_addr, QED_DMAE_ADDRESS_GRC, + QED_DMAE_ADDRESS_HOST_VIRT, + size_in_dwords, &params); + + mutex_unlock(&p_hwfn->dmae_info.mutex); + + return rc; +} + +int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, dma_addr_t source_addr, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 09a6ad3d22dd..8fa50fa23c8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -2418,6 +2418,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, { struct qed_dev *cdev = p_hwfn->cdev; u32 cau_state; + u8 timer_res; memset(p_sb_entry, 0, sizeof(*p_sb_entry)); @@ -2443,6 +2444,23 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; } + /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ + if (cdev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (cdev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + if (cdev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (cdev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); } @@ -2484,17 +2502,28 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, /* Configure pi coalescing if set */ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { - u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >> - (QED_CAU_DEF_RX_TIMER_RES + 1); + u8 timeset, timer_res; u8 num_tc = 1, i; + /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ + if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, QED_COAL_RX_STATE_MACHINE, timeset); - timeset = p_hwfn->cdev->tx_coalesce_usecs >> - (QED_CAU_DEF_TX_TIMER_RES + 1); - + if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); for (i = 0; i < num_tc; i++) { qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, TX_PI(i), @@ -3199,3 +3228,39 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev) for_each_hwfn(cdev, i) cdev->hwfns[i].b_int_requested = false; } + +int qed_int_set_timer_res(struct qed_hwfn
*p_hwfn, struct qed_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx) +{ + struct cau_sb_entry sb_entry; + int rc; + + if (!p_hwfn->hw_init_done) { + DP_ERR(p_hwfn, "hardware not initialized yet\n"); + return -EINVAL; + } + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + if (tx) + SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + else + SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); + return rc; + } + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 20b468637504..0948be64dc78 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -389,6 +389,9 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, u16 vf_number, u8 vf_valid); +int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx); + #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 6c4606b44b7e..e32ee57cdfee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1303,6 +1303,38 @@ static int qed_drain(struct qed_dev *cdev) return 0; } +static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) +{ + *rx_coal = cdev->rx_coalesce_usecs; + *tx_coal = cdev->tx_coalesce_usecs; +} + +static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id) +{ + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int hwfn_index; + int status = 0; + + hwfn_index = qid % cdev->num_hwfns; + hwfn = &cdev->hwfns[hwfn_index]; + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, + qid / cdev->num_hwfns, sb_id); + if (status) + goto out; + status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, + qid / cdev->num_hwfns, sb_id); +out: + qed_ptt_release(hwfn, ptt); + + return status; +} + static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@ -1349,5 +1381,7 @@ const struct qed_common_ops qed_common_ops_pass = { .update_msglvl = &qed_init_dp, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .get_coalesce = &qed_get_coalesce, + .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index aa08ddbd95df..f6b86ca1ff79 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -80,6 +80,8 @@ 0x1f00000UL #define BAR0_MAP_REG_TSDM_RAM \ 0x1c80000UL +#define BAR0_MAP_REG_XSDM_RAM \ + 0x1e00000UL #define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 0x5011f4UL #define PRS_REG_SEARCH_TCP \ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 6836d44b6b89..6228482bf7f0 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -426,6 +426,57 @@ static u32 qede_get_link(struct net_device *dev) 
return current_link.link_up; } +static int qede_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct qede_dev *edev = netdev_priv(dev); + + memset(coal, 0, sizeof(struct ethtool_coalesce)); + edev->ops->common->get_coalesce(edev->cdev, + (u16 *)&coal->rx_coalesce_usecs, + (u16 *)&coal->tx_coalesce_usecs); + + return 0; +} + +static int qede_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct qede_dev *edev = netdev_priv(dev); + int i, rc = 0; + u16 rxc, txc; + u8 sb_id; + + if (!netif_running(dev)) { + DP_INFO(edev, "Interface is down\n"); + return -EINVAL; + } + + if (coal->rx_coalesce_usecs > QED_COALESCE_MAX || + coal->tx_coalesce_usecs > QED_COALESCE_MAX) { + DP_INFO(edev, + "Can't support requested %s coalesce value [max supported value %d]\n", + coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" + : "tx", + QED_COALESCE_MAX); + return -EINVAL; + } + + rxc = (u16)coal->rx_coalesce_usecs; + txc = (u16)coal->tx_coalesce_usecs; + for_each_rss(i) { + sb_id = edev->fp_array[i].sb_info->igu_sb_id; + rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, + (u8)i, sb_id); + if (rc) { + DP_INFO(edev, "Set coalesce error, rc = %d\n", rc); + return rc; + } + } + + return rc; +} + static void qede_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { @@ -1139,6 +1190,8 @@ static const struct ethtool_ops qede_ethtool_ops = { .set_msglevel = qede_set_msglevel, .nway_reset = qede_nway_reset, .get_link = qede_get_link, + .get_coalesce = qede_get_coalesce, + .set_coalesce = qede_set_coalesce, .get_ringparam = qede_get_ringparam, .set_ringparam = qede_set_ringparam, .get_pauseparam = qede_get_pauseparam, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 63c2e4fda169..92105916ef40 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -73,6 +73,122 @@ struct rk_priv_data { #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16)) #define GRF_CLR_BIT(nr) (BIT(nr+16)) +#define RK3228_GRF_MAC_CON0 0x0900 +#define RK3228_GRF_MAC_CON1 0x0904 + +/* RK3228_GRF_MAC_CON0 */ +#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) +#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) + +/* RK3228_GRF_MAC_CON1 */ +#define RK3228_GMAC_PHY_INTF_SEL_RGMII \ + (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) +#define RK3228_GMAC_PHY_INTF_SEL_RMII \ + (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) +#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3) +#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) +#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define RK3228_GMAC_SPEED_100M GRF_BIT(2) +#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7) +#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) +#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9)) +#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9)) +#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9)) +#define RK3228_GMAC_RMII_MODE GRF_BIT(10) +#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10) +#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0) +#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0) +#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) +#define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1) + +static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, 
RK3228_GRF_MAC_CON1, + RK3228_GMAC_PHY_INTF_SEL_RGMII | + RK3228_GMAC_RMII_MODE_CLR | + RK3228_GMAC_RXCLK_DLY_ENABLE | + RK3228_GMAC_TXCLK_DLY_ENABLE); + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0, + RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3228_GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_PHY_INTF_SEL_RMII | + RK3228_GMAC_RMII_MODE); + + /* set MAC to RMII mode */ + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11)); +} + +static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_2_5M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_25M); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_125M); + else + dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); +} + +static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_RMII_CLK_2_5M | + RK3228_GMAC_SPEED_10M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_RMII_CLK_25M | + RK3228_GMAC_SPEED_100M); + else + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); +} + +static const struct rk_gmac_ops rk3228_ops = { + .set_to_rgmii = rk3228_set_to_rgmii, + .set_to_rmii = rk3228_set_to_rmii, + .set_rgmii_speed = rk3228_set_rgmii_speed, + .set_rmii_speed = rk3228_set_rmii_speed, +}; + #define RK3288_GRF_SOC_CON1 0x0248 #define RK3288_GRF_SOC_CON3 0x0250 @@ -642,6 +758,7 @@ static int rk_gmac_probe(struct platform_device *pdev) } static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 83273288aefc..9c924f15cd03 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2014,7 +2014,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (ret) dev_warn(&pdev->dev, "Doesn't have any child node\n"); - for_each_child_of_node(node, slave_node) { + for_each_available_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; int lenp; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 54874783476a..74e671906ddb 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -280,7 +280,7 @@ struct tc35815_regs { * Descriptors */ -/* Frame descripter */ +/* Frame descriptor */ struct FDesc { volatile __u32 FDNext; volatile __u32 FDSystem; @@ -288,7 +288,7 @@ struct FDesc { volatile __u32 FDCtl; }; -/* Buffer descripter */ +/* Buffer descriptor */ struct BDesc { volatile __u32 BuffData; volatile __u32 BDCtl; @@ -296,7 +296,7 @@ struct BDesc { #define FD_ALIGN 16 -/* Frame Descripter bit assign ---------------------------------------------- */ +/* Frame Descriptor bit assign ---------------------------------------------- */ #define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ #define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ #define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ @@ -309,7 +309,7 @@ struct BDesc { #define FD_Next_EOL 0x00000001 /* FD EOL indicator */ #define FD_BDCnt_SHIFT 16 -/* Buffer Descripter bit assign --------------------------------------------- */ +/* Buffer Descriptor bit assign --------------------------------------------- */ #define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */ #define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ #define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ @@ -1387,7 +1387,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) if (status & Int_IntExBD) { if (netif_msg_rx_err(lp)) dev_warn(&dev->dev, - "Excessive Buffer Descriptiors (%#x).\n", + "Excessive Buffer Descriptors (%#x).\n", status); dev->stats.rx_length_errors++; ret = 0; diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 7b44968e02e6..ddced28e8247 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1144,8 +1144,8 @@ xirc2ps_interrupt(int irq, void *dev_id) dev->stats.tx_packets += lp->last_ptr_value - n; netif_wake_queue(dev); } - if (tx_status & 0x0002) { /* Execessive collissions */ - pr_debug("tx restarted due to execssive collissions\n"); + if (tx_status & 0x0002) { /* Excessive collisions */ + pr_debug("tx restarted due to excessive collisions\n"); PutByte(XIRCREG_CR, RestartTx); /* restart 
transmitter process */ } if (tx_status & 0x0040) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index bd62bc19e758..acec16b9cf49 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -25,10 +25,9 @@ #include "ahb.h" static const struct of_device_id ath10k_ahb_of_match[] = { - /* TODO: enable this entry once everything in place. - * { .compatible = "qcom,ipq4019-wifi", - * .data = (void *)ATH10K_HW_QCA4019 }, - */ + { .compatible = "qcom,ipq4019-wifi", + .data = (void *)ATH10K_HW_QCA4019 + }, { } }; @@ -476,6 +475,7 @@ static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg) static int ath10k_ahb_request_irq_legacy(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); int ret; @@ -487,6 +487,7 @@ static int ath10k_ahb_request_irq_legacy(struct ath10k *ar) ar_ahb->irq, ret); return ret; } + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY; return 0; } @@ -918,8 +919,6 @@ int ath10k_ahb_init(void) { int ret; - printk(KERN_ERR "AHB support is still work in progress\n"); - ret = platform_driver_register(&ath10k_ahb_driver); if (ret) printk(KERN_ERR "failed to register ath10k ahb driver: %d\n", diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 49af62428c88..c6291c20f7ec 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> +#include <asm/byteorder.h> #include "core.h" #include "mac.h" @@ -55,7 +56,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, }, { + .id = QCA9887_HW_1_0_VERSION, + .dev_id = QCA9887_1_0_DEVICE_ID, + .name = "qca9887 hw1.0", + .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, + .cal_data_len = 2116, + .fw = { + .dir = QCA9887_HW_1_0_FW_DIR, + .board = QCA9887_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA9887_BOARD_DATA_SZ, + .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ, + }, + }, + { .id = QCA6174_HW_2_1_VERSION, .dev_id = QCA6164_2_1_DEVICE_ID, .name = "qca6164 hw2.1", @@ -148,6 +168,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .uart_pin = 7, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, @@ -163,6 +184,29 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, }, { + .id = QCA9984_HW_1_0_DEV_VERSION, + .dev_id = QCA9984_1_0_DEVICE_ID, + .name = "qca9984/qca9994 hw1.0", + .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, + .tx_chain_mask = 0xf, 
+ .rx_chain_mask = 0xf, + .max_spatial_stream = 4, + .cal_data_len = 12064, + .fw = { + .dir = QCA9984_HW_1_0_FW_DIR, + .board = QCA9984_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + }, + { .id = QCA9377_HW_1_0_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, .name = "qca9377 hw1.0", @@ -202,9 +246,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca4019 hw1.0", .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x0010000, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 125000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, @@ -236,6 +281,7 @@ static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", + [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@ -531,6 +577,35 @@ out: return ret; } +static int ath10k_download_cal_eeprom(struct ath10k *ar) +{ + size_t data_len; + void *data = NULL; + int ret; + + ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); + if (ret) { + if (ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = 0; + +out_free: + kfree(data); + + return ret; +} + static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) { u32 result, address; @@ -1293,7 +1368,17 @@ static int ath10k_download_cal_data(struct ath10k *ar) } ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot did not find DT entry, try OTP next: %d\n", + "boot did not find DT entry, try target EEPROM next: %d\n", + ret); + + ret = ath10k_download_cal_eeprom(ar); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_EEPROM; + goto done; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find target EEPROM entry, try OTP next: %d\n", ret); ret = ath10k_download_and_run_otp(ar); @@ -1733,6 +1818,16 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) val |= WMI_10_4_BSS_CHANNEL_INFO_64; + /* 10.4 firmware supports BT-Coex without reloading firmware + * via pdev param. To support Bluetooth coexistence pdev param, + * WMI_COEX_GPIO_SUPPORT of extended resource config should be + * enabled always. 
+ */ + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) + val |= WMI_10_4_COEX_GPIO_SUPPORT; + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, @@ -2062,6 +2157,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, switch (hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: ar->regs = &qca988x_regs; ar->hw_values = &qca988x_values; break; @@ -2071,6 +2167,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ar->hw_values = &qca6174_values; break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: ar->regs = &qca99x0_regs; ar->hw_values = &qca99x0_values; break; @@ -2159,5 +2256,5 @@ void ath10k_core_destroy(struct ath10k *ar) EXPORT_SYMBOL(ath10k_core_destroy); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); +MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 1852e0ee3fa1..3da18c9dbd7a 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -535,6 +535,13 @@ enum ath10k_fw_features { */ ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13, + /* Firmware supports BT-Coex without reloading firmware via pdev param. + * To support Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT of + * extended resource config should be enabled always. This firmware IE + * is used to configure WMI_COEX_GPIO_SUPPORT. + */ + ATH10K_FW_FEATURE_BTCOEX_PARAM = 14, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -571,6 +578,7 @@ enum ath10k_cal_mode { ATH10K_CAL_MODE_DT, ATH10K_PRE_CAL_MODE_FILE, ATH10K_PRE_CAL_MODE_DT, + ATH10K_CAL_MODE_EEPROM, }; enum ath10k_crypt_mode { @@ -593,6 +601,8 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) return "pre-cal-file"; case ATH10K_PRE_CAL_MODE_DT: return "pre-cal-dt"; + case ATH10K_CAL_MODE_EEPROM: + return "eeprom"; } return "unknown"; @@ -703,12 +713,10 @@ struct ath10k { int uart_pin; u32 otp_exe_param; - /* This is true if given HW chip has a quirky Cycle Counter - * wraparound which resets to 0x7fffffff instead of 0. All - * other CC related counters (e.g. Rx Clear Count) are divided - * by 2 so they never wraparound themselves. + /* Type of hw cycle counter wraparound logic, for more info + * refer enum ath10k_hw_cc_wraparound_type. */ - bool has_shifted_cc_wraparound; + enum ath10k_hw_cc_wraparound_type cc_wraparound_type; /* Some of chip expects fragment descriptor to be continuous * memory for any TX operation. Set continuous_frag_desc flag @@ -716,6 +724,12 @@ struct ath10k { */ bool continuous_frag_desc; + /* CCK hardware rate table mapping for the newer chipsets + * like QCA99X0, QCA4019 got revised. 
The CCK h/w rate values + * are in a proper order with respect to the rate/preamble + */ + bool cck_rate_map_rev2; + u32 channel_counters_freq_hz; /* Mgmt tx descriptors threshold for limiting probe response diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index e2511550fbb8..8fbb8f2c7828 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -609,25 +609,23 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, char buf[32]; int ret; - mutex_lock(&ar->conf_mutex); - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); /* make sure that buf is null terminated */ buf[sizeof(buf) - 1] = 0; + /* drop the possible '\n' from the end */ + if (buf[count - 1] == '\n') + buf[count - 1] = 0; + + mutex_lock(&ar->conf_mutex); + if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } - /* drop the possible '\n' from the end */ - if (buf[count - 1] == '\n') { - buf[count - 1] = 0; - count--; - } - if (!strcmp(buf, "soft")) { ath10k_info(ar, "simulating soft firmware crash\n"); ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); @@ -2127,6 +2125,7 @@ static ssize_t ath10k_write_btcoex(struct file *file, size_t buf_size; int ret; bool val; + u32 pdev_param; buf_size = min(count, (sizeof(buf) - 1)); if (copy_from_user(buf, ubuf, buf_size)) @@ -2150,14 +2149,25 @@ static ssize_t ath10k_write_btcoex(struct file *file, goto exit; } + pdev_param = ar->wmi.pdev_param->enable_btcoex; + if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val); + if (ret) { + ath10k_warn(ar, "failed to enable btcoex: %d\n", ret); + ret = count; + goto exit; + } + } else { + ath10k_info(ar, "restarting firmware due to btcoex change"); + queue_work(ar->workqueue, &ar->restart_work); + } + if (val) set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); else clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); - ath10k_info(ar, "restarting firmware due to btcoex change"); - - queue_work(ar->workqueue, &ar->restart_work); ret = count; exit: diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 89e7076c919f..b2566b06e1e1 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -87,6 +87,10 @@ struct ath10k_hif_ops { int (*suspend)(struct ath10k *ar); int (*resume)(struct ath10k *ar); + + /* fetch calibration data from target eeprom */ + int (*fetch_cal_eeprom)(struct ath10k *ar, void **data, + size_t *data_len); }; static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, @@ -202,4 +206,14 @@ static inline void ath10k_hif_write32(struct ath10k *ar, ar->hif.ops->write32(ar, address, data); } +static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar, + void **data, + size_t *data_len) +{ + if (!ar->hif.ops->fetch_cal_eeprom) + return -EOPNOTSUPP; + + return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 911c535d0863..430a83e142aa 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -485,10 +485,10 @@ struct htt_mgmt_tx_completion { __le32 status; } __packed; -#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F) +#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F) #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) -#define 
HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6) -#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7) +#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5) +#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6) #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index cc979a4faeb0..3b35c7ab5680 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -748,7 +748,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) if (WARN_ON_ONCE(!arvif)) return NULL; - if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) + if (WARN_ON_ONCE(ath10k_mac_vif_chan(arvif->vif, &def))) return NULL; return def.chan; @@ -939,7 +939,8 @@ static void ath10k_process_rx(struct ath10k *ar, is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? "mcast" : "ucast", (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, - status->flag == 0 ? "legacy" : "", + (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ? + "legacy" : "", status->flag & RX_FLAG_HT ? "ht" : "", status->flag & RX_FLAG_VHT ? "vht" : "", status->flag & RX_FLAG_40MHZ ? "40" : "", @@ -2182,34 +2183,6 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, ath10k_mac_tx_push_pending(ar); } -static inline enum nl80211_band phy_mode_to_band(u32 phy_mode) -{ - enum nl80211_band band; - - switch (phy_mode) { - case MODE_11A: - case MODE_11NA_HT20: - case MODE_11NA_HT40: - case MODE_11AC_VHT20: - case MODE_11AC_VHT40: - case MODE_11AC_VHT80: - band = NL80211_BAND_5GHZ; - break; - case MODE_11G: - case MODE_11B: - case MODE_11GONLY: - case MODE_11NG_HT20: - case MODE_11NG_HT40: - case MODE_11AC_VHT20_2G: - case MODE_11AC_VHT40_2G: - case MODE_11AC_VHT80_2G: - default: - band = NL80211_BAND_2GHZ; - } - - return band; -} - void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { bool release; @@ -2291,7 +2264,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ath10k_htt_tx_mgmt_dec_pending(htt); spin_unlock_bh(&htt->tx_lock); } - ath10k_mac_tx_push_pending(ar); break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: @@ -2442,8 +2414,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) dev_kfree_skb_any(skb); } - ath10k_mac_tx_push_pending(ar); - num_mpdus = atomic_read(&htt->num_mpdus_ready); while (num_mpdus) { diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index f544d48518c3..bd86e7a38db9 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -179,17 +179,35 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev) { u32 cc_fix = 0; + u32 rcc_fix = 0; + enum ath10k_hw_cc_wraparound_type wraparound_type; survey->filled |= SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY; - if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) { - cc_fix = 0x7fffffff; - survey->filled &= ~SURVEY_INFO_TIME_BUSY; + wraparound_type = ar->hw_params.cc_wraparound_type; + + if (cc < cc_prev || rcc < rcc_prev) { + switch (wraparound_type) { + case ATH10K_HW_CC_WRAP_SHIFTED_ALL: + if (cc < cc_prev) { + cc_fix = 0x7fffffff; + survey->filled &= ~SURVEY_INFO_TIME_BUSY; + } + break; + case ATH10K_HW_CC_WRAP_SHIFTED_EACH: + if (cc < cc_prev) + cc_fix = 0x7fffffff; + else + rcc_fix = 0x7fffffff; + break; + case ATH10K_HW_CC_WRAP_DISABLED: + break; + } } cc -= 
cc_prev - cc_fix; - rcc -= rcc_prev; + rcc -= rcc_prev - rcc_fix; survey->time = CCNT_TO_MSEC(ar, cc); survey->time_busy = CCNT_TO_MSEC(ar, rcc); diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index aedd8987040b..f31d3ce42470 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -26,7 +26,9 @@ #define QCA6164_2_1_DEVICE_ID (0x0041) #define QCA6174_2_1_DEVICE_ID (0x003e) #define QCA99X0_2_0_DEVICE_ID (0x0040) +#define QCA9984_1_0_DEVICE_ID (0x0046) #define QCA9377_1_0_DEVICE_ID (0x0042) +#define QCA9887_1_0_DEVICE_ID (0x0050) /* QCA988X 1.0 definitions (unsupported) */ #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 @@ -38,6 +40,13 @@ #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 +/* QCA9887 1.0 definitions */ +#define QCA9887_HW_1_0_VERSION 0x4100016d +#define QCA9887_HW_1_0_CHIP_ID_REV 0 +#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0" +#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin" +#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234 + /* QCA6174 target BMI version signatures */ #define QCA6174_HW_1_0_VERSION 0x05000000 #define QCA6174_HW_1_1_VERSION 0x05000001 @@ -91,6 +100,14 @@ enum qca9377_chip_id_rev { #define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 +/* QCA9984 1.0 defines */ +#define QCA9984_HW_1_0_DEV_VERSION 0x1000000 +#define QCA9984_HW_DEV_TYPE 0xa +#define QCA9984_HW_1_0_CHIP_ID_REV 0x0 +#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0" +#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin" +#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234 + /* QCA9377 1.0 definitions */ #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" #define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin" @@ -193,8 +210,10 @@ enum ath10k_hw_rev { ATH10K_HW_QCA988X, ATH10K_HW_QCA6174, ATH10K_HW_QCA99X0, + ATH10K_HW_QCA9984, ATH10K_HW_QCA9377, ATH10K_HW_QCA4019, + ATH10K_HW_QCA9887, }; struct ath10k_hw_regs { @@ -247,8 +266,10 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X) +#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887) #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) +#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984) #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) #define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) @@ -315,11 +336,41 @@ enum ath10k_hw_rate_cck { ATH10K_HW_RATE_CCK_SP_2M, }; +enum ath10k_hw_rate_rev2_cck { + ATH10K_HW_RATE_REV2_CCK_LP_1M = 1, + ATH10K_HW_RATE_REV2_CCK_LP_2M, + ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + ATH10K_HW_RATE_REV2_CCK_LP_11M, + ATH10K_HW_RATE_REV2_CCK_SP_2M, + ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + ATH10K_HW_RATE_REV2_CCK_SP_11M, +}; + enum ath10k_hw_4addr_pad { ATH10K_HW_4ADDR_PAD_AFTER, ATH10K_HW_4ADDR_PAD_BEFORE, }; +enum ath10k_hw_cc_wraparound_type { + ATH10K_HW_CC_WRAP_DISABLED = 0, + + /* This type is when the HW chip has a quirky Cycle Counter + * wraparound which resets to 0x7fffffff instead of 0. All + * other CC related counters (e.g. Rx Clear Count) are divided + * by 2 so they never wraparound themselves. + */ + ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1, + + /* Each hw counter wrapsaround independently. 
When the + * counter overflows the repestive counter is right shifted + * by 1, i.e reset to 0x7fffffff, and other counters will be + * running unaffected. In this type of wraparound, it should + * be possible to report accurate Rx busy time unlike the + * first type. + */ + ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2, +}; + /* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 @@ -547,7 +598,10 @@ enum ath10k_hw_4addr_pad { #define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 #define WLAN_GPIO_PIN0_ADDRESS 0x00000028 +#define WLAN_GPIO_PIN0_CONFIG_LSB 11 #define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5 +#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060 #define WLAN_GPIO_PIN1_ADDRESS 0x0000002c #define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 #define WLAN_GPIO_PIN10_ADDRESS 0x00000050 @@ -560,6 +614,8 @@ enum ath10k_hw_4addr_pad { #define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 #define SI_CONFIG_OFFSET 0x00000000 +#define SI_CONFIG_ERR_INT_LSB 19 +#define SI_CONFIG_ERR_INT_MASK 0x00080000 #define SI_CONFIG_BIDIR_OD_DATA_LSB 18 #define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 #define SI_CONFIG_I2C_LSB 16 @@ -573,7 +629,9 @@ enum ath10k_hw_4addr_pad { #define SI_CONFIG_DIVIDER_LSB 0 #define SI_CONFIG_DIVIDER_MASK 0x0000000f #define SI_CS_OFFSET 0x00000004 +#define SI_CS_DONE_ERR_LSB 10 #define SI_CS_DONE_ERR_MASK 0x00000400 +#define SI_CS_DONE_INT_LSB 9 #define SI_CS_DONE_INT_MASK 0x00000200 #define SI_CS_START_LSB 8 #define SI_CS_START_MASK 0x00000100 @@ -624,7 +682,10 @@ enum ath10k_hw_4addr_pad { #define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS #define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS #define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB #define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK #define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK #define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS #define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS @@ -679,6 +740,18 @@ enum ath10k_hw_4addr_pad { #define WINDOW_READ_ADDR_ADDRESS MISSING #define WINDOW_WRITE_ADDR_ADDRESS MISSING +#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5 +#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3 +#define QCA9887_1_0_SI_CLK_GPIO_PIN 17 +#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3 +#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010 + +#define QCA9887_EEPROM_SELECT_READ 0xa10000a0 +#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00 +#define QCA9887_EEPROM_ADDR_HI_LSB 8 +#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000 +#define QCA9887_EEPROM_ADDR_LO_LSB 16 + #define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) #endif /* _HW_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 6dd1d26b357f..3a170b1bf99b 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -62,6 +62,32 @@ static struct ieee80211_rate ath10k_rates[] = { { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, }; +static struct ieee80211_rate ath10k_rates_rev2[] = { + { .bitrate = 10, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, + { .bitrate = 20, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + .flags = 
IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + + { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, + { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, + { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, + { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, + { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, + { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, + { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, + { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, +}; + #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) @@ -70,6 +96,9 @@ static struct ieee80211_rate ath10k_rates[] = { #define ath10k_g_rates (ath10k_rates + 0) #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) +#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) +#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) + static bool ath10k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { @@ -3781,6 +3810,9 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar) int ret; int max; + if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) + return; + spin_lock_bh(&ar->txqs_lock); rcu_read_lock(); @@ -4051,9 +4083,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, list_add_tail(&artxq->list, &ar->txqs); spin_unlock_bh(&ar->txqs_lock); - if (ath10k_mac_tx_can_push(hw, txq)) - tasklet_schedule(&ar->htt.txrx_compl_task); - + ath10k_mac_tx_push_pending(ar); ath10k_htt_tx_txq_update(hw, txq); } @@ -4467,6 +4497,19 @@ static int ath10k_start(struct ieee80211_hw *hw) } } + param = ar->wmi.pdev_param->enable_btcoex; + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, param, 0); + if (ret) { + ath10k_warn(ar, + "failed to set btcoex param: %d\n", ret); + goto err_core_stop; + } + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + } + ar->num_started_vdevs = 0; ath10k_regd_update(ar); @@ -7695,8 +7738,14 @@ int ath10k_mac_register(struct ath10k *ar) band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); band->channels = channels; - band->n_bitrates = ath10k_g_rates_size; - band->bitrates = ath10k_g_rates; + + if (ar->hw_params.cck_rate_map_rev2) { + band->n_bitrates = ath10k_g_rates_rev2_size; + band->bitrates = ath10k_g_rates_rev2; + } else { + band->n_bitrates = ath10k_g_rates_size; + band->bitrates = ath10k_g_rates; + } ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 8133d7b5b956..f06dd3941bac 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -56,7 +56,9 @@ static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */ + { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */ { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */ + { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */ {0} }; @@ -81,8 +83,12 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] 
= { { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, + { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV }, + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, + + { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV }, }; static void ath10k_pci_buffer_cleanup(struct ath10k *ar); @@ -837,6 +843,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + @@ -844,6 +851,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 0x7ff) << 21; break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: case ATH10K_HW_QCA4019: val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); break; @@ -864,7 +872,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; u32 *buf; - unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; struct ath10k_ce_pipe *ce_diag; /* Host buffer address in CE space */ u32 ce_data; @@ -882,9 +890,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * 1) 4-byte alignment * 2) Buffer in DMA-able space */ - orig_nbytes = nbytes; + alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, - orig_nbytes, + alloc_nbytes, &ce_data_base, GFP_ATOMIC); @@ -892,9 +901,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, ret = -ENOMEM; goto done; } - memset(data_buf, 0, orig_nbytes); + memset(data_buf, 0, alloc_nbytes); - remaining_bytes = orig_nbytes; + remaining_bytes = nbytes; ce_data = ce_data_base; while (remaining_bytes) { nbytes = min_t(unsigned int, remaining_bytes, @@ -954,19 +963,22 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } remaining_bytes -= nbytes; + + if (ret) { + ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", + address, ret); + break; + } + memcpy(data, data_buf, nbytes); + address += nbytes; - ce_data += nbytes; + data += nbytes; } done: - if (ret == 0) - memcpy(data, data_buf, orig_nbytes); - else - ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", - address, ret); if (data_buf) - dma_free_coherent(ar->dev, orig_nbytes, data_buf, + dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); spin_unlock_bh(&ar_pci->ce_lock); @@ -1560,6 +1572,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + @@ -1569,6 +1582,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to mask irq/MSI. 
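The diag read rework above stages transfers through a bounce buffer capped at DIAG_TRANSFER_LIMIT and copies each completed chunk straight into the caller's buffer, instead of allocating one coherent buffer for the whole request. A minimal, self-contained sketch of that pattern; the chunk size and the read_window callback are placeholders, not the driver's actual limit or CE API.

#include <stddef.h>
#include <string.h>

#define DIAG_CHUNK 2048	/* stand-in for DIAG_TRANSFER_LIMIT; real value lives in the driver */

/* read_window() stands in for one diag transaction that fills 'staging'
 * with up to 'len' bytes from device address 'addr'.  The loop shows the
 * pattern the patch switches to: a small fixed staging buffer, copied out
 * per iteration. */
static int diag_read_sketch(unsigned int addr, void *out, size_t nbytes,
			    int (*read_window)(unsigned int addr,
					       void *staging, size_t len))
{
	unsigned char staging[DIAG_CHUNK];
	unsigned char *dst = out;
	int ret;

	while (nbytes) {
		size_t len = nbytes < sizeof(staging) ? nbytes : sizeof(staging);

		ret = read_window(addr, staging, len);
		if (ret)
			return ret;

		memcpy(dst, staging, len);
		addr += len;
		dst += len;
		nbytes -= len;
	}

	return 0;
}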
@@ -1583,6 +1597,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + @@ -1592,6 +1607,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to unmask irq/MSI. @@ -1932,6 +1948,8 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) switch (ar_pci->pdev->device) { case QCA988X_2_0_DEVICE_ID: case QCA99X0_2_0_DEVICE_ID: + case QCA9984_1_0_DEVICE_ID: + case QCA9887_1_0_DEVICE_ID: return 1; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: @@ -2293,16 +2311,20 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) return 0; } +static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar) +{ + ath10k_pci_irq_disable(ar); + return ath10k_pci_qca99x0_chip_reset(ar); +} + static int ath10k_pci_safe_chip_reset(struct ath10k *ar) { - if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) { - return ath10k_pci_warm_reset(ar); - } else if (QCA_REV_99X0(ar)) { - ath10k_pci_irq_disable(ar); - return ath10k_pci_qca99x0_chip_reset(ar); - } else { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (!ar_pci->pci_soft_reset) return -ENOTSUPP; - } + + return ar_pci->pci_soft_reset(ar); } static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) @@ -2437,16 +2459,12 @@ static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) static int ath10k_pci_chip_reset(struct ath10k *ar) { - if (QCA_REV_988X(ar)) - return ath10k_pci_qca988x_chip_reset(ar); - else if (QCA_REV_6174(ar)) - return ath10k_pci_qca6174_chip_reset(ar); - else if (QCA_REV_9377(ar)) - return ath10k_pci_qca6174_chip_reset(ar); - else if (QCA_REV_99X0(ar)) - return ath10k_pci_qca99x0_chip_reset(ar); - else + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (WARN_ON(!ar_pci->pci_hard_reset)) return -ENOTSUPP; + + return ar_pci->pci_hard_reset(ar); } static int ath10k_pci_hif_power_up(struct ath10k *ar) @@ -2559,6 +2577,144 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) } #endif +static bool ath10k_pci_validate_cal(void *data, size_t size) +{ + __le16 *cal_words = data; + u16 checksum = 0; + size_t i; + + if (size % 2 != 0) + return false; + + for (i = 0; i < size / 2; i++) + checksum ^= le16_to_cpu(cal_words[i]); + + return checksum == 0xffff; +} + +static void ath10k_pci_enable_eeprom(struct ath10k *ar) +{ + /* Enable SI clock */ + ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0); + + /* Configure GPIOs for I2C operation */ + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + + 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN, + SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG, + GPIO_PIN0_CONFIG) | + SM(1, GPIO_PIN0_PAD_PULL)); + + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + + 4 * QCA9887_1_0_SI_CLK_GPIO_PIN, + SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) | + SM(1, GPIO_PIN0_PAD_PULL)); + + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + + QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS, + 1u << QCA9887_1_0_SI_CLK_GPIO_PIN); + + /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */ + ath10k_pci_write32(ar, + SI_BASE_ADDRESS + SI_CONFIG_OFFSET, + SM(1, SI_CONFIG_ERR_INT) | + SM(1, SI_CONFIG_BIDIR_OD_DATA) | + SM(1, SI_CONFIG_I2C) | + SM(1, SI_CONFIG_POS_SAMPLE) | + SM(1, SI_CONFIG_INACTIVE_DATA) | + SM(1, SI_CONFIG_INACTIVE_CLK) | + SM(8, 
SI_CONFIG_DIVIDER)); +} + +static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out) +{ + u32 reg; + int wait_limit; + + /* set device select byte and for the read operation */ + reg = QCA9887_EEPROM_SELECT_READ | + SM(addr, QCA9887_EEPROM_ADDR_LO) | + SM(addr >> 8, QCA9887_EEPROM_ADDR_HI); + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg); + + /* write transmit data, transfer length, and START bit */ + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, + SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) | + SM(4, SI_CS_TX_CNT)); + + /* wait max 1 sec */ + wait_limit = 100000; + + /* wait for SI_CS_DONE_INT */ + do { + reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET); + if (MS(reg, SI_CS_DONE_INT)) + break; + + wait_limit--; + udelay(10); + } while (wait_limit > 0); + + if (!MS(reg, SI_CS_DONE_INT)) { + ath10k_err(ar, "timeout while reading device EEPROM at %04x\n", + addr); + return -ETIMEDOUT; + } + + /* clear SI_CS_DONE_INT */ + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg); + + if (MS(reg, SI_CS_DONE_ERR)) { + ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr); + return -EIO; + } + + /* extract receive data */ + reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET); + *out = reg; + + return 0; +} + +static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, + size_t *data_len) +{ + u8 *caldata = NULL; + size_t calsize, i; + int ret; + + if (!QCA_REV_9887(ar)) + return -EOPNOTSUPP; + + calsize = ar->hw_params.cal_data_len; + caldata = kmalloc(calsize, GFP_KERNEL); + if (!caldata) + return -ENOMEM; + + ath10k_pci_enable_eeprom(ar); + + for (i = 0; i < calsize; i++) { + ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]); + if (ret) + goto err_free; + } + + if (!ath10k_pci_validate_cal(caldata, calsize)) + goto err_free; + + *data = caldata; + *data_len = calsize; + + return 0; + +err_free: + kfree(data); + + return -EINVAL; +} + static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .tx_sg = ath10k_pci_hif_tx_sg, .diag_read = ath10k_pci_hif_diag_read, @@ -2578,6 +2734,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .suspend = ath10k_pci_hif_suspend, .resume = ath10k_pci_hif_resume, #endif + .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, }; /* @@ -2976,24 +3133,47 @@ static int ath10k_pci_probe(struct pci_dev *pdev, enum ath10k_hw_rev hw_rev; u32 chip_id; bool pci_ps; + int (*pci_soft_reset)(struct ath10k *ar); + int (*pci_hard_reset)(struct ath10k *ar); switch (pci_dev->device) { case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; pci_ps = false; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca988x_chip_reset; + break; + case QCA9887_1_0_DEVICE_ID: + dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. 
You have been warned.\n"); + hw_rev = ATH10K_HW_QCA9887; + pci_ps = false; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca988x_chip_reset; break; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: hw_rev = ATH10K_HW_QCA6174; pci_ps = true; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca6174_chip_reset; break; case QCA99X0_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA99X0; pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + break; + case QCA9984_1_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9984; + pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; break; case QCA9377_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9377; pci_ps = true; + pci_soft_reset = NULL; + pci_hard_reset = ath10k_pci_qca6174_chip_reset; break; default: WARN_ON(1); @@ -3018,6 +3198,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; ar_pci->bus_ops = &ath10k_pci_bus_ops; + ar_pci->pci_soft_reset = pci_soft_reset; + ar_pci->pci_hard_reset = pci_hard_reset; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; @@ -3169,7 +3351,7 @@ static void __exit ath10k_pci_exit(void) module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices"); MODULE_LICENSE("Dual BSD/GPL"); /* QCA988x 2.0 firmware files */ @@ -3180,6 +3362,11 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); +/* QCA9887 1.0 firmware files */ +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); + /* QCA6174 2.1 firmware files */ MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 959dc321b75e..6eca1df2ce60 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -234,6 +234,12 @@ struct ath10k_pci { const struct ath10k_bus_ops *bus_ops; + /* Chip specific pci reset routine used to do a safe reset */ + int (*pci_soft_reset)(struct ath10k *ar); + + /* Chip specific pci full reset function */ + int (*pci_hard_reset)(struct ath10k *ar); + /* Keep this entry in the last, memory for struct ath10k_ahb is * allocated (ahb support enabled case) in the continuation of * this struct. diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index ca8d16884af1..034e7a54c5b2 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -656,26 +656,6 @@ struct rx_msdu_end { * Reserved: HW should fill with zero. FW should ignore. 
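The QCA9887 calibration path above accepts an EEPROM blob only if the XOR of all of its 16-bit words comes out as 0xffff (ath10k_pci_validate_cal). A small stand-alone illustration of that rule; the sample words are made up, and host byte order is used here where the driver applies le16_to_cpu().

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* same rule as ath10k_pci_validate_cal(): XOR of all 16-bit words == 0xffff */
static int cal_blob_is_valid(const uint16_t *words, size_t nwords)
{
	uint16_t csum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		csum ^= words[i];

	return csum == 0xffff;
}

int main(void)
{
	uint16_t blob[4] = { 0x1234, 0xabcd, 0x00ff, 0 };
	uint16_t csum = 0;
	size_t i;

	/* choose the last word so the whole blob XORs to 0xffff */
	for (i = 0; i < 3; i++)
		csum ^= blob[i];
	blob[3] = csum ^ 0xffff;

	printf("valid: %d\n", cal_blob_is_valid(blob, 4)); /* prints 1 */
	return 0;
}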
*/ -#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0 -#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1 - -#define RX_PPDU_START_SIG_RATE_OFDM_48 0 -#define RX_PPDU_START_SIG_RATE_OFDM_24 1 -#define RX_PPDU_START_SIG_RATE_OFDM_12 2 -#define RX_PPDU_START_SIG_RATE_OFDM_6 3 -#define RX_PPDU_START_SIG_RATE_OFDM_54 4 -#define RX_PPDU_START_SIG_RATE_OFDM_36 5 -#define RX_PPDU_START_SIG_RATE_OFDM_18 6 -#define RX_PPDU_START_SIG_RATE_OFDM_9 7 - -#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0 -#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1 -#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2 -#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3 -#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4 -#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5 -#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6 - #define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04 #define HTT_RX_PPDU_START_PREAMBLE_HT 0x08 #define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09 @@ -711,25 +691,6 @@ struct rx_msdu_end { /* No idea what this flag means. It seems to be always set in rate. */ #define RX_PPDU_START_RATE_FLAG BIT(3) -enum rx_ppdu_start_rate { - RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M, - RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M, - RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M, - RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M, - RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M, - RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M, - RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M, - RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M, - - RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M, - RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M, - RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M, - RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M, - RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M, - RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M, - RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M, -}; - struct rx_ppdu_start { struct { u8 pri20_mhz; @@ -994,7 +955,41 @@ struct rx_pkt_end { __le32 info0; /* %RX_PKT_END_INFO0_ */ __le32 phy_timestamp_1; __le32 phy_timestamp_2; - __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ +} __packed; + +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0 +#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000 +#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15 +#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000 +#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30 +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14) +#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29) + +#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c +#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2 +#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030 +#define RX_LOCATION_INFO1_PKT_BW_LSB 4 +#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00 +#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8 +#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000 +#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16 +#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000 +#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20 +#define 
RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000 +#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22 +#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000 +#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27 +#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0) +#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1) +#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7) +#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29) +#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30) +#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31) + +struct rx_location_info { + __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */ + __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */ } __packed; enum rx_phy_ppdu_end_info0 { @@ -1067,6 +1062,17 @@ struct rx_phy_ppdu_end { struct rx_ppdu_end_qca99x0 { struct rx_pkt_end rx_pkt_end; + __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ + struct rx_phy_ppdu_end rx_phy_ppdu_end; + __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ + __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ + __le16 bb_length; + __le16 info1; /* %RX_PPDU_END_INFO1_ */ +} __packed; + +struct rx_ppdu_end_qca9984 { + struct rx_pkt_end rx_pkt_end; + struct rx_location_info rx_location_info; struct rx_phy_ppdu_end rx_phy_ppdu_end; __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ @@ -1080,6 +1086,7 @@ struct rx_ppdu_end { struct rx_ppdu_end_qca988x qca988x; struct rx_ppdu_end_qca6174 qca6174; struct rx_ppdu_end_qca99x0 qca99x0; + struct rx_ppdu_end_qca9984 qca9984; } __packed; } __packed; diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index 8e24099fa936..aaf53a81e78b 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -447,6 +447,9 @@ Fw Mode/SubMode Mask #define QCA988X_BOARD_DATA_SZ 7168 #define QCA988X_BOARD_EXT_DATA_SZ 0 +#define QCA9887_BOARD_DATA_SZ 7168 +#define QCA9887_BOARD_EXT_DATA_SZ 0 + #define QCA6174_BOARD_DATA_SZ 8192 #define QCA6174_BOARD_EXT_DATA_SZ 0 diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 576e7c42ed65..1966c787998b 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -117,6 +117,9 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ + + ath10k_mac_tx_push_pending(ar); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 2c300329ebc3..6279ab4a760e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1104,6 +1104,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = { .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { @@ -1199,6 +1200,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { @@ -1294,6 +1296,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = 
WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, }; /* firmware 10.2 specific mappings */ @@ -1550,6 +1553,7 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR, .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR, + .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, }; static const struct wmi_peer_flags_map wmi_peer_flags_map = { diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 9fdf47ea27d0..90f594e89f94 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3447,6 +3447,7 @@ struct wmi_pdev_param_map { u32 wapi_mbssid_offset; u32 arp_srcaddr; u32 arp_dstaddr; + u32 enable_btcoex; }; #define WMI_PDEV_PARAM_UNSUPPORTED 0 @@ -3760,6 +3761,9 @@ enum wmi_10_4_pdev_param { WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH, WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR, WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE, + WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE, + WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY, + WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, }; struct wmi_pdev_set_param_cmd { diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index fc47b70988b1..f23c851765df 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -219,8 +219,8 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah) sifs = AR5K_INIT_SIFS_QUARTER_RATE; break; case AR5K_BWMODE_DEFAULT: - sifs = AR5K_INIT_SIFS_DEFAULT_BG; default: + sifs = AR5K_INIT_SIFS_DEFAULT_BG; if (channel->band == NL80211_BAND_5GHZ) sifs = AR5K_INIT_SIFS_DEFAULT_A; break; diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index 7a1970e484a6..ac25f1781b42 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -148,7 +148,7 @@ enum ath6kl_fw_capability { /* ratetable is the 2 stream version (max MCS15) */ ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, - /* firmare doesn't support IP checksumming */ + /* firmware doesn't support IP checksumming */ ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, /* this needs to be last */ diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 631c3a0c572b..b8cf04d11975 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2544,8 +2544,7 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, s32 nominal_phy = 0; int ret; - if (!((params->user_pri < 8) && - (params->user_pri <= 0x7) && + if (!((params->user_pri <= 0x7) && (up_to_ac[params->user_pri & 0x7] == params->traffic_class) && (params->traffic_direc == UPLINK_TRAFFIC || params->traffic_direc == DNLINK_TRAFFIC || diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index dec1a317a070..d0224fc58e78 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3202,8 +3202,7 @@ static int ar9300_compress_decision(struct ath_hw *ah, it, length); break; case _CompressBlock: - if (reference == 0) { - } else { + if (reference != 0) { eep = ar9003_eeprom_struct_find_by_id(reference); if (eep == NULL) { ath_dbg(common, EEPROM, diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index ac4781f37e78..16aca9e28b77 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ 
b/drivers/net/wireless/ath/ath9k/tx99.c @@ -132,7 +132,6 @@ static int ath9k_tx99_init(struct ath_softc *sc) ath9k_ps_wakeup(sc); ath9k_hw_disable_interrupts(ah); - atomic_set(&ah->intr_ref_cnt, -1); ath_drain_all_txq(sc); ath_stoprecv(sc); @@ -266,7 +265,7 @@ static const struct file_operations fops_tx99_power = { void ath9k_tx99_init_debug(struct ath_softc *sc) { - if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah)) + if (!AR_SREV_9280_20_OR_LATER(sc->sc_ah)) return; debugfs_create_file("tx99", S_IRUSR | S_IWUSR, diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig index 1a796e5f69ec..2e34baeaf764 100644 --- a/drivers/net/wireless/ath/carl9170/Kconfig +++ b/drivers/net/wireless/ath/carl9170/Kconfig @@ -5,12 +5,10 @@ config CARL9170 select FW_LOADER select CRC32 help - This is another driver for the Atheros "otus" 802.11n USB devices. + This is the mainline driver for the Atheros "otus" 802.11n USB devices. - This driver provides more features than the original, - but it needs a special firmware (carl9170-1.fw) to do that. - - The firmware can be downloaded from our wiki here: + It needs a special firmware (carl9170-1.fw), which can be downloaded + from our wiki here: <http://wireless.kernel.org/en/users/Drivers/carl9170> If you choose to build a module, it'll be called carl9170. diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 5769811291bf..62bf9331bd7f 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -378,6 +378,10 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, /* social scan on P2P_DEVICE is handled as p2p search */ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE && wil_p2p_is_social_scan(request)) { + if (!wil->p2p.p2p_dev_started) { + wil_err(wil, "P2P search requested on stopped P2P device\n"); + return -EIO; + } wil->scan_request = request; wil->radio_wdev = wdev; rc = wil_p2p_search(wil, request); @@ -1351,6 +1355,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); wil_dbg_misc(wil, "%s: entered\n", __func__); + wil->p2p.p2p_dev_started = 1; return 0; } @@ -1358,8 +1363,19 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + u8 started; wil_dbg_misc(wil, "%s: entered\n", __func__); + mutex_lock(&wil->mutex); + started = wil_p2p_stop_discovery(wil); + if (started && wil->scan_request) { + cfg80211_scan_done(wil->scan_request, 1); + wil->scan_request = NULL; + wil->radio_wdev = wil->wdev; + } + mutex_unlock(&wil->mutex); + + wil->p2p.p2p_dev_started = 0; } static struct cfg80211_ops wil_cfg80211_ops = { diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c index c312a667c12a..217a4591bde4 100644 --- a/drivers/net/wireless/ath/wil6210/debug.c +++ b/drivers/net/wireless/ath/wil6210/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Qualcomm Atheros, Inc. + * Copyright (c) 2013,2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -19,34 +19,31 @@ void __wil_err(struct wil6210_priv *wil, const char *fmt, ...) 
{ - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - netdev_err(ndev, "%pV", &vaf); + netdev_err(wil_to_ndev(wil), "%pV", &vaf); trace_wil6210_log_err(&vaf); va_end(args); } void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...) { - if (net_ratelimit()) { - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; - va_list args; + struct va_format vaf; + va_list args; - va_start(args, fmt); - vaf.va = &args; - netdev_err(ndev, "%pV", &vaf); - trace_wil6210_log_err(&vaf); - va_end(args); - } + if (!net_ratelimit()) + return; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + netdev_err(wil_to_ndev(wil), "%pV", &vaf); + trace_wil6210_log_err(&vaf); + va_end(args); } void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...) @@ -67,27 +64,24 @@ void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...) void __wil_info(struct wil6210_priv *wil, const char *fmt, ...) { - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - netdev_info(ndev, "%pV", &vaf); + netdev_info(wil_to_ndev(wil), "%pV", &vaf); trace_wil6210_log_info(&vaf); va_end(args); } void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) { - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; trace_wil6210_log_dbg(&vaf); va_end(args); diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c index 1c9153894dca..213b8259638c 100644 --- a/drivers/net/wireless/ath/wil6210/p2p.c +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -114,8 +114,10 @@ int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration, u8 channel = P2P_DMG_SOCIAL_CHANNEL; int rc; - if (chan) - channel = chan->hw_value; + if (!chan) + return -EINVAL; + + channel = chan->hw_value; wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration); diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index aeb72c438e44..7b5c4222bc33 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -18,13 +18,20 @@ #include <linux/pci.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> - +#include <linux/suspend.h> #include "wil6210.h" static bool use_msi = true; module_param(use_msi, bool, S_IRUGO); MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int wil6210_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + static void wil_set_capabilities(struct wil6210_priv *wil) { @@ -238,6 +245,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto bus_disable; } +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + wil->pm_notify.notifier_call = wil6210_pm_notify; + rc = register_pm_notifier(&wil->pm_notify); + if (rc) + /* Do not fail the driver initialization, as suspend can + * be prevented in a later phase if needed + */ + wil_err(wil, "register_pm_notifier failed: %d\n", rc); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + wil6210_debugfs_init(wil); @@ -267,6 +286,12 @@ static void wil_pcie_remove(struct pci_dev *pdev) wil_dbg_misc(wil, "%s()\n", __func__); +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&wil->pm_notify); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + wil6210_debugfs_remove(wil); wil_if_remove(wil); wil_if_pcie_disable(wil); @@ -335,6 +360,45 @@ static int wil6210_resume(struct device *dev, bool is_runtime) return rc; } +static int wil6210_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused) +{ + struct wil6210_priv *wil = container_of( + notify_block, struct wil6210_priv, pm_notify); + int rc = 0; + enum wil_platform_event evt; + + wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode); + + switch (mode) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: + rc = wil_can_suspend(wil, false); + if (rc) + break; + evt = WIL_PLATFORM_EVT_PRE_SUSPEND; + if (wil->platform_ops.notify) + rc = wil->platform_ops.notify(wil->platform_handle, + evt); + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + evt = WIL_PLATFORM_EVT_POST_SUSPEND; + if (wil->platform_ops.notify) + rc = wil->platform_ops.notify(wil->platform_handle, + evt); + break; + default: + wil_dbg_pm(wil, "unhandled notify mode %ld\n", mode); + break; + } + + wil_dbg_pm(wil, "notification mode %ld: rc (%d)\n", mode, rc); + return rc; +} + static int wil6210_pm_suspend(struct device *dev) { return wil6210_suspend(dev, false); diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 0b7ecbcac19c..11ee24d509e5 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,10 +24,32 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) wil_dbg_pm(wil, "%s(%s)\n", __func__, is_runtime ? 
"runtime" : "system"); + if (!netif_running(wil_to_ndev(wil))) { + /* can always sleep when down */ + wil_dbg_pm(wil, "Interface is down\n"); + goto out; + } + if (test_bit(wil_status_resetting, wil->status)) { + wil_dbg_pm(wil, "Delay suspend when resetting\n"); + rc = -EBUSY; + goto out; + } + if (wil->recovery_state != fw_recovery_idle) { + wil_dbg_pm(wil, "Delay suspend during recovery\n"); + rc = -EBUSY; + goto out; + } + + /* interface is running */ switch (wdev->iftype) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: + if (test_bit(wil_status_fwconnecting, wil->status)) { + wil_dbg_pm(wil, "Delay suspend when connecting\n"); + rc = -EBUSY; + goto out; + } break; /* AP-like interface - can't suspend */ default: @@ -36,6 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) break; } +out: wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__, is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index a4e43796addb..f2f6a404d3d1 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -184,6 +184,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, &vring->va[vring->swtail].tx; ctx = &vring->ctx[vring->swtail]; + if (!ctx) { + wil_dbg_txrx(wil, + "ctx(%d) was already completed\n", + vring->swtail); + vring->swtail = wil_vring_next_tail(vring); + continue; + } *d = *_d; wil_txdesc_unmap(dev, d, ctx); if (ctx->skb) @@ -544,6 +551,12 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) break; } } + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + wil_w(wil, v->hwtail, v->swtail); return rc; @@ -969,6 +982,13 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) txdata->dot1x_open = false; txdata->enabled = 0; /* no Tx can be in progress or start anew */ spin_unlock_bh(&txdata->lock); + /* napi_synchronize waits for completion of the current NAPI but will + * not prevent the next NAPI run. + * Add a memory barrier to guarantee that txdata->enabled is zeroed + * before napi_synchronize so that the next scheduled NAPI will not + * handle this vring + */ + wmb(); /* make sure NAPI won't touch this vring */ if (test_bit(wil_status_napi_en, wil->status)) napi_synchronize(&wil->napi_tx); @@ -1551,6 +1571,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, vring_index, used, used + descs_used); } + /* Make sure to advance the head only after descriptor update is done. + * This will prevent a race condition where the completion thread + * will see the DU bit set from previous run and will handle the + * skb before it was completed. + */ + wmb(); + /* advance swhead */ wil_vring_advance_head(vring, descs_used); wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); @@ -1567,7 +1594,7 @@ mem_error: while (descs_used > 0) { struct wil_ctx *ctx; - i = (swhead + descs_used) % vring->size; + i = (swhead + descs_used - 1) % vring->size; d = (struct vring_tx_desc *)&vring->va[i].tx; _desc = &vring->va[i].tx; *d = *_desc; @@ -1691,6 +1718,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, vring_index, used, used + nr_frags + 1); } + /* Make sure to advance the head only after descriptor update is done. 
+ * This will prevent a race condition where the completion thread + * will see the DU bit set from previous run and will handle the + * skb before it was completed. + */ + wmb(); + /* advance swhead */ wil_vring_advance_head(vring, nr_frags + 1); wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, @@ -1914,6 +1948,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) wil_consume_skb(skb, d->dma.error == 0); } memset(ctx, 0, sizeof(*ctx)); + /* Make sure the ctx is zeroed before updating the tail + * to prevent a case where wil_tx_vring will see + * this descriptor as used and handle it before ctx zero + * is completed. + */ + wmb(); /* There is no need to touch HW descriptor: * - ststus bit TX_DMA_STATUS_DU is set by design, * so hardware will not try to process this desc., diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index aa09cbcce47c..ecab4af90602 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -458,6 +458,7 @@ struct wil_tid_crypto_rx { struct wil_p2p_info { struct ieee80211_channel listen_chan; u8 discovery_started; + u8 p2p_dev_started; u64 cookie; struct timer_list discovery_timer; /* listen/search duration */ struct work_struct discovery_expired_work; /* listen/search expire */ @@ -662,6 +663,11 @@ struct wil6210_priv { /* High Access Latency Policy voting */ struct wil_halp halp; +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + struct notifier_block pm_notify; +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ }; #define wil_to_wiphy(i) (i->wdev->wiphy) diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h index 33d4a34b3b1c..f8c41172a3f4 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -23,6 +23,8 @@ enum wil_platform_event { WIL_PLATFORM_EVT_FW_CRASH = 0, WIL_PLATFORM_EVT_PRE_RESET = 1, WIL_PLATFORM_EVT_FW_RDY = 2, + WIL_PLATFORM_EVT_PRE_SUSPEND = 3, + WIL_PLATFORM_EVT_POST_SUSPEND = 4, }; /** diff --git a/drivers/net/wireless/broadcom/b43/Makefile b/drivers/net/wireless/broadcom/b43/Makefile index ddc4df46656f..27fab958e3d5 100644 --- a/drivers/net/wireless/broadcom/b43/Makefile +++ b/drivers/net/wireless/broadcom/b43/Makefile @@ -1,6 +1,6 @@ b43-y += main.o b43-y += bus.o -b43-$(CONFIG_B43_PHY_G) += phy_a.o phy_g.o tables.o lo.o wa.o +b43-$(CONFIG_B43_PHY_G) += phy_g.o tables.o lo.o wa.o b43-$(CONFIG_B43_PHY_N) += tables_nphy.o b43-$(CONFIG_B43_PHY_N) += radio_2055.o b43-$(CONFIG_B43_PHY_N) += radio_2056.o diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c index d79ab2a227e1..cb987c2ecc6b 100644 --- a/drivers/net/wireless/broadcom/b43/leds.c +++ b/drivers/net/wireless/broadcom/b43/leds.c @@ -222,7 +222,7 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev, sprom[2] = dev->dev->bus_sprom->gpio2; sprom[3] = dev->dev->bus_sprom->gpio3; - if (sprom[led_index] == 0xFF) { + if ((sprom[0] & sprom[1] & sprom[2] & sprom[3]) == 0xff) { /* There is no LED information in the SPROM * for this LED. Hardcode it here. 
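The LED fix above stops treating a single 0xff entry as "no LED data in the SPROM": the hardcoded fallback is now taken only when all four gpio entries are erased, and an individual 0xff entry simply leaves that LED off. A tiny illustration of the difference, using made-up SPROM contents where only gpio1 is programmed.

#include <stdint.h>
#include <stdio.h>

/* hypothetical SPROM: gpio1 programmed, gpio0/2/3 left erased (0xff) */
static const uint8_t sprom[4] = { 0xff, 0x42, 0xff, 0xff };

int main(void)
{
	/* old test: per-index, so LED0 would take the hardcoded defaults
	 * even though the vendor did program part of the LED block */
	int old_fallback_led0 = (sprom[0] == 0xff);

	/* new test: fall back only when no LED entry is programmed at all */
	int new_fallback = ((sprom[0] & sprom[1] & sprom[2] & sprom[3]) == 0xff);

	printf("old per-index fallback for LED0: %d\n", old_fallback_led0); /* 1 */
	printf("new whole-block fallback:        %d\n", new_fallback);      /* 0 */
	return 0;
}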
*/ *activelow = false; @@ -250,7 +250,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev, return; } } else { - *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR; + /* keep LED disabled if no mapping is defined */ + if (sprom[led_index] == 0xff) + *behaviour = B43_LED_OFF; + else + *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR; *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW); } } diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 4ee5c5853f9f..6e5d9095b195 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -3180,7 +3180,6 @@ static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm) static void b43_rate_memory_init(struct b43_wldev *dev) { switch (dev->phy.type) { - case B43_PHYTYPE_A: case B43_PHYTYPE_G: case B43_PHYTYPE_N: case B43_PHYTYPE_LP: @@ -3194,8 +3193,6 @@ static void b43_rate_memory_init(struct b43_wldev *dev) b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1); b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1); b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1); - if (dev->phy.type == B43_PHYTYPE_A) - break; /* fallthrough */ case B43_PHYTYPE_B: b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0); @@ -4604,14 +4601,6 @@ static int b43_phy_versioning(struct b43_wldev *dev) if (radio_manuf != 0x17F /* Broadcom */) unsupported = 1; switch (phy_type) { - case B43_PHYTYPE_A: - if (radio_id != 0x2060) - unsupported = 1; - if (radio_rev != 1) - unsupported = 1; - if (radio_manuf != 0x17F) - unsupported = 1; - break; case B43_PHYTYPE_B: if ((radio_id & 0xFFF0) != 0x2050) unsupported = 1; @@ -4766,10 +4755,7 @@ static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle) u16 pu_delay; /* The time value is in microseconds. */ - if (dev->phy.type == B43_PHYTYPE_A) - pu_delay = 3700; - else - pu_delay = 1050; + pu_delay = 1050; if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle) pu_delay = 500; if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) @@ -4784,14 +4770,10 @@ static void b43_set_pretbtt(struct b43_wldev *dev) u16 pretbtt; /* The time value is in microseconds. 
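/*
 * A small standalone illustration of the two SPROM checks in the leds.c
 * hunk above -- not part of the b43 patch. An SPROM with no LED data at all
 * reads 0xFF in every gpioN byte (so the AND of the four bytes is 0xFF and
 * hardcoded defaults apply), while a single 0xFF entry only means that one
 * LED is unprogrammed and should stay off. Constant and function names here
 * are invented for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LED_BEHAVIOUR	0x7F	/* assumed bit layout for the sketch */
#define DEMO_LED_ACTIVELOW	0x80

static void demo_led_lookup(const uint8_t sprom[4], int led_index)
{
	if ((sprom[0] & sprom[1] & sprom[2] & sprom[3]) == 0xFF) {
		printf("LED %d: no SPROM info, use hardcoded defaults\n",
		       led_index);
	} else if (sprom[led_index] == 0xFF) {
		printf("LED %d: entry unprogrammed, keep LED off\n",
		       led_index);
	} else {
		printf("LED %d: behaviour=0x%02x activelow=%d\n", led_index,
		       sprom[led_index] & DEMO_LED_BEHAVIOUR,
		       !!(sprom[led_index] & DEMO_LED_ACTIVELOW));
	}
}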
*/ - if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) { + if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) pretbtt = 2; - } else { - if (dev->phy.type == B43_PHYTYPE_A) - pretbtt = 120; - else - pretbtt = 250; - } + else + pretbtt = 250; b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt); b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt); } @@ -5380,10 +5362,6 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy, /* As a fallback, try to guess using PHY type */ switch (dev->phy.type) { - case B43_PHYTYPE_A: - *have_2ghz_phy = false; - *have_5ghz_phy = true; - return; case B43_PHYTYPE_G: case B43_PHYTYPE_N: case B43_PHYTYPE_LP: @@ -5455,7 +5433,6 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) /* We don't support 5 GHz on some PHYs yet */ if (have_5ghz_phy) { switch (dev->phy.type) { - case B43_PHYTYPE_A: case B43_PHYTYPE_G: case B43_PHYTYPE_LP: case B43_PHYTYPE_HT: diff --git a/drivers/net/wireless/broadcom/b43/phy_a.c b/drivers/net/wireless/broadcom/b43/phy_a.c deleted file mode 100644 index 99c036f5ecb7..000000000000 --- a/drivers/net/wireless/broadcom/b43/phy_a.c +++ /dev/null @@ -1,595 +0,0 @@ -/* - - Broadcom B43 wireless driver - IEEE 802.11a PHY driver - - Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, - Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> - Copyright (c) 2005-2008 Michael Buesch <m@bues.ch> - Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org> - Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; see the file COPYING. If not, write to - the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, - Boston, MA 02110-1301, USA. - -*/ - -#include <linux/slab.h> - -#include "b43.h" -#include "phy_a.h" -#include "phy_common.h" -#include "wa.h" -#include "tables.h" -#include "main.h" - - -/* Get the freq, as it has to be written to the device. 
*/ -static inline u16 channel2freq_a(u8 channel) -{ - B43_WARN_ON(channel > 200); - - return (5000 + 5 * channel); -} - -static inline u16 freq_r3A_value(u16 frequency) -{ - u16 value; - - if (frequency < 5091) - value = 0x0040; - else if (frequency < 5321) - value = 0x0000; - else if (frequency < 5806) - value = 0x0080; - else - value = 0x0040; - - return value; -} - -#if 0 -/* This function converts a TSSI value to dBm in Q5.2 */ -static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi) -{ - struct b43_phy *phy = &dev->phy; - struct b43_phy_a *aphy = phy->a; - s8 dbm = 0; - s32 tmp; - - tmp = (aphy->tgt_idle_tssi - aphy->cur_idle_tssi + tssi); - tmp += 0x80; - tmp = clamp_val(tmp, 0x00, 0xFF); - dbm = aphy->tssi2dbm[tmp]; - //TODO: There's a FIXME on the specs - - return dbm; -} -#endif - -static void b43_radio_set_tx_iq(struct b43_wldev *dev) -{ - static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; - static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; - u16 tmp = b43_radio_read16(dev, 0x001E); - int i, j; - - for (i = 0; i < 5; i++) { - for (j = 0; j < 5; j++) { - if (tmp == (data_high[i] << 4 | data_low[j])) { - b43_phy_write(dev, 0x0069, - (i - j) << 8 | 0x00C0); - return; - } - } - } -} - -static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel) -{ - u16 freq, r8, tmp; - - freq = channel2freq_a(channel); - - r8 = b43_radio_read16(dev, 0x0008); - b43_write16(dev, 0x03F0, freq); - b43_radio_write16(dev, 0x0008, r8); - - //TODO: write max channel TX power? to Radio 0x2D - tmp = b43_radio_read16(dev, 0x002E); - tmp &= 0x0080; - //TODO: OR tmp with the Power out estimation for this channel? - b43_radio_write16(dev, 0x002E, tmp); - - if (freq >= 4920 && freq <= 5500) { - /* - * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F; - * = (freq * 0.025862069 - */ - r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */ - } - b43_radio_write16(dev, 0x0007, (r8 << 4) | r8); - b43_radio_write16(dev, 0x0020, (r8 << 4) | r8); - b43_radio_write16(dev, 0x0021, (r8 << 4) | r8); - b43_radio_maskset(dev, 0x0022, 0x000F, (r8 << 4)); - b43_radio_write16(dev, 0x002A, (r8 << 4)); - b43_radio_write16(dev, 0x002B, (r8 << 4)); - b43_radio_maskset(dev, 0x0008, 0x00F0, (r8 << 4)); - b43_radio_maskset(dev, 0x0029, 0xFF0F, 0x00B0); - b43_radio_write16(dev, 0x0035, 0x00AA); - b43_radio_write16(dev, 0x0036, 0x0085); - b43_radio_maskset(dev, 0x003A, 0xFF20, freq_r3A_value(freq)); - b43_radio_mask(dev, 0x003D, 0x00FF); - b43_radio_maskset(dev, 0x0081, 0xFF7F, 0x0080); - b43_radio_mask(dev, 0x0035, 0xFFEF); - b43_radio_maskset(dev, 0x0035, 0xFFEF, 0x0010); - b43_radio_set_tx_iq(dev); - //TODO: TSSI2dbm workaround -//FIXME b43_phy_xmitpower(dev); -} - -static void b43_radio_init2060(struct b43_wldev *dev) -{ - b43_radio_write16(dev, 0x0004, 0x00C0); - b43_radio_write16(dev, 0x0005, 0x0008); - b43_radio_write16(dev, 0x0009, 0x0040); - b43_radio_write16(dev, 0x0005, 0x00AA); - b43_radio_write16(dev, 0x0032, 0x008F); - b43_radio_write16(dev, 0x0006, 0x008F); - b43_radio_write16(dev, 0x0034, 0x008F); - b43_radio_write16(dev, 0x002C, 0x0007); - b43_radio_write16(dev, 0x0082, 0x0080); - b43_radio_write16(dev, 0x0080, 0x0000); - b43_radio_write16(dev, 0x003F, 0x00DA); - b43_radio_mask(dev, 0x0005, ~0x0008); - b43_radio_mask(dev, 0x0081, ~0x0010); - b43_radio_mask(dev, 0x0081, ~0x0020); - b43_radio_mask(dev, 0x0081, ~0x0020); - msleep(1); /* delay 400usec */ - - b43_radio_maskset(dev, 0x0081, ~0x0020, 0x0010); - msleep(1); /* delay 400usec */ - - 
b43_radio_maskset(dev, 0x0005, ~0x0008, 0x0008); - b43_radio_mask(dev, 0x0085, ~0x0010); - b43_radio_mask(dev, 0x0005, ~0x0008); - b43_radio_mask(dev, 0x0081, ~0x0040); - b43_radio_maskset(dev, 0x0081, ~0x0040, 0x0040); - b43_radio_write16(dev, 0x0005, - (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008); - b43_phy_write(dev, 0x0063, 0xDDC6); - b43_phy_write(dev, 0x0069, 0x07BE); - b43_phy_write(dev, 0x006A, 0x0000); - - aphy_channel_switch(dev, dev->phy.ops->get_default_chan(dev)); - - msleep(1); -} - -static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable) -{ - int i; - - if (dev->phy.rev < 3) { - if (enable) - for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) { - b43_ofdmtab_write16(dev, - B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8); - b43_ofdmtab_write16(dev, - B43_OFDMTAB_WRSSI, i, 0xFFF8); - } - else - for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) { - b43_ofdmtab_write16(dev, - B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]); - b43_ofdmtab_write16(dev, - B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]); - } - } else { - if (enable) - for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) - b43_ofdmtab_write16(dev, - B43_OFDMTAB_WRSSI, i, 0x0820); - else - for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++) - b43_ofdmtab_write16(dev, - B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]); - } -} - -static void b43_phy_ww(struct b43_wldev *dev) -{ - u16 b, curr_s, best_s = 0xFFFF; - int i; - - b43_phy_mask(dev, B43_PHY_CRS0, ~B43_PHY_CRS0_EN); - b43_phy_set(dev, B43_PHY_OFDM(0x1B), 0x1000); - b43_phy_maskset(dev, B43_PHY_OFDM(0x82), 0xF0FF, 0x0300); - b43_radio_set(dev, 0x0009, 0x0080); - b43_radio_maskset(dev, 0x0012, 0xFFFC, 0x0002); - b43_wa_initgains(dev); - b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5); - b = b43_phy_read(dev, B43_PHY_PWRDOWN); - b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005); - b43_radio_set(dev, 0x0004, 0x0004); - for (i = 0x10; i <= 0x20; i++) { - b43_radio_write16(dev, 0x0013, i); - curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF; - if (!curr_s) { - best_s = 0x0000; - break; - } else if (curr_s >= 0x0080) - curr_s = 0x0100 - curr_s; - if (curr_s < best_s) - best_s = curr_s; - } - b43_phy_write(dev, B43_PHY_PWRDOWN, b); - b43_radio_mask(dev, 0x0004, 0xFFFB); - b43_radio_write16(dev, 0x0013, best_s); - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC); - b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80); - b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00); - b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0); - b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0); - b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF); - b43_phy_maskset(dev, B43_PHY_OFDM(0xBB), 0xF000, 0x0053); - b43_phy_maskset(dev, B43_PHY_OFDM61, 0xFE1F, 0x0120); - b43_phy_maskset(dev, B43_PHY_OFDM(0x13), 0x0FFF, 0x3000); - b43_phy_maskset(dev, B43_PHY_OFDM(0x14), 0x0FFF, 0x3000); - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017); - for (i = 0; i < 6; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F); - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E); - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011); - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013); - b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030); - b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN); -} - -static void hardware_pctl_init_aphy(struct b43_wldev *dev) -{ - //TODO -} - -void b43_phy_inita(struct b43_wldev *dev) -{ - struct b43_phy *phy = &dev->phy; - - /* This lowlevel A-PHY init is also called from G-PHY init. - * So we must not access phy->a, if called from G-PHY code. 
- */ - B43_WARN_ON((phy->type != B43_PHYTYPE_A) && - (phy->type != B43_PHYTYPE_G)); - - might_sleep(); - - if (phy->rev >= 6) { - if (phy->type == B43_PHYTYPE_A) - b43_phy_mask(dev, B43_PHY_OFDM(0x1B), ~0x1000); - if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) - b43_phy_set(dev, B43_PHY_ENCORE, 0x0010); - else - b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010); - } - - b43_wa_all(dev); - - if (phy->type == B43_PHYTYPE_A) { - if (phy->gmode && (phy->rev < 3)) - b43_phy_set(dev, 0x0034, 0x0001); - b43_phy_rssiagc(dev, 0); - - b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN); - - b43_radio_init2060(dev); - - if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) && - ((dev->dev->board_type == SSB_BOARD_BU4306) || - (dev->dev->board_type == SSB_BOARD_BU4309))) { - ; //TODO: A PHY LO - } - - if (phy->rev >= 3) - b43_phy_ww(dev); - - hardware_pctl_init_aphy(dev); - - //TODO: radar detection - } - - if ((phy->type == B43_PHYTYPE_G) && - (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)) { - b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF); - } -} - -/* Initialise the TSSI->dBm lookup table */ -static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev) -{ - struct b43_phy *phy = &dev->phy; - struct b43_phy_a *aphy = phy->a; - s16 pab0, pab1, pab2; - - pab0 = (s16) (dev->dev->bus_sprom->pa1b0); - pab1 = (s16) (dev->dev->bus_sprom->pa1b1); - pab2 = (s16) (dev->dev->bus_sprom->pa1b2); - - if (pab0 != 0 && pab1 != 0 && pab2 != 0 && - pab0 != -1 && pab1 != -1 && pab2 != -1) { - /* The pabX values are set in SPROM. Use them. */ - if ((s8) dev->dev->bus_sprom->itssi_a != 0 && - (s8) dev->dev->bus_sprom->itssi_a != -1) - aphy->tgt_idle_tssi = - (s8) (dev->dev->bus_sprom->itssi_a); - else - aphy->tgt_idle_tssi = 62; - aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0, - pab1, pab2); - if (!aphy->tssi2dbm) - return -ENOMEM; - } else { - /* pabX values not set in SPROM, - * but APHY needs a generated table. */ - aphy->tssi2dbm = NULL; - b43err(dev->wl, "Could not generate tssi2dBm " - "table (wrong SPROM info)!\n"); - return -ENODEV; - } - - return 0; -} - -static int b43_aphy_op_allocate(struct b43_wldev *dev) -{ - struct b43_phy_a *aphy; - int err; - - aphy = kzalloc(sizeof(*aphy), GFP_KERNEL); - if (!aphy) - return -ENOMEM; - dev->phy.a = aphy; - - err = b43_aphy_init_tssi2dbm_table(dev); - if (err) - goto err_free_aphy; - - return 0; - -err_free_aphy: - kfree(aphy); - dev->phy.a = NULL; - - return err; -} - -static void b43_aphy_op_prepare_structs(struct b43_wldev *dev) -{ - struct b43_phy *phy = &dev->phy; - struct b43_phy_a *aphy = phy->a; - const void *tssi2dbm; - int tgt_idle_tssi; - - /* tssi2dbm table is constant, so it is initialized at alloc time. - * Save a copy of the pointer. */ - tssi2dbm = aphy->tssi2dbm; - tgt_idle_tssi = aphy->tgt_idle_tssi; - - /* Zero out the whole PHY structure. */ - memset(aphy, 0, sizeof(*aphy)); - - aphy->tssi2dbm = tssi2dbm; - aphy->tgt_idle_tssi = tgt_idle_tssi; - - //TODO init struct b43_phy_a - -} - -static void b43_aphy_op_free(struct b43_wldev *dev) -{ - struct b43_phy *phy = &dev->phy; - struct b43_phy_a *aphy = phy->a; - - kfree(aphy->tssi2dbm); - aphy->tssi2dbm = NULL; - - kfree(aphy); - dev->phy.a = NULL; -} - -static int b43_aphy_op_init(struct b43_wldev *dev) -{ - b43_phy_inita(dev); - - return 0; -} - -static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset) -{ - /* OFDM registers are base-registers for the A-PHY. 
*/ - if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) { - offset &= ~B43_PHYROUTE; - offset |= B43_PHYROUTE_BASE; - } - -#if B43_DEBUG - if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) { - /* Ext-G registers are only available on G-PHYs */ - b43err(dev->wl, "Invalid EXT-G PHY access at " - "0x%04X on A-PHY\n", offset); - dump_stack(); - } - if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) { - /* N-BMODE registers are only available on N-PHYs */ - b43err(dev->wl, "Invalid N-BMODE PHY access at " - "0x%04X on A-PHY\n", offset); - dump_stack(); - } -#endif /* B43_DEBUG */ - - return offset; -} - -static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg) -{ - reg = adjust_phyreg(dev, reg); - b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - reg = adjust_phyreg(dev, reg); - b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - -static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg) -{ - /* Register 1 is a 32-bit register. */ - B43_WARN_ON(reg == 1); - /* A-PHY needs 0x40 for read access */ - reg |= 0x40; - - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); - return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); -} - -static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - /* Register 1 is a 32-bit register. */ - B43_WARN_ON(reg == 1); - - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); - b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); -} - -static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev) -{ - return (dev->phy.rev >= 5); -} - -static void b43_aphy_op_software_rfkill(struct b43_wldev *dev, - bool blocked) -{ - struct b43_phy *phy = &dev->phy; - - if (!blocked) { - if (phy->radio_on) - return; - b43_radio_write16(dev, 0x0004, 0x00C0); - b43_radio_write16(dev, 0x0005, 0x0008); - b43_phy_mask(dev, 0x0010, 0xFFF7); - b43_phy_mask(dev, 0x0011, 0xFFF7); - b43_radio_init2060(dev); - } else { - b43_radio_write16(dev, 0x0004, 0x00FF); - b43_radio_write16(dev, 0x0005, 0x00FB); - b43_phy_set(dev, 0x0010, 0x0008); - b43_phy_set(dev, 0x0011, 0x0008); - } -} - -static int b43_aphy_op_switch_channel(struct b43_wldev *dev, - unsigned int new_channel) -{ - if (new_channel > 200) - return -EINVAL; - aphy_channel_switch(dev, new_channel); - - return 0; -} - -static unsigned int b43_aphy_op_get_default_chan(struct b43_wldev *dev) -{ - return 36; /* Default to channel 36 */ -} - -static void b43_aphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna) -{//TODO - struct b43_phy *phy = &dev->phy; - u16 tmp; - int autodiv = 0; - - if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1) - autodiv = 1; - - b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP); - - b43_phy_maskset(dev, B43_PHY_BBANDCFG, ~B43_PHY_BBANDCFG_RXANT, - (autodiv ? 
B43_ANTENNA_AUTO1 : antenna) << - B43_PHY_BBANDCFG_RXANT_SHIFT); - - if (autodiv) { - tmp = b43_phy_read(dev, B43_PHY_ANTDWELL); - if (antenna == B43_ANTENNA_AUTO1) - tmp &= ~B43_PHY_ANTDWELL_AUTODIV1; - else - tmp |= B43_PHY_ANTDWELL_AUTODIV1; - b43_phy_write(dev, B43_PHY_ANTDWELL, tmp); - } - if (phy->rev < 3) - b43_phy_maskset(dev, B43_PHY_ANTDWELL, 0xFF00, 0x24); - else { - b43_phy_set(dev, B43_PHY_OFDM61, 0x10); - if (phy->rev == 3) { - b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x1D); - b43_phy_write(dev, B43_PHY_ADIVRELATED, 8); - } else { - b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x3A); - b43_phy_maskset(dev, B43_PHY_ADIVRELATED, 0xFF00, 8); - } - } - - b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP); -} - -static void b43_aphy_op_adjust_txpower(struct b43_wldev *dev) -{//TODO -} - -static enum b43_txpwr_result b43_aphy_op_recalc_txpower(struct b43_wldev *dev, - bool ignore_tssi) -{//TODO - return B43_TXPWR_RES_DONE; -} - -static void b43_aphy_op_pwork_15sec(struct b43_wldev *dev) -{//TODO -} - -static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev) -{//TODO -} - -static const struct b43_phy_operations b43_phyops_a = { - .allocate = b43_aphy_op_allocate, - .free = b43_aphy_op_free, - .prepare_structs = b43_aphy_op_prepare_structs, - .init = b43_aphy_op_init, - .phy_read = b43_aphy_op_read, - .phy_write = b43_aphy_op_write, - .radio_read = b43_aphy_op_radio_read, - .radio_write = b43_aphy_op_radio_write, - .supports_hwpctl = b43_aphy_op_supports_hwpctl, - .software_rfkill = b43_aphy_op_software_rfkill, - .switch_analog = b43_phyop_switch_analog_generic, - .switch_channel = b43_aphy_op_switch_channel, - .get_default_chan = b43_aphy_op_get_default_chan, - .set_rx_antenna = b43_aphy_op_set_rx_antenna, - .recalc_txpower = b43_aphy_op_recalc_txpower, - .adjust_txpower = b43_aphy_op_adjust_txpower, - .pwork_15sec = b43_aphy_op_pwork_15sec, - .pwork_60sec = b43_aphy_op_pwork_60sec, -}; diff --git a/drivers/net/wireless/broadcom/b43/phy_a.h b/drivers/net/wireless/broadcom/b43/phy_a.h index f7d0d929a374..0a92d01c21f9 100644 --- a/drivers/net/wireless/broadcom/b43/phy_a.h +++ b/drivers/net/wireless/broadcom/b43/phy_a.h @@ -101,26 +101,4 @@ u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset); void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, u16 offset, u32 value); - -struct b43_phy_a { - /* Pointer to the table used to convert a - * TSSI value to dBm-Q5.2 */ - const s8 *tssi2dbm; - /* Target idle TSSI */ - int tgt_idle_tssi; - /* Current idle TSSI */ - int cur_idle_tssi;//FIXME value currently not set - - /* A-PHY TX Power control value. */ - u16 txpwr_offset; - - //TODO lots of missing stuff -}; - -/** - * b43_phy_inita - Lowlevel A-PHY init routine. - * This is _only_ used by the G-PHY code. 
- */ -void b43_phy_inita(struct b43_wldev *dev); - #endif /* LINUX_B43_PHY_A_H_ */ diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h index 78d86526799e..ced054a9850c 100644 --- a/drivers/net/wireless/broadcom/b43/phy_common.h +++ b/drivers/net/wireless/broadcom/b43/phy_common.h @@ -190,7 +190,6 @@ struct b43_phy_operations { void (*pwork_60sec)(struct b43_wldev *dev); }; -struct b43_phy_a; struct b43_phy_g; struct b43_phy_n; struct b43_phy_lp; @@ -210,8 +209,6 @@ struct b43_phy { #else union { #endif - /* A-PHY specific information */ - struct b43_phy_a *a; /* G-PHY specific information */ struct b43_phy_g *g; /* N-PHY specific information */ diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c index 462310e6e88f..822dcaa8ace6 100644 --- a/drivers/net/wireless/broadcom/b43/phy_g.c +++ b/drivers/net/wireless/broadcom/b43/phy_g.c @@ -31,6 +31,7 @@ #include "phy_common.h" #include "lo.h" #include "main.h" +#include "wa.h" #include <linux/bitrev.h> #include <linux/slab.h> @@ -1987,6 +1988,25 @@ static void b43_phy_init_pctl(struct b43_wldev *dev) b43_shm_clear_tssi(dev); } +static void b43_phy_inita(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + might_sleep(); + + if (phy->rev >= 6) { + if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) + b43_phy_set(dev, B43_PHY_ENCORE, 0x0010); + else + b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010); + } + + b43_wa_all(dev); + + if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) + b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF); +} + static void b43_phy_initg(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -2150,11 +2170,6 @@ static void default_radio_attenuation(struct b43_wldev *dev, } } - if (phy->type == B43_PHYTYPE_A) { - rf->att = 0x60; - return; - } - switch (phy->radio_ver) { case 0x2053: switch (phy->radio_rev) { diff --git a/drivers/net/wireless/broadcom/b43/wa.c b/drivers/net/wireless/broadcom/b43/wa.c index c218c08fb2f5..0e96c08d1e17 100644 --- a/drivers/net/wireless/broadcom/b43/wa.c +++ b/drivers/net/wireless/broadcom/b43/wa.c @@ -30,33 +30,6 @@ #include "phy_common.h" #include "wa.h" -static void b43_wa_papd(struct b43_wldev *dev) -{ - u16 backup; - - backup = b43_ofdmtab_read16(dev, B43_OFDMTAB_PWRDYN2, 0); - b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, 7); - b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 0, 0); - b43_dummy_transmission(dev, true, true); - b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, backup); -} - -static void b43_wa_auxclipthr(struct b43_wldev *dev) -{ - b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x3800); -} - -static void b43_wa_afcdac(struct b43_wldev *dev) -{ - b43_phy_write(dev, 0x0035, 0x03FF); - b43_phy_write(dev, 0x0036, 0x0400); -} - -static void b43_wa_txdc_offset(struct b43_wldev *dev) -{ - b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 0, 0x0051); -} - void b43_wa_initgains(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -81,41 +54,6 @@ void b43_wa_initgains(struct b43_wldev *dev) b43_phy_write(dev, 0x00BA, 0x3ED5); } -static void b43_wa_divider(struct b43_wldev *dev) -{ - b43_phy_mask(dev, 0x002B, ~0x0100); - b43_phy_write(dev, 0x008E, 0x58C1); -} - -static void b43_wa_gt(struct b43_wldev *dev) /* Gain table. 
*/ -{ - if (dev->phy.rev <= 2) { - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 0, 15); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 1, 31); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 2, 42); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 3, 48); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 4, 58); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 0, 3); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 1, 3); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 2, 7); - } else { - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); - } -} - static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */ { int i; @@ -133,15 +71,11 @@ static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */ static void b43_wa_analog(struct b43_wldev *dev) { - struct b43_phy *phy = &dev->phy; u16 ofdmrev; ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION; if (ofdmrev > 2) { - if (phy->type == B43_PHYTYPE_A) - b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1808); - else - b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000); + b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044); b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201); @@ -149,26 +83,13 @@ static void b43_wa_analog(struct b43_wldev *dev) } } -static void b43_wa_dac(struct b43_wldev *dev) -{ - if (dev->phy.analog == 1) - b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, - (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0034) | 0x0008); - else - b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, - (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0078) | 0x0010); -} - static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */ { int i; - if (dev->phy.type == B43_PHYTYPE_A) - for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqa[i]); - else - for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqg[i]); + for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, + b43_tab_finefreqg[i]); } static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */ @@ -176,21 +97,14 @@ static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */ struct b43_phy *phy = &dev->phy; int i; - if (phy->type == B43_PHYTYPE_A) { - if (phy->rev == 2) - for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea2[i]); - else - for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea3[i]); - } else { - if (phy->rev == 1) - for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg1[i]); - else - for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, 
b43_tab_noiseg2[i]); - } + if (phy->rev == 1) + for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, + b43_tab_noiseg1[i]); + else + for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, + b43_tab_noiseg2[i]); } static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */ @@ -201,14 +115,6 @@ static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */ b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]); } -static void b43_write_null_nst(struct b43_wldev *dev) -{ - int i; - - for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0); -} - static void b43_write_nst(struct b43_wldev *dev, const u16 *nst) { int i; @@ -221,24 +127,13 @@ static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */ { struct b43_phy *phy = &dev->phy; - if (phy->type == B43_PHYTYPE_A) { - if (phy->rev <= 1) - b43_write_null_nst(dev); - else if (phy->rev == 2) - b43_write_nst(dev, b43_tab_noisescalea2); - else if (phy->rev == 3) - b43_write_nst(dev, b43_tab_noisescalea3); - else + if (phy->rev >= 6) { + if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) b43_write_nst(dev, b43_tab_noisescaleg3); + else + b43_write_nst(dev, b43_tab_noisescaleg2); } else { - if (phy->rev >= 6) { - if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) - b43_write_nst(dev, b43_tab_noisescaleg3); - else - b43_write_nst(dev, b43_tab_noisescaleg2); - } else { - b43_write_nst(dev, b43_tab_noisescaleg1); - } + b43_write_nst(dev, b43_tab_noisescaleg1); } } @@ -251,41 +146,13 @@ static void b43_wa_art(struct b43_wldev *dev) /* ADV retard table */ i, b43_tab_retard[i]); } -static void b43_wa_txlna_gain(struct b43_wldev *dev) -{ - b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 13, 0x0000); -} - -static void b43_wa_crs_reset(struct b43_wldev *dev) -{ - b43_phy_write(dev, 0x002C, 0x0064); -} - -static void b43_wa_2060txlna_gain(struct b43_wldev *dev) -{ - b43_hf_write(dev, b43_hf_read(dev) | - B43_HF_2060W); -} - -static void b43_wa_lms(struct b43_wldev *dev) -{ - b43_phy_maskset(dev, 0x0055, 0xFFC0, 0x0004); -} - -static void b43_wa_mixedsignal(struct b43_wldev *dev) -{ - b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, 3); -} - static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */ { struct b43_phy *phy = &dev->phy; int i; const u16 *tab; - if (phy->type == B43_PHYTYPE_A) { - tab = b43_tab_sigmasqr1; - } else if (phy->type == B43_PHYTYPE_G) { + if (phy->type == B43_PHYTYPE_G) { tab = b43_tab_sigmasqr2; } else { B43_WARN_ON(1); @@ -298,13 +165,6 @@ static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */ } } -static void b43_wa_iqadc(struct b43_wldev *dev) -{ - if (dev->phy.analog == 4) - b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 0, - b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 0) & ~0xF000); -} - static void b43_wa_crs_ed(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -450,38 +310,6 @@ static void b43_wa_cpll_nonpilot(struct b43_wldev *dev) b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0); } -static void b43_wa_rssi_adc(struct b43_wldev *dev) -{ - if (dev->phy.analog == 4) - b43_phy_write(dev, 0x00DC, 0x7454); -} - -static void b43_wa_boards_a(struct b43_wldev *dev) -{ - if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM && - dev->dev->board_type == SSB_BOARD_BU4306 && - dev->dev->board_rev < 0x30) { - b43_phy_write(dev, 0x0010, 0xE000); - b43_phy_write(dev, 0x0013, 0x0140); - b43_phy_write(dev, 0x0014, 0x0280); - } else { - if 
(dev->dev->board_type == SSB_BOARD_MP4318 && - dev->dev->board_rev < 0x20) { - b43_phy_write(dev, 0x0013, 0x0210); - b43_phy_write(dev, 0x0014, 0x0840); - } else { - b43_phy_write(dev, 0x0013, 0x0140); - b43_phy_write(dev, 0x0014, 0x0280); - } - if (dev->phy.rev <= 4) - b43_phy_write(dev, 0x0010, 0xE000); - else - b43_phy_write(dev, 0x0010, 0x2000); - b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 1, 0x0039); - b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 7, 0x0040); - } -} - static void b43_wa_boards_g(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; @@ -518,80 +346,7 @@ void b43_wa_all(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; - if (phy->type == B43_PHYTYPE_A) { - switch (phy->rev) { - case 2: - b43_wa_papd(dev); - b43_wa_auxclipthr(dev); - b43_wa_afcdac(dev); - b43_wa_txdc_offset(dev); - b43_wa_initgains(dev); - b43_wa_divider(dev); - b43_wa_gt(dev); - b43_wa_rssi_lt(dev); - b43_wa_analog(dev); - b43_wa_dac(dev); - b43_wa_fft(dev); - b43_wa_nft(dev); - b43_wa_rt(dev); - b43_wa_nst(dev); - b43_wa_art(dev); - b43_wa_txlna_gain(dev); - b43_wa_crs_reset(dev); - b43_wa_2060txlna_gain(dev); - b43_wa_lms(dev); - break; - case 3: - b43_wa_papd(dev); - b43_wa_mixedsignal(dev); - b43_wa_rssi_lt(dev); - b43_wa_txdc_offset(dev); - b43_wa_initgains(dev); - b43_wa_dac(dev); - b43_wa_nft(dev); - b43_wa_nst(dev); - b43_wa_msst(dev); - b43_wa_analog(dev); - b43_wa_gt(dev); - b43_wa_txpuoff_rxpuon(dev); - b43_wa_txlna_gain(dev); - break; - case 5: - b43_wa_iqadc(dev); - case 6: - b43_wa_papd(dev); - b43_wa_rssi_lt(dev); - b43_wa_txdc_offset(dev); - b43_wa_initgains(dev); - b43_wa_dac(dev); - b43_wa_nft(dev); - b43_wa_nst(dev); - b43_wa_msst(dev); - b43_wa_analog(dev); - b43_wa_gt(dev); - b43_wa_txpuoff_rxpuon(dev); - b43_wa_txlna_gain(dev); - break; - case 7: - b43_wa_iqadc(dev); - b43_wa_papd(dev); - b43_wa_rssi_lt(dev); - b43_wa_txdc_offset(dev); - b43_wa_initgains(dev); - b43_wa_dac(dev); - b43_wa_nft(dev); - b43_wa_nst(dev); - b43_wa_msst(dev); - b43_wa_analog(dev); - b43_wa_gt(dev); - b43_wa_txpuoff_rxpuon(dev); - b43_wa_txlna_gain(dev); - b43_wa_rssi_adc(dev); - default: - B43_WARN_ON(1); - } - b43_wa_boards_a(dev); - } else if (phy->type == B43_PHYTYPE_G) { + if (phy->type == B43_PHYTYPE_G) { switch (phy->rev) { case 1://XXX review rev1 b43_wa_crs_ed(dev); diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c index f6201264de49..b068d5aeee24 100644 --- a/drivers/net/wireless/broadcom/b43/xmit.c +++ b/drivers/net/wireless/broadcom/b43/xmit.c @@ -205,7 +205,7 @@ static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate) return control; } -static u8 b43_calc_fallback_rate(u8 bitrate) +static u8 b43_calc_fallback_rate(u8 bitrate, int gmode) { switch (bitrate) { case B43_CCK_RATE_1MB: @@ -216,8 +216,15 @@ static u8 b43_calc_fallback_rate(u8 bitrate) return B43_CCK_RATE_2MB; case B43_CCK_RATE_11MB: return B43_CCK_RATE_5MB; + /* + * Don't just fallback to CCK; it may be in 5GHz operation + * and falling back to CCK won't work out very well. + */ case B43_OFDM_RATE_6MB: - return B43_CCK_RATE_5MB; + if (gmode) + return B43_CCK_RATE_5MB; + else + return B43_OFDM_RATE_6MB; case B43_OFDM_RATE_9MB: return B43_OFDM_RATE_6MB; case B43_OFDM_RATE_12MB: @@ -438,7 +445,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, rts_rate = rts_cts_rate ? 
rts_cts_rate->hw_value : B43_CCK_RATE_1MB; rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); - rts_rate_fb = b43_calc_fallback_rate(rts_rate); + rts_rate_fb = b43_calc_fallback_rate(rts_rate, phy->gmode); rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { @@ -642,11 +649,7 @@ static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi) struct b43_phy *phy = &dev->phy; s8 ret; - if (phy->type == B43_PHYTYPE_A) { - //TODO: Incomplete specs. - ret = 0; - } else - ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1); + ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1); return ret; } @@ -663,7 +666,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) u16 uninitialized_var(chanstat), uninitialized_var(mactime); u32 uninitialized_var(macstat); u16 chanid; - u16 phytype; int padding, rate_idx; memset(&status, 0, sizeof(status)); @@ -684,7 +686,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) chanstat = le16_to_cpu(rxhdr->format_351.channel); break; } - phytype = chanstat & B43_RX_CHAN_PHYTYPE; if (unlikely(macstat & B43_RX_MAC_FCSERR)) { dev->wl->ieee_stats.dot11FCSErrorCount++; @@ -755,7 +756,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) else status.signal = max(rxhdr->power0, rxhdr->power1); break; - case B43_PHYTYPE_A: case B43_PHYTYPE_B: case B43_PHYTYPE_G: case B43_PHYTYPE_LP: @@ -802,14 +802,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; switch (chanstat & B43_RX_CHAN_PHYTYPE) { - case B43_PHYTYPE_A: - status.band = NL80211_BAND_5GHZ; - B43_WARN_ON(1); - /* FIXME: We don't really know which value the "chanid" contains. - * So the following assignment might be wrong. 
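/*
 * A compact standalone version of the fallback-rate rule changed in xmit.c
 * above -- not part of the b43 patch. On a 5 GHz (non-gmode) link the 6 Mb/s
 * OFDM rate must not fall back to a CCK rate the hardware cannot transmit,
 * so it falls back to itself instead. Rate constants are invented for the
 * example.
 */
#include <stdint.h>

enum { DEMO_CCK_5MB = 1, DEMO_OFDM_6MB = 2, DEMO_OFDM_9MB = 3 };

static uint8_t demo_fallback_rate(uint8_t rate, int gmode)
{
	switch (rate) {
	case DEMO_OFDM_6MB:
		/* fall back to CCK only when 2.4 GHz CCK rates are usable */
		return gmode ? DEMO_CCK_5MB : DEMO_OFDM_6MB;
	case DEMO_OFDM_9MB:
		return DEMO_OFDM_6MB;
	default:
		return rate;	/* lowest rates fall back to themselves */
	}
}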
*/ - status.freq = - ieee80211_channel_to_frequency(chanid, status.band); - break; case B43_PHYTYPE_G: status.band = NL80211_BAND_2GHZ; /* Somewhere between 478.104 and 508.1084 firmware for G-PHY diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index c7550dab6a23..c4b89d27e2e8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -166,41 +166,45 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler); sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler); sdio_release_host(sdiodev->func[1]); + sdiodev->sd_irq_requested = true; } return 0; } -int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) +void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) { - struct brcmfmac_sdio_pd *pdata; - brcmf_dbg(SDIO, "Entering\n"); + brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n", + sdiodev->oob_irq_requested, + sdiodev->sd_irq_requested); - pdata = &sdiodev->settings->bus.sdio; - if (pdata->oob_irq_supported) { + if (sdiodev->oob_irq_requested) { + struct brcmfmac_sdio_pd *pdata; + + pdata = &sdiodev->settings->bus.sdio; sdio_claim_host(sdiodev->func[1]); brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); sdio_release_host(sdiodev->func[1]); - if (sdiodev->oob_irq_requested) { - sdiodev->oob_irq_requested = false; - if (sdiodev->irq_wake) { - disable_irq_wake(pdata->oob_irq_nr); - sdiodev->irq_wake = false; - } - free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev); - sdiodev->irq_en = false; + sdiodev->oob_irq_requested = false; + if (sdiodev->irq_wake) { + disable_irq_wake(pdata->oob_irq_nr); + sdiodev->irq_wake = false; } - } else { + free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev); + sdiodev->irq_en = false; + sdiodev->oob_irq_requested = false; + } + + if (sdiodev->sd_irq_requested) { sdio_claim_host(sdiodev->func[1]); sdio_release_irq(sdiodev->func[2]); sdio_release_irq(sdiodev->func[1]); sdio_release_host(sdiodev->func[1]); + sdiodev->sd_irq_requested = false; } - - return 0; } void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, @@ -1197,12 +1201,17 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device); brcmf_dbg(SDIO, "Function: %d\n", func->num); - if (func->num != 1) - return; - bus_if = dev_get_drvdata(&func->dev); if (bus_if) { sdiodev = bus_if->bus_priv.sdio; + + /* start by unregistering irqs */ + brcmf_sdiod_intr_unregister(sdiodev); + + if (func->num != 1) + return; + + /* only proceed with rest of cleanup if func 1 */ brcmf_sdiod_remove(sdiodev); dev_set_drvdata(&sdiodev->func[1]->dev, NULL); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 62f475e31077..264bd638a3d9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -541,6 +541,21 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) ADDR_INDIRECT); } +static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr) +{ + int bsscfgidx; + + for (bsscfgidx = 0; bsscfgidx < BRCMF_MAX_IFS; bsscfgidx++) { + /* bsscfgidx 1 is reserved for legacy P2P */ + if (bsscfgidx == 1) + continue; + if (!drvr->iflist[bsscfgidx]) + return bsscfgidx; + } + + return 
-ENOMEM; +} + static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) { struct brcmf_mbss_ssid_le mbss_ssid_le; @@ -548,7 +563,7 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) int err; memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le)); - bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr); + bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr); if (bsscfgidx < 0) return bsscfgidx; @@ -586,7 +601,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, brcmf_dbg(INFO, "Adding vif \"%s\"\n", name); - vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false); + vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP); if (IS_ERR(vif)) return (struct wireless_dev *)vif; @@ -669,20 +684,24 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, return ERR_PTR(-EOPNOTSUPP); case NL80211_IFTYPE_AP: wdev = brcmf_ap_add_vif(wiphy, name, flags, params); - if (!IS_ERR(wdev)) - brcmf_cfg80211_update_proto_addr_mode(wdev); - return wdev; + break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params); - if (!IS_ERR(wdev)) - brcmf_cfg80211_update_proto_addr_mode(wdev); - return wdev; + break; case NL80211_IFTYPE_UNSPECIFIED: default: return ERR_PTR(-EINVAL); } + + if (IS_ERR(wdev)) + brcmf_err("add iface %s type %d failed: err=%d\n", + name, type, (int)PTR_ERR(wdev)); + else + brcmf_cfg80211_update_proto_addr_mode(wdev); + + return wdev; } static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc) @@ -2750,7 +2769,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, if (!bi->ctl_ch) { ch.chspec = le16_to_cpu(bi->chanspec); cfg->d11inf.decchspec(&ch); - bi->ctl_ch = ch.chnum; + bi->ctl_ch = ch.control_ch_num; } channel = bi->ctl_ch; @@ -2868,7 +2887,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg, else band = wiphy->bands[NL80211_BAND_5GHZ]; - freq = ieee80211_channel_to_frequency(ch.chnum, band->band); + freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band); cfg->channel = freq; notify_channel = ieee80211_get_channel(wiphy, freq); @@ -2878,7 +2897,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg, notify_ielen = le32_to_cpu(bi->ie_length); notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; - brcmf_dbg(CONN, "channel: %d(%d)\n", ch.chnum, freq); + brcmf_dbg(CONN, "channel: %d(%d)\n", ch.control_ch_num, freq); brcmf_dbg(CONN, "capability: %X\n", notify_capability); brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval); brcmf_dbg(CONN, "signal: %d\n", notify_signal); @@ -4439,7 +4458,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_join_params join_params; enum nl80211_iftype dev_role; struct brcmf_fil_bss_enable_le bss_enable; - u16 chanspec; + u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef); bool mbss; int is_11d; @@ -4515,16 +4534,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); + /* Parameters shared by all radio interfaces */ if (!mbss) { - chanspec = chandef_to_chanspec(&cfg->d11inf, - &settings->chandef); - err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); - if (err < 0) { - brcmf_err("Set Channel failed: chspec=%d, %d\n", - chanspec, err); - goto exit; - } - if (is_11d != ifp->vif->is_11d) { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY, is_11d); @@ -4572,6 +4583,8 @@ brcmf_cfg80211_start_ap(struct 
wiphy *wiphy, struct net_device *ndev, err = -EINVAL; goto exit; } + + /* Interface specific setup */ if (dev_role == NL80211_IFTYPE_AP) { if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss)) brcmf_fil_iovar_int_set(ifp, "mbss", 1); @@ -4581,6 +4594,17 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_err("setting AP mode failed %d\n", err); goto exit; } + if (!mbss) { + /* Firmware 10.x requires setting channel after enabling + * AP and before bringing interface up. + */ + err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); + if (err < 0) { + brcmf_err("Set Channel failed: chspec=%d, %d\n", + chanspec, err); + goto exit; + } + } err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); if (err < 0) { brcmf_err("BRCMF_C_UP error (%d)\n", err); @@ -4602,7 +4626,13 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, goto exit; } brcmf_dbg(TRACE, "AP mode configuration complete\n"); - } else { + } else if (dev_role == NL80211_IFTYPE_P2P_GO) { + err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); + if (err < 0) { + brcmf_err("Set Channel failed: chspec=%d, %d\n", + chanspec, err); + goto exit; + } err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le, sizeof(ssid_le)); if (err < 0) { @@ -4619,7 +4649,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, } brcmf_dbg(TRACE, "GO mode configuration complete\n"); + } else { + WARN_ON(1); } + set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); brcmf_net_setcarrier(ifp, true); @@ -4908,6 +4941,68 @@ exit: return err; } +static int brcmf_cfg80211_get_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = wdev->netdev; + struct brcmf_if *ifp; + struct brcmu_chan ch; + enum nl80211_band band = 0; + enum nl80211_chan_width width = 0; + u32 chanspec; + int freq, err; + + if (!ndev) + return -ENODEV; + ifp = netdev_priv(ndev); + + err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec); + if (err) { + brcmf_err("chanspec failed (%d)\n", err); + return err; + } + + ch.chspec = chanspec; + cfg->d11inf.decchspec(&ch); + + switch (ch.band) { + case BRCMU_CHAN_BAND_2G: + band = NL80211_BAND_2GHZ; + break; + case BRCMU_CHAN_BAND_5G: + band = NL80211_BAND_5GHZ; + break; + } + + switch (ch.bw) { + case BRCMU_CHAN_BW_80: + width = NL80211_CHAN_WIDTH_80; + break; + case BRCMU_CHAN_BW_40: + width = NL80211_CHAN_WIDTH_40; + break; + case BRCMU_CHAN_BW_20: + width = NL80211_CHAN_WIDTH_20; + break; + case BRCMU_CHAN_BW_80P80: + width = NL80211_CHAN_WIDTH_80P80; + break; + case BRCMU_CHAN_BW_160: + width = NL80211_CHAN_WIDTH_160; + break; + } + + freq = ieee80211_channel_to_frequency(ch.control_ch_num, band); + chandef->chan = ieee80211_get_channel(wiphy, freq); + chandef->width = width; + chandef->center_freq1 = ieee80211_channel_to_frequency(ch.chnum, band); + chandef->center_freq2 = 0; + + return 0; +} + static int brcmf_cfg80211_crit_proto_start(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_crit_proto_id proto, @@ -5070,6 +5165,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .mgmt_tx = brcmf_cfg80211_mgmt_tx, .remain_on_channel = brcmf_p2p_remain_on_channel, .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel, + .get_channel = brcmf_cfg80211_get_channel, .start_p2p_device = brcmf_p2p_start_device, .stop_p2p_device = brcmf_p2p_stop_device, .crit_proto_start = brcmf_cfg80211_crit_proto_start, @@ -5078,8 +5174,7 @@ 
static struct cfg80211_ops brcmf_cfg80211_ops = { }; struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, - enum nl80211_iftype type, - bool pm_block) + enum nl80211_iftype type) { struct brcmf_cfg80211_vif *vif_walk; struct brcmf_cfg80211_vif *vif; @@ -5094,8 +5189,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, vif->wdev.wiphy = cfg->wiphy; vif->wdev.iftype = type; - vif->pm_block = pm_block; - brcmf_init_prof(&vif->profile); if (type == NL80211_IFTYPE_AP) { @@ -5296,7 +5389,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg, else band = wiphy->bands[NL80211_BAND_5GHZ]; - freq = ieee80211_channel_to_frequency(ch.chnum, band->band); + freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); done: @@ -5352,7 +5445,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { - struct brcmf_if *ifp = netdev_priv(ndev); static int generation; u32 event = e->event_code; u32 reason = e->reason; @@ -5363,8 +5455,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, ndev != cfg_to_ndev(cfg)) { brcmf_dbg(CONN, "AP mode link down\n"); complete(&cfg->vif_disabled); - if (ifp->vif->mbss) - brcmf_remove_interface(ifp); return 0; } @@ -5818,14 +5908,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, channel = band->channels; index = band->n_channels; for (j = 0; j < band->n_channels; j++) { - if (channel[j].hw_value == ch.chnum) { + if (channel[j].hw_value == ch.control_ch_num) { index = j; break; } } channel[index].center_freq = - ieee80211_channel_to_frequency(ch.chnum, band->band); - channel[index].hw_value = ch.chnum; + ieee80211_channel_to_frequency(ch.control_ch_num, + band->band); + channel[index].hw_value = ch.control_ch_num; /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. 
@@ -5927,7 +6018,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40)) continue; for (j = 0; j < band->n_channels; j++) { - if (band->channels[j].hw_value == ch.chnum) + if (band->channels[j].hw_value == ch.control_ch_num) break; } if (WARN_ON(j == band->n_channels)) @@ -6715,11 +6806,10 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, return NULL; } - ops = kzalloc(sizeof(*ops), GFP_KERNEL); + ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL); if (!ops) return NULL; - memcpy(ops, &brcmf_cfg80211_ops, sizeof(*ops)); ifp = netdev_priv(ndev); #ifdef CONFIG_PM if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) @@ -6740,7 +6830,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, init_vif_event(&cfg->vif_event); INIT_LIST_HEAD(&cfg->vif_list); - vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false); + vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION); if (IS_ERR(vif)) goto wiphy_out; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 95e35bcc16ce..04bfc7e3ecde 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -20,6 +20,9 @@ /* for brcmu_d11inf */ #include <brcmu_d11.h> +#include "fwil_types.h" +#include "p2p.h" + #define WL_NUM_SCAN_MAX 10 #define WL_TLV_INFO_MAX 1024 #define WL_BSS_INFO_MAX 2048 @@ -167,7 +170,6 @@ struct vif_saved_ie { * @wdev: wireless device. * @profile: profile information. * @sme_state: SME state using enum brcmf_vif_status bits. - * @pm_block: power-management blocked. * @list: linked list. * @mgmt_rx_reg: registered rx mgmt frame types. * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P). 
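/*
 * The hunks above (and several that follow) pass ch.control_ch_num instead
 * of ch.chnum to ieee80211_channel_to_frequency(), because cfg80211 needs
 * the frequency of the control channel rather than of the chanspec centre.
 * For reference, a simplified standalone form of that channel-to-frequency
 * mapping is sketched below; the in-kernel helper also covers 60 GHz and a
 * few special ranges not shown here.
 */
static int demo_channel_to_freq_mhz(int chan, int is_5ghz)
{
	if (is_5ghz)
		return 5000 + chan * 5;	/* e.g. channel 36 -> 5180 MHz */
	if (chan == 14)
		return 2484;		/* 2.4 GHz channel 14 special case */
	return 2407 + chan * 5;		/* e.g. channel 6 -> 2437 MHz */
}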
@@ -177,7 +179,6 @@ struct brcmf_cfg80211_vif { struct wireless_dev wdev; struct brcmf_cfg80211_profile profile; unsigned long sme_state; - bool pm_block; struct vif_saved_ie saved_ie; struct list_head list; u16 mgmt_rx_reg; @@ -388,8 +389,7 @@ s32 brcmf_cfg80211_down(struct net_device *ndev); enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, - enum nl80211_iftype type, - bool pm_block); + enum nl80211_iftype type); void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index d3fd6b1db1d9..05f22ff81d60 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -685,6 +685,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_43602_CHIP_ID: case BRCM_CC_4371_CHIP_ID: return 0x180000; + case BRCM_CC_43465_CHIP_ID: + case BRCM_CC_43525_CHIP_ID: case BRCM_CC_4365_CHIP_ID: case BRCM_CC_4366_CHIP_ID: return 0x200000; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index b590499f6883..faf4e46bd65b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -516,7 +516,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) /* set appropriate operations */ ndev->netdev_ops = &brcmf_netdev_ops_pri; - ndev->hard_header_len += drvr->hdrlen; + ndev->needed_headroom += drvr->hdrlen; ndev->ethtool_ops = &brcmf_ethtool_ops; drvr->rxsz = ndev->mtu + ndev->hard_header_len + @@ -753,30 +753,6 @@ void brcmf_remove_interface(struct brcmf_if *ifp) brcmf_del_if(ifp->drvr, ifp->bsscfgidx); } -int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr) -{ - int ifidx; - int bsscfgidx; - bool available; - int highest; - - available = false; - bsscfgidx = 2; - highest = 2; - for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) { - if (drvr->iflist[ifidx]) { - if (drvr->iflist[ifidx]->bsscfgidx == bsscfgidx) - bsscfgidx = highest + 1; - else if (drvr->iflist[ifidx]->bsscfgidx > highest) - highest = drvr->iflist[ifidx]->bsscfgidx; - } else { - available = true; - } - } - - return available ? 
bsscfgidx : -ENOMEM; -} - #ifdef CONFIG_INET #define ARPOL_MAX_ENTRIES 8 static int brcmf_inetaddr_changed(struct notifier_block *nb, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 647d3cc2a4dc..2a075c5f6f8b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -217,7 +217,6 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, bool is_p2pdev, char *name, u8 *mac_addr); void brcmf_remove_interface(struct brcmf_if *ifp); -int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr); void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 5b30922b67ec..cd221ab55062 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -2101,7 +2101,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto)); /* determine the priority */ - if (!skb->priority) + if ((skb->priority == 0) || (skb->priority > 7)) skb->priority = cfg80211_classify8021d(skb, NULL); drvr->tx_multicast += !!multicast; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index a70cda6c0592..f38a82133540 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1246,7 +1246,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg, if (!bi->ctl_ch) { ch.chspec = le16_to_cpu(bi->chanspec); cfg->d11inf.decchspec(&ch); - bi->ctl_ch = ch.chnum; + bi->ctl_ch = ch.control_ch_num; } afx_hdl->peer_chan = bi->ctl_ch; brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n", @@ -1385,7 +1385,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) && (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) { - afx_hdl->peer_chan = ch.chnum; + afx_hdl->peer_chan = ch.control_ch_num; brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n", afx_hdl->peer_chan); complete(&afx_hdl->act_frm_scan); @@ -1428,7 +1428,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, memcpy(&mgmt_frame->u, frame, mgmt_frame_len); mgmt_frame_len += offsetof(struct ieee80211_mgmt, u); - freq = ieee80211_channel_to_frequency(ch.chnum, + freq = ieee80211_channel_to_frequency(ch.control_ch_num, ch.band == BRCMU_CHAN_BAND_2G ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ); @@ -1873,7 +1873,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) && (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) { - afx_hdl->peer_chan = ch.chnum; + afx_hdl->peer_chan = ch.control_ch_num; brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n", afx_hdl->peer_chan); complete(&afx_hdl->act_frm_scan); @@ -1898,7 +1898,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, mgmt_frame = (u8 *)(rxframe + 1); mgmt_frame_len = e->datalen - sizeof(*rxframe); - freq = ieee80211_channel_to_frequency(ch.chnum, + freq = ieee80211_channel_to_frequency(ch.control_ch_num, ch.band == BRCMU_CHAN_BAND_2G ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ); @@ -2030,8 +2030,6 @@ static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p, err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request, sizeof(if_request)); - if (err) - return err; return err; } @@ -2076,8 +2074,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif) return ERR_PTR(-ENOSPC); - p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE, - false); + p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE); if (IS_ERR(p2p_vif)) { brcmf_err("could not create discovery vif\n"); return (struct wireless_dev *)p2p_vif; @@ -2177,7 +2174,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, return ERR_PTR(-EOPNOTSUPP); } - vif = brcmf_alloc_vif(cfg, type, false); + vif = brcmf_alloc_vif(cfg, type); if (IS_ERR(vif)) return (struct wireless_dev *)vif; brcmf_cfg80211_arm_vif_event(cfg, vif); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 0af8db82da0c..3deba90c7eb5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -54,21 +54,25 @@ BRCMF_FW_NVRAM_DEF(43570, "brcmfmac43570-pcie.bin", "brcmfmac43570-pcie.txt"); BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt"); BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt"); BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4365C, "brcmfmac4365c-pcie.bin", "brcmfmac4365c-pcie.txt"); BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt"); BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt"); BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt"); static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFFF, 4365B), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B), + 
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 67e69bff2545..5fb8b91b9326 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -1384,8 +1384,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header, return -ENXIO; } if (rd->seq_num != rx_seq) { - brcmf_err("seq %d: sequence number error, expect %d\n", - rx_seq, rd->seq_num); + brcmf_dbg(SDIO, "seq %d, expected %d\n", rx_seq, rd->seq_num); bus->sdcnt.rx_badseq++; rd->seq_num = rx_seq; } @@ -3666,7 +3665,7 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, str_shift = 11; break; default: - brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", + brcmf_dbg(INFO, "No SDIO driver strength init needed for chip %s rev %d pmurev %d\n", ci->name, ci->chiprev, ci->pmurev); break; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index dcf0ce8cd2c1..f3da32fc6360 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -186,6 +186,7 @@ struct brcmf_sdio_dev { struct brcmf_bus *bus_if; struct brcmf_mp_device *settings; bool oob_irq_requested; + bool sd_irq_requested; bool irq_en; /* irq enable flags */ spinlock_t irq_en_lock; bool irq_wake; /* irq wake enable flags */ @@ -293,7 +294,7 @@ struct sdpcmd_regs { /* Register/deregister interrupt handler. */ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev); -int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev); +void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev); /* sdio device register access interface */ u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index 99dac9b8a082..b3aab2fe96eb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -27017,7 +27017,7 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core, tx_core = 1 - rx_core; num_samps = 1024; - desired_log2_pwr = (cal_type == 0) ? 
13 : 13; + desired_log2_pwr = 13; wlc_phy_rx_iq_coeffs_nphy(pi, 0, &save_comp); zero_comp.a0 = zero_comp.b0 = zero_comp.a1 = zero_comp.b1 = 0x0; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index 2b2522bdd8eb..d8b79cb72b58 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c @@ -107,6 +107,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch) u16 val; ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); + ch->control_ch_num = ch->chnum; switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) { case BRCMU_CHSPEC_D11N_BW_20: @@ -118,10 +119,10 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch) val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK; if (val == BRCMU_CHSPEC_D11N_SB_L) { ch->sb = BRCMU_CHAN_SB_L; - ch->chnum -= CH_10MHZ_APART; + ch->control_ch_num -= CH_10MHZ_APART; } else { ch->sb = BRCMU_CHAN_SB_U; - ch->chnum += CH_10MHZ_APART; + ch->control_ch_num += CH_10MHZ_APART; } break; default: @@ -147,6 +148,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) u16 val; ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); + ch->control_ch_num = ch->chnum; switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) { case BRCMU_CHSPEC_D11AC_BW_20: @@ -158,10 +160,10 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK; if (val == BRCMU_CHSPEC_D11AC_SB_L) { ch->sb = BRCMU_CHAN_SB_L; - ch->chnum -= CH_10MHZ_APART; + ch->control_ch_num -= CH_10MHZ_APART; } else if (val == BRCMU_CHSPEC_D11AC_SB_U) { ch->sb = BRCMU_CHAN_SB_U; - ch->chnum += CH_10MHZ_APART; + ch->control_ch_num += CH_10MHZ_APART; } else { WARN_ON_ONCE(1); } @@ -172,16 +174,16 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) BRCMU_CHSPEC_D11AC_SB_SHIFT); switch (ch->sb) { case BRCMU_CHAN_SB_LL: - ch->chnum -= CH_30MHZ_APART; + ch->control_ch_num -= CH_30MHZ_APART; break; case BRCMU_CHAN_SB_LU: - ch->chnum -= CH_10MHZ_APART; + ch->control_ch_num -= CH_10MHZ_APART; break; case BRCMU_CHAN_SB_UL: - ch->chnum += CH_10MHZ_APART; + ch->control_ch_num += CH_10MHZ_APART; break; case BRCMU_CHAN_SB_UU: - ch->chnum += CH_30MHZ_APART; + ch->control_ch_num += CH_30MHZ_APART; break; default: WARN_ON_ONCE(1); diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index 699f2c2782ee..3cc42bef6245 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -40,7 +40,9 @@ #define BRCM_CC_4339_CHIP_ID 0x4339 #define BRCM_CC_43430_CHIP_ID 43430 #define BRCM_CC_4345_CHIP_ID 0x4345 +#define BRCM_CC_43465_CHIP_ID 43465 #define BRCM_CC_4350_CHIP_ID 0x4350 +#define BRCM_CC_43525_CHIP_ID 43525 #define BRCM_CC_4354_CHIP_ID 0x4354 #define BRCM_CC_4356_CHIP_ID 0x4356 #define BRCM_CC_43566_CHIP_ID 43566 diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h index f9745ea8b3e0..8b8b2ecb3199 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h @@ -125,14 +125,36 @@ enum brcmu_chan_sb { BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU, }; +/** + * struct brcmu_chan - stores channel formats + * + * This structure can be used with functions translating chanspec into generic + * channel info and the other way. 
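Note on the d11.c decode changes above: they boil down to simple channel-number arithmetic. A standalone sketch (illustrative only, not driver code), assuming the usual 5 MHz spacing between consecutive channel numbers, so a 10 MHz offset corresponds to 2 channel numbers and a 30 MHz offset to 6:

/* Derive the 20 MHz control channel from an 80 MHz center channel and the
 * control sideband, under the 5 MHz-per-channel-number assumption above.
 */
enum sb80 { SB80_LL, SB80_LU, SB80_UL, SB80_UU };

static unsigned char control_ch_from_center80(unsigned char center, enum sb80 sb)
{
	switch (sb) {
	case SB80_LL: return center - 6;	/* lowest 20 MHz sub-band  */
	case SB80_LU: return center - 2;
	case SB80_UL: return center + 2;
	case SB80_UU: return center + 6;	/* highest 20 MHz sub-band */
	}
	return center;
}
/* e.g. 80 MHz channel 42: LL -> 36, LU -> 40, UL -> 44, UU -> 48 */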
+ * + * @chspec: firmware specific format + * @chnum: center channel number + * @control_ch_num: control channel number + * @band: frequency band + * @bw: channel width + * @sb: control sideband (location of control channel against the center one) + */ struct brcmu_chan { u16 chspec; u8 chnum; + u8 control_ch_num; u8 band; enum brcmu_chan_bw bw; enum brcmu_chan_sb sb; }; +/** + * struct brcmu_d11inf - provides functions translating channel format + * + * @io_type: determines version of channel format used by firmware + * @encchspec: encodes channel info into a chanspec, requires center channel + * number, ignores control one + * @decchspec: decodes chanspec into generic info + */ struct brcmu_d11inf { u8 io_type; diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 7bcedbb53d94..209dc9988455 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il) int txq_id; /* Tx queues */ - if (il->txq) + if (il->txq) { for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) if (txq_id == IL39_CMD_QUEUE_NUM) il_cmd_queue_free(il); else il_tx_queue_free(il, txq_id); + } /* free tx queue structure */ il_free_txq_mem(il); diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 13eae9ff8c35..47f4a14c84fe 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1228,7 +1228,7 @@ static int if_sdio_probe(struct sdio_func *func, } spin_lock_init(&card->lock); - card->workqueue = create_workqueue("libertas_sdio"); + card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0); INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); init_waitqueue_head(&card->pwron_waitq); @@ -1326,7 +1326,6 @@ static void if_sdio_remove(struct sdio_func *func) lbs_stop_card(card->priv); lbs_remove_card(card->priv); - flush_workqueue(card->workqueue); destroy_workqueue(card->workqueue); while (card->packets) { diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index 82c0796377aa..c3a53cd6988e 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -1180,7 +1180,7 @@ static int if_spi_probe(struct spi_device *spi) priv->fw_ready = 1; /* Initialize interrupt handling stuff. 
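The libertas if_sdio change above (and the if_spi change that follows) share one pattern worth spelling out: a workqueue feeding the packet path is allocated with WQ_MEM_RECLAIM so it keeps a rescuer thread under memory pressure, and the explicit flush_workqueue() before destroy_workqueue() is dropped because destroy_workqueue() already drains pending work. A minimal sketch with hypothetical names:

static struct workqueue_struct *example_wq;

static int example_start(void)
{
	example_wq = alloc_workqueue("example_tx", WQ_MEM_RECLAIM, 0);
	return example_wq ? 0 : -ENOMEM;
}

static void example_stop(void)
{
	destroy_workqueue(example_wq);	/* drains remaining work itself */
}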
*/ - card->workqueue = create_workqueue("libertas_spi"); + card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0); INIT_WORK(&card->packet_work, if_spi_host_to_card_worker); INIT_WORK(&card->resume_work, if_spi_resume_worker); @@ -1208,7 +1208,6 @@ static int if_spi_probe(struct spi_device *spi) release_irq: free_irq(spi->irq, card); terminate_workqueue: - flush_workqueue(card->workqueue); destroy_workqueue(card->workqueue); lbs_remove_card(priv); /* will call free_netdev */ free_card: @@ -1235,7 +1234,6 @@ static int libertas_spi_remove(struct spi_device *spi) lbs_remove_card(priv); /* will call free_netdev */ free_irq(spi->irq, card); - flush_workqueue(card->workqueue); destroy_workqueue(card->workqueue); if (card->pdata->teardown) card->pdata->teardown(spi); diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index 0bf8916a02cf..75bf0c8a2f6f 100644 --- a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -16,7 +16,6 @@ #include <linux/module.h> #include "libertas_tf.h" -#define DRIVER_RELEASE_VERSION "004.p0" /* thinfirm version: 5.132.X.pX */ #define LBTF_FW_VER_MIN 0x05840300 #define LBTF_FW_VER_MAX 0x0584ffff @@ -27,12 +26,6 @@ unsigned int lbtf_debug; EXPORT_SYMBOL_GPL(lbtf_debug); module_param_named(libertas_tf_debug, lbtf_debug, int, 0644); -static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION -#ifdef DEBUG - "-dbg" -#endif - ""; - struct workqueue_struct *lbtf_wq; static const struct ieee80211_channel lbtf_channels[] = { diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c index 1efef3b8273d..dc49c3de1f25 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c @@ -184,7 +184,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, tx_info_src = MWIFIEX_SKB_TXCB(skb_src); skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size, - GFP_ATOMIC | GFP_DMA); + GFP_ATOMIC); if (!skb_aggr) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 78c532f0d286..a6d86d4ccd22 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -788,3 +788,4 @@ poll_fw: return ret; } +EXPORT_SYMBOL_GPL(mwifiex_dnld_fw); diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index 62211fca91b7..a4b773d102b3 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -1281,7 +1281,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, if (result) { mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n"); if (priv->media_connected) - mwifiex_reset_connect_state(priv, result); + mwifiex_reset_connect_state(priv, result, true); memset(&priv->curr_bss_params.bss_descriptor, 0x00, sizeof(struct mwifiex_bssdescriptor)); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 8b67a552a690..0e280f879b58 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -526,10 +526,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) fw.fw_buf = (u8 *) adapter->firmware->data; fw.fw_len = adapter->firmware->size; - if 
(adapter->if_ops.dnld_fw) + if (adapter->if_ops.dnld_fw) { ret = adapter->if_ops.dnld_fw(adapter, &fw); - else + } else { ret = mwifiex_dnld_fw(adapter, &fw); + } + if (ret == -1) goto err_dnld_fw; diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 0207af00be42..f0cd055c0b2c 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1128,7 +1128,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); -void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason); +void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason, + bool from_ap); u8 mwifiex_band_to_radio_type(u8 band); int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 0c7937eb6b77..1b1e266ce00f 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -507,7 +507,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { /* Allocate skb here so that firmware can DMA data from it */ skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, - GFP_KERNEL | GFP_DMA); + GFP_KERNEL); if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for RX ring.\n"); @@ -1319,7 +1319,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) } skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, - GFP_KERNEL | GFP_DMA); + GFP_KERNEL); if (!skb_tmp) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb.\n"); @@ -2804,7 +2804,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) } /* - * This function get firmare name for downloading by revision id + * This function gets the firmware name for downloading by revision id * * Read revision id register to get revision id */ @@ -2901,10 +2901,11 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg; - struct pci_dev *pdev = card->dev; + struct pci_dev *pdev; int i; if (card) { + pdev = card->dev; if (card->msix_enable) { for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) synchronize_irq(card->msix_entries[i].vector); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index bdc51ffd43ec..d3e1561ca075 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -102,10 +102,9 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card) struct mwifiex_plt_wake_cfg *cfg; int ret; - if (!dev->of_node || - !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) { - dev_err(dev, "sdio platform data not available\n"); - return -1; + if (!of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) { + dev_err(dev, "required compatible string missing\n"); + return -EINVAL; } card->plt_of_node = dev->of_node; @@ -115,7 +114,7 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card) if (cfg && card->plt_of_node) { cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0); if (!cfg->irq_wifi) { - dev_err(dev, + dev_dbg(dev, 
"fail to parse irq_wifi from device tree\n"); } else { ret = devm_request_irq(dev, cfg->irq_wifi, @@ -183,24 +182,35 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) sdio_release_host(func); if (ret) { - pr_err("%s: failed to enable function\n", __func__); - kfree(card); - return -EIO; + dev_err(&func->dev, "failed to enable function\n"); + goto err_free; } /* device tree node parsing and platform specific configuration*/ - mwifiex_sdio_probe_of(&func->dev, card); - - if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops, - MWIFIEX_SDIO)) { - pr_err("%s: add card failed\n", __func__); - kfree(card); - sdio_claim_host(func); - ret = sdio_disable_func(func); - sdio_release_host(func); - ret = -1; + if (func->dev.of_node) { + ret = mwifiex_sdio_probe_of(&func->dev, card); + if (ret) { + dev_err(&func->dev, "SDIO dt node parse failed\n"); + goto err_disable; + } + } + + ret = mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops, + MWIFIEX_SDIO); + if (ret) { + dev_err(&func->dev, "add card failed\n"); + goto err_disable; } + return 0; + +err_disable: + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); +err_free: + kfree(card); + return ret; } @@ -544,6 +554,19 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0); } +static int mwifiex_sdio_dnld_fw(struct mwifiex_adapter *adapter, + struct mwifiex_fw_image *fw) +{ + struct sdio_mmc_card *card = adapter->card; + int ret; + + sdio_claim_host(card->func); + ret = mwifiex_dnld_fw(adapter, fw); + sdio_release_host(card->func); + + return ret; +} + /* * This function is used to initialize IO ports for the * chipsets supporting SDIO new mode eg SD8897. @@ -1492,7 +1515,7 @@ rx_curr_single: mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n", port, rx_len); - skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); + skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL); if (!skb) { mwifiex_dbg(adapter, ERROR, "single skb allocated fail,\t" @@ -1597,7 +1620,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len); - skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); + skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL); if (!skb) return -1; @@ -2732,6 +2755,7 @@ static struct mwifiex_if_ops sdio_ops = { .cleanup_mpa_buf = mwifiex_cleanup_mpa_buf, .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete, .event_complete = mwifiex_sdio_event_complete, + .dnld_fw = mwifiex_sdio_dnld_fw, .card_reset = mwifiex_sdio_card_reset, .reg_dump = mwifiex_sdio_reg_dump, .device_dump = mwifiex_sdio_device_dump, diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index d18c7979d723..bcfd4b743145 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -553,7 +553,8 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv, if (!memcmp(resp->params.deauth.mac_addr, &priv->curr_bss_params.bss_descriptor.mac_address, sizeof(resp->params.deauth.mac_addr))) - mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); + mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING, + false); return 0; } @@ -566,7 +567,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv, static int 
mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { - mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); + mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING, false); return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 0104108b4ea2..0cefd40b2762 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -40,8 +40,8 @@ * - Erases current SSID and BSSID information * - Sends a disconnect event to upper layers/applications. */ -void -mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) +void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code, + bool from_ap) { struct mwifiex_adapter *adapter = priv->adapter; @@ -140,7 +140,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) if (priv->bss_mode == NL80211_IFTYPE_STATION || priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, - false, GFP_KERNEL); + !from_ap, GFP_KERNEL); } eth_zero_addr(priv->cfg_bssid); @@ -574,7 +574,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) if (priv->media_connected) { reason_code = le16_to_cpu(*(__le16 *)adapter->event_body); - mwifiex_reset_connect_state(priv, reason_code); + mwifiex_reset_connect_state(priv, reason_code, true); } break; @@ -589,7 +589,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) if (priv->media_connected) { reason_code = le16_to_cpu(*(__le16 *)adapter->event_body); - mwifiex_reset_connect_state(priv, reason_code); + mwifiex_reset_connect_state(priv, reason_code, true); } break; @@ -599,7 +599,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) if (priv->media_connected) { reason_code = le16_to_cpu(*(__le16 *)adapter->event_body); - mwifiex_reset_connect_state(priv, reason_code); + mwifiex_reset_connect_state(priv, reason_code, true); } break; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 666e91af59d7..bf5660eb27d3 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -272,7 +272,7 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, int mwifiex_uap_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb) { - struct mwifiex_adapter *adapter = adapter; + struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_sta_node *src_node; struct ethhdr *p_ethhdr; struct sk_buff *skb_uap; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 8ee83b093c0d..e26a233684bb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -1839,20 +1839,22 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: rtl_efuse_shadow_map_update(hw); + break; - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!"); return; - } else { + + default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "boot from neither eeprom nor efuse, check 
it !!"); return; } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", hwinfo, HWSET_MAX_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 04eb5c3f8464..58b7ac6899ef 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -1680,21 +1680,28 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev; u16 i, usvalue; u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: rtl_efuse_shadow_map_update(hw); + break; - memcpy((void *)hwinfo, - (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!"); + return; + + default: + dev_warn(dev, "no efuse data\n"); + return; } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE); + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", hwinfo, HWSET_MAX_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 34ce06441d1b..ae1129f916d5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -351,15 +351,21 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw) u8 hwinfo[HWSET_MAX_SIZE] = {0}; u16 eeprom_id; - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: rtl_efuse_shadow_map_update(hw); - memcpy((void *)hwinfo, - (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { + break; + + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!\n"); + return; + + default: + pr_warn("rtl92cu: no efuse data\n\n"); + return; } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, "MAP", hwinfo, HWSET_MAX_SIZE); eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0])); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index f49b60d31450..8618c322a3f8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -1744,23 +1744,29 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev; u16 i, usvalue; u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; unsigned long flags; - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: spin_lock_irqsave(&globalmutex_for_power_and_efuse, flags); rtl_efuse_shadow_map_update(hw); _rtl92de_efuse_update_chip_version(hw); spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags); - memcpy((void *)hwinfo, (void *)&rtlefuse->efuse_map - [EFUSE_INIT_MAP][0], - 
HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { + break; + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!\n"); + return; + default: + dev_warn(dev, "no efuse data\n"); + return; } + + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", hwinfo, HWSET_MAX_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 9fd3f1b6e4a8..28c260dd11ea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2102,20 +2102,22 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: rtl_efuse_shadow_map_update(hw); + break; - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!"); return; - } else { + + default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "boot from neither eeprom nor efuse, check it !!"); return; } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", hwinfo, HWSET_MAX_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index 018340aedf09..c2bf8d1a7af3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -2414,19 +2414,10 @@ static void _rtl92ee_phy_reload_mac_registers(struct ieee80211_hw *hw, static void _rtl92ee_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg, bool is_patha_on, bool is2t) { - u32 pathon; u32 i; - pathon = is_patha_on ? 
0x0fc01616 : 0x0fc01616; - if (!is2t) { - pathon = 0x0fc01616; - rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0fc01616); - } else { - rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon); - } - - for (i = 1; i < IQK_ADDA_REG_NUM; i++) - rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon); + for (i = 0; i < IQK_ADDA_REG_NUM; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, 0x0fc01616); } static void _rtl92ee_phy_mac_setting_calibration(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 12b0978ba4fa..442f2b68ee58 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -1673,23 +1673,31 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev; u16 i, usvalue; u16 eeprom_id; u8 tempval; u8 hwinfo[HWSET_MAX_SIZE_92S]; u8 rf_path, index; - if (rtlefuse->epromtype == EEPROM_93C46) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: + rtl_efuse_shadow_map_update(hw); + break; + + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!\n"); - } else if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); + return; - memcpy((void *)hwinfo, (void *) - &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE_92S); + default: + dev_warn(dev, "no efuse data\n"); + return; } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + HWSET_MAX_SIZE_92S); + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", hwinfo, HWSET_MAX_SIZE_92S); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index a4b7eac6856f..57a1ba8822b1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -1630,6 +1630,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev; u16 i, usvalue; u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; @@ -1638,15 +1639,19 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw, /* need add */ return; } - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: rtl_efuse_shadow_map_update(hw); - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!"); + return; + + default: + dev_warn(dev, "no efuse data\n"); } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", hwinfo, HWSET_MAX_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 5a3df9198ddf..08288ac9020a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2026,6 +2026,7 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = 
rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev; u16 i, usvalue; u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; @@ -2055,15 +2056,22 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, /* needs to be added */ return; } - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: rtl_efuse_shadow_map_update(hw); + break; - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!"); + return; + + default: + dev_warn(dev, "no efuse data\n"); + return; } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"), hwinfo, HWSET_MAX_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 445f681d08c0..c5ca9dfb445c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -1019,7 +1019,7 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 index = (channel - 1); - u8 txpower; + u8 txpower = 0; u8 power_diff_byrate = 0; if (channel > 14 || channel < 1) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 71e4dd9965bb..b9436df9e1ec 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -3101,6 +3101,7 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev; u16 i, usvalue; u8 hwinfo[HWSET_MAX_SIZE]; u16 eeprom_id; @@ -3109,14 +3110,20 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ ;/* need add */ } - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: rtl_efuse_shadow_map_update(hw); - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { + break; + + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!"); + return; + + default: + dev_warn(dev, "no efuse data\n"); } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE); RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", hwinfo, HWSET_MAX_SIZE); diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 40658b62d077..35c14cc3f0d2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -398,7 +398,7 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, return -ENOLINK; msg_len -= pad_bytes; - if ((msg_len <= 0) || (!msg)) { + if (msg_len <= 0) { rsi_dbg(MGMT_RX_ZONE, "%s: Invalid rx msg of len = %d\n", __func__, msg_len); diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index 13fd734b61ec..82d94f83b6b4 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ 
b/drivers/net/wireless/wl3501_cs.c @@ -378,8 +378,7 @@ static int wl3501_esbq_exec(struct wl3501_card *this, void *sig, int sig_size) return rc; } -static int wl3501_get_mib_value(struct wl3501_card *this, u8 index, - void *bf, int size) +static int wl3501_request_mib(struct wl3501_card *this, u8 index, void *bf) { struct wl3501_get_req sig = { .sig_id = WL3501_SIG_GET_REQ, @@ -395,20 +394,32 @@ static int wl3501_get_mib_value(struct wl3501_card *this, u8 index, wl3501_set_to_wla(this, ptr, &sig, sizeof(sig)); wl3501_esbq_req(this, &ptr); this->sig_get_confirm.mib_status = 255; - spin_unlock_irqrestore(&this->lock, flags); - rc = wait_event_interruptible(this->wait, - this->sig_get_confirm.mib_status != 255); - if (!rc) - memcpy(bf, this->sig_get_confirm.mib_value, - size); - goto out; + rc = 0; } } spin_unlock_irqrestore(&this->lock, flags); -out: + return rc; } +static int wl3501_get_mib_value(struct wl3501_card *this, u8 index, + void *bf, int size) +{ + int rc; + + rc = wl3501_request_mib(this, index, bf); + if (rc) + return rc; + + rc = wait_event_interruptible(this->wait, + this->sig_get_confirm.mib_status != 255); + if (rc) + return rc; + + memcpy(bf, this->sig_get_confirm.mib_value, size); + return 0; +} + static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend) { struct wl3501_pwr_mgmt_req sig = { diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index e051e1b57609..de68707a99c7 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -361,7 +361,7 @@ struct phy_device *of_phy_attach(struct net_device *dev, } EXPORT_SYMBOL(of_phy_attach); -#if defined(CONFIG_FIXED_PHY) +#if IS_ENABLED(CONFIG_FIXED_PHY) /* * of_phy_is_fixed_link() and of_phy_register_fixed_link() must * support two DT bindings: diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 80dec87a94f8..4dbc1450bbe0 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -535,6 +535,7 @@ struct mlx4_caps { int max_rq_desc_sz; int max_qp_init_rdma; int max_qp_dest_rdma; + int max_tc_eth; u32 *qp0_qkey; u32 *qp0_proxy; u32 *qp1_proxy; @@ -1494,6 +1495,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, u16 offset, u16 size, u8 *data); +int mlx4_max_tc(struct mlx4_dev *dev); /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 73a48479892d..e0a3ed758287 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1330,6 +1330,7 @@ enum mlx5_cap_type { MLX5_CAP_ESWITCH, MLX5_CAP_RESERVED, MLX5_CAP_VECTOR_CALC, + MLX5_CAP_QOS, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1414,6 +1415,9 @@ enum mlx5_cap_type { MLX5_GET(vector_calc_cap, \ mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) +#define MLX5_CAP_QOS(mdev, cap)\ + MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 80776d0c52dc..46260fdc5305 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -481,6 +481,21 @@ struct mlx5_fc_stats { struct mlx5_eswitch; +struct mlx5_rl_entry { + u32 rate; + u16 index; + u16 refcount; +}; + +struct mlx5_rl_table { + /* protect rate limit table */ + struct mutex rl_lock; + u16 max_size; + u32 max_rate; + u32 min_rate; + struct mlx5_rl_entry *rl_entry; +}; + struct mlx5_priv { 
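A brief aside on the struct mlx5_rl_entry / struct mlx5_rl_table definitions just added: this is a fixed-size, refcounted rate table. A hedged sketch (not the actual mlx5 code, which also programs the rate into firmware and walks the table under rl_lock) of how such a table is typically searched, reusing an entry with a matching rate or claiming an unused slot:

static struct mlx5_rl_entry *rl_find_or_claim(struct mlx5_rl_table *table, u32 rate)
{
	struct mlx5_rl_entry *free_entry = NULL;
	int i;

	for (i = 0; i < table->max_size; i++) {
		if (table->rl_entry[i].rate == rate)
			return &table->rl_entry[i];	/* reuse; caller bumps refcount */
		if (!free_entry && !table->rl_entry[i].refcount)
			free_entry = &table->rl_entry[i];	/* first unused slot */
	}
	if (free_entry)
		free_entry->rate = rate;
	return free_entry;	/* NULL when the table is full */
}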
char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; @@ -544,6 +559,7 @@ struct mlx5_priv { struct mlx5_flow_root_namespace *esw_ingress_root_ns; struct mlx5_fc_stats fc_stats; + struct mlx5_rl_table rl_table; }; enum mlx5_device_state { @@ -861,6 +877,12 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out, size_t sz); +int mlx5_init_rl_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index); +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); + static inline int fw_initializing(struct mlx5_core_dev *dev) { return ioread32be(&dev->iseg->initializing) >> 31; @@ -938,6 +960,11 @@ static inline int mlx5_get_gid_table_len(u16 param) return 8 * (1 << param); } +static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) +{ + return !!(dev->priv.rl_table.max_size); +} + enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 9851862c0ec5..e3012cc64b8a 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -47,6 +47,14 @@ enum mlx5_module_id { MLX5_MODULE_ID_QSFP28 = 0x11, }; +enum mlx5_an_status { + MLX5_AN_UNAVAILABLE = 0, + MLX5_AN_COMPLETE = 1, + MLX5_AN_FAILED = 2, + MLX5_AN_LINK_UP = 3, + MLX5_AN_LINK_DOWN = 4, +}; + #define MLX5_EEPROM_MAX_BYTES 32 #define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff #define MLX5_I2C_ADDR_LOW 0x50 @@ -65,13 +73,17 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, u8 *proto_oper, int proto_mask, u8 local_port); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask); +int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, int proto_mask); +void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); +void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, + u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 8f2237eb3485..6c8cb9aa4c00 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -69,7 +69,7 @@ static inline int of_mdio_parse_addr(struct device *dev, } #endif /* CONFIG_OF */ -#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) +#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_FIXED_PHY) extern int of_phy_register_fixed_link(struct device_node *np); extern bool of_phy_is_fixed_link(struct device_node *np); #else diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index e1d5122e8a96..b1e3c57c7117 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -489,6 +489,30 @@ struct qed_common_ops { struct qed_chain *p_chain); /** + * @brief get_coalesce - Get coalesce parameters in usec + * + * @param cdev + * @param rx_coal - Rx coalesce value in usec + * @param tx_coal - Tx coalesce value in usec + * + */ + void (*get_coalesce)(struct 
qed_dev *cdev, u16 *rx_coal, u16 *tx_coal); + +/** + * @brief set_coalesce - Configure Rx coalesce value in usec + * + * @param cdev + * @param rx_coal - Rx coalesce value in usec + * @param tx_coal - Tx coalesce value in usec + * @param qid - Queue index + * @param sb_id - Status Block Id + * + * @return 0 on success, error otherwise. + */ + int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id); + +/** * @brief set_led - Configure LED mode * * @param cdev diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h index 1e8f216e2cf1..c68307bc306f 100644 --- a/include/linux/rxrpc.h +++ b/include/linux/rxrpc.h @@ -35,7 +35,7 @@ struct sockaddr_rxrpc { */ #define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ #define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ -#define RXRPC_EXCLUSIVE_CONNECTION 3 /* [clnt] use exclusive RxRPC connection */ +#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ #define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ /* @@ -52,6 +52,7 @@ struct sockaddr_rxrpc { #define RXRPC_LOCAL_ERROR 7 /* -r: local error generated [terminal] */ #define RXRPC_NEW_CALL 8 /* -r: [Service] new incoming call notification */ #define RXRPC_ACCEPT 9 /* s-: [Service] accept request */ +#define RXRPC_EXCLUSIVE_CALL 10 /* s-: Call should be on exclusive connection */ /* * RxRPC security levels diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h index 8144d9cd2908..098630f83a55 100644 --- a/include/net/codel_qdisc.h +++ b/include/net/codel_qdisc.h @@ -52,6 +52,7 @@ /* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ struct codel_skb_cb { codel_time_t enqueue_time; + unsigned int mem_usage; }; static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 4f7cee8344c4..909aff2db2b3 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -37,8 +37,10 @@ struct qdisc_size_table { }; struct Qdisc { - int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); - struct sk_buff * (*dequeue)(struct Qdisc *dev); + int (*enqueue)(struct sk_buff *skb, + struct Qdisc *sch, + struct sk_buff **to_free); + struct sk_buff * (*dequeue)(struct Qdisc *sch); unsigned int flags; #define TCQ_F_BUILTIN 1 #define TCQ_F_INGRESS 2 @@ -73,13 +75,14 @@ struct Qdisc { /* * For performance sake on SMP, we put highly modified fields at the end */ - struct Qdisc *next_sched ____cacheline_aligned_in_smp; - struct sk_buff *gso_skb; - unsigned long state; + struct sk_buff *gso_skb ____cacheline_aligned_in_smp; struct sk_buff_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; + unsigned long state; + struct Qdisc *next_sched; + struct sk_buff *skb_bad_txq; struct rcu_head rcu_head; int padded; atomic_t refcnt; @@ -160,7 +163,9 @@ struct Qdisc_ops { char id[IFNAMSIZ]; int priv_size; - int (*enqueue)(struct sk_buff *, struct Qdisc *); + int (*enqueue)(struct sk_buff *skb, + struct Qdisc *sch, + struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); @@ -498,10 +503,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb, #endif } -static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { qdisc_calculate_pkt_len(skb, sch); - return sch->enqueue(skb, 
sch); + return sch->enqueue(skb, sch, to_free); } static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) @@ -626,24 +632,36 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) return __qdisc_dequeue_head(sch, &sch->q); } +/* Instead of calling kfree_skb() while root qdisc lock is held, + * queue the skb for future freeing at end of __dev_xmit_skb() + */ +static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free) +{ + skb->next = *to_free; + *to_free = skb; +} + static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, - struct sk_buff_head *list) + struct sk_buff_head *list, + struct sk_buff **to_free) { struct sk_buff *skb = __skb_dequeue(list); if (likely(skb != NULL)) { unsigned int len = qdisc_pkt_len(skb); + qdisc_qstats_backlog_dec(sch, skb); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return len; } return 0; } -static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch) +static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch, + struct sk_buff **to_free) { - return __qdisc_queue_drop_head(sch, &sch->q); + return __qdisc_queue_drop_head(sch, &sch->q, to_free); } static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) @@ -724,9 +742,11 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) qdisc_qstats_drop(sch); } -static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) + +static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { - kfree_skb(skb); + __qdisc_drop(skb, to_free); qdisc_qstats_drop(sch); return NET_XMIT_DROP; diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 5f030b46cff4..b8f38e84d93a 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1362,6 +1362,7 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit * 31. 
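With bit 40 defined above, a driver for a 50G SR2-capable device exposes the mode through the link_ksettings bitmaps rather than the legacy 32-bit masks, which, as the surrounding comment notes, cannot carry bits above 31. A hypothetical snippet (names are illustrative):

static void example_set_50g_sr2(struct ethtool_link_ksettings *ks)
{
	ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseSR2_Full);
	ethtool_link_ksettings_add_link_mode(ks, advertising, 50000baseSR2_Full);
}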
Please do NOT define any SUPPORTED_* or ADVERTISED_* @@ -1370,7 +1371,7 @@ enum ethtool_link_mode_bit_indices { */ __ETHTOOL_LINK_MODE_LAST - = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, }; #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ diff --git a/net/core/dev.c b/net/core/dev.c index d40593b3b9fb..aba10d2a8bc3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3070,6 +3070,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, struct netdev_queue *txq) { spinlock_t *root_lock = qdisc_lock(q); + struct sk_buff *to_free = NULL; bool contended; int rc; @@ -3086,7 +3087,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, spin_lock(root_lock); if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { - kfree_skb(skb); + __qdisc_drop(skb, &to_free); rc = NET_XMIT_DROP; } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && qdisc_run_begin(q)) { @@ -3109,7 +3110,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, rc = NET_XMIT_SUCCESS; } else { - rc = q->enqueue(skb, q) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); @@ -3119,6 +3120,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, } } spin_unlock(root_lock); + if (unlikely(to_free)) + kfree_skb_list(to_free); if (unlikely(contended)) spin_unlock(&q->busylock); return rc; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 3d5feede962d..52f3b9b89e97 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -824,6 +824,17 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key, return 0; } +static bool labels_nonzero(const struct ovs_key_ct_labels *labels) +{ + size_t i; + + for (i = 0; i < sizeof(*labels); i++) + if (labels->ct_labels[i]) + return true; + + return false; +} + /* Lookup connection and confirm if unconfirmed. */ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, const struct ovs_conntrack_info *info, @@ -834,24 +845,32 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, err = __ovs_ct_lookup(net, key, info, skb); if (err) return err; - /* This is a no-op if the connection has already been confirmed. */ + + /* Apply changes before confirming the connection so that the initial + * conntrack NEW netlink event carries the values given in the CT + * action. + */ + if (info->mark.mask) { + err = ovs_ct_set_mark(skb, key, info->mark.value, + info->mark.mask); + if (err) + return err; + } + if (labels_nonzero(&info->labels.mask)) { + err = ovs_ct_set_labels(skb, key, &info->labels.value, + &info->labels.mask); + if (err) + return err; + } + /* This will take care of sending queued events even if the connection + * is already confirmed. + */ if (nf_conntrack_confirm(skb) != NF_ACCEPT) return -EINVAL; return 0; } -static bool labels_nonzero(const struct ovs_key_ct_labels *labels) -{ - size_t i; - - for (i = 0; i < sizeof(*labels); i++) - if (labels->ct_labels[i]) - return true; - - return false; -} - /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero * value if 'skb' is freed. 
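Condensed from the __dev_xmit_skb() hunk above, omitting the qdisc bypass and lock-contention handling: packets the qdisc drops while the root lock is held are chained onto a local to_free list via __qdisc_drop(), and the actual freeing is deferred until after the lock is released. A minimal restatement of that pattern:

static int toy_xmit(struct sk_buff *skb, struct Qdisc *q, spinlock_t *root_lock)
{
	struct sk_buff *to_free = NULL;
	int rc;

	spin_lock(root_lock);
	rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
	spin_unlock(root_lock);

	if (unlikely(to_free))
		kfree_skb_list(to_free);	/* freeing happens outside the qdisc lock */
	return rc;
}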
*/ @@ -876,19 +895,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb, err = ovs_ct_commit(net, key, info, skb); else err = ovs_ct_lookup(net, key, info, skb); - if (err) - goto err; - if (info->mark.mask) { - err = ovs_ct_set_mark(skb, key, info->mark.value, - info->mark.mask); - if (err) - goto err; - } - if (labels_nonzero(&info->labels.mask)) - err = ovs_ct_set_labels(skb, key, &info->labels.value, - &info->labels.mask); -err: skb_push(skb, nh_ofs); if (err) kfree_skb(skb); @@ -1145,6 +1152,20 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, } } +#ifdef CONFIG_NF_CONNTRACK_MARK + if (!info->commit && info->mark.mask) { + OVS_NLERR(log, + "Setting conntrack mark requires 'commit' flag."); + return -EINVAL; + } +#endif +#ifdef CONFIG_NF_CONNTRACK_LABELS + if (!info->commit && labels_nonzero(&info->labels.mask)) { + OVS_NLERR(log, + "Setting conntrack labels requires 'commit' flag."); + return -EINVAL; + } +#endif if (rem > 0) { OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem); return -EINVAL; diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index b005027f80cf..6522e50fb750 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -7,6 +7,7 @@ af-rxrpc-y := \ call_accept.o \ call_event.o \ call_object.o \ + conn_client.o \ conn_event.o \ conn_object.o \ input.o \ @@ -21,7 +22,6 @@ af-rxrpc-y := \ recvmsg.o \ security.o \ skbuff.o \ - transport.o \ utils.o af-rxrpc-$(CONFIG_PROC_FS) += proc.o diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index c83c3c75d665..5d3e795a7c48 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -97,7 +97,7 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx, srx->transport_len > len) return -EINVAL; - if (srx->transport.family != rx->proto) + if (srx->transport.family != rx->family) return -EAFNOSUPPORT; switch (srx->transport.family) { @@ -224,39 +224,6 @@ static int rxrpc_listen(struct socket *sock, int backlog) return ret; } -/* - * find a transport by address - */ -struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *rx, - struct sockaddr *addr, - int addr_len, int flags, - gfp_t gfp) -{ - struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; - struct rxrpc_transport *trans; - struct rxrpc_peer *peer; - - _enter("%p,%p,%d,%d", rx, addr, addr_len, flags); - - ASSERT(rx->local != NULL); - - if (rx->srx.transport_type != srx->transport_type) - return ERR_PTR(-ESOCKTNOSUPPORT); - if (rx->srx.transport.family != srx->transport.family) - return ERR_PTR(-EAFNOSUPPORT); - - /* find a remote transport endpoint from the local one */ - peer = rxrpc_lookup_peer(rx->local, srx, gfp); - if (IS_ERR(peer)) - return ERR_CAST(peer); - - /* find a transport */ - trans = rxrpc_get_transport(rx->local, peer, gfp); - rxrpc_put_peer(peer); - _leave(" = %p", trans); - return trans; -} - /** * rxrpc_kernel_begin_call - Allow a kernel service to begin a call * @sock: The socket on which to make the call @@ -277,39 +244,32 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, unsigned long user_call_ID, gfp_t gfp) { - struct rxrpc_conn_bundle *bundle; - struct rxrpc_transport *trans; + struct rxrpc_conn_parameters cp; struct rxrpc_call *call; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); + int ret; _enter(",,%x,%lx", key_serial(key), user_call_ID); - lock_sock(&rx->sk); + ret = rxrpc_validate_address(rx, srx, sizeof(*srx)); + if (ret < 0) + return ERR_PTR(ret); - trans = rxrpc_name_to_transport(rx, (struct sockaddr *)srx, - sizeof(*srx), 0, gfp); - if 
(IS_ERR(trans)) { - call = ERR_CAST(trans); - trans = NULL; - goto out_notrans; - } + lock_sock(&rx->sk); if (!key) key = rx->key; if (key && !key->payload.data[0]) key = NULL; /* a no-security key */ - bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, gfp); - if (IS_ERR(bundle)) { - call = ERR_CAST(bundle); - goto out; - } + memset(&cp, 0, sizeof(cp)); + cp.local = rx->local; + cp.key = key; + cp.security_level = 0; + cp.exclusive = false; + cp.service_id = srx->srx_service; + call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp); - call = rxrpc_new_client_call(rx, trans, bundle, user_call_ID, gfp); - rxrpc_put_bundle(trans, bundle); -out: - rxrpc_put_transport(trans); -out_notrans: release_sock(&rx->sk); _leave(" = %p", call); return call; @@ -487,7 +447,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNBOUND) goto error; - set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags); + rx->exclusive = true; goto success; case RXRPC_SECURITY_KEY: @@ -600,7 +560,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol, sk->sk_destruct = rxrpc_sock_destructor; rx = rxrpc_sk(sk); - rx->proto = protocol; + rx->family = protocol; rx->calls = RB_ROOT; INIT_LIST_HEAD(&rx->listen_link); @@ -662,16 +622,8 @@ static int rxrpc_release_sock(struct sock *sk) flush_workqueue(rxrpc_workqueue); rxrpc_purge_queue(&sk->sk_receive_queue); - if (rx->conn) { - rxrpc_put_connection(rx->conn); - rx->conn = NULL; - } - - if (rx->local) { - rxrpc_put_local(rx->local); - rx->local = NULL; - } - + rxrpc_put_local(rx->local); + rx->local = NULL; key_put(rx->key); rx->key = NULL; key_put(rx->securities); @@ -836,7 +788,6 @@ static void __exit af_rxrpc_exit(void) proto_unregister(&rxrpc_proto); rxrpc_destroy_all_calls(); rxrpc_destroy_all_connections(); - rxrpc_destroy_all_transports(); ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); @@ -856,6 +807,8 @@ static void __exit af_rxrpc_exit(void) _debug("synchronise RCU"); rcu_barrier(); _debug("destroy locals"); + ASSERT(idr_is_empty(&rxrpc_client_conn_ids)); + idr_destroy(&rxrpc_client_conn_ids); rxrpc_destroy_all_locals(); remove_proc_entry("rxrpc_conns", init_net.proc_net); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index c168268467cd..702db72196fb 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -37,6 +37,8 @@ struct rxrpc_crypt { #define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor) #define rxrpc_queue_conn(CONN) rxrpc_queue_work(&(CONN)->processor) +struct rxrpc_connection; + /* * sk_state for RxRPC sockets */ @@ -57,7 +59,6 @@ struct rxrpc_sock { struct sock sk; rxrpc_interceptor_t interceptor; /* kernel service Rx interceptor function */ struct rxrpc_local *local; /* local endpoint */ - struct rxrpc_connection *conn; /* exclusive virtual connection */ struct list_head listen_link; /* link in the local endpoint's listen list */ struct list_head secureq; /* calls awaiting connection security clearance */ struct list_head acceptq; /* calls awaiting acceptance */ @@ -66,13 +67,13 @@ struct rxrpc_sock { struct rb_root calls; /* outstanding calls on this socket */ unsigned long flags; #define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */ -#define RXRPC_SOCK_EXCLUSIVE_CONN 1 /* exclusive connection for a client socket */ rwlock_t call_lock; /* lock for calls */ u32 min_sec_level; /* minimum security level */ #define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT + bool exclusive; /* Exclusive connection for a client 
socket */ + sa_family_t family; /* Protocol family created with */ struct sockaddr_rxrpc srx; /* local address */ struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */ - sa_family_t proto; /* protocol created with */ }; #define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk) @@ -185,7 +186,8 @@ struct rxrpc_local { struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */ struct sk_buff_head reject_queue; /* packets awaiting rejection */ struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */ - struct mutex conn_lock; /* Client connection creation lock */ + struct rb_root client_conns; /* Client connections by socket params */ + spinlock_t client_conns_lock; /* Lock for client_conns */ spinlock_t lock; /* access lock */ rwlock_t services_lock; /* lock for services list */ int debug_id; /* debug ID for printks */ @@ -205,6 +207,8 @@ struct rxrpc_peer { struct rxrpc_local *local; struct hlist_head error_targets; /* targets for net error distribution */ struct work_struct error_distributor; + struct rb_root service_conns; /* Service connections */ + rwlock_t conn_lock; spinlock_t lock; /* access lock */ unsigned int if_mtu; /* interface MTU for this peer */ unsigned int mtu; /* network MTU for this peer */ @@ -224,66 +228,63 @@ struct rxrpc_peer { }; /* - * RxRPC point-to-point transport / connection manager definition - * - handles a bundle of connections between two endpoints - * - matched by { local, peer } - */ -struct rxrpc_transport { - struct rxrpc_local *local; /* local transport endpoint */ - struct rxrpc_peer *peer; /* remote transport endpoint */ - struct rb_root bundles; /* client connection bundles on this transport */ - struct rb_root client_conns; /* client connections on this transport */ - struct rb_root server_conns; /* server connections on this transport */ - struct list_head link; /* link in master session list */ - unsigned long put_time; /* time at which to reap */ - spinlock_t client_lock; /* client connection allocation lock */ - rwlock_t conn_lock; /* lock for active/dead connections */ - atomic_t usage; - int debug_id; /* debug ID for printks */ - unsigned int conn_idcounter; /* connection ID counter (client) */ + * Keys for matching a connection. 
+ */ +struct rxrpc_conn_proto { + unsigned long hash_key; + struct rxrpc_local *local; /* Representation of local endpoint */ + u32 epoch; /* epoch of this connection */ + u32 cid; /* connection ID */ + u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */ + u8 addr_size; /* Size of the address */ + sa_family_t family; /* Transport protocol */ + __be16 port; /* Peer UDP/UDP6 port */ + union { /* Peer address */ + struct in_addr ipv4_addr; + struct in6_addr ipv6_addr; + u32 raw_addr[0]; + }; }; -/* - * RxRPC client connection bundle - * - matched by { transport, service_id, key } - */ -struct rxrpc_conn_bundle { - struct rb_node node; /* node in transport's lookup tree */ - struct list_head unused_conns; /* unused connections in this bundle */ - struct list_head avail_conns; /* available connections in this bundle */ - struct list_head busy_conns; /* busy connections in this bundle */ - struct key *key; /* security for this bundle */ - wait_queue_head_t chanwait; /* wait for channel to become available */ - atomic_t usage; - int debug_id; /* debug ID for printks */ - unsigned short num_conns; /* number of connections in this bundle */ - u16 service_id; /* Service ID for this bundle */ - u8 security_ix; /* security type */ +struct rxrpc_conn_parameters { + struct rxrpc_local *local; /* Representation of local endpoint */ + struct rxrpc_peer *peer; /* Remote endpoint */ + struct key *key; /* Security details */ + bool exclusive; /* T if conn is exclusive */ + u16 service_id; /* Service ID for this connection */ + u32 security_level; /* Security level selected */ }; /* * RxRPC connection definition - * - matched by { transport, service_id, conn_id, direction, key } + * - matched by { local, peer, epoch, conn_id, direction } * - each connection can only handle four simultaneous calls */ struct rxrpc_connection { - struct rxrpc_transport *trans; /* transport session */ - struct rxrpc_conn_bundle *bundle; /* connection bundle (client) */ + struct rxrpc_conn_proto proto; + struct rxrpc_conn_parameters params; + + spinlock_t channel_lock; + struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* active calls */ + wait_queue_head_t channel_wq; /* queue to wait for channel to become available */ + struct work_struct processor; /* connection event processor */ - struct rb_node node; /* node in transport's lookup tree */ + union { + struct rb_node client_node; /* Node in local->client_conns */ + struct rb_node service_node; /* Node in peer->service_conns */ + }; struct list_head link; /* link in master connection list */ - struct list_head bundle_link; /* link in bundle */ struct rb_root calls; /* calls on this connection */ struct sk_buff_head rx_queue; /* received conn-level packets */ - struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */ const struct rxrpc_security *security; /* applied security module */ - struct key *key; /* security for this connection (client) */ struct key *server_key; /* security for this service */ struct crypto_skcipher *cipher; /* encryption handle */ struct rxrpc_crypt csum_iv; /* packet checksum base */ + unsigned long flags; +#define RXRPC_CONN_HAS_IDR 0 /* - Has a client conn ID assigned */ unsigned long events; #define RXRPC_CONN_CHALLENGE 0 /* send challenge packet */ - unsigned long put_time; /* time at which to reap */ + unsigned long put_time; /* Time at which last put */ rwlock_t lock; /* access lock */ spinlock_t state_lock; /* state-change lock */ atomic_t usage; @@ -304,17 +305,12 @@ struct rxrpc_connection { unsigned int call_counter; 
/* call ID counter */ atomic_t serial; /* packet serial number counter */ atomic_t hi_serial; /* highest serial number received */ - u8 avail_calls; /* number of calls available */ + atomic_t avail_chans; /* number of channels available */ u8 size_align; /* data size alignment (for security) */ u8 header_size; /* rxrpc + security header size */ u8 security_size; /* security header size */ - u32 security_level; /* security level negotiated */ u32 security_nonce; /* response re-use preventer */ - u32 epoch; /* epoch of this connection */ - u32 cid; /* connection ID */ - u16 service_id; /* service ID for this connection */ u8 security_ix; /* security type */ - u8 in_clientflag; /* RXRPC_CLIENT_INITIATED if we are server */ u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ }; @@ -360,6 +356,8 @@ enum rxrpc_call_event { * The states that a call can be in. */ enum rxrpc_call_state { + RXRPC_CALL_UNINITIALISED, + RXRPC_CALL_CLIENT_AWAIT_CONN, /* - client waiting for connection to become available */ RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */ RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */ @@ -448,7 +446,7 @@ struct rxrpc_call { unsigned long hash_key; /* Full hash key */ u8 in_clientflag; /* Copy of conn->in_clientflag for hashing */ struct rxrpc_local *local; /* Local endpoint. Used for hashing. */ - sa_family_t proto; /* Frame protocol */ + sa_family_t family; /* Frame protocol */ u32 call_id; /* call ID on connection */ u32 cid; /* connection ID plus channel index */ u32 epoch; /* epoch of this connection */ @@ -481,10 +479,6 @@ extern u32 rxrpc_epoch; extern atomic_t rxrpc_debug_id; extern struct workqueue_struct *rxrpc_workqueue; -extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *, - struct sockaddr *, - int, int, gfp_t); - /* * call_accept.c */ @@ -512,18 +506,26 @@ struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *, void *, sa_family_t, const void *); struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, - struct rxrpc_transport *, - struct rxrpc_conn_bundle *, + struct rxrpc_conn_parameters *, + struct sockaddr_rxrpc *, unsigned long, gfp_t); struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_connection *, - struct rxrpc_host_header *); + struct sk_buff *); void rxrpc_release_call(struct rxrpc_call *); void rxrpc_release_calls_on_socket(struct rxrpc_sock *); void __rxrpc_put_call(struct rxrpc_call *); void __exit rxrpc_destroy_all_calls(void); /* + * conn_client.c + */ +extern struct idr rxrpc_client_conn_ids; + +int rxrpc_get_client_connection_id(struct rxrpc_connection *, gfp_t); +void rxrpc_put_client_connection_id(struct rxrpc_connection *); + +/* * conn_event.c */ void rxrpc_process_connection(struct work_struct *); @@ -537,18 +539,32 @@ extern unsigned int rxrpc_connection_expiry; extern struct list_head rxrpc_connections; extern rwlock_t rxrpc_connection_lock; -struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *, - struct rxrpc_transport *, - struct key *, u16, gfp_t); -void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *); -int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *, - struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t); +int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *, + struct sockaddr_rxrpc *, gfp_t); +struct 
rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *, + struct rxrpc_peer *, + struct sk_buff *); +void rxrpc_disconnect_call(struct rxrpc_call *); void rxrpc_put_connection(struct rxrpc_connection *); void __exit rxrpc_destroy_all_connections(void); -struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *, - struct rxrpc_host_header *); -extern struct rxrpc_connection * -rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *); +struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *, + struct rxrpc_peer *, + struct sk_buff *); + +static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn) +{ + return conn->out_clientflag; +} + +static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn) +{ + return conn->proto.in_clientflag; +} + +static inline void rxrpc_get_connection(struct rxrpc_connection *conn) +{ + atomic_inc(&conn->usage); +} /* * input.c @@ -598,7 +614,7 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) static inline void rxrpc_put_local(struct rxrpc_local *local) { - if (atomic_dec_and_test(&local->usage)) + if (local && atomic_dec_and_test(&local->usage)) __rxrpc_put_local(local); } @@ -623,7 +639,7 @@ extern const char *rxrpc_acks(u8 reason); */ extern unsigned int rxrpc_resend_timeout; -int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *); +int rxrpc_send_data_packet(struct rxrpc_connection *, struct sk_buff *); int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t); /* @@ -655,7 +671,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer) extern void __rxrpc_put_peer(struct rxrpc_peer *peer); static inline void rxrpc_put_peer(struct rxrpc_peer *peer) { - if (atomic_dec_and_test(&peer->usage)) + if (peer && atomic_dec_and_test(&peer->usage)) __rxrpc_put_peer(peer); } @@ -704,18 +720,6 @@ static inline void rxrpc_sysctl_exit(void) {} #endif /* - * transport.c - */ -extern unsigned int rxrpc_transport_expiry; - -struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *, - struct rxrpc_peer *, gfp_t); -void rxrpc_put_transport(struct rxrpc_transport *); -void __exit rxrpc_destroy_all_transports(void); -struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *, - struct rxrpc_peer *); - -/* * utils.c */ void rxrpc_get_addr_from_skb(struct rxrpc_local *, const struct sk_buff *, diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 50136c76ebd1..202e053a3c6d 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -74,7 +74,6 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, struct sockaddr_rxrpc *srx) { struct rxrpc_connection *conn; - struct rxrpc_transport *trans; struct rxrpc_skb_priv *sp, *nsp; struct rxrpc_peer *peer; struct rxrpc_call *call; @@ -96,29 +95,21 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, notification->mark = RXRPC_SKB_MARK_NEW_CALL; peer = rxrpc_lookup_peer(local, srx, GFP_NOIO); - if (IS_ERR(peer)) { + if (!peer) { _debug("no peer"); ret = -EBUSY; goto error; } - trans = rxrpc_get_transport(local, peer, GFP_NOIO); + conn = rxrpc_incoming_connection(local, peer, skb); rxrpc_put_peer(peer); - if (IS_ERR(trans)) { - _debug("no trans"); - ret = -EBUSY; - goto error; - } - - conn = rxrpc_incoming_connection(trans, &sp->hdr); - rxrpc_put_transport(trans); if (IS_ERR(conn)) { _debug("no conn"); ret = PTR_ERR(conn); goto error; } - call = rxrpc_incoming_call(rx, conn, &sp->hdr); + call = rxrpc_incoming_call(rx, conn, skb); 
rxrpc_put_connection(conn); if (IS_ERR(call)) { _debug("no call"); @@ -141,7 +132,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, _debug("await conn sec"); list_add_tail(&call->accept_link, &rx->secureq); call->conn->state = RXRPC_CONN_SERVER_CHALLENGING; - atomic_inc(&call->conn->usage); + rxrpc_get_connection(call->conn); set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events); rxrpc_queue_conn(call->conn); } else { diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index e610b106c913..0ba84295f913 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -187,7 +187,7 @@ static void rxrpc_resend(struct rxrpc_call *call) _proto("Tx DATA %%%u { #%d }", sp->hdr.serial, sp->hdr.seq); - if (rxrpc_send_packet(call->conn->trans, txb) < 0) { + if (rxrpc_send_data_packet(call->conn, txb) < 0) { stop = true; sp->resend_at = jiffies + 3; } else { @@ -545,7 +545,7 @@ static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU)); - peer = call->conn->trans->peer; + peer = call->conn->params.peer; if (mtu < peer->maxdata) { spin_lock_bh(&peer->lock); peer->maxdata = mtu; @@ -836,13 +836,13 @@ void rxrpc_process_call(struct work_struct *work) /* there's a good chance we're going to have to send a message, so set * one up in advance */ - msg.msg_name = &call->conn->trans->peer->srx.transport; - msg.msg_namelen = call->conn->trans->peer->srx.transport_len; + msg.msg_name = &call->conn->params.peer->srx.transport; + msg.msg_namelen = call->conn->params.peer->srx.transport_len; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; - whdr.epoch = htonl(call->conn->epoch); + whdr.epoch = htonl(call->conn->proto.epoch); whdr.cid = htonl(call->cid); whdr.callNumber = htonl(call->call_id); whdr.seq = 0; @@ -1151,8 +1151,8 @@ send_ACK_with_skew: ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - ntohl(ack.serial)); send_ACK: - mtu = call->conn->trans->peer->if_mtu; - mtu -= call->conn->trans->peer->hdrsize; + mtu = call->conn->params.peer->if_mtu; + mtu -= call->conn->params.peer->hdrsize; ackinfo.maxMTU = htonl(mtu); ackinfo.rwind = htonl(rxrpc_rx_window_size); @@ -1206,7 +1206,7 @@ send_message_2: len += iov[1].iov_len; } - ret = kernel_sendmsg(call->conn->trans->local->socket, + ret = kernel_sendmsg(call->conn->params.local->socket, &msg, iov, ioc, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); @@ -1264,7 +1264,7 @@ maybe_reschedule: if (call->state >= RXRPC_CALL_COMPLETE && !list_empty(&call->accept_link)) { _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }", - call, call->events, call->flags, call->conn->cid); + call, call->events, call->flags, call->conn->proto.cid); read_lock_bh(&call->state_lock); if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) && @@ -1282,7 +1282,7 @@ error: * this means there's a race between clearing the flag and setting the * work pending bit and the work item being processed again */ if (call->events && !work_pending(&call->processor)) { - _debug("jumpstart %x", call->conn->cid); + _debug("jumpstart %x", call->conn->proto.cid); rxrpc_queue_call(call); } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 8b4d47b3ccac..ad933daae13b 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -31,6 +31,8 @@ unsigned int rxrpc_max_call_lifetime = 60 * HZ; unsigned int rxrpc_dead_call_expiry = 2 * HZ; const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = { + [RXRPC_CALL_UNINITIALISED] = "Uninit", + 
[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn", [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq", [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl", [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl", @@ -71,7 +73,7 @@ static unsigned long rxrpc_call_hashfunc( u32 call_id, u32 epoch, u16 service_id, - sa_family_t proto, + sa_family_t family, void *localptr, unsigned int addr_size, const u8 *peer_addr) @@ -92,7 +94,7 @@ static unsigned long rxrpc_call_hashfunc( key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT; key += cid & RXRPC_CHANNELMASK; key += in_clientflag; - key += proto; + key += family; /* Step through the peer address in 16-bit portions for speed */ for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++) key += *p; @@ -109,7 +111,7 @@ static void rxrpc_call_hash_add(struct rxrpc_call *call) unsigned int addr_size = 0; _enter(""); - switch (call->proto) { + switch (call->family) { case AF_INET: addr_size = sizeof(call->peer_ip.ipv4_addr); break; @@ -121,8 +123,8 @@ static void rxrpc_call_hash_add(struct rxrpc_call *call) } key = rxrpc_call_hashfunc(call->in_clientflag, call->cid, call->call_id, call->epoch, - call->service_id, call->proto, - call->conn->trans->local, addr_size, + call->service_id, call->family, + call->conn->params.local, addr_size, call->peer_ip.ipv6_addr); /* Store the full key in the call */ call->hash_key = key; @@ -151,7 +153,7 @@ static void rxrpc_call_hash_del(struct rxrpc_call *call) struct rxrpc_call *rxrpc_find_call_hash( struct rxrpc_host_header *hdr, void *localptr, - sa_family_t proto, + sa_family_t family, const void *peer_addr) { unsigned long key; @@ -161,7 +163,7 @@ struct rxrpc_call *rxrpc_find_call_hash( u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED; _enter(""); - switch (proto) { + switch (family) { case AF_INET: addr_size = sizeof(call->peer_ip.ipv4_addr); break; @@ -174,7 +176,7 @@ struct rxrpc_call *rxrpc_find_call_hash( key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber, hdr->epoch, hdr->serviceId, - proto, localptr, addr_size, + family, localptr, addr_size, peer_addr); hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) { if (call->hash_key == key && @@ -182,7 +184,7 @@ struct rxrpc_call *rxrpc_find_call_hash( call->cid == hdr->cid && call->in_clientflag == in_clientflag && call->service_id == hdr->serviceId && - call->proto == proto && + call->family == family && call->local == localptr && memcmp(call->peer_ip.ipv6_addr, peer_addr, addr_size) == 0 && @@ -261,6 +263,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) (unsigned long) call); INIT_WORK(&call->destroyer, &rxrpc_destroy_call); INIT_WORK(&call->processor, &rxrpc_process_call); + INIT_LIST_HEAD(&call->link); INIT_LIST_HEAD(&call->accept_link); skb_queue_head_init(&call->rx_queue); skb_queue_head_init(&call->rx_oos_queue); @@ -269,7 +272,6 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) rwlock_init(&call->state_lock); atomic_set(&call->usage, 1); call->debug_id = atomic_inc_return(&rxrpc_debug_id); - call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; memset(&call->sock_node, 0xed, sizeof(call->sock_node)); @@ -282,66 +284,77 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) } /* - * allocate a new client call and attempt to get a connection slot for it + * Allocate a new client call. 
*/ -static struct rxrpc_call *rxrpc_alloc_client_call( - struct rxrpc_sock *rx, - struct rxrpc_transport *trans, - struct rxrpc_conn_bundle *bundle, - gfp_t gfp) +static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, + struct sockaddr_rxrpc *srx, + gfp_t gfp) { struct rxrpc_call *call; - int ret; _enter(""); - ASSERT(rx != NULL); - ASSERT(trans != NULL); - ASSERT(bundle != NULL); + ASSERT(rx->local != NULL); call = rxrpc_alloc_call(gfp); if (!call) return ERR_PTR(-ENOMEM); + call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; sock_hold(&rx->sk); call->socket = rx; call->rx_data_post = 1; - ret = rxrpc_connect_call(rx, trans, bundle, call, gfp); - if (ret < 0) { - kmem_cache_free(rxrpc_call_jar, call); - return ERR_PTR(ret); - } - /* Record copies of information for hashtable lookup */ - call->proto = rx->proto; - call->local = trans->local; - switch (call->proto) { + call->family = rx->family; + call->local = rx->local; + switch (call->family) { case AF_INET: - call->peer_ip.ipv4_addr = - trans->peer->srx.transport.sin.sin_addr.s_addr; + call->peer_ip.ipv4_addr = srx->transport.sin.sin_addr.s_addr; break; case AF_INET6: memcpy(call->peer_ip.ipv6_addr, - trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, + srx->transport.sin6.sin6_addr.in6_u.u6_addr8, sizeof(call->peer_ip.ipv6_addr)); break; } - call->epoch = call->conn->epoch; - call->service_id = call->conn->service_id; - call->in_clientflag = call->conn->in_clientflag; + + call->service_id = srx->srx_service; + call->in_clientflag = 0; + + _leave(" = %p", call); + return call; +} + +/* + * Begin client call. + */ +static int rxrpc_begin_client_call(struct rxrpc_call *call, + struct rxrpc_conn_parameters *cp, + struct sockaddr_rxrpc *srx, + gfp_t gfp) +{ + int ret; + + /* Set up or get a connection record and set the protocol parameters, + * including channel number and call ID. + */ + ret = rxrpc_connect_call(call, cp, srx, gfp); + if (ret < 0) + return ret; + + call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; + /* Add the new call to the hashtable */ rxrpc_call_hash_add(call); - spin_lock(&call->conn->trans->peer->lock); - hlist_add_head(&call->error_link, &call->conn->trans->peer->error_targets); - spin_unlock(&call->conn->trans->peer->lock); + spin_lock(&call->conn->params.peer->lock); + hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets); + spin_unlock(&call->conn->params.peer->lock); call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime; add_timer(&call->lifetimer); - - _leave(" = %p", call); - return call; + return 0; } /* @@ -349,24 +362,24 @@ static struct rxrpc_call *rxrpc_alloc_client_call( * - called in process context with IRQs enabled */ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, - struct rxrpc_transport *trans, - struct rxrpc_conn_bundle *bundle, + struct rxrpc_conn_parameters *cp, + struct sockaddr_rxrpc *srx, unsigned long user_call_ID, gfp_t gfp) { struct rxrpc_call *call, *xcall; struct rb_node *parent, **pp; + int ret; - _enter("%p,%d,%d,%lx", - rx, trans->debug_id, bundle ? 
bundle->debug_id : -1, - user_call_ID); + _enter("%p,%lx", rx, user_call_ID); - call = rxrpc_alloc_client_call(rx, trans, bundle, gfp); + call = rxrpc_alloc_client_call(rx, srx, gfp); if (IS_ERR(call)) { _leave(" = %ld", PTR_ERR(call)); return call; } + /* Publish the call, even though it is incompletely set up as yet */ call->user_call_ID = user_call_ID; __set_bit(RXRPC_CALL_HAS_USERID, &call->flags); @@ -396,11 +409,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, list_add_tail(&call->link, &rxrpc_calls); write_unlock_bh(&rxrpc_call_lock); + ret = rxrpc_begin_client_call(call, cp, srx, gfp); + if (ret < 0) + goto error; + _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); _leave(" = %p [new]", call); return call; +error: + write_lock(&rx->call_lock); + rb_erase(&call->sock_node, &rx->calls); + write_unlock(&rx->call_lock); + rxrpc_put_call(call); + + write_lock_bh(&rxrpc_call_lock); + list_del(&call->link); + write_unlock_bh(&rxrpc_call_lock); + + rxrpc_put_call(call); + _leave(" = %d", ret); + return ERR_PTR(ret); + /* We unexpectedly found the user ID in the list after taking * the call_lock. This shouldn't happen unless the user races * with itself and tries to add the same user ID twice at the @@ -419,8 +450,9 @@ found_user_ID_now_present: */ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, struct rxrpc_connection *conn, - struct rxrpc_host_header *hdr) + struct sk_buff *skb) { + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_call *call, *candidate; struct rb_node **p, *parent; u32 call_id; @@ -433,13 +465,13 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, if (!candidate) return ERR_PTR(-EBUSY); - candidate->socket = rx; - candidate->conn = conn; - candidate->cid = hdr->cid; - candidate->call_id = hdr->callNumber; - candidate->channel = hdr->cid & RXRPC_CHANNELMASK; - candidate->rx_data_post = 0; - candidate->state = RXRPC_CALL_SERVER_ACCEPTING; + candidate->socket = rx; + candidate->conn = conn; + candidate->cid = sp->hdr.cid; + candidate->call_id = sp->hdr.callNumber; + candidate->channel = sp->hdr.cid & RXRPC_CHANNELMASK; + candidate->rx_data_post = 0; + candidate->state = RXRPC_CALL_SERVER_ACCEPTING; if (conn->security_ix > 0) candidate->state = RXRPC_CALL_SERVER_SECURING; @@ -448,7 +480,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, /* set the channel for this call */ call = conn->channels[candidate->channel]; _debug("channel[%u] is %p", candidate->channel, call); - if (call && call->call_id == hdr->callNumber) { + if (call && call->call_id == sp->hdr.callNumber) { /* already set; must've been a duplicate packet */ _debug("extant call [%d]", call->state); ASSERTCMP(call->conn, ==, conn); @@ -486,7 +518,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, /* check the call number isn't duplicate */ _debug("check dup"); - call_id = hdr->callNumber; + call_id = sp->hdr.callNumber; p = &conn->calls.rb_node; parent = NULL; while (*p) { @@ -512,36 +544,36 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, rb_insert_color(&call->conn_node, &conn->calls); conn->channels[call->channel] = call; sock_hold(&rx->sk); - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); write_unlock_bh(&conn->lock); - spin_lock(&conn->trans->peer->lock); - hlist_add_head(&call->error_link, &conn->trans->peer->error_targets); - spin_unlock(&conn->trans->peer->lock); + spin_lock(&conn->params.peer->lock); + hlist_add_head(&call->error_link, &conn->params.peer->error_targets); + 
spin_unlock(&conn->params.peer->lock); write_lock_bh(&rxrpc_call_lock); list_add_tail(&call->link, &rxrpc_calls); write_unlock_bh(&rxrpc_call_lock); /* Record copies of information for hashtable lookup */ - call->proto = rx->proto; - call->local = conn->trans->local; - switch (call->proto) { + call->family = rx->family; + call->local = conn->params.local; + switch (call->family) { case AF_INET: call->peer_ip.ipv4_addr = - conn->trans->peer->srx.transport.sin.sin_addr.s_addr; + conn->params.peer->srx.transport.sin.sin_addr.s_addr; break; case AF_INET6: memcpy(call->peer_ip.ipv6_addr, - conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, + conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, sizeof(call->peer_ip.ipv6_addr)); break; default: break; } - call->epoch = conn->epoch; - call->service_id = conn->service_id; - call->in_clientflag = conn->in_clientflag; + call->epoch = conn->proto.epoch; + call->service_id = conn->params.service_id; + call->in_clientflag = conn->proto.in_clientflag; /* Add the new call to the hashtable */ rxrpc_call_hash_add(call); @@ -609,40 +641,13 @@ void rxrpc_release_call(struct rxrpc_call *call) write_unlock_bh(&rx->call_lock); /* free up the channel for reuse */ - spin_lock(&conn->trans->client_lock); + spin_lock(&conn->channel_lock); write_lock_bh(&conn->lock); write_lock(&call->state_lock); - if (conn->channels[call->channel] == call) - conn->channels[call->channel] = NULL; - - if (conn->out_clientflag && conn->bundle) { - conn->avail_calls++; - switch (conn->avail_calls) { - case 1: - list_move_tail(&conn->bundle_link, - &conn->bundle->avail_conns); - case 2 ... RXRPC_MAXCALLS - 1: - ASSERT(conn->channels[0] == NULL || - conn->channels[1] == NULL || - conn->channels[2] == NULL || - conn->channels[3] == NULL); - break; - case RXRPC_MAXCALLS: - list_move_tail(&conn->bundle_link, - &conn->bundle->unused_conns); - ASSERT(conn->channels[0] == NULL && - conn->channels[1] == NULL && - conn->channels[2] == NULL && - conn->channels[3] == NULL); - break; - default: - pr_err("conn->avail_calls=%d\n", conn->avail_calls); - BUG(); - } - } + rxrpc_disconnect_call(call); - spin_unlock(&conn->trans->client_lock); + spin_unlock(&conn->channel_lock); if (call->state < RXRPC_CALL_COMPLETE && call->state != RXRPC_CALL_CLIENT_FINAL_ACK) { @@ -811,9 +816,9 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call) } if (call->conn) { - spin_lock(&call->conn->trans->peer->lock); + spin_lock(&call->conn->params.peer->lock); hlist_del_init(&call->error_link); - spin_unlock(&call->conn->trans->peer->lock); + spin_unlock(&call->conn->params.peer->lock); write_lock_bh(&call->conn->lock); rb_erase(&call->conn_node, &call->conn->calls); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c new file mode 100644 index 000000000000..82488d6adb83 --- /dev/null +++ b/net/rxrpc/conn_client.c @@ -0,0 +1,94 @@ +/* Client connection-specific management code. + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/timer.h> +#include "ar-internal.h" + +/* + * We use machine-unique IDs for our client connections. 
+ */ +DEFINE_IDR(rxrpc_client_conn_ids); +static DEFINE_SPINLOCK(rxrpc_conn_id_lock); + +/* + * Get a connection ID and epoch for a client connection from the global pool. + * The connection struct pointer is then recorded in the idr radix tree. The + * epoch is changed if this wraps. + * + * TODO: The IDR tree gets very expensive on memory if the connection IDs are + * widely scattered throughout the number space, so we shall need to retire + * connections that have, say, an ID more than four times the maximum number of + * client conns away from the current allocation point to try and keep the IDs + * concentrated. We will also need to retire connections from an old epoch. + */ +int rxrpc_get_client_connection_id(struct rxrpc_connection *conn, gfp_t gfp) +{ + u32 epoch; + int id; + + _enter(""); + + idr_preload(gfp); + spin_lock(&rxrpc_conn_id_lock); + + epoch = rxrpc_epoch; + + /* We could use idr_alloc_cyclic() here, but we really need to know + * when the thing wraps so that we can advance the epoch. + */ + if (rxrpc_client_conn_ids.cur == 0) + rxrpc_client_conn_ids.cur = 1; + id = idr_alloc(&rxrpc_client_conn_ids, conn, + rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT); + if (id < 0) { + if (id != -ENOSPC) + goto error; + id = idr_alloc(&rxrpc_client_conn_ids, conn, + 1, 0x40000000, GFP_NOWAIT); + if (id < 0) + goto error; + epoch++; + rxrpc_epoch = epoch; + } + rxrpc_client_conn_ids.cur = id + 1; + + spin_unlock(&rxrpc_conn_id_lock); + idr_preload_end(); + + conn->proto.epoch = epoch; + conn->proto.cid = id << RXRPC_CIDSHIFT; + set_bit(RXRPC_CONN_HAS_IDR, &conn->flags); + _leave(" [CID %x:%x]", epoch, conn->proto.cid); + return 0; + +error: + spin_unlock(&rxrpc_conn_id_lock); + idr_preload_end(); + _leave(" = %d", id); + return id; +} + +/* + * Release a connection ID for a client connection from the global pool. 
+ */ +void rxrpc_put_client_connection_id(struct rxrpc_connection *conn) +{ + if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) { + spin_lock(&rxrpc_conn_id_lock); + idr_remove(&rxrpc_client_conn_ids, + conn->proto.cid >> RXRPC_CIDSHIFT); + spin_unlock(&rxrpc_conn_id_lock); + } +} diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 00c92b614485..bf6971555eac 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -88,14 +88,14 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code); - msg.msg_name = &conn->trans->peer->srx.transport; - msg.msg_namelen = conn->trans->peer->srx.transport_len; + msg.msg_name = &conn->params.peer->srx.transport; + msg.msg_namelen = conn->params.peer->srx.transport_len; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; - whdr.epoch = htonl(conn->epoch); - whdr.cid = htonl(conn->cid); + whdr.epoch = htonl(conn->proto.epoch); + whdr.cid = htonl(conn->proto.cid); whdr.callNumber = 0; whdr.seq = 0; whdr.type = RXRPC_PACKET_TYPE_ABORT; @@ -103,7 +103,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, whdr.userStatus = 0; whdr.securityIndex = conn->security_ix; whdr._rsvd = 0; - whdr.serviceId = htons(conn->service_id); + whdr.serviceId = htons(conn->params.service_id); word = htonl(conn->local_abort); @@ -118,7 +118,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, whdr.serial = htonl(serial); _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); return -EAGAIN; @@ -220,7 +220,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn) ASSERT(conn->security_ix != 0); - if (!conn->key) { + if (!conn->params.key) { _debug("set up security"); ret = rxrpc_init_server_conn_security(conn); switch (ret) { @@ -263,7 +263,7 @@ void rxrpc_process_connection(struct work_struct *work) _enter("{%d}", conn->debug_id); - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) { rxrpc_secure_connection(conn); diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 8ecde4b77b55..4bfad7cf96cb 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -32,152 +32,6 @@ DEFINE_RWLOCK(rxrpc_connection_lock); static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper); /* - * allocate a new client connection bundle - */ -static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp) -{ - struct rxrpc_conn_bundle *bundle; - - _enter(""); - - bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp); - if (bundle) { - INIT_LIST_HEAD(&bundle->unused_conns); - INIT_LIST_HEAD(&bundle->avail_conns); - INIT_LIST_HEAD(&bundle->busy_conns); - init_waitqueue_head(&bundle->chanwait); - atomic_set(&bundle->usage, 1); - } - - _leave(" = %p", bundle); - return bundle; -} - -/* - * compare bundle parameters with what we're looking for - * - return -ve, 0 or +ve - */ -static inline -int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle, - struct key *key, u16 service_id) -{ - return (bundle->service_id - service_id) ?: - ((unsigned long)bundle->key - (unsigned long)key); -} - -/* - * get bundle of client connections that a client socket can make use of - */ -struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx, - struct 
rxrpc_transport *trans, - struct key *key, - u16 service_id, - gfp_t gfp) -{ - struct rxrpc_conn_bundle *bundle, *candidate; - struct rb_node *p, *parent, **pp; - - _enter("%p{%x},%x,%hx,", - rx, key_serial(key), trans->debug_id, service_id); - - /* search the extant bundles first for one that matches the specified - * user ID */ - spin_lock(&trans->client_lock); - - p = trans->bundles.rb_node; - while (p) { - bundle = rb_entry(p, struct rxrpc_conn_bundle, node); - - if (rxrpc_cmp_bundle(bundle, key, service_id) < 0) - p = p->rb_left; - else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0) - p = p->rb_right; - else - goto found_extant_bundle; - } - - spin_unlock(&trans->client_lock); - - /* not yet present - create a candidate for a new record and then - * redo the search */ - candidate = rxrpc_alloc_bundle(gfp); - if (!candidate) { - _leave(" = -ENOMEM"); - return ERR_PTR(-ENOMEM); - } - - candidate->key = key_get(key); - candidate->service_id = service_id; - - spin_lock(&trans->client_lock); - - pp = &trans->bundles.rb_node; - parent = NULL; - while (*pp) { - parent = *pp; - bundle = rb_entry(parent, struct rxrpc_conn_bundle, node); - - if (rxrpc_cmp_bundle(bundle, key, service_id) < 0) - pp = &(*pp)->rb_left; - else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0) - pp = &(*pp)->rb_right; - else - goto found_extant_second; - } - - /* second search also failed; add the new bundle */ - bundle = candidate; - candidate = NULL; - - rb_link_node(&bundle->node, parent, pp); - rb_insert_color(&bundle->node, &trans->bundles); - spin_unlock(&trans->client_lock); - _net("BUNDLE new on trans %d", trans->debug_id); - _leave(" = %p [new]", bundle); - return bundle; - - /* we found the bundle in the list immediately */ -found_extant_bundle: - atomic_inc(&bundle->usage); - spin_unlock(&trans->client_lock); - _net("BUNDLE old on trans %d", trans->debug_id); - _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage)); - return bundle; - - /* we found the bundle on the second time through the list */ -found_extant_second: - atomic_inc(&bundle->usage); - spin_unlock(&trans->client_lock); - kfree(candidate); - _net("BUNDLE old2 on trans %d", trans->debug_id); - _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage)); - return bundle; -} - -/* - * release a bundle - */ -void rxrpc_put_bundle(struct rxrpc_transport *trans, - struct rxrpc_conn_bundle *bundle) -{ - _enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage)); - - if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) { - _debug("Destroy bundle"); - rb_erase(&bundle->node, &trans->bundles); - spin_unlock(&trans->client_lock); - ASSERT(list_empty(&bundle->unused_conns)); - ASSERT(list_empty(&bundle->avail_conns)); - ASSERT(list_empty(&bundle->busy_conns)); - ASSERTCMP(bundle->num_conns, ==, 0); - key_put(bundle->key); - kfree(bundle); - } - - _leave(""); -} - -/* * allocate a new connection */ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) @@ -188,8 +42,10 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) conn = kzalloc(sizeof(struct rxrpc_connection), gfp); if (conn) { + spin_lock_init(&conn->channel_lock); + init_waitqueue_head(&conn->channel_wq); INIT_WORK(&conn->processor, &rxrpc_process_connection); - INIT_LIST_HEAD(&conn->bundle_link); + INIT_LIST_HEAD(&conn->link); conn->calls = RB_ROOT; skb_queue_head_init(&conn->rx_queue); conn->security = &rxrpc_no_security; @@ -197,7 +53,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) 
spin_lock_init(&conn->state_lock); atomic_set(&conn->usage, 1); conn->debug_id = atomic_inc_return(&rxrpc_debug_id); - conn->avail_calls = RXRPC_MAXCALLS; + atomic_set(&conn->avail_chans, RXRPC_MAXCALLS); conn->size_align = 4; conn->header_size = sizeof(struct rxrpc_wire_header); } @@ -207,81 +63,6 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) } /* - * assign a connection ID to a connection and add it to the transport's - * connection lookup tree - * - called with transport client lock held - */ -static void rxrpc_assign_connection_id(struct rxrpc_connection *conn) -{ - struct rxrpc_connection *xconn; - struct rb_node *parent, **p; - __be32 epoch; - u32 cid; - - _enter(""); - - epoch = conn->epoch; - - write_lock_bh(&conn->trans->conn_lock); - - conn->trans->conn_idcounter += RXRPC_CID_INC; - if (conn->trans->conn_idcounter < RXRPC_CID_INC) - conn->trans->conn_idcounter = RXRPC_CID_INC; - cid = conn->trans->conn_idcounter; - -attempt_insertion: - parent = NULL; - p = &conn->trans->client_conns.rb_node; - - while (*p) { - parent = *p; - xconn = rb_entry(parent, struct rxrpc_connection, node); - - if (epoch < xconn->epoch) - p = &(*p)->rb_left; - else if (epoch > xconn->epoch) - p = &(*p)->rb_right; - else if (cid < xconn->cid) - p = &(*p)->rb_left; - else if (cid > xconn->cid) - p = &(*p)->rb_right; - else - goto id_exists; - } - - /* we've found a suitable hole - arrange for this connection to occupy - * it */ - rb_link_node(&conn->node, parent, p); - rb_insert_color(&conn->node, &conn->trans->client_conns); - - conn->cid = cid; - write_unlock_bh(&conn->trans->conn_lock); - _leave(" [CID %x]", cid); - return; - - /* we found a connection with the proposed ID - walk the tree from that - * point looking for the next unused ID */ -id_exists: - for (;;) { - cid += RXRPC_CID_INC; - if (cid < RXRPC_CID_INC) { - cid = RXRPC_CID_INC; - conn->trans->conn_idcounter = cid; - goto attempt_insertion; - } - - parent = rb_next(parent); - if (!parent) - goto attempt_insertion; - - xconn = rb_entry(parent, struct rxrpc_connection, node); - if (epoch < xconn->epoch || - cid < xconn->cid) - goto attempt_insertion; - } -} - -/* * add a call to a connection's call-by-ID tree */ static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn, @@ -315,286 +96,242 @@ static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn, } /* - * connect a call on an exclusive connection + * Allocate a client connection. The caller must take care to clear any + * padding bytes in *cp. 
*/ -static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, - struct rxrpc_transport *trans, - u16 service_id, - struct rxrpc_call *call, - gfp_t gfp) +static struct rxrpc_connection * +rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) { struct rxrpc_connection *conn; - int chan, ret; + int ret; _enter(""); - conn = rx->conn; + conn = rxrpc_alloc_connection(gfp); if (!conn) { - /* not yet present - create a candidate for a new connection - * and then redo the check */ - conn = rxrpc_alloc_connection(gfp); - if (!conn) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } - - conn->trans = trans; - conn->bundle = NULL; - conn->service_id = service_id; - conn->epoch = rxrpc_epoch; - conn->in_clientflag = 0; - conn->out_clientflag = RXRPC_CLIENT_INITIATED; - conn->cid = 0; - conn->state = RXRPC_CONN_CLIENT; - conn->avail_calls = RXRPC_MAXCALLS - 1; - conn->security_level = rx->min_sec_level; - conn->key = key_get(rx->key); - - ret = rxrpc_init_client_conn_security(conn); - if (ret < 0) { - key_put(conn->key); - kfree(conn); - _leave(" = %d [key]", ret); - return ret; - } - - write_lock_bh(&rxrpc_connection_lock); - list_add_tail(&conn->link, &rxrpc_connections); - write_unlock_bh(&rxrpc_connection_lock); - - spin_lock(&trans->client_lock); - atomic_inc(&trans->usage); - - _net("CONNECT EXCL new %d on TRANS %d", - conn->debug_id, conn->trans->debug_id); + _leave(" = -ENOMEM"); + return ERR_PTR(-ENOMEM); + } - rxrpc_assign_connection_id(conn); - rx->conn = conn; - } else { - spin_lock(&trans->client_lock); + conn->params = *cp; + conn->proto.local = cp->local; + conn->proto.epoch = rxrpc_epoch; + conn->proto.cid = 0; + conn->proto.in_clientflag = 0; + conn->proto.family = cp->peer->srx.transport.family; + conn->out_clientflag = RXRPC_CLIENT_INITIATED; + conn->state = RXRPC_CONN_CLIENT; + + switch (conn->proto.family) { + case AF_INET: + conn->proto.addr_size = sizeof(conn->proto.ipv4_addr); + conn->proto.ipv4_addr = cp->peer->srx.transport.sin.sin_addr; + conn->proto.port = cp->peer->srx.transport.sin.sin_port; + break; } - /* we've got a connection with a free channel and we can now attach the - * call to it - * - we're holding the transport's client lock - * - we're holding a reference on the connection - */ - for (chan = 0; chan < RXRPC_MAXCALLS; chan++) - if (!conn->channels[chan]) - goto found_channel; - goto no_free_channels; + ret = rxrpc_get_client_connection_id(conn, gfp); + if (ret < 0) + goto error_0; -found_channel: - atomic_inc(&conn->usage); - conn->channels[chan] = call; - call->conn = conn; - call->channel = chan; - call->cid = conn->cid | chan; - call->call_id = ++conn->call_counter; + ret = rxrpc_init_client_conn_security(conn); + if (ret < 0) + goto error_1; - _net("CONNECT client on conn %d chan %d as call %x", - conn->debug_id, chan, call->call_id); + conn->security->prime_packet_security(conn); - spin_unlock(&trans->client_lock); + write_lock(&rxrpc_connection_lock); + list_add_tail(&conn->link, &rxrpc_connections); + write_unlock(&rxrpc_connection_lock); - rxrpc_add_call_ID_to_conn(conn, call); - _leave(" = 0"); - return 0; + /* We steal the caller's peer ref. 
*/ + cp->peer = NULL; + rxrpc_get_local(conn->params.local); + key_get(conn->params.key); + + _leave(" = %p", conn); + return conn; -no_free_channels: - spin_unlock(&trans->client_lock); - _leave(" = -ENOSR"); - return -ENOSR; +error_1: + rxrpc_put_client_connection_id(conn); +error_0: + kfree(conn); + _leave(" = %d", ret); + return ERR_PTR(ret); } /* * find a connection for a call * - called in process context with IRQs enabled */ -int rxrpc_connect_call(struct rxrpc_sock *rx, - struct rxrpc_transport *trans, - struct rxrpc_conn_bundle *bundle, - struct rxrpc_call *call, +int rxrpc_connect_call(struct rxrpc_call *call, + struct rxrpc_conn_parameters *cp, + struct sockaddr_rxrpc *srx, gfp_t gfp) { - struct rxrpc_connection *conn, *candidate; - int chan, ret; + struct rxrpc_connection *conn, *candidate = NULL; + struct rxrpc_local *local = cp->local; + struct rb_node *p, **pp, *parent; + long diff; + int chan; DECLARE_WAITQUEUE(myself, current); - _enter("%p,%lx,", rx, call->user_call_ID); - - if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags)) - return rxrpc_connect_exclusive(rx, trans, bundle->service_id, - call, gfp); - - spin_lock(&trans->client_lock); - for (;;) { - /* see if the bundle has a call slot available */ - if (!list_empty(&bundle->avail_conns)) { - _debug("avail"); - conn = list_entry(bundle->avail_conns.next, - struct rxrpc_connection, - bundle_link); - if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) { - list_del_init(&conn->bundle_link); - bundle->num_conns--; - continue; - } - if (--conn->avail_calls == 0) - list_move(&conn->bundle_link, - &bundle->busy_conns); - ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS); - ASSERT(conn->channels[0] == NULL || - conn->channels[1] == NULL || - conn->channels[2] == NULL || - conn->channels[3] == NULL); - atomic_inc(&conn->usage); - break; - } - - if (!list_empty(&bundle->unused_conns)) { - _debug("unused"); - conn = list_entry(bundle->unused_conns.next, - struct rxrpc_connection, - bundle_link); - if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) { - list_del_init(&conn->bundle_link); - bundle->num_conns--; - continue; - } - ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS); - conn->avail_calls = RXRPC_MAXCALLS - 1; - ASSERT(conn->channels[0] == NULL && - conn->channels[1] == NULL && - conn->channels[2] == NULL && - conn->channels[3] == NULL); - atomic_inc(&conn->usage); - list_move(&conn->bundle_link, &bundle->avail_conns); - break; + _enter("{%d,%lx},", call->debug_id, call->user_call_ID); + + cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); + if (!cp->peer) + return -ENOMEM; + + if (!cp->exclusive) { + /* Search for a existing client connection unless this is going + * to be a connection that's used exclusively for a single call. + */ + _debug("search 1"); + spin_lock(&local->client_conns_lock); + p = local->client_conns.rb_node; + while (p) { + conn = rb_entry(p, struct rxrpc_connection, client_node); + +#define cmp(X) ((long)conn->params.X - (long)cp->X) + diff = (cmp(peer) ?: + cmp(key) ?: + cmp(security_level)); + if (diff < 0) + p = p->rb_left; + else if (diff > 0) + p = p->rb_right; + else + goto found_extant_conn; } + spin_unlock(&local->client_conns_lock); + } - /* need to allocate a new connection */ - _debug("get new conn [%d]", bundle->num_conns); - - spin_unlock(&trans->client_lock); + /* We didn't find a connection or we want an exclusive one. 
*/ + _debug("get new conn"); + candidate = rxrpc_alloc_client_connection(cp, gfp); + if (!candidate) { + _leave(" = -ENOMEM"); + return -ENOMEM; + } - if (signal_pending(current)) - goto interrupted; + if (cp->exclusive) { + /* Assign the call on an exclusive connection to channel 0 and + * don't add the connection to the endpoint's shareable conn + * lookup tree. + */ + _debug("exclusive chan 0"); + conn = candidate; + atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1); + spin_lock(&conn->channel_lock); + chan = 0; + goto found_channel; + } - if (bundle->num_conns >= 20) { - _debug("too many conns"); + /* We need to redo the search before attempting to add a new connection + * lest we race with someone else adding a conflicting instance. + */ + _debug("search 2"); + spin_lock(&local->client_conns_lock); - if (!gfpflags_allow_blocking(gfp)) { - _leave(" = -EAGAIN"); - return -EAGAIN; - } + pp = &local->client_conns.rb_node; + parent = NULL; + while (*pp) { + parent = *pp; + conn = rb_entry(parent, struct rxrpc_connection, client_node); - add_wait_queue(&bundle->chanwait, &myself); - for (;;) { - set_current_state(TASK_INTERRUPTIBLE); - if (bundle->num_conns < 20 || - !list_empty(&bundle->unused_conns) || - !list_empty(&bundle->avail_conns)) - break; - if (signal_pending(current)) - goto interrupted_dequeue; - schedule(); - } - remove_wait_queue(&bundle->chanwait, &myself); - __set_current_state(TASK_RUNNING); - spin_lock(&trans->client_lock); - continue; - } + diff = (cmp(peer) ?: + cmp(key) ?: + cmp(security_level)); + if (diff < 0) + pp = &(*pp)->rb_left; + else if (diff > 0) + pp = &(*pp)->rb_right; + else + goto found_extant_conn; + } - /* not yet present - create a candidate for a new connection and then - * redo the check */ - candidate = rxrpc_alloc_connection(gfp); - if (!candidate) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } + /* The second search also failed; simply add the new connection with + * the new call in channel 0. Note that we need to take the channel + * lock before dropping the client conn lock. 
+ */ + _debug("new conn"); + conn = candidate; + candidate = NULL; - candidate->trans = trans; - candidate->bundle = bundle; - candidate->service_id = bundle->service_id; - candidate->epoch = rxrpc_epoch; - candidate->in_clientflag = 0; - candidate->out_clientflag = RXRPC_CLIENT_INITIATED; - candidate->cid = 0; - candidate->state = RXRPC_CONN_CLIENT; - candidate->avail_calls = RXRPC_MAXCALLS; - candidate->security_level = rx->min_sec_level; - candidate->key = key_get(bundle->key); - - ret = rxrpc_init_client_conn_security(candidate); - if (ret < 0) { - key_put(candidate->key); - kfree(candidate); - _leave(" = %d [key]", ret); - return ret; - } + rb_link_node(&conn->client_node, parent, pp); + rb_insert_color(&conn->client_node, &local->client_conns); - write_lock_bh(&rxrpc_connection_lock); - list_add_tail(&candidate->link, &rxrpc_connections); - write_unlock_bh(&rxrpc_connection_lock); + atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1); + spin_lock(&conn->channel_lock); + spin_unlock(&local->client_conns_lock); + chan = 0; - spin_lock(&trans->client_lock); +found_channel: + _debug("found chan"); + call->conn = conn; + call->channel = chan; + call->epoch = conn->proto.epoch; + call->cid = conn->proto.cid | chan; + call->call_id = ++conn->call_counter; + rcu_assign_pointer(conn->channels[chan], call); - list_add(&candidate->bundle_link, &bundle->unused_conns); - bundle->num_conns++; - atomic_inc(&bundle->usage); - atomic_inc(&trans->usage); + _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id); - _net("CONNECT new %d on TRANS %d", - candidate->debug_id, candidate->trans->debug_id); + rxrpc_add_call_ID_to_conn(conn, call); + spin_unlock(&conn->channel_lock); + rxrpc_put_peer(cp->peer); + cp->peer = NULL; + _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage)); + return 0; - rxrpc_assign_connection_id(candidate); - candidate->security->prime_packet_security(candidate); + /* We found a suitable connection already in existence. Discard any + * candidate we may have allocated, and try to get a channel on this + * one. + */ +found_extant_conn: + _debug("found conn"); + rxrpc_get_connection(conn); + spin_unlock(&local->client_conns_lock); + + rxrpc_put_connection(candidate); + + if (!atomic_add_unless(&conn->avail_chans, -1, 0)) { + if (!gfpflags_allow_blocking(gfp)) { + rxrpc_put_connection(conn); + _leave(" = -EAGAIN"); + return -EAGAIN; + } - /* leave the candidate lurking in zombie mode attached to the - * bundle until we're ready for it */ - rxrpc_put_connection(candidate); - candidate = NULL; + add_wait_queue(&conn->channel_wq, &myself); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (atomic_add_unless(&conn->avail_chans, -1, 0)) + break; + if (signal_pending(current)) + goto interrupted; + schedule(); + } + remove_wait_queue(&conn->channel_wq, &myself); + __set_current_state(TASK_RUNNING); } - /* we've got a connection with a free channel and we can now attach the - * call to it - * - we're holding the transport's client lock - * - we're holding a reference on the connection - * - we're holding a reference on the bundle + /* The connection allegedly now has a free channel and we can now + * attach the call to it. 
*/ + spin_lock(&conn->channel_lock); + for (chan = 0; chan < RXRPC_MAXCALLS; chan++) if (!conn->channels[chan]) goto found_channel; - ASSERT(conn->channels[0] == NULL || - conn->channels[1] == NULL || - conn->channels[2] == NULL || - conn->channels[3] == NULL); BUG(); -found_channel: - conn->channels[chan] = call; - call->conn = conn; - call->channel = chan; - call->cid = conn->cid | chan; - call->call_id = ++conn->call_counter; - - _net("CONNECT client on conn %d chan %d as call %x", - conn->debug_id, chan, call->call_id); - - ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS); - spin_unlock(&trans->client_lock); - - rxrpc_add_call_ID_to_conn(conn, call); - - _leave(" = 0"); - return 0; - -interrupted_dequeue: - remove_wait_queue(&bundle->chanwait, &myself); - __set_current_state(TASK_RUNNING); interrupted: + remove_wait_queue(&conn->channel_wq, &myself); + __set_current_state(TASK_RUNNING); + rxrpc_put_connection(conn); + rxrpc_put_peer(cp->peer); + cp->peer = NULL; _leave(" = -ERESTARTSYS"); return -ERESTARTSYS; } @@ -602,11 +339,12 @@ interrupted: /* * get a record of an incoming connection */ -struct rxrpc_connection * -rxrpc_incoming_connection(struct rxrpc_transport *trans, - struct rxrpc_host_header *hdr) +struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local, + struct rxrpc_peer *peer, + struct sk_buff *skb) { struct rxrpc_connection *conn, *candidate = NULL; + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rb_node *p, **pp; const char *new = "old"; __be32 epoch; @@ -614,32 +352,32 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans, _enter(""); - ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED); + ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED); - epoch = hdr->epoch; - cid = hdr->cid & RXRPC_CIDMASK; + epoch = sp->hdr.epoch; + cid = sp->hdr.cid & RXRPC_CIDMASK; /* search the connection list first */ - read_lock_bh(&trans->conn_lock); + read_lock_bh(&peer->conn_lock); - p = trans->server_conns.rb_node; + p = peer->service_conns.rb_node; while (p) { - conn = rb_entry(p, struct rxrpc_connection, node); + conn = rb_entry(p, struct rxrpc_connection, service_node); - _debug("maybe %x", conn->cid); + _debug("maybe %x", conn->proto.cid); - if (epoch < conn->epoch) + if (epoch < conn->proto.epoch) p = p->rb_left; - else if (epoch > conn->epoch) + else if (epoch > conn->proto.epoch) p = p->rb_right; - else if (cid < conn->cid) + else if (cid < conn->proto.cid) p = p->rb_left; - else if (cid > conn->cid) + else if (cid > conn->proto.cid) p = p->rb_right; else goto found_extant_connection; } - read_unlock_bh(&trans->conn_lock); + read_unlock_bh(&peer->conn_lock); /* not yet present - create a candidate for a new record and then * redo the search */ @@ -649,32 +387,34 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans, return ERR_PTR(-ENOMEM); } - candidate->trans = trans; - candidate->epoch = hdr->epoch; - candidate->cid = hdr->cid & RXRPC_CIDMASK; - candidate->service_id = hdr->serviceId; - candidate->security_ix = hdr->securityIndex; - candidate->in_clientflag = RXRPC_CLIENT_INITIATED; - candidate->out_clientflag = 0; - candidate->state = RXRPC_CONN_SERVER; - if (candidate->service_id) - candidate->state = RXRPC_CONN_SERVER_UNSECURED; - - write_lock_bh(&trans->conn_lock); - - pp = &trans->server_conns.rb_node; + candidate->proto.local = local; + candidate->proto.epoch = sp->hdr.epoch; + candidate->proto.cid = sp->hdr.cid & RXRPC_CIDMASK; + candidate->proto.in_clientflag = RXRPC_CLIENT_INITIATED; + candidate->params.local = local; + candidate->params.peer = 
peer; + candidate->params.service_id = sp->hdr.serviceId; + candidate->security_ix = sp->hdr.securityIndex; + candidate->out_clientflag = 0; + candidate->state = RXRPC_CONN_SERVER; + if (candidate->params.service_id) + candidate->state = RXRPC_CONN_SERVER_UNSECURED; + + write_lock_bh(&peer->conn_lock); + + pp = &peer->service_conns.rb_node; p = NULL; while (*pp) { p = *pp; - conn = rb_entry(p, struct rxrpc_connection, node); + conn = rb_entry(p, struct rxrpc_connection, service_node); - if (epoch < conn->epoch) + if (epoch < conn->proto.epoch) pp = &(*pp)->rb_left; - else if (epoch > conn->epoch) + else if (epoch > conn->proto.epoch) pp = &(*pp)->rb_right; - else if (cid < conn->cid) + else if (cid < conn->proto.cid) pp = &(*pp)->rb_left; - else if (cid > conn->cid) + else if (cid > conn->proto.cid) pp = &(*pp)->rb_right; else goto found_extant_second; @@ -683,42 +423,43 @@ rxrpc_incoming_connection(struct rxrpc_transport *trans, /* we can now add the new candidate to the list */ conn = candidate; candidate = NULL; - rb_link_node(&conn->node, p, pp); - rb_insert_color(&conn->node, &trans->server_conns); - atomic_inc(&conn->trans->usage); + rb_link_node(&conn->service_node, p, pp); + rb_insert_color(&conn->service_node, &peer->service_conns); + rxrpc_get_peer(peer); + rxrpc_get_local(local); - write_unlock_bh(&trans->conn_lock); + write_unlock_bh(&peer->conn_lock); - write_lock_bh(&rxrpc_connection_lock); + write_lock(&rxrpc_connection_lock); list_add_tail(&conn->link, &rxrpc_connections); - write_unlock_bh(&rxrpc_connection_lock); + write_unlock(&rxrpc_connection_lock); new = "new"; success: - _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->cid); + _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid); _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage)); return conn; /* we found the connection in the list immediately */ found_extant_connection: - if (hdr->securityIndex != conn->security_ix) { - read_unlock_bh(&trans->conn_lock); + if (sp->hdr.securityIndex != conn->security_ix) { + read_unlock_bh(&peer->conn_lock); goto security_mismatch; } - atomic_inc(&conn->usage); - read_unlock_bh(&trans->conn_lock); + rxrpc_get_connection(conn); + read_unlock_bh(&peer->conn_lock); goto success; /* we found the connection on the second time through the list */ found_extant_second: - if (hdr->securityIndex != conn->security_ix) { - write_unlock_bh(&trans->conn_lock); + if (sp->hdr.securityIndex != conn->security_ix) { + write_unlock_bh(&peer->conn_lock); goto security_mismatch; } - atomic_inc(&conn->usage); - write_unlock_bh(&trans->conn_lock); + rxrpc_get_connection(conn); + write_unlock_bh(&peer->conn_lock); kfree(candidate); goto success; @@ -732,58 +473,83 @@ security_mismatch: * find a connection based on transport and RxRPC connection ID for an incoming * packet */ -struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans, - struct rxrpc_host_header *hdr) +struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local, + struct rxrpc_peer *peer, + struct sk_buff *skb) { struct rxrpc_connection *conn; + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rb_node *p; u32 epoch, cid; - _enter(",{%x,%x}", hdr->cid, hdr->flags); - - read_lock_bh(&trans->conn_lock); + _enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags); - cid = hdr->cid & RXRPC_CIDMASK; - epoch = hdr->epoch; + read_lock_bh(&peer->conn_lock); - if (hdr->flags & RXRPC_CLIENT_INITIATED) - p = trans->server_conns.rb_node; - else - p = trans->client_conns.rb_node; + cid = sp->hdr.cid & 
RXRPC_CIDMASK; + epoch = sp->hdr.epoch; - while (p) { - conn = rb_entry(p, struct rxrpc_connection, node); + if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) { + p = peer->service_conns.rb_node; + while (p) { + conn = rb_entry(p, struct rxrpc_connection, service_node); - _debug("maybe %x", conn->cid); + _debug("maybe %x", conn->proto.cid); - if (epoch < conn->epoch) - p = p->rb_left; - else if (epoch > conn->epoch) - p = p->rb_right; - else if (cid < conn->cid) - p = p->rb_left; - else if (cid > conn->cid) - p = p->rb_right; - else + if (epoch < conn->proto.epoch) + p = p->rb_left; + else if (epoch > conn->proto.epoch) + p = p->rb_right; + else if (cid < conn->proto.cid) + p = p->rb_left; + else if (cid > conn->proto.cid) + p = p->rb_right; + else + goto found; + } + } else { + conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT); + if (conn && conn->proto.epoch == epoch) goto found; } - read_unlock_bh(&trans->conn_lock); + read_unlock_bh(&peer->conn_lock); _leave(" = NULL"); return NULL; found: - atomic_inc(&conn->usage); - read_unlock_bh(&trans->conn_lock); + rxrpc_get_connection(conn); + read_unlock_bh(&peer->conn_lock); _leave(" = %p", conn); return conn; } /* + * Disconnect a call and clear any channel it occupies when that call + * terminates. + */ +void rxrpc_disconnect_call(struct rxrpc_call *call) +{ + struct rxrpc_connection *conn = call->conn; + unsigned chan = call->channel; + + _enter("%d,%d", conn->debug_id, call->channel); + + if (conn->channels[chan] == call) { + rcu_assign_pointer(conn->channels[chan], NULL); + atomic_inc(&conn->avail_chans); + wake_up(&conn->channel_wq); + } +} + +/* * release a virtual connection */ void rxrpc_put_connection(struct rxrpc_connection *conn) { + if (!conn) + return; + _enter("%p{u=%d,d=%d}", conn, atomic_read(&conn->usage), conn->debug_id); @@ -809,17 +575,15 @@ static void rxrpc_destroy_connection(struct rxrpc_connection *conn) _net("DESTROY CONN %d", conn->debug_id); - if (conn->bundle) - rxrpc_put_bundle(conn->trans, conn->bundle); - ASSERT(RB_EMPTY_ROOT(&conn->calls)); rxrpc_purge_queue(&conn->rx_queue); conn->security->clear(conn); - key_put(conn->key); + key_put(conn->params.key); key_put(conn->server_key); + rxrpc_put_peer(conn->params.peer); + rxrpc_put_local(conn->params.local); - rxrpc_put_transport(conn->trans); kfree(conn); _leave(""); } @@ -830,6 +594,7 @@ static void rxrpc_destroy_connection(struct rxrpc_connection *conn) static void rxrpc_connection_reaper(struct work_struct *work) { struct rxrpc_connection *conn, *_p; + struct rxrpc_peer *peer; unsigned long now, earliest, reap_time; LIST_HEAD(graveyard); @@ -839,7 +604,7 @@ static void rxrpc_connection_reaper(struct work_struct *work) now = ktime_get_seconds(); earliest = ULONG_MAX; - write_lock_bh(&rxrpc_connection_lock); + write_lock(&rxrpc_connection_lock); list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) { _debug("reap CONN %d { u=%d,t=%ld }", conn->debug_id, atomic_read(&conn->usage), @@ -848,33 +613,42 @@ static void rxrpc_connection_reaper(struct work_struct *work) if (likely(atomic_read(&conn->usage) > 0)) continue; - spin_lock(&conn->trans->client_lock); - write_lock(&conn->trans->conn_lock); - reap_time = conn->put_time + rxrpc_connection_expiry; - - if (atomic_read(&conn->usage) > 0) { - ; - } else if (reap_time <= now) { - list_move_tail(&conn->link, &graveyard); - if (conn->out_clientflag) - rb_erase(&conn->node, - &conn->trans->client_conns); - else - rb_erase(&conn->node, - &conn->trans->server_conns); - if (conn->bundle) { - 
list_del_init(&conn->bundle_link); - conn->bundle->num_conns--; + if (rxrpc_conn_is_client(conn)) { + struct rxrpc_local *local = conn->params.local; + spin_lock(&local->client_conns_lock); + reap_time = conn->put_time + rxrpc_connection_expiry; + + if (atomic_read(&conn->usage) > 0) { + ; + } else if (reap_time <= now) { + list_move_tail(&conn->link, &graveyard); + rxrpc_put_client_connection_id(conn); + rb_erase(&conn->client_node, + &local->client_conns); + } else if (reap_time < earliest) { + earliest = reap_time; } - } else if (reap_time < earliest) { - earliest = reap_time; - } + spin_unlock(&local->client_conns_lock); + } else { + peer = conn->params.peer; + write_lock_bh(&peer->conn_lock); + reap_time = conn->put_time + rxrpc_connection_expiry; + + if (atomic_read(&conn->usage) > 0) { + ; + } else if (reap_time <= now) { + list_move_tail(&conn->link, &graveyard); + rb_erase(&conn->service_node, + &peer->service_conns); + } else if (reap_time < earliest) { + earliest = reap_time; + } - write_unlock(&conn->trans->conn_lock); - spin_unlock(&conn->trans->client_lock); + write_unlock_bh(&peer->conn_lock); + } } - write_unlock_bh(&rxrpc_connection_lock); + write_unlock(&rxrpc_connection_lock); if (earliest != ULONG_MAX) { _debug("reschedule reaper %ld", (long) earliest - now); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 47fb167af3e4..f4bd57b77b93 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -360,7 +360,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) case RXRPC_PACKET_TYPE_BUSY: _proto("Rx BUSY %%%u", sp->hdr.serial); - if (call->conn->out_clientflag) + if (rxrpc_conn_is_service(call->conn)) goto protocol_error; write_lock_bh(&call->state_lock); @@ -533,7 +533,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_call *call, case RXRPC_CALL_COMPLETE: case RXRPC_CALL_CLIENT_FINAL_ACK: /* complete server call */ - if (call->conn->in_clientflag) + if (rxrpc_conn_is_service(call->conn)) goto dead_call; /* resend last packet of a completed call */ _debug("final ack again"); @@ -560,7 +560,7 @@ static void rxrpc_post_packet_to_call(struct rxrpc_call *call, dead_call: if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) { skb->priority = RX_CALL_DEAD; - rxrpc_reject_packet(call->conn->trans->local, skb); + rxrpc_reject_packet(call->conn->params.local, skb); goto unlock; } free_unlock: @@ -580,7 +580,7 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, { _enter("%p,%p", conn, skb); - atomic_inc(&conn->usage); + rxrpc_get_connection(conn); skb_queue_tail(&conn->rx_queue, skb); rxrpc_queue_conn(conn); } @@ -628,27 +628,20 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) } static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local, - struct sk_buff *skb, - struct rxrpc_skb_priv *sp) + struct sk_buff *skb) { struct rxrpc_peer *peer; - struct rxrpc_transport *trans; struct rxrpc_connection *conn; struct sockaddr_rxrpc srx; rxrpc_get_addr_from_skb(local, skb, &srx); rcu_read_lock(); peer = rxrpc_lookup_peer_rcu(local, &srx); - if (IS_ERR(peer)) + if (!peer) goto cant_find_peer; - trans = rxrpc_find_transport(local, peer); + conn = rxrpc_find_connection(local, peer, skb); rcu_read_unlock(); - if (!trans) - goto cant_find_conn; - - conn = rxrpc_find_connection(trans, &sp->hdr); - rxrpc_put_transport(trans); if (!conn) goto cant_find_conn; @@ -739,7 +732,7 @@ void rxrpc_data_ready(struct sock *sk) * old-fashioned way doesn't really hurt */ struct rxrpc_connection *conn; - conn = 
rxrpc_conn_from_local(local, skb, sp); + conn = rxrpc_conn_from_local(local, skb); if (!conn) goto cant_route_call; diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 4ad56fafe3a7..18c737a61d80 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -987,7 +987,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn, if (ret < 0) goto error; - conn->key = key; + conn->params.key = key; _leave(" = 0 [%d]", key_serial(key)); return 0; diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 009b321712bc..3ab7764f7cd8 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -80,7 +80,8 @@ static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx) skb_queue_head_init(&local->accept_queue); skb_queue_head_init(&local->reject_queue); skb_queue_head_init(&local->event_queue); - mutex_init(&local->conn_lock); + local->client_conns = RB_ROOT; + spin_lock_init(&local->client_conns_lock); spin_lock_init(&local->lock); rwlock_init(&local->services_lock); local->debug_id = atomic_inc_return(&rxrpc_debug_id); @@ -209,7 +210,7 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx) * bind the transport socket may still fail if we're attempting * to use a local address that the dying object is still using. */ - if (!atomic_inc_not_zero(&local->usage)) { + if (!rxrpc_get_local_maybe(local)) { cursor = cursor->next; list_del_init(&local->link); break; @@ -294,6 +295,7 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) list_del_init(&local->link); mutex_unlock(&rxrpc_local_mutex); + ASSERT(RB_EMPTY_ROOT(&local->client_conns)); ASSERT(list_empty(&local->services)); if (socket) { diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index e6fb3863b0bc..f4bda06b7d2d 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -35,7 +35,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, static int rxrpc_sendmsg_cmsg(struct msghdr *msg, unsigned long *user_call_ID, enum rxrpc_command *command, - u32 *abort_code) + u32 *abort_code, + bool *_exclusive) { struct cmsghdr *cmsg; bool got_user_ID = false; @@ -93,6 +94,11 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, return -EINVAL; break; + case RXRPC_EXCLUSIVE_CALL: + *_exclusive = true; + if (len != 0) + return -EINVAL; + break; default: return -EINVAL; } @@ -131,13 +137,11 @@ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code) */ static struct rxrpc_call * rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, - unsigned long user_call_ID) + unsigned long user_call_ID, bool exclusive) { - struct rxrpc_conn_bundle *bundle; - struct rxrpc_transport *trans; + struct rxrpc_conn_parameters cp; struct rxrpc_call *call; struct key *key; - long ret; DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name); @@ -146,39 +150,20 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, if (!msg->msg_name) return ERR_PTR(-EDESTADDRREQ); - trans = rxrpc_name_to_transport(rx, msg->msg_name, msg->msg_namelen, 0, - GFP_KERNEL); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out; - } - key = rx->key; if (key && !rx->key->payload.data[0]) key = NULL; - bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, GFP_KERNEL); - if (IS_ERR(bundle)) { - ret = PTR_ERR(bundle); - goto out_trans; - } - call = rxrpc_new_client_call(rx, trans, bundle, user_call_ID, - GFP_KERNEL); - rxrpc_put_bundle(trans, bundle); - rxrpc_put_transport(trans); - if (IS_ERR(call)) { - ret = PTR_ERR(call); - goto out_trans; - } + memset(&cp, 
0, sizeof(cp)); + cp.local = rx->local; + cp.key = rx->key; + cp.security_level = rx->min_sec_level; + cp.exclusive = rx->exclusive | exclusive; + cp.service_id = srx->srx_service; + call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL); _leave(" = %p\n", call); return call; - -out_trans: - rxrpc_put_transport(trans); -out: - _leave(" = %ld", ret); - return ERR_PTR(ret); } /* @@ -191,12 +176,14 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) enum rxrpc_command cmd; struct rxrpc_call *call; unsigned long user_call_ID = 0; + bool exclusive = false; u32 abort_code = 0; int ret; _enter(""); - ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code); + ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code, + &exclusive); if (ret < 0) return ret; @@ -214,7 +201,8 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (!call) { if (cmd != RXRPC_CMD_SEND_DATA) return -EBADSLT; - call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID); + call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID, + exclusive); if (IS_ERR(call)) return PTR_ERR(call); } @@ -319,7 +307,7 @@ EXPORT_SYMBOL(rxrpc_kernel_abort_call); /* * send a packet through the transport endpoint */ -int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb) +int rxrpc_send_data_packet(struct rxrpc_connection *conn, struct sk_buff *skb) { struct kvec iov[1]; struct msghdr msg; @@ -330,30 +318,30 @@ int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb) iov[0].iov_base = skb->head; iov[0].iov_len = skb->len; - msg.msg_name = &trans->peer->srx.transport.sin; - msg.msg_namelen = sizeof(trans->peer->srx.transport.sin); + msg.msg_name = &conn->params.peer->srx.transport; + msg.msg_namelen = conn->params.peer->srx.transport_len; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; /* send the packet with the don't fragment bit set if we currently * think it's small enough */ - if (skb->len - sizeof(struct rxrpc_wire_header) < trans->peer->maxdata) { - down_read(&trans->local->defrag_sem); + if (skb->len - sizeof(struct rxrpc_wire_header) < conn->params.peer->maxdata) { + down_read(&conn->params.local->defrag_sem); /* send the packet by UDP * - returns -EMSGSIZE if UDP would have to fragment the packet * to go out of the interface * - in which case, we'll have processed the ICMP error * message and update the peer record */ - ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1, iov[0].iov_len); - up_read(&trans->local->defrag_sem); + up_read(&conn->params.local->defrag_sem); if (ret == -EMSGSIZE) goto send_fragmentable; - _leave(" = %d [%u]", ret, trans->peer->maxdata); + _leave(" = %d [%u]", ret, conn->params.peer->maxdata); return ret; } @@ -361,21 +349,28 @@ send_fragmentable: /* attempt to send this message with fragmentation enabled */ _debug("send fragment"); - down_write(&trans->local->defrag_sem); - opt = IP_PMTUDISC_DONT; - ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER, - (char *) &opt, sizeof(opt)); - if (ret == 0) { - ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, - iov[0].iov_len); - - opt = IP_PMTUDISC_DO; - kernel_setsockopt(trans->local->socket, SOL_IP, - IP_MTU_DISCOVER, (char *) &opt, sizeof(opt)); + down_write(&conn->params.local->defrag_sem); + + switch (conn->params.local->srx.transport.family) { + case AF_INET: + opt = IP_PMTUDISC_DONT; + ret = 
kernel_setsockopt(conn->params.local->socket, + SOL_IP, IP_MTU_DISCOVER, + (char *)&opt, sizeof(opt)); + if (ret == 0) { + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1, + iov[0].iov_len); + + opt = IP_PMTUDISC_DO; + kernel_setsockopt(conn->params.local->socket, SOL_IP, + IP_MTU_DISCOVER, + (char *)&opt, sizeof(opt)); + } + break; } - up_write(&trans->local->defrag_sem); - _leave(" = %d [frag %u]", ret, trans->peer->maxdata); + up_write(&conn->params.local->defrag_sem); + _leave(" = %d [frag %u]", ret, conn->params.peer->maxdata); return ret; } @@ -487,7 +482,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, if (try_to_del_timer_sync(&call->ack_timer) >= 0) { /* the packet may be freed by rxrpc_process_call() before this * returns */ - ret = rxrpc_send_packet(call->conn->trans, skb); + ret = rxrpc_send_data_packet(call->conn, skb); _net("sent skb %p", skb); } else { _debug("failed to delete ACK timer"); @@ -573,7 +568,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, goto maybe_error; } - max = call->conn->trans->peer->maxdata; + max = call->conn->params.peer->maxdata; max -= call->conn->security_size; max &= ~(call->conn->size_align - 1UL); @@ -664,7 +659,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, seq = atomic_inc_return(&call->sequence); - sp->hdr.epoch = conn->epoch; + sp->hdr.epoch = conn->proto.epoch; sp->hdr.cid = call->cid; sp->hdr.callNumber = call->call_id; sp->hdr.seq = seq; diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index faf222c21698..01d4930a11f7 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -50,6 +50,9 @@ static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local, size = sizeof(srx->transport.sin.sin_addr); p = (u16 *)&srx->transport.sin.sin_addr; break; + default: + WARN(1, "AF_RXRPC: Unsupported transport address family\n"); + return 0; } /* Step through the peer address in 16-bit portions for speed */ @@ -185,6 +188,8 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) INIT_HLIST_HEAD(&peer->error_targets); INIT_WORK(&peer->error_distributor, &rxrpc_peer_error_distributor); + peer->service_conns = RB_ROOT; + rwlock_init(&peer->conn_lock); spin_lock_init(&peer->lock); peer->debug_id = atomic_inc_return(&rxrpc_debug_id); } diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index 225163bc658d..500cdcdc843c 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -46,7 +46,7 @@ static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) static int rxrpc_call_seq_show(struct seq_file *seq, void *v) { - struct rxrpc_transport *trans; + struct rxrpc_connection *conn; struct rxrpc_call *call; char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; @@ -59,25 +59,28 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) } call = list_entry(v, struct rxrpc_call, link); - trans = call->conn->trans; sprintf(lbuff, "%pI4:%u", - &trans->local->srx.transport.sin.sin_addr, - ntohs(trans->local->srx.transport.sin.sin_port)); + &call->local->srx.transport.sin.sin_addr, + ntohs(call->local->srx.transport.sin.sin_port)); - sprintf(rbuff, "%pI4:%u", - &trans->peer->srx.transport.sin.sin_addr, - ntohs(trans->peer->srx.transport.sin.sin_port)); + conn = call->conn; + if (conn) + sprintf(rbuff, "%pI4:%u", + &conn->params.peer->srx.transport.sin.sin_addr, + ntohs(conn->params.peer->srx.transport.sin.sin_port)); + else + strcpy(rbuff, "no_connection"); seq_printf(seq, "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" " %-8.8s %08x %lx\n", lbuff, 
rbuff, - call->conn->service_id, + call->service_id, call->cid, call->call_id, - call->conn->in_clientflag ? "Svc" : "Clt", + call->in_clientflag ? "Svc" : "Clt", atomic_read(&call->usage), rxrpc_call_states[call->state], call->remote_abort ?: call->local_abort, @@ -129,7 +132,6 @@ static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) { struct rxrpc_connection *conn; - struct rxrpc_transport *trans; char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1]; if (v == &rxrpc_connections) { @@ -142,28 +144,27 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) } conn = list_entry(v, struct rxrpc_connection, link); - trans = conn->trans; sprintf(lbuff, "%pI4:%u", - &trans->local->srx.transport.sin.sin_addr, - ntohs(trans->local->srx.transport.sin.sin_port)); + &conn->params.local->srx.transport.sin.sin_addr, + ntohs(conn->params.local->srx.transport.sin.sin_port)); sprintf(rbuff, "%pI4:%u", - &trans->peer->srx.transport.sin.sin_addr, - ntohs(trans->peer->srx.transport.sin.sin_port)); + &conn->params.peer->srx.transport.sin.sin_addr, + ntohs(conn->params.peer->srx.transport.sin.sin_port)); seq_printf(seq, "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u" " %s %08x %08x %08x\n", lbuff, rbuff, - conn->service_id, - conn->cid, + conn->params.service_id, + conn->proto.cid, conn->call_counter, - conn->in_clientflag ? "Svc" : "Clt", + rxrpc_conn_is_service(conn) ? "Svc" : "Clt", atomic_read(&conn->usage), rxrpc_conn_states[conn->state], - key_serial(conn->key), + key_serial(conn->params.key), atomic_read(&conn->serial), atomic_read(&conn->hi_serial)); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 59706b9f2f7a..a3fa2ed85d63 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -147,9 +147,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (!continue_call) { if (msg->msg_name) { size_t len = - sizeof(call->conn->trans->peer->srx); + sizeof(call->conn->params.peer->srx); memcpy(msg->msg_name, - &call->conn->trans->peer->srx, len); + &call->conn->params.peer->srx, len); msg->msg_namelen = len; } sock_recv_timestamp(msg, &rx->sk, skb); @@ -205,7 +205,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, /* we transferred the whole data packet */ if (sp->hdr.flags & RXRPC_LAST_PACKET) { _debug("last"); - if (call->conn->out_clientflag) { + if (rxrpc_conn_is_client(call->conn)) { /* last byte of reply received */ ret = copied; goto terminal_message; diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 36a634027d9d..23c05ec6fa28 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -58,9 +58,9 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn) struct rxrpc_key_token *token; int ret; - _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key)); + _enter("{%d},{%x}", conn->debug_id, key_serial(conn->params.key)); - token = conn->key->payload.data[0]; + token = conn->params.key->payload.data[0]; conn->security_ix = token->security_index; ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); @@ -74,7 +74,7 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn) sizeof(token->kad->session_key)) < 0) BUG(); - switch (conn->security_level) { + switch (conn->params.security_level) { case RXRPC_SECURITY_PLAIN: break; case RXRPC_SECURITY_AUTH: @@ -115,14 +115,14 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn) _enter(""); - if (!conn->key) + if 
(!conn->params.key) return; - token = conn->key->payload.data[0]; + token = conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); - tmpbuf.x[0] = htonl(conn->epoch); - tmpbuf.x[1] = htonl(conn->cid); + tmpbuf.x[0] = htonl(conn->proto.epoch); + tmpbuf.x[1] = htonl(conn->proto.cid); tmpbuf.x[2] = 0; tmpbuf.x[3] = htonl(conn->security_ix); @@ -220,7 +220,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, rxkhdr.checksum = 0; /* encrypt from the session key */ - token = call->conn->key->payload.data[0]; + token = call->conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); sg_init_one(&sg[0], sechdr, sizeof(rxkhdr)); @@ -277,13 +277,13 @@ static int rxkad_secure_packet(const struct rxrpc_call *call, sp = rxrpc_skb(skb); _enter("{%d{%x}},{#%u},%zu,", - call->debug_id, key_serial(call->conn->key), sp->hdr.seq, - data_size); + call->debug_id, key_serial(call->conn->params.key), + sp->hdr.seq, data_size); if (!call->conn->cipher) return 0; - ret = key_validate(call->conn->key); + ret = key_validate(call->conn->params.key); if (ret < 0) return ret; @@ -312,7 +312,7 @@ static int rxkad_secure_packet(const struct rxrpc_call *call, y = 1; /* zero checksums are not permitted */ sp->hdr.cksum = y; - switch (call->conn->security_level) { + switch (call->conn->params.security_level) { case RXRPC_SECURITY_PLAIN: ret = 0; break; @@ -446,7 +446,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call, skb_to_sgvec(skb, sg, 0, skb->len); /* decrypt from the session key */ - token = call->conn->key->payload.data[0]; + token = call->conn->params.key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); skcipher_request_set_tfm(req, call->conn->cipher); @@ -516,7 +516,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call, sp = rxrpc_skb(skb); _enter("{%d{%x}},{#%u}", - call->debug_id, key_serial(call->conn->key), sp->hdr.seq); + call->debug_id, key_serial(call->conn->params.key), sp->hdr.seq); if (!call->conn->cipher) return 0; @@ -557,7 +557,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call, return -EPROTO; } - switch (call->conn->security_level) { + switch (call->conn->params.security_level) { case RXRPC_SECURITY_PLAIN: ret = 0; break; @@ -589,9 +589,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) u32 serial; int ret; - _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); + _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); - ret = key_validate(conn->key); + ret = key_validate(conn->params.key); if (ret < 0) return ret; @@ -602,14 +602,14 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) challenge.min_level = htonl(0); challenge.__padding = 0; - msg.msg_name = &conn->trans->peer->srx.transport.sin; - msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); + msg.msg_name = &conn->params.peer->srx.transport.sin; + msg.msg_namelen = sizeof(conn->params.peer->srx.transport.sin); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; - whdr.epoch = htonl(conn->epoch); - whdr.cid = htonl(conn->cid); + whdr.epoch = htonl(conn->proto.epoch); + whdr.cid = htonl(conn->proto.cid); whdr.callNumber = 0; whdr.seq = 0; whdr.type = RXRPC_PACKET_TYPE_CHALLENGE; @@ -617,7 +617,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) whdr.userStatus = 0; whdr.securityIndex = conn->security_ix; whdr._rsvd = 0; - whdr.serviceId = htons(conn->service_id); + whdr.serviceId = 
htons(conn->params.service_id); iov[0].iov_base = &whdr; iov[0].iov_len = sizeof(whdr); @@ -630,7 +630,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) whdr.serial = htonl(serial); _proto("Tx CHALLENGE %%%u", serial); - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); return -EAGAIN; @@ -657,8 +657,8 @@ static int rxkad_send_response(struct rxrpc_connection *conn, _enter(""); - msg.msg_name = &conn->trans->peer->srx.transport.sin; - msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); + msg.msg_name = &conn->params.peer->srx.transport.sin; + msg.msg_namelen = sizeof(conn->params.peer->srx.transport.sin); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -684,7 +684,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn, whdr.serial = htonl(serial); _proto("Tx RESPONSE %%%u", serial); - ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); + ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); return -EAGAIN; @@ -771,14 +771,14 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, u32 version, nonce, min_level, abort_code; int ret; - _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); + _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); - if (!conn->key) { + if (!conn->params.key) { _leave(" = -EPROTO [no key]"); return -EPROTO; } - ret = key_validate(conn->key); + ret = key_validate(conn->params.key); if (ret < 0) { *_abort_code = RXKADEXPIRED; return ret; @@ -801,20 +801,20 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, goto protocol_error; abort_code = RXKADLEVELFAIL; - if (conn->security_level < min_level) + if (conn->params.security_level < min_level) goto protocol_error; - token = conn->key->payload.data[0]; + token = conn->params.key->payload.data[0]; /* build the response packet */ memset(&resp, 0, sizeof(resp)); resp.version = htonl(RXKAD_VERSION); - resp.encrypted.epoch = htonl(conn->epoch); - resp.encrypted.cid = htonl(conn->cid); + resp.encrypted.epoch = htonl(conn->proto.epoch); + resp.encrypted.cid = htonl(conn->proto.cid); resp.encrypted.securityIndex = htonl(conn->security_ix); resp.encrypted.inc_nonce = htonl(nonce + 1); - resp.encrypted.level = htonl(conn->security_level); + resp.encrypted.level = htonl(conn->params.security_level); resp.kvno = htonl(token->kad->kvno); resp.ticket_len = htonl(token->kad->ticket_len); @@ -1096,9 +1096,9 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, rxkad_decrypt_response(conn, &response, &session_key); abort_code = RXKADSEALEDINCON; - if (ntohl(response.encrypted.epoch) != conn->epoch) + if (ntohl(response.encrypted.epoch) != conn->proto.epoch) goto protocol_error_free; - if (ntohl(response.encrypted.cid) != conn->cid) + if (ntohl(response.encrypted.cid) != conn->proto.cid) goto protocol_error_free; if (ntohl(response.encrypted.securityIndex) != conn->security_ix) goto protocol_error_free; @@ -1122,7 +1122,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, level = ntohl(response.encrypted.level); if (level > RXRPC_SECURITY_ENCRYPT) goto protocol_error_free; - conn->security_level = level; + conn->params.security_level = level; /* create a key to hold the security data and expiration time - after * this the connection security can be handled in exactly the same 
way diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c index d223253b22fa..814d285ff802 100644 --- a/net/rxrpc/security.c +++ b/net/rxrpc/security.c @@ -76,7 +76,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) { const struct rxrpc_security *sec; struct rxrpc_key_token *token; - struct key *key = conn->key; + struct key *key = conn->params.key; int ret; _enter("{%d},{%x}", conn->debug_id, key_serial(key)); @@ -113,7 +113,7 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) { const struct rxrpc_security *sec; - struct rxrpc_local *local = conn->trans->local; + struct rxrpc_local *local = conn->params.local; struct rxrpc_sock *rx; struct key *key; key_ref_t kref; @@ -121,7 +121,7 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) _enter(""); - sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix); + sprintf(kdesc, "%u:%u", conn->params.service_id, conn->security_ix); sec = rxrpc_security_lookup(conn->security_ix); if (!sec) { @@ -132,7 +132,7 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) /* find the service */ read_lock_bh(&local->services_lock); list_for_each_entry(rx, &local->services, listen_link) { - if (rx->srx.srx_service == conn->service_id) + if (rx->srx.srx_service == conn->params.service_id) goto found_service; } diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c index a99690a8a3da..03ad08774d4e 100644 --- a/net/rxrpc/sysctl.c +++ b/net/rxrpc/sysctl.c @@ -90,14 +90,6 @@ static struct ctl_table rxrpc_sysctl_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&one, }, - { - .procname = "transport_expiry", - .data = &rxrpc_transport_expiry, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = (void *)&one, - }, /* Non-time values */ { diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c deleted file mode 100644 index 24c71218a6f8..000000000000 --- a/net/rxrpc/transport.c +++ /dev/null @@ -1,269 +0,0 @@ -/* RxRPC point-to-point transport session management - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/net.h> -#include <linux/skbuff.h> -#include <linux/slab.h> -#include <net/sock.h> -#include <net/af_rxrpc.h> -#include "ar-internal.h" - -/* - * Time after last use at which transport record is cleaned up. 
- */ -unsigned int rxrpc_transport_expiry = 3600 * 24; - -static void rxrpc_transport_reaper(struct work_struct *work); - -static LIST_HEAD(rxrpc_transports); -static DEFINE_RWLOCK(rxrpc_transport_lock); -static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper); - -/* - * allocate a new transport session manager - */ -static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local, - struct rxrpc_peer *peer, - gfp_t gfp) -{ - struct rxrpc_transport *trans; - - _enter(""); - - trans = kzalloc(sizeof(struct rxrpc_transport), gfp); - if (trans) { - trans->local = local; - trans->peer = peer; - INIT_LIST_HEAD(&trans->link); - trans->bundles = RB_ROOT; - trans->client_conns = RB_ROOT; - trans->server_conns = RB_ROOT; - spin_lock_init(&trans->client_lock); - rwlock_init(&trans->conn_lock); - atomic_set(&trans->usage, 1); - trans->conn_idcounter = peer->srx.srx_service << 16; - trans->debug_id = atomic_inc_return(&rxrpc_debug_id); - } - - _leave(" = %p", trans); - return trans; -} - -/* - * obtain a transport session for the nominated endpoints - */ -struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local, - struct rxrpc_peer *peer, - gfp_t gfp) -{ - struct rxrpc_transport *trans, *candidate; - const char *new = "old"; - int usage; - - _enter("{%pI4+%hu},{%pI4+%hu},", - &local->srx.transport.sin.sin_addr, - ntohs(local->srx.transport.sin.sin_port), - &peer->srx.transport.sin.sin_addr, - ntohs(peer->srx.transport.sin.sin_port)); - - /* search the transport list first */ - read_lock_bh(&rxrpc_transport_lock); - list_for_each_entry(trans, &rxrpc_transports, link) { - if (trans->local == local && trans->peer == peer) - goto found_extant_transport; - } - read_unlock_bh(&rxrpc_transport_lock); - - /* not yet present - create a candidate for a new record and then - * redo the search */ - candidate = rxrpc_alloc_transport(local, peer, gfp); - if (!candidate) { - _leave(" = -ENOMEM"); - return ERR_PTR(-ENOMEM); - } - - write_lock_bh(&rxrpc_transport_lock); - - list_for_each_entry(trans, &rxrpc_transports, link) { - if (trans->local == local && trans->peer == peer) - goto found_extant_second; - } - - /* we can now add the new candidate to the list */ - trans = candidate; - candidate = NULL; - usage = atomic_read(&trans->usage); - - rxrpc_get_local(trans->local); - rxrpc_get_peer(trans->peer); - list_add_tail(&trans->link, &rxrpc_transports); - write_unlock_bh(&rxrpc_transport_lock); - new = "new"; - -success: - _net("TRANSPORT %s %d local %d -> peer %d", - new, - trans->debug_id, - trans->local->debug_id, - trans->peer->debug_id); - - _leave(" = %p {u=%d}", trans, usage); - return trans; - - /* we found the transport in the list immediately */ -found_extant_transport: - usage = atomic_inc_return(&trans->usage); - read_unlock_bh(&rxrpc_transport_lock); - goto success; - - /* we found the transport on the second time through the list */ -found_extant_second: - usage = atomic_inc_return(&trans->usage); - write_unlock_bh(&rxrpc_transport_lock); - kfree(candidate); - goto success; -} - -/* - * find the transport connecting two endpoints - */ -struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local, - struct rxrpc_peer *peer) -{ - struct rxrpc_transport *trans; - - _enter("{%pI4+%hu},{%pI4+%hu},", - &local->srx.transport.sin.sin_addr, - ntohs(local->srx.transport.sin.sin_port), - &peer->srx.transport.sin.sin_addr, - ntohs(peer->srx.transport.sin.sin_port)); - - /* search the transport list */ - read_lock_bh(&rxrpc_transport_lock); - - 
list_for_each_entry(trans, &rxrpc_transports, link) { - if (trans->local == local && trans->peer == peer) - goto found_extant_transport; - } - - read_unlock_bh(&rxrpc_transport_lock); - _leave(" = NULL"); - return NULL; - -found_extant_transport: - atomic_inc(&trans->usage); - read_unlock_bh(&rxrpc_transport_lock); - _leave(" = %p", trans); - return trans; -} - -/* - * release a transport session - */ -void rxrpc_put_transport(struct rxrpc_transport *trans) -{ - _enter("%p{u=%d}", trans, atomic_read(&trans->usage)); - - ASSERTCMP(atomic_read(&trans->usage), >, 0); - - trans->put_time = ktime_get_seconds(); - if (unlikely(atomic_dec_and_test(&trans->usage))) { - _debug("zombie"); - /* let the reaper determine the timeout to avoid a race with - * overextending the timeout if the reaper is running at the - * same time */ - rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); - } - _leave(""); -} - -/* - * clean up a transport session - */ -static void rxrpc_cleanup_transport(struct rxrpc_transport *trans) -{ - _net("DESTROY TRANS %d", trans->debug_id); - - rxrpc_put_local(trans->local); - rxrpc_put_peer(trans->peer); - kfree(trans); -} - -/* - * reap dead transports that have passed their expiry date - */ -static void rxrpc_transport_reaper(struct work_struct *work) -{ - struct rxrpc_transport *trans, *_p; - unsigned long now, earliest, reap_time; - - LIST_HEAD(graveyard); - - _enter(""); - - now = ktime_get_seconds(); - earliest = ULONG_MAX; - - /* extract all the transports that have been dead too long */ - write_lock_bh(&rxrpc_transport_lock); - list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) { - _debug("reap TRANS %d { u=%d t=%ld }", - trans->debug_id, atomic_read(&trans->usage), - (long) now - (long) trans->put_time); - - if (likely(atomic_read(&trans->usage) > 0)) - continue; - - reap_time = trans->put_time + rxrpc_transport_expiry; - if (reap_time <= now) - list_move_tail(&trans->link, &graveyard); - else if (reap_time < earliest) - earliest = reap_time; - } - write_unlock_bh(&rxrpc_transport_lock); - - if (earliest != ULONG_MAX) { - _debug("reschedule reaper %ld", (long) earliest - now); - ASSERTCMP(earliest, >, now); - rxrpc_queue_delayed_work(&rxrpc_transport_reap, - (earliest - now) * HZ); - } - - /* then destroy all those pulled out */ - while (!list_empty(&graveyard)) { - trans = list_entry(graveyard.next, struct rxrpc_transport, - link); - list_del_init(&trans->link); - - ASSERTCMP(atomic_read(&trans->usage), ==, 0); - rxrpc_cleanup_transport(trans); - } - - _leave(""); -} - -/* - * preemptively destroy all the transport session records rather than waiting - * for them to time out - */ -void __exit rxrpc_destroy_all_transports(void) -{ - _enter(""); - - rxrpc_transport_expiry = 0; - cancel_delayed_work(&rxrpc_transport_reap); - rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); - - _leave(""); -} diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index e04ea6994d1c..481e4f12aeb4 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -357,7 +357,8 @@ static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch, /* --------------------------- Qdisc operations ---------------------------- */ -static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow; @@ -398,10 +399,10 @@ done: switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: - kfree_skb(skb); + __qdisc_drop(skb, 
to_free); return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: - kfree_skb(skb); + __qdisc_drop(skb, to_free); goto drop; case TC_ACT_RECLASSIFY: if (flow->excess) @@ -413,7 +414,7 @@ done: #endif } - ret = qdisc_enqueue(skb, flow->q); + ret = qdisc_enqueue(skb, flow->q, to_free); if (ret != NET_XMIT_SUCCESS) { drop: __maybe_unused if (net_xmit_drop_count(ret)) { diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index 3fee70d9814f..c98a61e980ba 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -17,9 +17,10 @@ #include <linux/skbuff.h> #include <net/pkt_sched.h> -static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { - qdisc_drop(skb, sch); + qdisc_drop(skb, sch, to_free); return NET_XMIT_SUCCESS; } diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index a29fd811d7b9..beb554aa8cfb 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -358,7 +358,8 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) } static int -cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) +cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct cbq_sched_data *q = qdisc_priv(sch); int uninitialized_var(ret); @@ -370,11 +371,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (cl == NULL) { if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return ret; } - ret = qdisc_enqueue(skb, cl->q); + ret = qdisc_enqueue(skb, cl->q, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; cbq_mark_toplevel(q, cl); diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 789b69ee9e51..3b6d5bd69101 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -115,7 +115,8 @@ static void choke_zap_tail_holes(struct choke_sched_data *q) } /* Drop packet from queue array by creating a "hole" */ -static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) +static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx, + struct sk_buff **to_free) { struct choke_sched_data *q = qdisc_priv(sch); struct sk_buff *skb = q->tab[idx]; @@ -129,7 +130,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) qdisc_qstats_backlog_dec(sch, skb); qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); - qdisc_drop(skb, sch); + qdisc_drop(skb, sch, to_free); --sch->q.qlen; } @@ -261,7 +262,8 @@ static bool choke_match_random(const struct choke_sched_data *q, return choke_match_flow(oskb, nskb); } -static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; struct choke_sched_data *q = qdisc_priv(sch); @@ -288,7 +290,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) /* Draw a packet at random from queue and compare flow */ if (choke_match_random(q, skb, &idx)) { q->stats.matched++; - choke_drop_by_idx(sch, idx); + choke_drop_by_idx(sch, idx, to_free); goto congestion_drop; } @@ -331,16 +333,16 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) } q->stats.pdrop++; - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); congestion_drop: - qdisc_drop(skb, sch); + qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; other_drop: if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return ret; } 
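The sch_atm, sch_blackhole, sch_cbq and sch_choke hunks above (and the remaining sch_* hunks below) all make the same conversion: the Qdisc ->enqueue() method gains a third "struct sk_buff **to_free" argument, and packets that were previously kfree_skb()'d inside the qdisc are instead chained onto *to_free via the three-argument qdisc_drop()/__qdisc_drop() and freed by the caller in one batch. A minimal sketch of that pattern follows; the example_* names are hypothetical, and it assumes the new three-argument drop helpers that these hunks call.

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	/* within limit: queue normally */
	if (likely(skb_queue_len(&sch->q) < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	/* over limit: chain skb onto *to_free and account the drop */
	return qdisc_drop(skb, sch, to_free);
}

/* Caller side, mirroring the netem hunk further down: collect, then free. */
static int example_enqueue_child(struct sk_buff *skb, struct Qdisc *child)
{
	struct sk_buff *to_free = NULL;
	int err;

	err = qdisc_enqueue(skb, child, &to_free);
	kfree_skb_list(to_free);	/* free every dropped skb in one pass */
	return err;
}
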
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index c5bc424e3b3c..4002df3c7d9f 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -82,7 +82,8 @@ static void drop_func(struct sk_buff *skb, void *ctx) { struct Qdisc *sch = ctx; - qdisc_drop(skb, sch); + kfree_skb(skb); + qdisc_qstats_drop(sch); } static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) @@ -107,7 +108,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) return skb; } -static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct codel_sched_data *q; @@ -117,7 +119,7 @@ static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) } q = qdisc_priv(sch); q->drop_overlimit++; - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = { diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 22609e4e845f..8af5c59eef84 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -350,7 +350,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, return NULL; } -static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; @@ -360,11 +361,11 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (cl == NULL) { if (err & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return err; } - err = qdisc_enqueue(skb, cl->qdisc); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index b9ba5f658528..1308bbf460f7 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -191,7 +191,8 @@ static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch, /* --------------------------- Qdisc operations ---------------------------- */ -static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct dsmark_qdisc_data *p = qdisc_priv(sch); int err; @@ -234,7 +235,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) #ifdef CONFIG_NET_CLS_ACT case TC_ACT_QUEUED: case TC_ACT_STOLEN: - kfree_skb(skb); + __qdisc_drop(skb, to_free); return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: @@ -251,7 +252,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) } } - err = qdisc_enqueue(skb, p->q); + err = qdisc_enqueue(skb, p->q, to_free); if (err != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(err)) qdisc_qstats_drop(sch); @@ -264,7 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; drop: - qdisc_drop(skb, sch); + qdisc_drop(skb, sch, to_free); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index dea70e3ef0ba..6ea0db427f91 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -19,29 +19,32 @@ /* 1 band FIFO pseudo-"scheduler" */ -static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= 
sch->limit)) return qdisc_enqueue_tail(skb, sch); - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } -static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch); - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } -static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch); /* queue full, remove one skb to fulfill the limit */ - __qdisc_queue_drop_head(sch, &sch->q); + __qdisc_queue_drop_head(sch, &sch->q, to_free); qdisc_qstats_drop(sch); qdisc_enqueue_tail(skb, sch); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 6eb06674f778..e5458b99e09c 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -368,18 +368,19 @@ static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb) } } -static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct fq_sched_data *q = qdisc_priv(sch); struct fq_flow *f; if (unlikely(sch->q.qlen >= sch->limit)) - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); f = fq_classify(skb, q); if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) { q->stat_flows_plimit++; - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } f->qlen++; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 2dc0a849515a..a5ea0e9b6be4 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -139,7 +139,8 @@ static inline void flow_queue_add(struct fq_codel_flow *flow, skb->next = NULL; } -static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets) +static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets, + struct sk_buff **to_free) { struct fq_codel_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; @@ -171,8 +172,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets) do { skb = dequeue_head(flow); len += qdisc_pkt_len(skb); - mem += skb->truesize; - kfree_skb(skb); + mem += get_codel_cb(skb)->mem_usage; + __qdisc_drop(skb, to_free); } while (++i < max_packets && len < threshold); flow->dropped += i; @@ -184,7 +185,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets) return idx; } -static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct fq_codel_sched_data *q = qdisc_priv(sch); unsigned int idx, prev_backlog, prev_qlen; @@ -197,7 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (idx == 0) { if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return ret; } idx--; @@ -214,7 +216,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) flow->deficit = q->quantum; flow->dropped = 0; } - q->memory_usage += skb->truesize; + get_codel_cb(skb)->mem_usage = skb->truesize; + q->memory_usage += get_codel_cb(skb)->mem_usage; memory_limited = q->memory_usage > q->memory_limit; if (++sch->q.qlen <= sch->limit && !memory_limited) return NET_XMIT_SUCCESS; @@ -229,7 +232,7 @@ 
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) * So instead of dropping a single packet, drop half of its backlog * with a 64 packets limit to not add a too big cpu spike here. */ - ret = fq_codel_drop(sch, q->drop_batch_size); + ret = fq_codel_drop(sch, q->drop_batch_size, to_free); prev_qlen -= sch->q.qlen; prev_backlog -= sch->qstats.backlog; @@ -265,7 +268,7 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx) if (flow->head) { skb = dequeue_head(flow); q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); - q->memory_usage -= skb->truesize; + q->memory_usage -= get_codel_cb(skb)->mem_usage; sch->q.qlen--; sch->qstats.backlog -= qdisc_pkt_len(skb); } @@ -276,7 +279,8 @@ static void drop_func(struct sk_buff *skb, void *ctx) { struct Qdisc *sch = ctx; - qdisc_drop(skb, sch); + kfree_skb(skb); + qdisc_qstats_drop(sch); } static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 773b632e1e33..e95b67cd5718 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -77,6 +77,34 @@ static void try_bulk_dequeue_skb(struct Qdisc *q, skb->next = NULL; } +/* This variant of try_bulk_dequeue_skb() makes sure + * all skbs in the chain are for the same txq + */ +static void try_bulk_dequeue_skb_slow(struct Qdisc *q, + struct sk_buff *skb, + int *packets) +{ + int mapping = skb_get_queue_mapping(skb); + struct sk_buff *nskb; + int cnt = 0; + + do { + nskb = q->dequeue(q); + if (!nskb) + break; + if (unlikely(skb_get_queue_mapping(nskb) != mapping)) { + q->skb_bad_txq = nskb; + qdisc_qstats_backlog_inc(q, nskb); + q->q.qlen++; + break; + } + skb->next = nskb; + skb = nskb; + } while (++cnt < 8); + (*packets) += cnt; + skb->next = NULL; +} + /* Note that dequeue_skb can possibly return a SKB list (via skb->next). * A requeued skb (via q->gso_skb) can also be a SKB list. */ @@ -87,8 +115,9 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, const struct netdev_queue *txq = q->dev_queue; *packets = 1; - *validate = true; if (unlikely(skb)) { + /* skb in gso_skb were already validated */ + *validate = false; /* check the reason of requeuing without tx lock first */ txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { @@ -97,15 +126,30 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, q->q.qlen--; } else skb = NULL; - /* skb in gso_skb were already validated */ - *validate = false; - } else { - if (!(q->flags & TCQ_F_ONETXQUEUE) || - !netif_xmit_frozen_or_stopped(txq)) { - skb = q->dequeue(q); - if (skb && qdisc_may_bulk(q)) - try_bulk_dequeue_skb(q, skb, txq, packets); + return skb; + } + *validate = true; + skb = q->skb_bad_txq; + if (unlikely(skb)) { + /* check the reason of requeuing without tx lock first */ + txq = skb_get_tx_queue(txq->dev, skb); + if (!netif_xmit_frozen_or_stopped(txq)) { + q->skb_bad_txq = NULL; + qdisc_qstats_backlog_dec(q, skb); + q->q.qlen--; + goto bulk; } + return NULL; + } + if (!(q->flags & TCQ_F_ONETXQUEUE) || + !netif_xmit_frozen_or_stopped(txq)) + skb = q->dequeue(q); + if (skb) { +bulk: + if (qdisc_may_bulk(q)) + try_bulk_dequeue_skb(q, skb, txq, packets); + else + try_bulk_dequeue_skb_slow(q, skb, packets); } return skb; } @@ -348,9 +392,10 @@ EXPORT_SYMBOL(netif_carrier_off); cheaper. 
*/ -static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) +static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, + struct sk_buff **to_free) { - kfree_skb(skb); + __qdisc_drop(skb, to_free); return NET_XMIT_CN; } @@ -439,7 +484,8 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv, return priv->q + band; } -static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) +static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, + struct sk_buff **to_free) { if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { int band = prio2band[skb->priority & TC_PRIO_MAX]; @@ -451,7 +497,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) return __qdisc_enqueue_tail(skb, qdisc, list); } - return qdisc_drop(skb, qdisc); + return qdisc_drop(skb, qdisc, to_free); } static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) @@ -622,11 +668,14 @@ void qdisc_reset(struct Qdisc *qdisc) if (ops->reset) ops->reset(qdisc); + kfree_skb(qdisc->skb_bad_txq); + qdisc->skb_bad_txq = NULL; + if (qdisc->gso_skb) { kfree_skb_list(qdisc->gso_skb); qdisc->gso_skb = NULL; - qdisc->q.qlen = 0; } + qdisc->q.qlen = 0; } EXPORT_SYMBOL(qdisc_reset); @@ -665,6 +714,7 @@ void qdisc_destroy(struct Qdisc *qdisc) dev_put(qdisc_dev(qdisc)); kfree_skb_list(qdisc->gso_skb); + kfree_skb(qdisc->skb_bad_txq); /* * gen_estimator est_timer() might access qdisc->q.lock, * wait a RCU grace period before freeing qdisc. diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index b5fb63c7be02..c78a093c551a 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -149,7 +149,8 @@ static inline int gred_use_harddrop(struct gred_sched *t) return t->red_flags & TC_RED_HARDDROP; } -static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct gred_sched_data *q = NULL; struct gred_sched *t = qdisc_priv(sch); @@ -237,10 +238,10 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) q->stats.pdrop++; drop: - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); congestion_drop: - qdisc_drop(skb, sch); + qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index bd08c363a26d..8cb5eff7b79c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1572,7 +1572,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) } static int -hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct hfsc_class *cl; int uninitialized_var(err); @@ -1581,11 +1581,11 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (cl == NULL) { if (err & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return err; } - err = qdisc_enqueue(skb, cl->qdisc); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index c44593b8e65a..e3d0458af17b 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -345,7 +345,7 @@ static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb) skb->next = NULL; } -static unsigned int hhf_drop(struct Qdisc *sch) +static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free) { struct hhf_sched_data *q = qdisc_priv(sch); struct wdrr_bucket 
*bucket; @@ -359,16 +359,16 @@ static unsigned int hhf_drop(struct Qdisc *sch) struct sk_buff *skb = dequeue_head(bucket); sch->q.qlen--; - qdisc_qstats_drop(sch); qdisc_qstats_backlog_dec(sch, skb); - kfree_skb(skb); + qdisc_drop(skb, sch, to_free); } /* Return id of the bucket from which the packet was dropped. */ return bucket - q->buckets; } -static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct hhf_sched_data *q = qdisc_priv(sch); enum wdrr_bucket_idx idx; @@ -406,7 +406,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) /* Return Congestion Notification only if we dropped a packet from this * bucket. */ - if (hhf_drop(sch) == idx) + if (hhf_drop(sch, to_free) == idx) return NET_XMIT_CN; /* As we dropped a packet, better let upper stack know this. */ diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index a454605ab5cb..ba098f2654b4 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -117,7 +117,6 @@ struct htb_class { * Written often fields */ struct gnet_stats_basic_packed bstats; - struct gnet_stats_queue qstats; struct tc_htb_xstats xstats; /* our special stats */ /* token bucket parameters */ @@ -140,6 +139,8 @@ struct htb_class { enum htb_cmode cmode; /* current mode of the class */ struct rb_node pq_node; /* node for event queue */ struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */ + + unsigned int drops ____cacheline_aligned_in_smp; }; struct htb_level { @@ -569,7 +570,8 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) list_del_init(&cl->un.leaf.drop_list); } -static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { int uninitialized_var(ret); struct htb_sched *q = qdisc_priv(sch); @@ -581,19 +583,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) __skb_queue_tail(&q->direct_queue, skb); q->direct_pkts++; } else { - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { + } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q, + to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); - cl->qstats.drops++; + cl->drops++; } return ret; } else { @@ -1108,17 +1111,22 @@ static int htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct htb_class *cl = (struct htb_class *)arg; + struct gnet_stats_queue qs = { + .drops = cl->drops, + }; __u32 qlen = 0; - if (!cl->level && cl->un.leaf.q) + if (!cl->level && cl->un.leaf.q) { qlen = cl->un.leaf.q->q.qlen; + qs.backlog = cl->un.leaf.q->qstats.backlog; + } cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) + gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0) return -1; return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 5ea93305d705..9ffbb025b37e 100644 --- a/net/sched/sch_multiq.c +++ 
b/net/sched/sch_multiq.c @@ -65,7 +65,8 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) } static int -multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) +multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct Qdisc *qdisc; int ret; @@ -76,12 +77,12 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return ret; } #endif - ret = qdisc_enqueue(skb, qdisc); + ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; return NET_XMIT_SUCCESS; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index e271967439bf..ccca8ca4c722 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -397,7 +397,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) * when we statistically choose to corrupt one, we instead segment it, returning * the first packet to be corrupted, and re-enqueue the remaining frames */ -static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) +static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct sk_buff *segs; netdev_features_t features = netif_skb_features(skb); @@ -405,7 +406,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); if (IS_ERR_OR_NULL(segs)) { - qdisc_drop(skb, sch); + qdisc_drop(skb, sch, to_free); return NULL; } consume_skb(skb); @@ -418,7 +419,8 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) * NET_XMIT_DROP: queue length didn't change. * NET_XMIT_SUCCESS: one skb was queued. */ -static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct netem_sched_data *q = qdisc_priv(sch); /* We don't fill cb now as skb_unshare() may invalidate it */ @@ -443,7 +445,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) } if (count == 0) { qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } @@ -463,7 +465,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) u32 dupsave = q->duplicate; /* prevent duplicating a dup... 
*/ q->duplicate = 0; - rootq->enqueue(skb2, rootq); + rootq->enqueue(skb2, rootq, to_free); q->duplicate = dupsave; } @@ -475,7 +477,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) */ if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { if (skb_is_gso(skb)) { - segs = netem_segment(skb, sch); + segs = netem_segment(skb, sch, to_free); if (!segs) return NET_XMIT_DROP; } else { @@ -488,7 +490,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))) { - rc = qdisc_drop(skb, sch); + rc = qdisc_drop(skb, sch, to_free); goto finish_segs; } @@ -497,7 +499,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) } if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); qdisc_qstats_backlog_inc(sch, skb); @@ -557,7 +559,7 @@ finish_segs: segs->next = NULL; qdisc_skb_cb(segs)->pkt_len = segs->len; last_len = segs->len; - rc = qdisc_enqueue(segs, sch); + rc = qdisc_enqueue(segs, sch, to_free); if (rc != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(rc)) qdisc_qstats_drop(sch); @@ -615,8 +617,11 @@ deliver: #endif if (q->qdisc) { - int err = qdisc_enqueue(skb, q->qdisc); + struct sk_buff *to_free = NULL; + int err; + err = qdisc_enqueue(skb, q->qdisc, &to_free); + kfree_skb_list(to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { qdisc_qstats_drop(sch); diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 912a46a5d02e..a570b0bb254c 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -134,7 +134,8 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size) return false; } -static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct pie_sched_data *q = qdisc_priv(sch); bool enqueue = false; @@ -166,7 +167,7 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) out: q->stats.dropped++; - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = { diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index a12cd37680f8..1c6cbab3e7b9 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -88,7 +88,8 @@ struct plug_sched_data { u32 pkts_to_release; }; -static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct plug_sched_data *q = qdisc_priv(sch); @@ -98,7 +99,7 @@ static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch) return qdisc_enqueue_tail(skb, sch); } - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } static struct sk_buff *plug_dequeue(struct Qdisc *sch) diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index de492682caee..f4d443aeae54 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -67,7 +67,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) } static int -prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) +prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct Qdisc *qdisc; int ret; @@ -83,7 +83,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) } #endif - ret = qdisc_enqueue(skb, qdisc); + ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { 
qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 0427fa8b23f2..f27ffee106f6 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1217,7 +1217,8 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) return agg; } -static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl; @@ -1240,11 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) qdisc_pkt_len(skb)); if (err) { cl->qstats.drops++; - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } } - err = qdisc_enqueue(skb, cl->qdisc); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); if (net_xmit_drop_count(err)) { diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index a0d57530335e..249b2a18acbd 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -56,7 +56,8 @@ static inline int red_use_harddrop(struct red_sched_data *q) return q->flags & TC_RED_HARDDROP; } -static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; @@ -95,7 +96,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) break; } - ret = qdisc_enqueue(skb, child); + ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; @@ -106,7 +107,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) return ret; congestion_drop: - qdisc_drop(skb, sch); + qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; } diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index c69611640fa5..add3cc7d37ec 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -275,7 +275,8 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, return false; } -static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct sfb_sched_data *q = qdisc_priv(sch); @@ -397,7 +398,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) } enqueue: - ret = qdisc_enqueue(skb, child); + ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { sch->q.qlen++; increment_qlen(skb, q); @@ -408,7 +409,7 @@ enqueue: return ret; drop: - qdisc_drop(skb, sch); + qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; other_drop: if (ret & __NET_XMIT_BYPASS) diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 57d118b41cad..7f195ed4d568 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q) } static int -sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) +sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash, dropped; @@ -367,7 +367,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (x == SFQ_EMPTY_SLOT) { x = q->dep[0].next; /* get a free slot */ if (x >= SFQ_MAX_FLOWS) - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); q->ht[hash] = x; slot = &q->slots[x]; slot->hash = hash; @@ -424,14 +424,14 @@ sfq_enqueue(struct sk_buff *skb, 
struct Qdisc *sch) if (slot->qlen >= q->maxdepth) { congestion_drop: if (!sfq_headdrop(q)) - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); /* We know we have at least one packet in queue */ head = slot_dequeue_head(slot); delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb); sch->qstats.backlog -= delta; slot->backlog -= delta; - qdisc_drop(head, sch); + qdisc_drop(head, sch, to_free); slot_queue_add(slot, skb); return NET_XMIT_CN; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index c12df84d1078..303355c449ab 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -155,7 +155,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) /* GSO packet is too big, segment it so that tbf can transmit * each segment in time */ -static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) +static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); struct sk_buff *segs, *nskb; @@ -166,7 +167,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); if (IS_ERR_OR_NULL(segs)) - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); nb = 0; while (segs) { @@ -174,7 +175,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) segs->next = NULL; qdisc_skb_cb(segs)->pkt_len = segs->len; len += segs->len; - ret = qdisc_enqueue(segs, q->qdisc); + ret = qdisc_enqueue(segs, q->qdisc, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); @@ -190,17 +191,18 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; } -static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); int ret; if (qdisc_pkt_len(skb) > q->max_size) { if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) - return tbf_segment(skb, sch); - return qdisc_drop(skb, sch); + return tbf_segment(skb, sch, to_free); + return qdisc_drop(skb, sch, to_free); } - ret = qdisc_enqueue(skb, q->qdisc); + ret = qdisc_enqueue(skb, q->qdisc, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index e02687185a59..2cd9b4478b92 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -77,7 +77,7 @@ struct teql_sched_data { /* "teql*" qdisc routines */ static int -teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) +teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct net_device *dev = qdisc_dev(sch); struct teql_sched_data *q = qdisc_priv(sch); @@ -87,7 +87,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; } - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } static struct sk_buff * diff --git a/net/tipc/server.c b/net/tipc/server.c index 272d20a795d5..215849ce453d 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -418,13 +418,12 @@ static struct outqueue_entry *tipc_alloc_entry(void *data, int len) if (!entry) return NULL; - buf = kmalloc(len, GFP_ATOMIC); + buf = kmemdup(data, len, GFP_ATOMIC); if (!buf) { kfree(entry); return NULL; } - memcpy(buf, data, len); entry->iov.iov_base = buf; entry->iov.iov_len = len; diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c index 
29a276d766fc..8a4085c2d117 100644 --- a/samples/bpf/sockex2_user.c +++ b/samples/bpf/sockex2_user.c @@ -5,6 +5,7 @@ #include "bpf_load.h" #include <unistd.h> #include <arpa/inet.h> +#include <sys/resource.h> struct pair { __u64 packets; @@ -13,11 +14,13 @@ struct pair { int main(int ac, char **argv) { + struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; char filename[256]; FILE *f; int i, sock; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + setrlimit(RLIMIT_MEMLOCK, &r); if (load_bpf_file(filename)) { printf("%s", bpf_log_buf); diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c index 2617772d060d..d4184ab5f3ac 100644 --- a/samples/bpf/sockex3_user.c +++ b/samples/bpf/sockex3_user.c @@ -5,6 +5,7 @@ #include "bpf_load.h" #include <unistd.h> #include <arpa/inet.h> +#include <sys/resource.h> struct flow_keys { __be32 src; @@ -23,11 +24,13 @@ struct pair { int main(int argc, char **argv) { + struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; char filename[256]; FILE *f; int i, sock; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + setrlimit(RLIMIT_MEMLOCK, &r); if (load_bpf_file(filename)) { printf("%s", bpf_log_buf); |
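
The common thread in the scheduler hunks above is the extra struct sk_buff **to_free argument: an ->enqueue() handler no longer calls kfree_skb() while the qdisc is locked, it chains anything it decides to drop onto the caller-supplied list via __qdisc_drop()/qdisc_drop(), and the caller frees the whole chain with a single kfree_skb_list() call once it is safe to do so — for the top-level enqueue path, after the root qdisc lock has been dropped; the netem deliver hunk above uses the same helper for its inner qdisc. Below is a minimal userspace sketch of that pattern, not kernel code; every name in it (struct pkt, fake_qdisc, defer_drop(), enqueue(), free_list()) is an invented stand-in for the corresponding kernel object or helper.

/*
 * Minimal userspace model of the deferred-drop ("to_free") pattern.
 * Invented stand-ins: struct pkt ~ struct sk_buff, struct fake_qdisc ~
 * struct Qdisc, defer_drop() ~ __qdisc_drop(), enqueue() ~ a qdisc
 * ->enqueue() hook, free_list() ~ kfree_skb_list().
 * Build with: cc -pthread -o to_free_demo to_free_demo.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct pkt {
	struct pkt *next;
	int id;
};

struct fake_qdisc {
	pthread_mutex_t lock;	/* plays the role of the root qdisc lock */
	struct pkt *head;	/* queued packets */
	unsigned int qlen;
	unsigned int limit;
	unsigned int drops;
};

/* Like __qdisc_drop(): do not free under the lock, just chain the packet
 * onto the caller-provided list. */
static void defer_drop(struct pkt *p, struct pkt **to_free)
{
	p->next = *to_free;
	*to_free = p;
}

/* Like an ->enqueue() handler with the new signature: drops go to *to_free,
 * nothing is freed while the lock is held. */
static int enqueue(struct pkt *p, struct fake_qdisc *q, struct pkt **to_free)
{
	if (q->qlen >= q->limit) {
		q->drops++;
		defer_drop(p, to_free);	/* qdisc_drop() analogue */
		return 1;		/* "NET_XMIT_DROP" */
	}
	p->next = q->head;
	q->head = p;
	q->qlen++;
	return 0;			/* "NET_XMIT_SUCCESS" */
}

/* Like kfree_skb_list(): free a whole chain in one pass. */
static void free_list(struct pkt *p)
{
	while (p) {
		struct pkt *next = p->next;

		free(p);
		p = next;
	}
}

int main(void)
{
	struct fake_qdisc q = { .limit = 2 };
	struct pkt *to_free = NULL;
	int i;

	pthread_mutex_init(&q.lock, NULL);

	pthread_mutex_lock(&q.lock);	/* the contended section */
	for (i = 0; i < 5; i++) {
		struct pkt *p = calloc(1, sizeof(*p));

		if (!p)
			break;
		p->id = i;
		enqueue(p, &q, &to_free);
	}
	pthread_mutex_unlock(&q.lock);	/* lock released... */

	free_list(to_free);		/* ...then the drops are paid for */
	printf("queued %u, dropped %u\n", q.qlen, q.drops);
	free_list(q.head);		/* clean up what was queued */
	pthread_mutex_destroy(&q.lock);
	return 0;
}

The design choice being modelled: drops happen precisely when the root qdisc lock is most contended, so deferring the frees keeps the critical section short and moves the cost of freeing outside it.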