-rw-r--r-- | drivers/net/bonding/bond_main.c | 69
-rw-r--r-- | drivers/net/dsa/mt7530.c | 5
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 5
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 56
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 6
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1
-rw-r--r-- | drivers/net/ethernet/mscc/ocelot_io.c | 16
-rw-r--r-- | drivers/net/phy/nxp-tja11xx.c | 13
-rw-r--r-- | drivers/net/wwan/wwan_core.c | 7
-rw-r--r-- | include/net/bonding.h | 12
-rw-r--r-- | include/net/mctp.h | 1
-rw-r--r-- | net/bridge/br_vlan.c | 3
-rw-r--r-- | net/core/pktgen.c | 163
-rw-r--r-- | net/dsa/dsa2.c | 3
-rw-r--r-- | net/dsa/dsa_priv.h | 4
-rw-r--r-- | net/dsa/port.c | 32
-rw-r--r-- | net/dsa/tag_8021q.c | 21
-rw-r--r-- | net/mctp/route.c | 27
18 files changed, 355 insertions, 89 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 365953e8013e..c0db4e2b2462 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1725,6 +1725,20 @@ void bond_lower_state_changed(struct slave *slave) netdev_lower_state_changed(slave->dev, &info); } +#define BOND_NL_ERR(bond_dev, extack, errmsg) do { \ + if (extack) \ + NL_SET_ERR_MSG(extack, errmsg); \ + else \ + netdev_err(bond_dev, "Error: %s\n", errmsg); \ +} while (0) + +#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do { \ + if (extack) \ + NL_SET_ERR_MSG(extack, errmsg); \ + else \ + slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \ +} while (0) + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, struct netlink_ext_ack *extack) @@ -1738,9 +1752,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (slave_dev->flags & IFF_MASTER && !netif_is_bond_master(slave_dev)) { - NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved"); - netdev_err(bond_dev, - "Error: Device with IFF_MASTER cannot be enslaved\n"); + BOND_NL_ERR(bond_dev, extack, + "Device with IFF_MASTER cannot be enslaved"); return -EPERM; } @@ -1752,15 +1765,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, /* already in-use? */ if (netdev_is_rx_handler_busy(slave_dev)) { - NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved"); - slave_err(bond_dev, slave_dev, - "Error: Device is in use and cannot be enslaved\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Device is in use and cannot be enslaved"); return -EBUSY; } if (bond_dev == slave_dev) { - NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself."); - netdev_err(bond_dev, "cannot enslave bond to itself.\n"); + BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself."); return -EPERM; } @@ -1769,8 +1780,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n"); if (vlan_uses_dev(bond_dev)) { - NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond"); - slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Can not enslave VLAN challenged device to VLAN enabled bond"); return -EPERM; } else { slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n"); @@ -1788,8 +1799,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, * enslaving it; the old ifenslave will not. 
*/ if (slave_dev->flags & IFF_UP) { - NL_SET_ERR_MSG(extack, "Device can not be enslaved while up"); - slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Device can not be enslaved while up"); return -EPERM; } @@ -1828,17 +1839,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bond_dev); } } else if (bond_dev->type != slave_dev->type) { - NL_SET_ERR_MSG(extack, "Device type is different from other slaves"); - slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n", - slave_dev->type, bond_dev->type); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Device type is different from other slaves"); return -EINVAL; } if (slave_dev->type == ARPHRD_INFINIBAND && BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { - NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves"); - slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n", - slave_dev->type); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Only active-backup mode is supported for infiniband slaves"); res = -EOPNOTSUPP; goto err_undo_flags; } @@ -1852,8 +1861,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bond->params.fail_over_mac = BOND_FOM_ACTIVE; slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n"); } else { - NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active"); - slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Slave device does not support setting the MAC address, but fail_over_mac is not set to active"); res = -EOPNOTSUPP; goto err_undo_flags; } @@ -2149,8 +2158,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (!slave_dev->netdev_ops->ndo_bpf || !slave_dev->netdev_ops->ndo_xdp_xmit) { if (bond->xdp_prog) { - NL_SET_ERR_MSG(extack, "Slave does not support XDP"); - slave_err(bond_dev, slave_dev, "Slave does not support XDP\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Slave does not support XDP"); res = -EOPNOTSUPP; goto err_sysfs_del; } @@ -2163,10 +2172,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, }; if (dev_xdp_prog_count(slave_dev) > 0) { - NL_SET_ERR_MSG(extack, - "Slave has XDP program loaded, please unload before enslaving"); - slave_err(bond_dev, slave_dev, - "Slave has XDP program loaded, please unload before enslaving\n"); + SLAVE_NL_ERR(bond_dev, slave_dev, extack, + "Slave has XDP program loaded, please unload before enslaving"); res = -EOPNOTSUPP; goto err_sysfs_del; } @@ -5190,17 +5197,15 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, if (!slave_dev->netdev_ops->ndo_bpf || !slave_dev->netdev_ops->ndo_xdp_xmit) { - NL_SET_ERR_MSG(extack, "Slave device does not support XDP"); - slave_err(dev, slave_dev, "Slave does not support XDP\n"); + SLAVE_NL_ERR(dev, slave_dev, extack, + "Slave device does not support XDP"); err = -EOPNOTSUPP; goto err; } if (dev_xdp_prog_count(slave_dev) > 0) { - NL_SET_ERR_MSG(extack, - "Slave has XDP program loaded, please unload before enslaving"); - slave_err(dev, slave_dev, - "Slave has XDP program loaded, please unload before enslaving\n"); + SLAVE_NL_ERR(dev, slave_dev, extack, + "Slave has XDP program 
loaded, please unload before enslaving"); err = -EOPNOTSUPP; goto err; } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 53e6150e95b6..77e0205e4e59 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1315,11 +1315,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, /* Remove this port from the port matrix of the other ports * in the same bridge. If the port is disabled, port matrix * is kept and not being setup until the port becomes enabled. - * And the other port's port matrix cannot be broken when the - * other port is still a VLAN-aware port. */ - if (dsa_is_user_port(ds, i) && i != port && - !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { + if (dsa_is_user_port(ds, i) && i != port) { if (dsa_to_port(ds, i)->bridge_dev != bridge) continue; if (priv->ports[i].enable) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 27809d68d6ed..b0e696b08b8b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -596,6 +596,11 @@ struct hns3_hw_error_info { const char *msg; }; +struct hns3_reset_type_map { + enum ethtool_reset_flags rst_flags; + enum hnae3_reset_type rst_type; +}; + static inline int ring_space(struct hns3_enet_ring *ring) { /* This smp_load_acquire() pairs with smp_store_release() in diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 82061ab6930f..c8f09b07185e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -953,6 +953,60 @@ static int hns3_get_rxnfc(struct net_device *netdev, } } +static const struct hns3_reset_type_map hns3_reset_type[] = { + {ETH_RESET_MGMT, HNAE3_IMP_RESET}, + {ETH_RESET_ALL, HNAE3_GLOBAL_RESET}, + {ETH_RESET_DEDICATED, HNAE3_FUNC_RESET}, +}; + +static const struct hns3_reset_type_map hns3vf_reset_type[] = { + {ETH_RESET_DEDICATED, HNAE3_VF_FUNC_RESET}, +}; + +static int hns3_set_reset(struct net_device *netdev, u32 *flags) +{ + enum hnae3_reset_type rst_type = HNAE3_NONE_RESET; + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + const struct hns3_reset_type_map *rst_type_map; + u32 i, size; + + if (ops->ae_dev_resetting && ops->ae_dev_resetting(h)) + return -EBUSY; + + if (!ops->set_default_reset_request || !ops->reset_event) + return -EOPNOTSUPP; + + if (h->flags & HNAE3_SUPPORT_VF) { + rst_type_map = hns3vf_reset_type; + size = ARRAY_SIZE(hns3vf_reset_type); + } else { + rst_type_map = hns3_reset_type; + size = ARRAY_SIZE(hns3_reset_type); + } + + for (i = 0; i < size; i++) { + if (rst_type_map[i].rst_flags == *flags) { + rst_type = rst_type_map[i].rst_type; + break; + } + } + + if (rst_type == HNAE3_NONE_RESET || + (rst_type == HNAE3_IMP_RESET && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)) + return -EOPNOTSUPP; + + netdev_info(netdev, "Setting reset type %d\n", rst_type); + + ops->set_default_reset_request(ae_dev, rst_type); + + ops->reset_event(h->pdev, h); + + return 0; +} + static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, u32 tx_desc_num, u32 rx_desc_num) { @@ -1699,6 +1753,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .set_priv_flags = hns3_set_priv_flags, .get_tunable = hns3_get_tunable, .set_tunable = hns3_set_tunable, + .reset = hns3_set_reset, }; static const struct 
ethtool_ops hns3_ethtool_ops = { @@ -1740,6 +1795,7 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_ts_info = hns3_get_ts_info, .get_tunable = hns3_get_tunable, .set_tunable = hns3_set_tunable, + .reset = hns3_set_reset, }; void hns3_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f15d76ec0068..9fd15287986f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3789,6 +3789,12 @@ static void hclge_do_reset(struct hclge_dev *hdev) } switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + dev_info(&pdev->dev, "IMP reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); + break; case HNAE3_GLOBAL_RESET: dev_info(&pdev->dev, "global reset requested\n"); val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index cc31b12904ad..ada5c68f2851 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -194,6 +194,7 @@ enum HLCGE_PORT_TYPE { #define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U #define HCLGE_VECTOR0_IMP_RD_POISON_B 5U #define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U +#define HCLGE_TRIGGER_IMP_RESET_B 7U #define HCLGE_MAC_DEFAULT_FRAME \ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index ea4e83410fe4..7390fa3980ec 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset) ocelot->map[target][reg & REG_MASK] + offset, &val); return val; } -EXPORT_SYMBOL(__ocelot_read_ix); +EXPORT_SYMBOL_GPL(__ocelot_read_ix); void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset) { @@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset) regmap_write(ocelot->targets[target], ocelot->map[target][reg & REG_MASK] + offset, val); } -EXPORT_SYMBOL(__ocelot_write_ix); +EXPORT_SYMBOL_GPL(__ocelot_write_ix); void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, u32 offset) @@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, ocelot->map[target][reg & REG_MASK] + offset, mask, val); } -EXPORT_SYMBOL(__ocelot_rmw_ix); +EXPORT_SYMBOL_GPL(__ocelot_rmw_ix); u32 ocelot_port_readl(struct ocelot_port *port, u32 reg) { @@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg) regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val); return val; } -EXPORT_SYMBOL(ocelot_port_readl); +EXPORT_SYMBOL_GPL(ocelot_port_readl); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) { @@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val); } -EXPORT_SYMBOL(ocelot_port_writel); +EXPORT_SYMBOL_GPL(ocelot_port_writel); void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg) { @@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg) ocelot_port_writel(port, (cur & (~mask)) | val, 
reg); } -EXPORT_SYMBOL(ocelot_port_rmwl); +EXPORT_SYMBOL_GPL(ocelot_port_rmwl); u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target, u32 reg, u32 offset) @@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot, return 0; } -EXPORT_SYMBOL(ocelot_regfields_init); +EXPORT_SYMBOL_GPL(ocelot_regfields_init); static struct regmap_config ocelot_regmap_config = { .reg_bits = 32, @@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res) return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config); } -EXPORT_SYMBOL(ocelot_regmap_init); +EXPORT_SYMBOL_GPL(ocelot_regmap_init); diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c index afd7afa1f498..9944cc501806 100644 --- a/drivers/net/phy/nxp-tja11xx.c +++ b/drivers/net/phy/nxp-tja11xx.c @@ -47,12 +47,14 @@ #define MII_INTSRC_LINK_FAIL BIT(10) #define MII_INTSRC_LINK_UP BIT(9) #define MII_INTSRC_MASK (MII_INTSRC_LINK_FAIL | MII_INTSRC_LINK_UP) -#define MII_INTSRC_TEMP_ERR BIT(1) #define MII_INTSRC_UV_ERR BIT(3) +#define MII_INTSRC_TEMP_ERR BIT(1) #define MII_INTEN 22 #define MII_INTEN_LINK_FAIL BIT(10) #define MII_INTEN_LINK_UP BIT(9) +#define MII_INTEN_UV_ERR BIT(3) +#define MII_INTEN_TEMP_ERR BIT(1) #define MII_COMMSTAT 23 #define MII_COMMSTAT_LINK_UP BIT(15) @@ -607,7 +609,8 @@ static int tja11xx_config_intr(struct phy_device *phydev) if (err) return err; - value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP; + value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP | + MII_INTEN_UV_ERR | MII_INTEN_TEMP_ERR; err = phy_write(phydev, MII_INTEN, value); } else { err = phy_write(phydev, MII_INTEN, value); @@ -622,6 +625,7 @@ static int tja11xx_config_intr(struct phy_device *phydev) static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev) { + struct device *dev = &phydev->mdio.dev; int irq_status; irq_status = phy_read(phydev, MII_INTSRC); @@ -630,6 +634,11 @@ static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } + if (irq_status & MII_INTSRC_TEMP_ERR) + dev_warn(dev, "Overtemperature error detected (temp > 155C°).\n"); + if (irq_status & MII_INTSRC_UV_ERR) + dev_warn(dev, "Undervoltage error detected.\n"); + if (!(irq_status & MII_INTSRC_MASK)) return IRQ_NONE; diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index 674a81d79db3..5f9f57e59b70 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -355,8 +355,8 @@ struct wwan_port *wwan_create_port(struct device *parent, { struct wwan_device *wwandev; struct wwan_port *port; - int minor, err = -ENOMEM; char namefmt[0x20]; + int minor, err; if (type > WWAN_PORT_MAX || !ops) return ERR_PTR(-EINVAL); @@ -370,11 +370,14 @@ struct wwan_port *wwan_create_port(struct device *parent, /* A port is exposed as character device, get a minor */ minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL); - if (minor < 0) + if (minor < 0) { + err = minor; goto error_wwandev_remove; + } port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) { + err = -ENOMEM; ida_free(&minors, minor); goto error_wwandev_remove; } diff --git a/include/net/bonding.h b/include/net/bonding.h index 9f3fdc180c6c..15e083e18f75 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -150,11 +150,6 @@ struct bond_params { u8 ad_actor_system[ETH_ALEN + 2]; }; -struct bond_parm_tbl { - char *modename; - int mode; -}; - struct slave { struct net_device *dev; /* first - useful for panic debug */ struct bonding *bond; /* our master */ @@ 
-755,13 +750,6 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip) /* exported from bond_main.c */ extern unsigned int bond_net_id; -extern const struct bond_parm_tbl bond_lacp_tbl[]; -extern const struct bond_parm_tbl xmit_hashtype_tbl[]; -extern const struct bond_parm_tbl arp_validate_tbl[]; -extern const struct bond_parm_tbl arp_all_targets_tbl[]; -extern const struct bond_parm_tbl fail_over_mac_tbl[]; -extern const struct bond_parm_tbl pri_reselect_tbl[]; -extern struct bond_parm_tbl ad_select_tbl[]; /* exported from bond_netlink.c */ extern struct rtnl_link_ops bond_link_ops; diff --git a/include/net/mctp.h b/include/net/mctp.h index 54bbe042c973..a824d47c3c6d 100644 --- a/include/net/mctp.h +++ b/include/net/mctp.h @@ -173,6 +173,7 @@ struct mctp_route { struct mctp_dev *dev; unsigned int mtu; + unsigned char type; int (*output)(struct mctp_route *route, struct sk_buff *skb); diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 8cfd035bbaf9..cbc922681a76 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -2019,7 +2019,7 @@ static int br_vlan_dump_dev(const struct net_device *dev, if (dump_global) { if (br_vlan_global_opts_can_enter_range(v, range_end)) - continue; + goto update_end; if (!br_vlan_global_opts_fill(skb, range_start->vid, range_end->vid, range_start)) { @@ -2045,6 +2045,7 @@ static int br_vlan_dump_dev(const struct net_device *dev, range_start = v; } +update_end: range_end = v; } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 314f97acf39d..94008536a9d6 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -175,6 +175,9 @@ #define IP_NAME_SZ 32 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ #define MPLS_STACK_BOTTOM htonl(0x00000100) +/* Max number of internet mix entries that can be specified in imix_weights. */ +#define MAX_IMIX_ENTRIES 20 +#define IMIX_PRECISION 100 /* Precision of IMIX distribution */ #define func_enter() pr_debug("entering %s\n", __func__); @@ -242,6 +245,12 @@ static char *pkt_flag_names[] = { #define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4) #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) +struct imix_pkt { + u64 size; + u64 weight; + u64 count_so_far; +}; + struct flow_state { __be32 cur_daddr; int count; @@ -343,6 +352,12 @@ struct pktgen_dev { __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 4) */ + /* IMIX */ + unsigned int n_imix_entries; + struct imix_pkt imix_entries[MAX_IMIX_ENTRIES]; + /* Maps 0-IMIX_PRECISION range to imix_entry based on probability*/ + __u8 imix_distribution[IMIX_PRECISION]; + /* MPLS */ unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */ __be32 labels[MAX_MPLS_LABELS]; @@ -471,6 +486,7 @@ static void pktgen_stop_all_threads(struct pktgen_net *pn); static void pktgen_stop(struct pktgen_thread *t); static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); +static void fill_imix_distribution(struct pktgen_dev *pkt_dev); /* Module parameters, defaults. 
*/ static int pg_count_d __read_mostly = 1000; @@ -552,6 +568,16 @@ static int pktgen_if_show(struct seq_file *seq, void *v) (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size, pkt_dev->max_pkt_size); + if (pkt_dev->n_imix_entries > 0) { + seq_puts(seq, " imix_weights: "); + for (i = 0; i < pkt_dev->n_imix_entries; i++) { + seq_printf(seq, "%llu,%llu ", + pkt_dev->imix_entries[i].size, + pkt_dev->imix_entries[i].weight); + } + seq_puts(seq, "\n"); + } + seq_printf(seq, " frags: %d delay: %llu clone_skb: %d ifname: %s\n", pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, @@ -669,6 +695,18 @@ static int pktgen_if_show(struct seq_file *seq, void *v) (unsigned long long)pkt_dev->sofar, (unsigned long long)pkt_dev->errors); + if (pkt_dev->n_imix_entries > 0) { + int i; + + seq_puts(seq, " imix_size_counts: "); + for (i = 0; i < pkt_dev->n_imix_entries; i++) { + seq_printf(seq, "%llu,%llu ", + pkt_dev->imix_entries[i].size, + pkt_dev->imix_entries[i].count_so_far); + } + seq_puts(seq, "\n"); + } + seq_printf(seq, " started: %lluus stopped: %lluus idle: %lluus\n", (unsigned long long) ktime_to_us(pkt_dev->started_at), @@ -792,6 +830,62 @@ done_str: return i; } +/* Parses imix entries from user buffer. + * The user buffer should consist of imix entries separated by spaces + * where each entry consists of size and weight delimited by commas. + * "size1,weight_1 size2,weight_2 ... size_n,weight_n" for example. + */ +static ssize_t get_imix_entries(const char __user *buffer, + struct pktgen_dev *pkt_dev) +{ + const int max_digits = 10; + int i = 0; + long len; + char c; + + pkt_dev->n_imix_entries = 0; + + do { + unsigned long weight; + unsigned long size; + + len = num_arg(&buffer[i], max_digits, &size); + if (len < 0) + return len; + i += len; + if (get_user(c, &buffer[i])) + return -EFAULT; + /* Check for comma between size_i and weight_i */ + if (c != ',') + return -EINVAL; + i++; + + if (size < 14 + 20 + 8) + size = 14 + 20 + 8; + + len = num_arg(&buffer[i], max_digits, &weight); + if (len < 0) + return len; + if (weight <= 0) + return -EINVAL; + + pkt_dev->imix_entries[pkt_dev->n_imix_entries].size = size; + pkt_dev->imix_entries[pkt_dev->n_imix_entries].weight = weight; + + i += len; + if (get_user(c, &buffer[i])) + return -EFAULT; + + i++; + pkt_dev->n_imix_entries++; + + if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES) + return -E2BIG; + } while (c == ' '); + + return i; +} + static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) { unsigned int n = 0; @@ -960,6 +1054,20 @@ static ssize_t pktgen_if_write(struct file *file, return count; } + if (!strcmp(name, "imix_weights")) { + if (pkt_dev->clone_skb > 0) + return -EINVAL; + + len = get_imix_entries(&user_buffer[i], pkt_dev); + if (len < 0) + return len; + + fill_imix_distribution(pkt_dev); + + i += len; + return count; + } + if (!strcmp(name, "debug")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) @@ -1082,10 +1190,16 @@ static ssize_t pktgen_if_write(struct file *file, len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; + /* clone_skb is not supported for netif_receive xmit_mode and + * IMIX mode. 
+ */ if ((value > 0) && ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) || !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))) return -ENOTSUPP; + if (value > 0 && pkt_dev->n_imix_entries > 0) + return -EINVAL; + i += len; pkt_dev->clone_skb = value; @@ -2472,6 +2586,14 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) t = pkt_dev->min_pkt_size; } pkt_dev->cur_pkt_size = t; + } else if (pkt_dev->n_imix_entries > 0) { + struct imix_pkt *entry; + __u32 t = prandom_u32() % IMIX_PRECISION; + __u8 entry_index = pkt_dev->imix_distribution[t]; + + entry = &pkt_dev->imix_entries[entry_index]; + entry->count_so_far++; + pkt_dev->cur_pkt_size = entry->size; } set_cur_queue_map(pkt_dev); @@ -2540,6 +2662,33 @@ static void free_SAs(struct pktgen_dev *pkt_dev) } } +static void fill_imix_distribution(struct pktgen_dev *pkt_dev) +{ + int cumulative_probabilites[MAX_IMIX_ENTRIES]; + int j = 0; + __u64 cumulative_prob = 0; + __u64 total_weight = 0; + int i = 0; + + for (i = 0; i < pkt_dev->n_imix_entries; i++) + total_weight += pkt_dev->imix_entries[i].weight; + + /* Fill cumulative_probabilites with sum of normalized probabilities */ + for (i = 0; i < pkt_dev->n_imix_entries - 1; i++) { + cumulative_prob += div64_u64(pkt_dev->imix_entries[i].weight * + IMIX_PRECISION, + total_weight); + cumulative_probabilites[i] = cumulative_prob; + } + cumulative_probabilites[pkt_dev->n_imix_entries - 1] = 100; + + for (i = 0; i < IMIX_PRECISION; i++) { + if (i == cumulative_probabilites[j]) + j++; + pkt_dev->imix_distribution[i] = j; + } +} + static int process_ipsec(struct pktgen_dev *pkt_dev, struct sk_buff *skb, __be16 protocol) { @@ -3140,7 +3289,19 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC, ktime_to_ns(elapsed)); - bps = pps * 8 * pkt_dev->cur_pkt_size; + if (pkt_dev->n_imix_entries > 0) { + int i; + struct imix_pkt *entry; + + bps = 0; + for (i = 0; i < pkt_dev->n_imix_entries; i++) { + entry = &pkt_dev->imix_entries[i]; + bps += entry->size * entry->count_so_far; + } + bps = div64_u64(bps * 8 * NSEC_PER_SEC, ktime_to_ns(elapsed)); + } else { + bps = pps * 8 * pkt_dev->cur_pkt_size; + } mbps = bps; do_div(mbps, 1000000); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 8150e16aaa55..dcd67801eca4 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -49,6 +49,9 @@ int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v) * Can be used to notify the switching fabric of events such as cross-chip * bridging between disjoint trees (such as islands of tagger-compatible * switches bridged by an incompatible middle switch). + * + * WARNING: this function is not reliable during probe time, because probing + * between trees is asynchronous and not all DSA trees might have probed. 
*/ int dsa_broadcast(unsigned long e, void *v) { diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 9ea637832ea9..b7a269e0513f 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -261,8 +261,8 @@ int dsa_port_link_register_of(struct dsa_port *dp); void dsa_port_link_unregister_of(struct dsa_port *dp); int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr); void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr); -int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid); -void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid); +int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast); +void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast); extern const struct phylink_mac_ops dsa_port_phylink_mac_ops; static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp, diff --git a/net/dsa/port.c b/net/dsa/port.c index 831d50d28d59..979042a64d1a 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -426,7 +426,9 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br) err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); if (err) - pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n"); + dev_err(dp->ds->dev, + "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n", + dp->index, ERR_PTR(err)); dsa_port_switchdev_unsync_attrs(dp); } @@ -525,8 +527,9 @@ void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag) err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info); if (err) - pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n", - err); + dev_err(dp->ds->dev, + "port %d failed to notify DSA_NOTIFIER_LAG_LEAVE: %pe\n", + dp->index, ERR_PTR(err)); dsa_lag_unmap(dp->ds->dst, lag); } @@ -1306,10 +1309,12 @@ void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr) err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info); if (err) - pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n"); + dev_err(dp->ds->dev, + "port %d failed to notify DSA_NOTIFIER_HSR_LEAVE: %pe\n", + dp->index, ERR_PTR(err)); } -int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid) +int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast) { struct dsa_notifier_tag_8021q_vlan_info info = { .tree_index = dp->ds->dst->index, @@ -1318,10 +1323,13 @@ int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid) .vid = vid, }; - return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); + if (broadcast) + return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); + + return dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info); } -void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid) +void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast) { struct dsa_notifier_tag_8021q_vlan_info info = { .tree_index = dp->ds->dst->index, @@ -1331,8 +1339,12 @@ void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid) }; int err; - err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); + if (broadcast) + err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); + else + err = dsa_port_notify(dp, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info); if (err) - pr_err("DSA: failed to notify tag_8021q VLAN deletion: %pe\n", - ERR_PTR(err)); + dev_err(dp->ds->dev, + "port %d failed to notify tag_8021q VLAN %d deletion: %pe\n", + dp->index, vid, ERR_PTR(err)); } diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 654697ebb6f3..e6d5f3b4fd89 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -362,12 +362,12 
@@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, continue; /* Install the RX VID of the targeted port in our VLAN table */ - err = dsa_port_tag_8021q_vlan_add(dp, targeted_rx_vid); + err = dsa_port_tag_8021q_vlan_add(dp, targeted_rx_vid, false); if (err) return err; /* Install our RX VID into the targeted port's VLAN table */ - err = dsa_port_tag_8021q_vlan_add(targeted_dp, rx_vid); + err = dsa_port_tag_8021q_vlan_add(targeted_dp, rx_vid, false); if (err) return err; } @@ -398,10 +398,10 @@ int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, continue; /* Remove the RX VID of the targeted port from our VLAN table */ - dsa_port_tag_8021q_vlan_del(dp, targeted_rx_vid); + dsa_port_tag_8021q_vlan_del(dp, targeted_rx_vid, true); /* Remove our RX VID from the targeted port's VLAN table */ - dsa_port_tag_8021q_vlan_del(targeted_dp, rx_vid); + dsa_port_tag_8021q_vlan_del(targeted_dp, rx_vid, true); } return 0; @@ -413,7 +413,8 @@ int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, { u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); - return dsa_port_tag_8021q_vlan_add(dsa_to_port(ds, port), tx_vid); + return dsa_port_tag_8021q_vlan_add(dsa_to_port(ds, port), tx_vid, + true); } EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_offload); @@ -423,7 +424,7 @@ void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, { u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num); - dsa_port_tag_8021q_vlan_del(dsa_to_port(ds, port), tx_vid); + dsa_port_tag_8021q_vlan_del(dsa_to_port(ds, port), tx_vid, true); } EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_unoffload); @@ -450,7 +451,7 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) * L2 forwarding rules still take precedence when there are no VLAN * restrictions, so there are no concerns about leaking traffic. 
*/ - err = dsa_port_tag_8021q_vlan_add(dp, rx_vid); + err = dsa_port_tag_8021q_vlan_add(dp, rx_vid, true); if (err) { dev_err(ds->dev, "Failed to apply RX VID %d to port %d: %pe\n", @@ -462,7 +463,7 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) vlan_vid_add(master, ctx->proto, rx_vid); /* Finally apply the TX VID on this port and on the CPU port */ - err = dsa_port_tag_8021q_vlan_add(dp, tx_vid); + err = dsa_port_tag_8021q_vlan_add(dp, tx_vid, true); if (err) { dev_err(ds->dev, "Failed to apply TX VID %d on port %d: %pe\n", @@ -489,11 +490,11 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port) master = dp->cpu_dp->master; - dsa_port_tag_8021q_vlan_del(dp, rx_vid); + dsa_port_tag_8021q_vlan_del(dp, rx_vid, false); vlan_vid_del(master, ctx->proto, rx_vid); - dsa_port_tag_8021q_vlan_del(dp, tx_vid); + dsa_port_tag_8021q_vlan_del(dp, tx_vid, false); } static int dsa_tag_8021q_setup(struct dsa_switch *ds) diff --git a/net/mctp/route.c b/net/mctp/route.c index b3101375c8e7..5265525011ad 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -710,8 +710,9 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt, /* route management */ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start, unsigned int daddr_extent, unsigned int mtu, - bool is_local) + unsigned char type) { + int (*rtfn)(struct mctp_route *rt, struct sk_buff *skb); struct net *net = dev_net(mdev->dev); struct mctp_route *rt, *ert; @@ -721,6 +722,17 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start, if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255) return -EINVAL; + switch (type) { + case RTN_LOCAL: + rtfn = mctp_route_input; + break; + case RTN_UNICAST: + rtfn = mctp_route_output; + break; + default: + return -EINVAL; + } + rt = mctp_route_alloc(); if (!rt) return -ENOMEM; @@ -730,7 +742,8 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start, rt->mtu = mtu; rt->dev = mdev; dev_hold(rt->dev->dev); - rt->output = is_local ? mctp_route_input : mctp_route_output; + rt->type = type; + rt->output = rtfn; ASSERT_RTNL(); /* Prevent duplicate identical routes. */ @@ -777,7 +790,7 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start, int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr) { - return mctp_route_add(mdev, addr, 0, 0, true); + return mctp_route_add(mdev, addr, 0, 0, RTN_LOCAL); } int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr) @@ -936,7 +949,11 @@ static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, /* TODO: parse mtu from nlparse */ mtu = 0; - rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu, false); + if (rtm->rtm_type != RTN_UNICAST) + return -EINVAL; + + rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu, + rtm->rtm_type); return rc; } @@ -985,7 +1002,7 @@ static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt, hdr->rtm_table = RT_TABLE_DEFAULT; hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */ hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */ - hdr->rtm_type = RTN_ANYCAST; /* TODO: type from route */ + hdr->rtm_type = rt->type; if (nla_put_u8(skb, RTA_DST, rt->min)) goto cancel; |