From 57d316ba2017d824ed0fd95fc98b020c834023a1 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 21 Jul 2016 12:03:09 +0200 Subject: mlxsw: pci: Add resources query implementation. Add resources query implementation. If exists, query the HW for its builtin resources instead of having them as consts in the code. Signed-off-by: Nogah Frankel Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 32 ++++++++++++++ drivers/net/ethernet/mellanox/mlxsw/core.c | 10 ++++- drivers/net/ethernet/mellanox/mlxsw/core.h | 9 +++- drivers/net/ethernet/mellanox/mlxsw/pci.c | 59 +++++++++++++++++++++++++- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1 + drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1 + 6 files changed, 109 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index f9cd6e3f7709..28271bedd957 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -105,6 +105,7 @@ enum mlxsw_cmd_opcode { MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013, MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014, MLXSW_CMD_OPCODE_QUERY_EQ = 0x015, + MLXSW_CMD_OPCODE_QUERY_RESOURCES = 0x101, }; static inline const char *mlxsw_cmd_opcode_str(u16 opcode) @@ -144,6 +145,8 @@ static inline const char *mlxsw_cmd_opcode_str(u16 opcode) return "HW2SW_EQ"; case MLXSW_CMD_OPCODE_QUERY_EQ: return "QUERY_EQ"; + case MLXSW_CMD_OPCODE_QUERY_RESOURCES: + return "QUERY_RESOURCES"; default: return "*UNKNOWN*"; } @@ -500,6 +503,35 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core) return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0); } +/* QUERY_RESOURCES - Query chip resources + * -------------------------------------- + * OpMod == 0 (N/A) , INMmod is index + * ---------------------------------- + * The QUERY_RESOURCES command retrieves information related to chip resources + * by resource ID. Every command returns 32 entries. INmod is being use as base. + * for example, index 1 will return entries 32-63. When the tables end and there + * are no more sources in the table, will return resource id 0xFFF to indicate + * it. + */ +static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core, + char *out_mbox, int index) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES, + 0, index, false, out_mbox, + MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_query_resource_id + * The resource id. 0xFFFF indicates table's end. 
+ */ +MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false); + +/* cmd_mbox_query_resource_data + * The resource + */ +MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data, + 0x00, 0, 40, 0x8, 0, false); + /* CONFIG_PROFILE (Set) - Configure Switch Profile * ------------------------------ * OpMod == 1 (Set), INMmod == 0 (N/A) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 480a3ba714dd..068ee65a960b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -111,6 +111,7 @@ struct mlxsw_core { struct { u8 *mapping; /* lag_id+port_index to local_port mapping */ } lag; + struct mlxsw_resources resources; struct mlxsw_hwmon *hwmon; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ @@ -1110,7 +1111,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, } } - err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, + &mlxsw_core->resources); if (err) goto err_bus_init; @@ -1652,6 +1654,12 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); +struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core) +{ + return &mlxsw_core->resources; +} +EXPORT_SYMBOL(mlxsw_core_resources_get); + int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, struct mlxsw_core_port *mlxsw_core_port, u8 local_port, struct net_device *dev, bool split, u32 split_group) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 2fe385cce203..d57ad0db44bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -215,6 +215,7 @@ struct mlxsw_config_profile { u32 kvd_linear_size; u32 kvd_hash_single_size; u32 kvd_hash_double_size; + u8 resource_query_enable; struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; }; @@ -266,10 +267,16 @@ struct mlxsw_driver { const struct mlxsw_config_profile *profile; }; +struct mlxsw_resources { +}; + +struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core); + struct mlxsw_bus { const char *kind; int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core, - const struct mlxsw_config_profile *profile); + const struct mlxsw_config_profile *profile, + struct mlxsw_resources *resources); void (*fini)(void *bus_priv); bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index ddbc9f22278d..a724b6692c0d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1154,6 +1154,56 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); } +#define MLXSW_RESOURCES_TABLE_END_ID 0xffff +#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100 +#define MLXSW_RESOURCES_PER_QUERY 32 + +static void mlxsw_pci_resources_query_parse(int id, u64 val, + struct mlxsw_resources *resources) +{ + switch (id) { + default: + break; + } +} + +static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_resources *resources, + u8 query_enabled) +{ + int index, i; + u64 data; + u16 id; + int err; + + /* Not all the versions support resources query */ + if 
(!query_enabled) + return 0; + + mlxsw_cmd_mbox_zero(mbox); + + for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) { + err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index); + if (err) + return err; + + for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) { + id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); + data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i); + + if (id == MLXSW_RESOURCES_TABLE_END_ID) + return 0; + + mlxsw_pci_resources_query_parse(id, data, resources); + } + } + + /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get + * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. + */ + return -EIO; +} + static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, const struct mlxsw_config_profile *profile) { @@ -1404,7 +1454,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, } static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, - const struct mlxsw_config_profile *profile) + const struct mlxsw_config_profile *profile, + struct mlxsw_resources *resources) { struct mlxsw_pci *mlxsw_pci = bus_priv; struct pci_dev *pdev = mlxsw_pci->pdev; @@ -1463,6 +1514,11 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_boardinfo; + err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources, + profile->resource_query_enable); + if (err) + goto err_query_resources; + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile); if (err) goto err_config_profile; @@ -1485,6 +1541,7 @@ err_request_eq_irq: mlxsw_pci_aqs_fini(mlxsw_pci); err_aqs_init: err_config_profile: +err_query_resources: err_boardinfo: mlxsw_pci_fw_area_fini(mlxsw_pci); err_fw_area_init: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 2ba8cc404cb1..80172fc8bdd5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2488,6 +2488,7 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .type = MLXSW_PORT_SWID_TYPE_ETH, } }, + .resource_query_enable = 1, }; static struct mlxsw_driver mlxsw_sp_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 25f658b3849a..377daa4d509c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1541,6 +1541,7 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = { .type = MLXSW_PORT_SWID_TYPE_ETH, } }, + .resource_query_enable = 0, }; static struct mlxsw_driver mlxsw_sx_driver = { -- cgit v1.2.3 From ded821c8d3793efe00195dd7ab633f6412dd8ae0 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 21 Jul 2016 12:03:10 +0200 Subject: mlxsw: pci: Add max span resources to resources query Add max span resources to resources query. Signed-off-by: Nogah Frankel Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.h | 2 ++ drivers/net/ethernet/mellanox/mlxsw/pci.c | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index d57ad0db44bd..d3476ead9982 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -268,6 +268,8 @@ struct mlxsw_driver { }; struct mlxsw_resources { + u8 max_span_valid:1; + u8 max_span; }; struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index a724b6692c0d..1d1360c178bb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1155,6 +1155,7 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, } #define MLXSW_RESOURCES_TABLE_END_ID 0xffff +#define MLXSW_MAX_SPAN_ID 0x2420 #define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100 #define MLXSW_RESOURCES_PER_QUERY 32 @@ -1162,6 +1163,10 @@ static void mlxsw_pci_resources_query_parse(int id, u64 val, struct mlxsw_resources *resources) { switch (id) { + case MLXSW_MAX_SPAN_ID: + resources->max_span = val; + resources->max_span_valid = 1; + break; default: break; } -- cgit v1.2.3 From bf3994d2ed310813da28362d87bfe9f0e1c3e37f Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 21 Jul 2016 12:03:11 +0200 Subject: net/sched: introduce Match-all classifier The matchall classifier matches every packet and allows the user to apply actions on it. This filter is very useful in usecases where every packet should be matched, for example, packet mirroring (SPAN) can be setup very easily using that filter. Signed-off-by: Jiri Pirko Signed-off-by: Yotam Gigi Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 11 ++ net/sched/Kconfig | 10 ++ net/sched/Makefile | 1 + net/sched/cls_matchall.c | 248 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 270 insertions(+) create mode 100644 net/sched/cls_matchall.c diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 5702e933dc07..a32494887e01 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -433,6 +433,17 @@ enum { #define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1) +/* Match-all classifier */ + +enum { + TCA_MATCHALL_UNSPEC, + TCA_MATCHALL_CLASSID, + TCA_MATCHALL_ACT, + __TCA_MATCHALL_MAX, +}; + +#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1) + /* Extended Matches */ struct tcf_ematch_tree_hdr { diff --git a/net/sched/Kconfig b/net/sched/Kconfig index b148302bbaf2..ccf931b3b94c 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -494,6 +494,16 @@ config NET_CLS_FLOWER To compile this code as a module, choose M here: the module will be called cls_flower. +config NET_CLS_MATCHALL + tristate "Match-all classifier" + select NET_CLS + ---help--- + If you say Y here, you will be able to classify packets based on + nothing. Every packet will match. + + To compile this code as a module, choose M here: the module will + be called cls_matchall. 
+ config NET_EMATCH bool "Extended Matches" select NET_CLS diff --git a/net/sched/Makefile b/net/sched/Makefile index 84bddb373517..ae088a5a9d95 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -60,6 +60,7 @@ obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o obj-$(CONFIG_NET_CLS_BPF) += cls_bpf.o obj-$(CONFIG_NET_CLS_FLOWER) += cls_flower.o +obj-$(CONFIG_NET_CLS_MATCHALL) += cls_matchall.o obj-$(CONFIG_NET_EMATCH) += ematch.o obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c new file mode 100644 index 000000000000..8a6b4de7a99a --- /dev/null +++ b/net/sched/cls_matchall.c @@ -0,0 +1,248 @@ +/* + * net/sched/cls_matchll.c Match-all classifier + * + * Copyright (c) 2016 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include + +#include +#include + +struct cls_mall_filter { + struct tcf_exts exts; + struct tcf_result res; + u32 handle; + struct rcu_head rcu; +}; + +struct cls_mall_head { + struct cls_mall_filter *filter; + struct rcu_head rcu; +}; + +static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, + struct tcf_result *res) +{ + struct cls_mall_head *head = rcu_dereference_bh(tp->root); + struct cls_mall_filter *f = head->filter; + + return tcf_exts_exec(skb, &f->exts, res); +} + +static int mall_init(struct tcf_proto *tp) +{ + struct cls_mall_head *head; + + head = kzalloc(sizeof(*head), GFP_KERNEL); + if (!head) + return -ENOBUFS; + + rcu_assign_pointer(tp->root, head); + + return 0; +} + +static void mall_destroy_filter(struct rcu_head *head) +{ + struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); + + tcf_exts_destroy(&f->exts); + kfree(f); +} + +static bool mall_destroy(struct tcf_proto *tp, bool force) +{ + struct cls_mall_head *head = rtnl_dereference(tp->root); + + if (!force && head->filter) + return false; + + if (head->filter) + call_rcu(&head->filter->rcu, mall_destroy_filter); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); + return true; +} + +static unsigned long mall_get(struct tcf_proto *tp, u32 handle) +{ + struct cls_mall_head *head = rtnl_dereference(tp->root); + struct cls_mall_filter *f = head->filter; + + if (f && f->handle == handle) + return (unsigned long) f; + return 0; +} + +static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { + [TCA_MATCHALL_UNSPEC] = { .type = NLA_UNSPEC }, + [TCA_MATCHALL_CLASSID] = { .type = NLA_U32 }, +}; + +static int mall_set_parms(struct net *net, struct tcf_proto *tp, + struct cls_mall_filter *f, + unsigned long base, struct nlattr **tb, + struct nlattr *est, bool ovr) +{ + struct tcf_exts e; + int err; + + tcf_exts_init(&e, TCA_MATCHALL_ACT, 0); + err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + if (err < 0) + return err; + + if (tb[TCA_MATCHALL_CLASSID]) { + f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); + tcf_bind_filter(tp, &f->res, base); + } + + tcf_exts_change(tp, &f->exts, &e); + + return 0; +} + +static int mall_change(struct net *net, struct sk_buff *in_skb, + struct tcf_proto *tp, unsigned long base, + u32 handle, struct nlattr **tca, + unsigned long *arg, bool ovr) +{ + struct cls_mall_head *head = rtnl_dereference(tp->root); + struct 
cls_mall_filter *fold = (struct cls_mall_filter *) *arg; + struct cls_mall_filter *f; + struct nlattr *tb[TCA_MATCHALL_MAX + 1]; + int err; + + if (!tca[TCA_OPTIONS]) + return -EINVAL; + + if (head->filter) + return -EBUSY; + + if (fold) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_MATCHALL_MAX, + tca[TCA_OPTIONS], mall_policy); + if (err < 0) + return err; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return -ENOBUFS; + + tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0); + + if (!handle) + handle = 1; + f->handle = handle; + + err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); + if (err) + goto errout; + + *arg = (unsigned long) f; + rcu_assign_pointer(head->filter, f); + + return 0; + +errout: + kfree(f); + return err; +} + +static int mall_delete(struct tcf_proto *tp, unsigned long arg) +{ + struct cls_mall_head *head = rtnl_dereference(tp->root); + struct cls_mall_filter *f = (struct cls_mall_filter *) arg; + + RCU_INIT_POINTER(head->filter, NULL); + tcf_unbind_filter(tp, &f->res); + call_rcu(&f->rcu, mall_destroy_filter); + return 0; +} + +static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg) +{ + struct cls_mall_head *head = rtnl_dereference(tp->root); + struct cls_mall_filter *f = head->filter; + + if (arg->count < arg->skip) + goto skip; + if (arg->fn(tp, (unsigned long) f, arg) < 0) + arg->stop = 1; +skip: + arg->count++; +} + +static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, + struct sk_buff *skb, struct tcmsg *t) +{ + struct cls_mall_filter *f = (struct cls_mall_filter *) fh; + struct nlattr *nest; + + if (!f) + return skb->len; + + t->tcm_handle = f->handle; + + nest = nla_nest_start(skb, TCA_OPTIONS); + if (!nest) + goto nla_put_failure; + + if (f->res.classid && + nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid)) + goto nla_put_failure; + + if (tcf_exts_dump(skb, &f->exts)) + goto nla_put_failure; + + nla_nest_end(skb, nest); + + if (tcf_exts_dump_stats(skb, &f->exts) < 0) + goto nla_put_failure; + + return skb->len; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -1; +} + +static struct tcf_proto_ops cls_mall_ops __read_mostly = { + .kind = "matchall", + .classify = mall_classify, + .init = mall_init, + .destroy = mall_destroy, + .get = mall_get, + .change = mall_change, + .delete = mall_delete, + .walk = mall_walk, + .dump = mall_dump, + .owner = THIS_MODULE, +}; + +static int __init cls_mall_init(void) +{ + return register_tcf_proto_ops(&cls_mall_ops); +} + +static void __exit cls_mall_exit(void) +{ + unregister_tcf_proto_ops(&cls_mall_ops); +} + +module_init(cls_mall_init); +module_exit(cls_mall_exit); + +MODULE_AUTHOR("Jiri Pirko "); +MODULE_DESCRIPTION("Match-all classifier"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From b87f7936a93246804cf70e7e2e0568799c948bb1 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Thu, 21 Jul 2016 12:03:12 +0200 Subject: net/sched: Add match-all classifier hw offloading. Following the work that have been done on offloading classifiers like u32 and flower, now the match-all classifier hw offloading is possible. if the interface supports tc offloading. To control the offloading, two tc flags have been introduced: skip_sw and skip_hw. Typical usage: tc filter add dev eth25 parent ffff: \ matchall skip_sw \ action mirred egress mirror \ dev eth27 Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 2 ++ include/net/pkt_cls.h | 11 +++++++ include/uapi/linux/pkt_cls.h | 1 + net/sched/cls_matchall.c | 76 ++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 87 insertions(+), 3 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 43c749b1b619..076df5360ba5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -787,6 +787,7 @@ enum { TC_SETUP_MQPRIO, TC_SETUP_CLSU32, TC_SETUP_CLSFLOWER, + TC_SETUP_MATCHALL, }; struct tc_cls_u32_offload; @@ -797,6 +798,7 @@ struct tc_to_netdev { u8 tc; struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; + struct tc_cls_matchall_offload *cls_mall; }; }; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 3722dda0199d..6f8d65342d3a 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -442,4 +442,15 @@ struct tc_cls_flower_offload { struct tcf_exts *exts; }; +enum tc_matchall_command { + TC_CLSMATCHALL_REPLACE, + TC_CLSMATCHALL_DESTROY, +}; + +struct tc_cls_matchall_offload { + enum tc_matchall_command command; + struct tcf_exts *exts; + unsigned long cookie; +}; + #endif diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index a32494887e01..d1c1ccaba787 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -439,6 +439,7 @@ enum { TCA_MATCHALL_UNSPEC, TCA_MATCHALL_CLASSID, TCA_MATCHALL_ACT, + TCA_MATCHALL_FLAGS, __TCA_MATCHALL_MAX, }; diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 8a6b4de7a99a..25927b6c4436 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -21,6 +21,7 @@ struct cls_mall_filter { struct tcf_result res; u32 handle; struct rcu_head rcu; + u32 flags; }; struct cls_mall_head { @@ -34,6 +35,9 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct cls_mall_head *head = rcu_dereference_bh(tp->root); struct cls_mall_filter *f = head->filter; + if (tc_skip_sw(f->flags)) + return -1; + return tcf_exts_exec(skb, &f->exts, res); } @@ -55,18 +59,61 @@ static void mall_destroy_filter(struct rcu_head *head) struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); tcf_exts_destroy(&f->exts); + kfree(f); } +static int mall_replace_hw_filter(struct tcf_proto *tp, + struct cls_mall_filter *f, + unsigned long cookie) +{ + struct net_device *dev = tp->q->dev_queue->dev; + struct tc_to_netdev offload; + struct tc_cls_matchall_offload mall_offload = {0}; + + offload.type = TC_SETUP_MATCHALL; + offload.cls_mall = &mall_offload; + offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; + offload.cls_mall->exts = &f->exts; + offload.cls_mall->cookie = cookie; + + return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, + &offload); +} + +static void mall_destroy_hw_filter(struct tcf_proto *tp, + struct cls_mall_filter *f, + unsigned long cookie) +{ + struct net_device *dev = tp->q->dev_queue->dev; + struct tc_to_netdev offload; + struct tc_cls_matchall_offload mall_offload = {0}; + + offload.type = TC_SETUP_MATCHALL; + offload.cls_mall = &mall_offload; + offload.cls_mall->command = TC_CLSMATCHALL_DESTROY; + offload.cls_mall->exts = NULL; + offload.cls_mall->cookie = cookie; + + dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, + &offload); +} + static bool mall_destroy(struct tcf_proto *tp, bool force) { struct cls_mall_head *head = rtnl_dereference(tp->root); + struct net_device *dev = tp->q->dev_queue->dev; + struct cls_mall_filter *f = head->filter; - 
if (!force && head->filter) + if (!force && f) return false; - if (head->filter) - call_rcu(&head->filter->rcu, mall_destroy_filter); + if (f) { + if (tc_should_offload(dev, tp, f->flags)) + mall_destroy_hw_filter(tp, f, (unsigned long) f); + + call_rcu(&f->rcu, mall_destroy_filter); + } RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; @@ -117,8 +164,10 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, { struct cls_mall_head *head = rtnl_dereference(tp->root); struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg; + struct net_device *dev = tp->q->dev_queue->dev; struct cls_mall_filter *f; struct nlattr *tb[TCA_MATCHALL_MAX + 1]; + u32 flags = 0; int err; if (!tca[TCA_OPTIONS]) @@ -135,6 +184,12 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; + if (tb[TCA_MATCHALL_FLAGS]) { + flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]); + if (!tc_flags_valid(flags)) + return -EINVAL; + } + f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) return -ENOBUFS; @@ -144,11 +199,22 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, if (!handle) handle = 1; f->handle = handle; + f->flags = flags; err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); if (err) goto errout; + if (tc_should_offload(dev, tp, flags)) { + err = mall_replace_hw_filter(tp, f, (unsigned long) f); + if (err) { + if (tc_skip_sw(flags)) + goto errout; + else + err = 0; + } + } + *arg = (unsigned long) f; rcu_assign_pointer(head->filter, f); @@ -163,6 +229,10 @@ static int mall_delete(struct tcf_proto *tp, unsigned long arg) { struct cls_mall_head *head = rtnl_dereference(tp->root); struct cls_mall_filter *f = (struct cls_mall_filter *) arg; + struct net_device *dev = tp->q->dev_queue->dev; + + if (tc_should_offload(dev, tp, f->flags)) + mall_destroy_hw_filter(tp, f, (unsigned long) f); RCU_INIT_POINTER(head->filter, NULL); tcf_unbind_filter(tp, &f->res); -- cgit v1.2.3 From 51ae8cc66244f19f77bc251a102b1f414586f069 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Thu, 21 Jul 2016 12:03:13 +0200 Subject: mlxsw: reg: Add Shared Buffer Internal Buffer register The SBIB register configures per port buffer for internal use. This register is used to configure an egress mirror buffer on the egress port which does the mirroring. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 41 +++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5b2a0b946f71..70bb9705c845 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5062,6 +5062,45 @@ static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index, mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index); } +/* SBIB - Shared Buffer Internal Buffer Register + * --------------------------------------------- + * The SBIB register configures per port buffers for internal use. The internal + * buffers consume memory on the port buffers (note that the port buffers are + * used also by PBMC). + * + * For Spectrum this is used for egress mirroring. 
+ */ +#define MLXSW_REG_SBIB_ID 0xB006 +#define MLXSW_REG_SBIB_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_sbib = { + .id = MLXSW_REG_SBIB_ID, + .len = MLXSW_REG_SBIB_LEN, +}; + +/* reg_sbib_local_port + * Local port number + * Not supported for CPU port and router port + * Access: Index + */ +MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8); + +/* reg_sbib_buff_size + * Units represented in cells + * Allowed range is 0 to (cap_max_headroom_size - 1) + * Default is 0 + * Access: RW + */ +MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24); + +static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port, + u32 buff_size) +{ + MLXSW_REG_ZERO(sbib, payload); + mlxsw_reg_sbib_local_port_set(payload, local_port); + mlxsw_reg_sbib_buff_size_set(payload, buff_size); +} + static inline const char *mlxsw_reg_id_str(u16 reg_id) { switch (reg_id) { @@ -5179,6 +5218,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SBMM"; case MLXSW_REG_SBSR_ID: return "SBSR"; + case MLXSW_REG_SBIB_ID: + return "SBIB"; default: return "*UNKNOWN*"; } -- cgit v1.2.3 From 43a468562030e428728be14dac583ae19046b22e Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Thu, 21 Jul 2016 12:03:14 +0200 Subject: mlxsw: reg: Add Monitoring Port Analyzer Table register The MPAT register is used to query and configure the Switch Port Analyzer (SPAN) table. This register is used to configure a port as a mirror output port, while after that a mirrored input port can be bound using MPAR register. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 54 +++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 70bb9705c845..d0b97a6779e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4633,6 +4633,58 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name); } +/* MPAT - Monitoring Port Analyzer Table + * ------------------------------------- + * MPAT Register is used to query and configure the Switch PortAnalyzer Table. + * For an enabled analyzer, all fields except e (enable) cannot be modified. + */ +#define MLXSW_REG_MPAT_ID 0x901A +#define MLXSW_REG_MPAT_LEN 0x78 + +static const struct mlxsw_reg_info mlxsw_reg_mpat = { + .id = MLXSW_REG_MPAT_ID, + .len = MLXSW_REG_MPAT_LEN, +}; + +/* reg_mpat_pa_id + * Port Analyzer ID. + * Access: Index + */ +MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4); + +/* reg_mpat_system_port + * A unique port identifier for the final destination of the packet. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16); + +/* reg_mpat_e + * Enable. Indicating the Port Analyzer is enabled. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1); + +/* reg_mpat_qos + * Quality Of Service Mode. + * 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation + * PCP, DEI, DSCP or VL) are configured. + * 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the + * same as in the original packet that has triggered the mirroring. For + * SPAN also the pcp,dei are maintained. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1); + +static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id, + u16 system_port, bool e) +{ + MLXSW_REG_ZERO(mpat, payload); + mlxsw_reg_mpat_pa_id_set(payload, pa_id); + mlxsw_reg_mpat_system_port_set(payload, system_port); + mlxsw_reg_mpat_e_set(payload, e); + mlxsw_reg_mpat_qos_set(payload, 1); +} + /* MLCR - Management LED Control Register * -------------------------------------- * Controls the system LEDs. @@ -5204,6 +5256,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "MFSM"; case MLXSW_REG_MTCAP_ID: return "MTCAP"; + case MLXSW_REG_MPAT_ID: + return "MPAT"; case MLXSW_REG_MTMP_ID: return "MTMP"; case MLXSW_REG_MLCR_ID: -- cgit v1.2.3 From 230190548b7529d02d38f450748baf0721332f26 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Thu, 21 Jul 2016 12:03:15 +0200 Subject: mlxsw: reg: Add the Monitoring Port Analyzer register The MPAR register is used to bind ports to a SPAN entry (which was created using MPAT register) and thus mirror their traffic (ingress / egress) to a different port. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index d0b97a6779e9..7ca9201f7dcb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4675,6 +4675,16 @@ MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1); */ MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1); +/* reg_mpat_be + * Best effort mode. Indicates mirroring traffic should not cause packet + * drop or back pressure, but will discard the mirrored packets. Mirrored + * packets will be forwarded on a best effort manner. + * 0: Do not discard mirrored packets + * 1: Discard mirrored packets if causing congestion + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1); + static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id, u16 system_port, bool e) { @@ -4683,6 +4693,61 @@ static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id, mlxsw_reg_mpat_system_port_set(payload, system_port); mlxsw_reg_mpat_e_set(payload, e); mlxsw_reg_mpat_qos_set(payload, 1); + mlxsw_reg_mpat_be_set(payload, 1); +} + +/* MPAR - Monitoring Port Analyzer Register + * ---------------------------------------- + * MPAR register is used to query and configure the port analyzer port mirroring + * properties. + */ +#define MLXSW_REG_MPAR_ID 0x901B +#define MLXSW_REG_MPAR_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mpar = { + .id = MLXSW_REG_MPAR_ID, + .len = MLXSW_REG_MPAR_LEN, +}; + +/* reg_mpar_local_port + * The local port to mirror the packets from. + * Access: Index + */ +MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8); + +enum mlxsw_reg_mpar_i_e { + MLXSW_REG_MPAR_TYPE_EGRESS, + MLXSW_REG_MPAR_TYPE_INGRESS, +}; + +/* reg_mpar_i_e + * Ingress/Egress + * Access: Index + */ +MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4); + +/* reg_mpar_enable + * Enable mirroring + * By default, port mirroring is disabled for all ports. + * Access: RW + */ +MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1); + +/* reg_mpar_pa_id + * Port Analyzer ID. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4); + +static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, + enum mlxsw_reg_mpar_i_e i_e, + bool enable, u8 pa_id) +{ + MLXSW_REG_ZERO(mpar, payload); + mlxsw_reg_mpar_local_port_set(payload, local_port); + mlxsw_reg_mpar_enable_set(payload, enable); + mlxsw_reg_mpar_i_e_set(payload, i_e); + mlxsw_reg_mpar_pa_id_set(payload, pa_id); } /* MLCR - Management LED Control Register @@ -5258,6 +5323,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "MTCAP"; case MLXSW_REG_MPAT_ID: return "MPAT"; + case MLXSW_REG_MPAR_ID: + return "MPAR"; case MLXSW_REG_MTMP_ID: return "MTMP"; case MLXSW_REG_MLCR_ID: -- cgit v1.2.3 From 56a20680f70393d199fc5e8ecc7859549ca5a0c0 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Thu, 21 Jul 2016 12:03:16 +0200 Subject: net/sched: act_mirred: Add helper inlines to access tcf_mirred info. The helper function is_tcf_mirred_mirror helps finding whether an action struct is of type mirred and is configured to be of type mirror. Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/tc_act/tc_mirred.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index e891835eb74e..6a13a7c74e0c 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h @@ -24,6 +24,15 @@ static inline bool is_tcf_mirred_redirect(const struct tc_action *a) return false; } +static inline bool is_tcf_mirred_mirror(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->type == TCA_ACT_MIRRED) + return to_mirred(a)->tcfm_eaction == TCA_EGRESS_MIRROR; +#endif + return false; +} + static inline int tcf_mirred_ifindex(const struct tc_action *a) { return to_mirred(a)->tcfm_ifindex; -- cgit v1.2.3 From 763b4b70afcd3289fedfba6fb681e192a2291a95 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Thu, 21 Jul 2016 12:03:17 +0200 Subject: mlxsw: spectrum: Add support in matchall mirror TC offloading This patch offloads port mirroring directives to hw using the matchall TC with action mirror. It includes both the implementation of the ndo_setup_tc function for the spectrum driver and the spectrum hardware offload configuration code. The hardware offload code is basically two new functions which are capable of adding and removing a new mirror ports pair. It is done using the MPAT, MPAR and SBIB registers: - A new Switch-Port Analyzer (SPAN) entry is added using MPAT to the 'to' port. - The 'to' port is bound to the SPAN entry using MPAR register. - In case of egress SPAN, the 'to' port gets a new internal shared buffer using SBIB register. In addition, a new database was added to the mlxsw_sp struct to store all the SPAN entries and their bound ports list. The number of supported SPAN entries is determined by resource query. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 464 ++++++++++++++++++++++++- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 44 +++ 2 files changed, 507 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 80172fc8bdd5..552636b73416 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -54,6 +54,8 @@ #include #include #include +#include +#include #include "spectrum.h" #include "core.h" @@ -133,6 +135,8 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); */ MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); +static bool mlxsw_sp_port_dev_check(const struct net_device *dev); + static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -161,6 +165,303 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_resources *resources; + int i; + + resources = mlxsw_core_resources_get(mlxsw_sp->core); + if (!resources->max_span_valid) + return -EIO; + + mlxsw_sp->span.entries_count = resources->max_span; + mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, + sizeof(struct mlxsw_sp_span_entry), + GFP_KERNEL); + if (!mlxsw_sp->span.entries) + return -ENOMEM; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) + INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list); + + return 0; +} + +static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + WARN_ON_ONCE(!list_empty(&curr->bound_ports_list)); + } + kfree(mlxsw_sp->span.entries); +} + +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + struct mlxsw_sp_span_entry *span_entry; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + u8 local_port = port->local_port; + int index; + int i; + int err; + + /* find a free entry to use */ + index = -1; + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + if (!mlxsw_sp->span.entries[i].used) { + index = i; + span_entry = &mlxsw_sp->span.entries[i]; + break; + } + } + if (index < 0) + return NULL; + + /* create a new port analayzer entry for local_port */ + mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); + if (err) + return NULL; + + span_entry->used = true; + span_entry->id = index; + span_entry->ref_count = 0; + span_entry->local_port = local_port; + return span_entry; +} + +static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry) +{ + u8 local_port = span_entry->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); + span_entry->used = false; +} + +struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + if (curr->used && curr->local_port == port->local_port) + return curr; + } + return NULL; +} + +struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp_span_entry *span_entry; + + 
span_entry = mlxsw_sp_span_entry_find(port); + if (span_entry) { + span_entry->ref_count++; + return span_entry; + } + + return mlxsw_sp_span_entry_create(port); +} + +static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry) +{ + if (--span_entry->ref_count == 0) + mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); + return 0; +} + +static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + struct mlxsw_sp_span_inspected_port *p; + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + list_for_each_entry(p, &curr->bound_ports_list, list) + if (p->local_port == port->local_port && + p->type == MLXSW_SP_SPAN_EGRESS) + return true; + } + + return false; +} + +static int mlxsw_sp_span_mtu_to_buffsize(int mtu) +{ + return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1; +} + +static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int err; + + /* If port is egress mirrored, the shared buffer size should be + * updated according to the mtu value + */ + if (mlxsw_sp_span_is_egress_mirror(port)) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, + mlxsw_sp_span_mtu_to_buffsize(mtu)); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); + return err; + } + } + + return 0; +} + +static struct mlxsw_sp_span_inspected_port * +mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry) +{ + struct mlxsw_sp_span_inspected_port *p; + + list_for_each_entry(p, &span_entry->bound_ports_list, list) + if (port->local_port == p->local_port) + return p; + return NULL; +} + +static int +mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char mpar_pl[MLXSW_REG_MPAR_LEN]; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int pa_id = span_entry->id; + int err; + + /* if it is an egress SPAN, bind a shared buffer to it */ + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, + mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu)); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); + return err; + } + } + + /* bind the port to the SPAN entry */ + mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); + if (err) + goto err_mpar_reg_write; + + inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL); + if (!inspected_port) { + err = -ENOMEM; + goto err_inspected_port_alloc; + } + inspected_port->local_port = port->local_port; + inspected_port->type = type; + list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); + + return 0; + +err_mpar_reg_write: +err_inspected_port_alloc: + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + return err; +} + +static void +mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type 
type) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char mpar_pl[MLXSW_REG_MPAR_LEN]; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int pa_id = span_entry->id; + + inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); + if (!inspected_port) + return; + + /* remove the inspected port */ + mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); + + /* remove the SBIB buffer if it was egress SPAN */ + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + + list_del(&inspected_port->list); + kfree(inspected_port); +} + +static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + struct mlxsw_sp_port *to, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; + struct mlxsw_sp_span_entry *span_entry; + int err; + + span_entry = mlxsw_sp_span_entry_get(to); + if (!span_entry) + return -ENOENT; + + netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", + span_entry->id); + + err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type); + if (err) + goto err_port_bind; + + return 0; + +err_port_bind: + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + return err; +} + +static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, + struct mlxsw_sp_port *to, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp_span_entry *span_entry; + + span_entry = mlxsw_sp_span_entry_find(to); + if (!span_entry) { + netdev_err(from->dev, "no span entry found\n"); + return; + } + + netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", + span_entry->id); + mlxsw_sp_span_inspected_port_unbind(from, span_entry, type); +} + static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_up) { @@ -493,6 +794,9 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); if (err) return err; + err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); + if (err) + goto err_span_port_mtu_update; err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); if (err) goto err_port_mtu_set; @@ -500,6 +804,8 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) return 0; err_port_mtu_set: + mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); +err_span_port_mtu_update: mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); return err; } @@ -776,10 +1082,155 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, return 0; } +static struct mlxsw_sp_port_mall_tc_entry * +mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, + unsigned long cookie) { + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + + list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) + if (mall_tc_entry->cookie == cookie) + return mall_tc_entry; + + return NULL; +} + +static int +mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls, + const struct tc_action *a, + bool ingress) +{ + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + struct net *net = dev_net(mlxsw_sp_port->dev); + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; + struct net_device *to_dev; + int ifindex; + int err; + + ifindex = tcf_mirred_ifindex(a); + to_dev = __dev_get_by_index(net, ifindex); + 
if (!to_dev) { + netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); + return -EINVAL; + } + + if (!mlxsw_sp_port_dev_check(to_dev)) { + netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port"); + return -ENOTSUPP; + } + to_port = netdev_priv(to_dev); + + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + + mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; + mall_tc_entry->mirror.to_local_port = to_port->local_port; + mall_tc_entry->mirror.ingress = ingress; + list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); + + span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); + if (err) + goto err_mirror_add; + return 0; + +err_mirror_add: + list_del(&mall_tc_entry->list); + kfree(mall_tc_entry); + return err; +} + +static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + __be16 protocol, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + struct tcf_exts *exts = cls->exts; + const struct tc_action *a; + int err; + + if (!list_is_singular(&exts->actions)) { + netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); + return -ENOTSUPP; + } + + a = list_first_entry(&exts->actions, struct tc_action, list); + if (is_tcf_mirred_mirror(a) && protocol == htons(ETH_P_ALL)) { + err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls, + a, ingress); + if (err) + return err; + } else { + return -ENOTSUPP; + } + + return 0; +} + +static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; + + mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port, + cls->cookie); + if (!mall_tc_entry) { + netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); + return; + } + + switch (mall_tc_entry->type) { + case MLXSW_SP_PORT_MALL_MIRROR: + to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port]; + span_type = mall_tc_entry->mirror.ingress ? 
+ MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + break; + default: + WARN_ON(1); + } + + list_del(&mall_tc_entry->list); + kfree(mall_tc_entry); +} + +static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *tc) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + + if (tc->type == TC_SETUP_MATCHALL) { + switch (tc->cls_mall->command) { + case TC_CLSMATCHALL_REPLACE: + return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, + proto, + tc->cls_mall, + ingress); + case TC_CLSMATCHALL_DESTROY: + mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, + tc->cls_mall); + return 0; + default: + return -EINVAL; + } + } + + return -ENOTSUPP; +} + static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, .ndo_start_xmit = mlxsw_sp_port_xmit, + .ndo_setup_tc = mlxsw_sp_setup_tc, .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, .ndo_change_mtu = mlxsw_sp_port_change_mtu, @@ -1657,6 +2108,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_untagged_vlans_alloc; } INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); + INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); mlxsw_sp_port->pcpu_stats = netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); @@ -1678,7 +2130,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, netif_carrier_off(dev); dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; + dev->hw_features |= NETIF_F_HW_TC; /* Each packet needs to have a Tx header (metadata) on top all other * headers. 
@@ -2410,6 +2863,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_router_init; } + err = mlxsw_sp_span_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); + goto err_span_init; + } + err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); @@ -2419,6 +2878,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return 0; err_ports_create: + mlxsw_sp_span_fini(mlxsw_sp); +err_span_init: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -2439,6 +2900,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) int i; mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index ef4ac8987a2a..f69aa37d1521 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -214,6 +214,43 @@ struct mlxsw_sp_vr { struct mlxsw_sp_fib *fib; }; +enum mlxsw_sp_span_type { + MLXSW_SP_SPAN_EGRESS, + MLXSW_SP_SPAN_INGRESS +}; + +struct mlxsw_sp_span_inspected_port { + struct list_head list; + enum mlxsw_sp_span_type type; + u8 local_port; +}; + +struct mlxsw_sp_span_entry { + u8 local_port; + bool used; + struct list_head bound_ports_list; + int ref_count; + int id; +}; + +enum mlxsw_sp_port_mall_action_type { + MLXSW_SP_PORT_MALL_MIRROR, +}; + +struct mlxsw_sp_port_mall_mirror_tc_entry { + u8 to_local_port; + bool ingress; +}; + +struct mlxsw_sp_port_mall_tc_entry { + struct list_head list; + unsigned long cookie; + enum mlxsw_sp_port_mall_action_type type; + union { + struct mlxsw_sp_port_mall_mirror_tc_entry mirror; + }; +}; + struct mlxsw_sp_router { struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT]; struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX]; @@ -260,6 +297,11 @@ struct mlxsw_sp { struct { DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); } kvdl; + + struct { + struct mlxsw_sp_span_entry *entries; + int entries_count; + } span; }; static inline struct mlxsw_sp_upper * @@ -316,6 +358,8 @@ struct mlxsw_sp_port { unsigned long *untagged_vlans; /* VLAN interfaces */ struct list_head vports_list; + /* TC handles */ + struct list_head mall_tc_list; }; struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); -- cgit v1.2.3
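
Notes on the series, with illustrative sketches. The resources-query patches split the work between the bus and the drivers: the PCI bus layer fills struct mlxsw_resources while bringing the device up, and drivers read it back through mlxsw_core_resources_get(). Each field carries its own validity bit, so a consumer must check max_span_valid before trusting max_span; the bit stays clear when the profile sets resource_query_enable to 0 or the firmware does not expose the resource. A minimal consumer sketch, assuming only the API added above (the function name and the -EIO fallback are illustrative, not taken from the patches):

#include "core.h"	/* mlxsw_core_resources_get(), struct mlxsw_resources */

/* Return the queried SPAN limit, or -EIO if the firmware did not
 * report resource id 0x2420 (MAX_SPAN).
 */
static int example_get_span_limit(struct mlxsw_core *core)
{
	struct mlxsw_resources *resources = mlxsw_core_resources_get(core);

	if (!resources->max_span_valid)
		return -EIO;	/* query disabled or resource not exposed */

	return resources->max_span;
}

This is the same check mlxsw_sp_span_init() performs before sizing its SPAN entry array.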
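
On the classifier side, the offload patch defines the driver contract: cls_matchall invokes ndo_setup_tc() with type TC_SETUP_MATCHALL and a struct tc_cls_matchall_offload that carries the command (replace or destroy), the filter's actions in exts, and an opaque cookie identifying the filter instance. Stripped of the mlxsw specifics, the dispatch a driver needs reduces to the skeleton below (the function name is illustrative; mlxsw_sp_setup_tc() in the last patch is the real implementation):

#include <linux/netdevice.h>	/* struct tc_to_netdev, TC_SETUP_MATCHALL */
#include <net/pkt_cls.h>	/* struct tc_cls_matchall_offload */

static int example_setup_tc(struct net_device *dev, u32 handle,
			    __be16 proto, struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MATCHALL)
		return -ENOTSUPP;

	switch (tc->cls_mall->command) {
	case TC_CLSMATCHALL_REPLACE:
		/* walk tc->cls_mall->exts, program the hardware and
		 * remember tc->cls_mall->cookie for a later destroy
		 */
		return 0;
	case TC_CLSMATCHALL_DESTROY:
		/* look up tc->cls_mall->cookie and undo the programming */
		return 0;
	default:
		return -EINVAL;
	}
}

The cookie is what lets the destroy call find the state created by the matching replace; the spectrum driver stores it in its mall_tc_list entries.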
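
The mirroring offload itself is easiest to read as a three-register sequence built from the pack helpers added earlier in the series: MPAT creates the port-analyzer (SPAN) entry on the destination port, SBIB reserves an internal buffer on the mirroring port (needed for egress SPAN only), and MPAR binds the mirrored port to the SPAN entry. A condensed sketch of the egress case; the function name and the caller-supplied buffer size are illustrative, and the driver actually spreads these writes across mlxsw_sp_span_entry_create() and mlxsw_sp_span_inspected_port_bind(), deriving the buffer size from the MTU:

#include "core.h"	/* mlxsw_reg_write() */
#include "reg.h"	/* MPAT/MPAR/SBIB pack helpers */

static int example_egress_mirror_bind(struct mlxsw_core *core, u8 from_port,
				      u8 to_port, u8 pa_id,
				      u32 buf_size_cells)
{
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int err;

	/* 1. Create the SPAN entry pointing at the destination port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, to_port, true);
	err = mlxsw_reg_write(core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return err;

	/* 2. Give the mirroring port an internal egress buffer. */
	mlxsw_reg_sbib_pack(sbib_pl, from_port, buf_size_cells);
	err = mlxsw_reg_write(core, MLXSW_REG(sbib), sbib_pl);
	if (err)
		return err;

	/* 3. Bind the mirrored port's egress traffic to the SPAN entry. */
	mlxsw_reg_mpar_pack(mpar_pl, from_port, MLXSW_REG_MPAR_TYPE_EGRESS,
			    true, pa_id);
	return mlxsw_reg_write(core, MLXSW_REG(mpar), mpar_pl);
}

Tearing the mirror down runs the same registers with enable/buff_size set back to false/0, which is what mlxsw_sp_span_inspected_port_unbind() and mlxsw_sp_span_entry_destroy() do.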