-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile      |    6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h          |   23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c       |  824  (renamed from drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c)
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c     |    7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c     |  291
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h     |   15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/flow_table.c  |  422
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c      |  239
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h      |   66  (renamed from include/linux/mlx5/flow_table.h)
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c     | 1047
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h     |  155
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c          |    2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c        |    9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h   |    3
-rw-r--r--  include/linux/mlx5/driver.h                           |    2
-rw-r--r--  include/linux/mlx5/fs.h                               |   93
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h                         |   32
17 files changed, 2185 insertions(+), 1051 deletions(-)
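
This commit converts the mlx5 driver from the driver-private flow table API (flow_table.c, deleted) to the shared flow steering infrastructure (fs_cmd.c/fs_core.c, added). Consumers now obtain a namespace, create a sized table in it, carve the table into flow groups, and add rules that come back as opaque handles. A minimal sketch of that lifecycle, assembled only from calls visible in the hunks below; match-criteria buffer setup and full error unwinding are elided, and priority 0 plus the KERNEL namespace mirror en_fs.c:

/*
 * Sketch: consumer-side lifecycle of the new flow steering API.
 * fg_in is a create_flow_group_in buffer; mc/mv are fte_match_param
 * sized match criteria/value buffers (allocation elided).
 */
static int flow_steering_example(struct mlx5_core_dev *mdev, int table_size,
				 u32 *fg_in, u32 *mc, u32 *mv, u32 tirn)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_rule *rule;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!ns)
		return -EINVAL;

	ft = mlx5_create_flow_table(ns, 0 /* prio */, table_size);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	fg = mlx5_create_flow_group(ft, fg_in);
	if (IS_ERR(fg))
		return PTR_ERR(fg);	/* real code destroys ft first */

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;
	rule = mlx5_add_flow_rule(ft, MLX5_MATCH_OUTER_HEADERS, mc, mv,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);
	if (IS_ERR_OR_NULL(rule))
		return PTR_ERR(rule);

	/* teardown is the reverse order */
	mlx5_del_flow_rule(rule);
	mlx5_destroy_flow_group(fg);
	mlx5_destroy_flow_table(ft);
	return 0;
}
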
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a0755919ccaf..fe11e967095f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o transobj.o vport.o sriov.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o eswitch.o \
- en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
+ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 89313d46952d..f689ce580b44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -64,6 +64,8 @@
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
+#define MLX5E_NUM_MAIN_GROUPS 9
+
static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */
"rx_packets",
@@ -442,7 +444,7 @@ enum mlx5e_rqt_ix {
struct mlx5e_eth_addr_info {
u8 addr[ETH_ALEN + 2];
u32 tt_vec;
- u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+ struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
};
#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
@@ -466,15 +468,22 @@ enum {
struct mlx5e_vlan_db {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u32 active_vlans_ft_ix[VLAN_N_VID];
- u32 untagged_rule_ft_ix;
- u32 any_vlan_rule_ft_ix;
+ struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
+ struct mlx5_flow_rule *untagged_rule;
+ struct mlx5_flow_rule *any_vlan_rule;
bool filter_disabled;
};
struct mlx5e_flow_table {
- void *vlan;
- void *main;
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
+};
+
+struct mlx5e_flow_tables {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5e_flow_table vlan;
+ struct mlx5e_flow_table main;
};
struct mlx5e_priv {
@@ -497,7 +506,7 @@ struct mlx5e_priv {
u32 rqtn[MLX5E_NUM_RQT];
u32 tirn[MLX5E_NUM_TT];
- struct mlx5e_flow_table ft;
+ struct mlx5e_flow_tables fts;
struct mlx5e_eth_addr_db eth_addr;
struct mlx5e_vlan_db vlan;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 5b93c9c6e341..80d81abc4820 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -34,9 +34,11 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
-#include <linux/mlx5/flow_table.h>
+#include <linux/mlx5/fs.h>
#include "en.h"
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
enum {
MLX5E_FULLMATCH = 0,
MLX5E_ALLMULTI = 1,
@@ -103,44 +105,38 @@ static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai)
{
- void *ft = priv->ft.main;
-
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
- mlx5_del_flow_table_entry(ft,
- ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
- mlx5_del_flow_table_entry(ft,
- ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
- mlx5_del_flow_table_entry(ft,
- ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
- mlx5_del_flow_table_entry(ft,
- ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
- mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
- mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
- mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
- mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
- mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
- mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
if (ai->tt_vec & BIT(MLX5E_TT_ANY))
- mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+ mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
}
static int mlx5e_get_eth_addr_type(u8 *addr)
@@ -240,44 +236,34 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
}
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai, int type,
- void *flow_context, void *match_criteria)
+ struct mlx5e_eth_addr_info *ai,
+ int type, u32 *mc, u32 *mv)
{
+ struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0;
- void *match_value;
- void *dest;
- u8 *dmac;
- u8 *match_criteria_dmac;
- void *ft = priv->ft.main;
- u32 *tirn = priv->tirn;
- u32 *ft_ix;
- u32 tt_vec;
- int err;
-
- match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
- dmac = MLX5_ADDR_OF(fte_match_param, match_value,
- outer_headers.dmac_47_16);
- match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
- outer_headers.dmac_47_16);
- dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
-
- MLX5_SET(flow_context, flow_context, action,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
- MLX5_SET(flow_context, flow_context, destination_list_size, 1);
- MLX5_SET(dest_format_struct, dest, destination_type,
- MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+ struct mlx5_flow_rule **rule_p;
+ struct mlx5_flow_table *ft = priv->fts.main.t;
+ u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+ outer_headers.dmac_47_16);
+ u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
+ outer_headers.dmac_47_16);
+ u32 *tirn = priv->tirn;
+ u32 tt_vec;
+ int err = 0;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
case MLX5E_FULLMATCH:
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- memset(match_criteria_dmac, 0xff, ETH_ALEN);
- ether_addr_copy(dmac, ai->addr);
+ eth_broadcast_addr(mc_dmac);
+ ether_addr_copy(mv_dmac, ai->addr);
break;
case MLX5E_ALLMULTI:
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- match_criteria_dmac[0] = 0x01;
- dmac[0] = 0x01;
+ mc_dmac[0] = 0x01;
+ mv_dmac[0] = 0x01;
break;
case MLX5E_PROMISC:
@@ -286,190 +272,165 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
tt_vec = mlx5e_get_tt_vec(ai, type);
- ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
if (tt_vec & BIT(MLX5E_TT_ANY)) {
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_ANY]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ rule_p = &ai->ft_rule[MLX5E_TT_ANY];
+ dest.tir_num = tirn[MLX5E_TT_ANY];
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_ANY);
}
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
if (tt_vec & BIT(MLX5E_TT_IPV4)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
+ dest.tir_num = tirn[MLX5E_TT_IPV4];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV4]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV4);
}
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
if (tt_vec & BIT(MLX5E_TT_IPV6)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
+ dest.tir_num = tirn[MLX5E_TT_IPV6];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV6]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV6);
}
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
- IPPROTO_UDP);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
+ dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV4_UDP]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
}
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
+ dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV6_UDP]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
}
- MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
- IPPROTO_TCP);
+ MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
+ dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV4_TCP]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
}
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
+ dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV6_TCP]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
}
- MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
- IPPROTO_AH);
+ MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
+ dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV4_IPSEC_AH]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
}
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
+ dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV6_IPSEC_AH]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
}
- MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
- IPPROTO_ESP);
+ MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
+ dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IP);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
}
- ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
+ dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
+ MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETH_P_IPV6);
- MLX5_SET(dest_format_struct, dest, destination_id,
- tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
- err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
- match_criteria, flow_context,
- ft_ix);
- if (err)
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
-
ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
}
return 0;
err_del_ai:
+ err = PTR_ERR(*rule_p);
+ *rule_p = NULL;
mlx5e_del_eth_addr_from_flow_table(priv, ai);
return err;
@@ -478,27 +439,25 @@ err_del_ai:
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai, int type)
{
- u32 *flow_context;
u32 *match_criteria;
- int err;
+ u32 *match_value;
+ int err = 0;
- flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
- MLX5_ST_SZ_BYTES(dest_format_struct));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!flow_context || !match_criteria) {
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto add_eth_addr_rule_out;
}
- err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
- match_criteria);
- if (err)
- netdev_err(priv->netdev, "%s: failed\n", __func__);
+ err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
+ match_value);
add_eth_addr_rule_out:
kvfree(match_criteria);
- kvfree(flow_context);
+ kvfree(match_value);
+
return err;
}
@@ -551,72 +510,77 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
- enum mlx5e_vlan_rule_type rule_type, u16 vid)
+static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type,
+ u16 vid, u32 *mc, u32 *mv)
{
+ struct mlx5_flow_table *ft = priv->fts.vlan.t;
+ struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0;
- u32 *flow_context;
- void *match_value;
- void *dest;
- u32 *match_criteria;
- u32 *ft_ix;
- int err;
+ struct mlx5_flow_rule **rule_p;
+ int err = 0;
- flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
- MLX5_ST_SZ_BYTES(dest_format_struct));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!flow_context || !match_criteria) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_vlan_rule_out;
- }
- match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
- dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
-
- MLX5_SET(flow_context, flow_context, action,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
- MLX5_SET(flow_context, flow_context, destination_list_size, 1);
- MLX5_SET(dest_format_struct, dest, destination_type,
- MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
- MLX5_SET(dest_format_struct, dest, destination_id,
- mlx5_get_flow_table_id(priv->ft.main));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fts.main.t;
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- ft_ix = &priv->vlan.untagged_rule_ft_ix;
+ rule_p = &priv->vlan.untagged_rule;
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
- MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
- 1);
+ rule_p = &priv->vlan.any_vlan_rule;
+ MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
- err = mlx5e_vport_context_update_vlans(priv);
- if (err)
- goto add_vlan_rule_out;
-
- ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
- MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
- 1);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- outer_headers.first_vid);
- MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
- vid);
+ rule_p = &priv->vlan.active_vlans_rule[vid];
+ MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
break;
}
- err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
- match_criteria, flow_context, ft_ix);
- if (err)
- netdev_err(priv->netdev, "%s: failed\n", __func__);
+ *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ &dest);
+
+ if (IS_ERR(*rule_p)) {
+ err = PTR_ERR(*rule_p);
+ *rule_p = NULL;
+ netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ }
+
+ return err;
+}
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_vlan_rule_out;
+ }
+
+ if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
+ mlx5e_vport_context_update_vlans(priv);
+
+ err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
+ match_value);
add_vlan_rule_out:
kvfree(match_criteria);
- kvfree(flow_context);
+ kvfree(match_value);
+
return err;
}
@@ -625,16 +589,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- mlx5_del_flow_table_entry(priv->ft.vlan,
- priv->vlan.untagged_rule_ft_ix);
+ if (priv->vlan.untagged_rule) {
+ mlx5_del_flow_rule(priv->vlan.untagged_rule);
+ priv->vlan.untagged_rule = NULL;
+ }
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- mlx5_del_flow_table_entry(priv->ft.vlan,
- priv->vlan.any_vlan_rule_ft_ix);
+ if (priv->vlan.any_vlan_rule) {
+ mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
+ priv->vlan.any_vlan_rule = NULL;
+ }
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
- mlx5_del_flow_table_entry(priv->ft.vlan,
- priv->vlan.active_vlans_ft_ix[vid]);
+ mlx5e_vport_context_update_vlans(priv);
+ if (priv->vlan.active_vlans_rule[vid]) {
+ mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
+ priv->vlan.active_vlans_rule[vid] = NULL;
+ }
mlx5e_vport_context_update_vlans(priv);
break;
}
@@ -889,151 +860,358 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
mlx5e_vport_context_update(priv);
}
+static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
+{
+ int i;
+
+ for (i = ft->num_groups - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(ft->g[i]))
+ mlx5_destroy_flow_group(ft->g[i]);
+ ft->g[i] = NULL;
+ }
+ ft->num_groups = 0;
+}
+
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}
-static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+#define MLX5E_MAIN_GROUP0_SIZE BIT(3)
+#define MLX5E_MAIN_GROUP1_SIZE BIT(1)
+#define MLX5E_MAIN_GROUP2_SIZE BIT(0)
+#define MLX5E_MAIN_GROUP3_SIZE BIT(14)
+#define MLX5E_MAIN_GROUP4_SIZE BIT(13)
+#define MLX5E_MAIN_GROUP5_SIZE BIT(11)
+#define MLX5E_MAIN_GROUP6_SIZE BIT(2)
+#define MLX5E_MAIN_GROUP7_SIZE BIT(1)
+#define MLX5E_MAIN_GROUP8_SIZE BIT(0)
+#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
+ MLX5E_MAIN_GROUP1_SIZE +\
+ MLX5E_MAIN_GROUP2_SIZE +\
+ MLX5E_MAIN_GROUP3_SIZE +\
+ MLX5E_MAIN_GROUP4_SIZE +\
+ MLX5E_MAIN_GROUP5_SIZE +\
+ MLX5E_MAIN_GROUP6_SIZE +\
+ MLX5E_MAIN_GROUP7_SIZE +\
+ MLX5E_MAIN_GROUP8_SIZE)
+
+static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
+ int inlen)
{
- struct mlx5_flow_table_group *g;
- u8 *dmac;
+ u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
+ match_criteria.outer_headers.dmac_47_16);
+ int err;
+ int ix = 0;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP0_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ eth_broadcast_addr(dmac);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP3_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ eth_broadcast_addr(dmac);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP4_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ eth_broadcast_addr(dmac);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP5_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ dmac[0] = 0x01;
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP6_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ dmac[0] = 0x01;
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP7_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ dmac[0] = 0x01;
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_MAIN_GROUP8_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ return 0;
+
+err_destroy_groups:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ mlx5e_destroy_groups(ft);
+
+ return err;
+}
- g = kcalloc(9, sizeof(*g), GFP_KERNEL);
- if (!g)
+static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
+{
+ u32 *in;
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
return -ENOMEM;
- g[0].log_sz = 3;
- g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
- outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
- outer_headers.ip_protocol);
-
- g[1].log_sz = 1;
- g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
- outer_headers.ethertype);
-
- g[2].log_sz = 0;
-
- g[3].log_sz = 14;
- g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
- outer_headers.dmac_47_16);
- memset(dmac, 0xff, ETH_ALEN);
- MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
- outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
- outer_headers.ip_protocol);
-
- g[4].log_sz = 13;
- g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
- outer_headers.dmac_47_16);
- memset(dmac, 0xff, ETH_ALEN);
- MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
- outer_headers.ethertype);
-
- g[5].log_sz = 11;
- g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
- outer_headers.dmac_47_16);
- memset(dmac, 0xff, ETH_ALEN);
-
- g[6].log_sz = 2;
- g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
- outer_headers.dmac_47_16);
- dmac[0] = 0x01;
- MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
- outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
- outer_headers.ip_protocol);
-
- g[7].log_sz = 1;
- g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
- outer_headers.dmac_47_16);
- dmac[0] = 0x01;
- MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
- outer_headers.ethertype);
+ err = __mlx5e_create_main_groups(ft, in, inlen);
- g[8].log_sz = 0;
- g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
- outer_headers.dmac_47_16);
- dmac[0] = 0x01;
- priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
- MLX5_FLOW_TABLE_TYPE_NIC_RCV,
- 9, g);
- kfree(g);
+ kvfree(in);
+ return err;
+}
- return priv->ft.main ? 0 : -ENOMEM;
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *ft = &priv->fts.main;
+ int err;
+
+ ft->num_groups = 0;
+ ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_MAIN_TABLE_SIZE);
+
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+ ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g) {
+ err = -ENOMEM;
+ goto err_destroy_main_flow_table;
+ }
+
+ err = mlx5e_create_main_groups(ft);
+ if (err)
+ goto err_free_g;
+ return 0;
+
+err_free_g:
+ kfree(ft->g);
+
+err_destroy_main_flow_table:
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+
+ return err;
+}
+
+static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
+{
+ mlx5e_destroy_groups(ft);
+ kfree(ft->g);
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
}
static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_flow_table(priv->ft.main);
+ mlx5e_destroy_flow_table(&priv->fts.main);
}
-static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+#define MLX5E_NUM_VLAN_GROUPS 2
+#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
+#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
+#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
+ MLX5E_VLAN_GROUP1_SIZE)
+
+static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
+ int inlen)
+{
+ int err;
+ int ix = 0;
+ u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP0_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
+ return 0;
+
+err_destroy_groups:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ mlx5e_destroy_groups(ft);
+
+ return err;
+}
+
+static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
- struct mlx5_flow_table_group *g;
+ u32 *in;
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int err;
- g = kcalloc(2, sizeof(*g), GFP_KERNEL);
- if (!g)
+ in = mlx5_vzalloc(inlen);
+ if (!in)
return -ENOMEM;
- g[0].log_sz = 12;
- g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
- outer_headers.vlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
- outer_headers.first_vid);
-
- /* untagged + any vlan id */
- g[1].log_sz = 1;
- g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
- outer_headers.vlan_tag);
-
- priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
- MLX5_FLOW_TABLE_TYPE_NIC_RCV,
- 2, g);
-
- kfree(g);
- return priv->ft.vlan ? 0 : -ENOMEM;
+ err = __mlx5e_create_vlan_groups(ft, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *ft = &priv->fts.vlan;
+ int err;
+
+ ft->num_groups = 0;
+ ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_VLAN_TABLE_SIZE);
+
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+ ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g) {
+ err = -ENOMEM;
+ goto err_destroy_vlan_flow_table;
+ }
+
+ err = mlx5e_create_vlan_groups(ft);
+ if (err)
+ goto err_free_g;
+
+ return 0;
+
+err_free_g:
+ kfree(ft->g);
+
+err_destroy_vlan_flow_table:
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+
+ return err;
}
static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_flow_table(priv->ft.vlan);
+ mlx5e_destroy_flow_table(&priv->fts.vlan);
}
int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
int err;
- err = mlx5e_create_main_flow_table(priv);
+ priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
+
+ if (!priv->fts.ns)
+ return -EINVAL;
+
+ err = mlx5e_create_vlan_flow_table(priv);
if (err)
return err;
- err = mlx5e_create_vlan_flow_table(priv);
+ err = mlx5e_create_main_flow_table(priv);
if (err)
- goto err_destroy_main_flow_table;
+ goto err_destroy_vlan_flow_table;
err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
if (err)
- goto err_destroy_vlan_flow_table;
+ goto err_destroy_main_flow_table;
return 0;
-err_destroy_vlan_flow_table:
- mlx5e_destroy_vlan_flow_table(priv);
-
err_destroy_main_flow_table:
mlx5e_destroy_main_flow_table(priv);
+err_destroy_vlan_flow_table:
+ mlx5e_destroy_vlan_flow_table(priv);
return err;
}
@@ -1041,6 +1219,6 @@ err_destroy_main_flow_table:
void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- mlx5e_destroy_vlan_flow_table(priv);
mlx5e_destroy_main_flow_table(priv);
+ mlx5e_destroy_vlan_flow_table(priv);
}
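
Worth noting in en_fs.c above: each flow group occupies a contiguous range of flow indices, so a group of size S placed at index ix covers entries ix .. ix+S-1, and MLX5E_MAIN_TABLE_SIZE (8 + 2 + 1 + 16384 + 8192 + 2048 + 4 + 2 + 1 = 26642 entries) is exactly the sum of the nine group sizes. A hypothetical table-driven condensation of __mlx5e_create_main_groups() that makes the carving arithmetic explicit; the group_sizes array and carve_groups() helper are illustrative, not part of the driver, and per-group match criteria setup is elided:

/*
 * Hypothetical condensation: walk a size table instead of open-coding
 * nine nearly identical blocks.  Uses the MLX5_SET_CFG macro defined
 * at the top of en_fs.c.
 */
static const int group_sizes[MLX5E_NUM_MAIN_GROUPS] = {
	BIT(3), BIT(1), BIT(0), BIT(14), BIT(13),
	BIT(11), BIT(2), BIT(1), BIT(0),
};

static int carve_groups(struct mlx5e_flow_table *ft, u32 *in, int inlen)
{
	int ix = 0;
	int i;

	for (i = 0; i < MLX5E_NUM_MAIN_GROUPS; i++) {
		memset(in, 0, inlen);
		/* per-group match_criteria_enable/criteria set here */
		MLX5_SET_CFG(in, start_flow_index, ix);
		ix += group_sizes[i];
		MLX5_SET_CFG(in, end_flow_index, ix - 1);
		ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
		if (IS_ERR(ft->g[ft->num_groups]))
			return PTR_ERR(ft->g[ft->num_groups]);
		ft->num_groups++;
	}
	/* ix == MLX5E_MAIN_TABLE_SIZE here */
	return 0;
}
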
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d67058afe87e..d4601a564699 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -30,7 +30,7 @@
* SOFTWARE.
*/
-#include <linux/mlx5/flow_table.h>
+#include <linux/mlx5/fs.h>
#include "en.h"
#include "eswitch.h"
@@ -2103,6 +2103,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
+ if (is_zero_ether_addr(netdev->dev_addr) &&
+ !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
+ eth_hw_addr_random(netdev);
+ mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
+ }
}
static void mlx5e_build_netdev(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d8939e597c54..bc3d9f8a75c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -34,7 +34,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
-#include <linux/mlx5/flow_table.h>
+#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
@@ -321,220 +321,6 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
free_l2_table_index(l2_table, index);
}
-/* E-Switch FDB flow steering */
-struct dest_node {
- struct list_head list;
- struct mlx5_flow_destination dest;
-};
-
-static int _mlx5_flow_rule_apply(struct mlx5_flow_rule *fr)
-{
- bool was_valid = fr->valid;
- struct dest_node *dest_n;
- u32 dest_list_size = 0;
- void *in_match_value;
- u32 *flow_context;
- u32 flow_index;
- int err;
- int i;
-
- if (list_empty(&fr->dest_list)) {
- if (fr->valid)
- mlx5_del_flow_table_entry(fr->ft, fr->fi);
- fr->valid = false;
- return 0;
- }
-
- list_for_each_entry(dest_n, &fr->dest_list, list)
- dest_list_size++;
-
- flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
- MLX5_ST_SZ_BYTES(dest_format_struct) *
- dest_list_size);
- if (!flow_context)
- return -ENOMEM;
-
- MLX5_SET(flow_context, flow_context, flow_tag, fr->flow_tag);
- MLX5_SET(flow_context, flow_context, action, fr->action);
- MLX5_SET(flow_context, flow_context, destination_list_size,
- dest_list_size);
-
- i = 0;
- list_for_each_entry(dest_n, &fr->dest_list, list) {
- void *dest_addr = MLX5_ADDR_OF(flow_context, flow_context,
- destination[i++]);
-
- MLX5_SET(dest_format_struct, dest_addr, destination_type,
- dest_n->dest.type);
- MLX5_SET(dest_format_struct, dest_addr, destination_id,
- dest_n->dest.vport_num);
- }
-
- in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
- memcpy(in_match_value, fr->match_value, MLX5_ST_SZ_BYTES(fte_match_param));
-
- err = mlx5_add_flow_table_entry(fr->ft, fr->match_criteria_enable,
- fr->match_criteria, flow_context,
- &flow_index);
- if (!err) {
- if (was_valid)
- mlx5_del_flow_table_entry(fr->ft, fr->fi);
- fr->fi = flow_index;
- fr->valid = true;
- }
- kfree(flow_context);
- return err;
-}
-
-static int mlx5_flow_rule_add_dest(struct mlx5_flow_rule *fr,
- struct mlx5_flow_destination *new_dest)
-{
- struct dest_node *dest_n;
- int err;
-
- dest_n = kzalloc(sizeof(*dest_n), GFP_KERNEL);
- if (!dest_n)
- return -ENOMEM;
-
- memcpy(&dest_n->dest, new_dest, sizeof(dest_n->dest));
- mutex_lock(&fr->mutex);
- list_add(&dest_n->list, &fr->dest_list);
- err = _mlx5_flow_rule_apply(fr);
- if (err) {
- list_del(&dest_n->list);
- kfree(dest_n);
- }
- mutex_unlock(&fr->mutex);
- return err;
-}
-
-static int mlx5_flow_rule_del_dest(struct mlx5_flow_rule *fr,
- struct mlx5_flow_destination *dest)
-{
- struct dest_node *dest_n;
- struct dest_node *n;
- int err;
-
- mutex_lock(&fr->mutex);
- list_for_each_entry_safe(dest_n, n, &fr->dest_list, list) {
- if (dest->vport_num == dest_n->dest.vport_num)
- goto found;
- }
- mutex_unlock(&fr->mutex);
- return -ENOENT;
-
-found:
- list_del(&dest_n->list);
- err = _mlx5_flow_rule_apply(fr);
- mutex_unlock(&fr->mutex);
- kfree(dest_n);
-
- return err;
-}
-
-static struct mlx5_flow_rule *find_fr(struct mlx5_eswitch *esw,
- u8 match_criteria_enable,
- u32 *match_value)
-{
- struct hlist_head *hash = esw->mc_table;
- struct esw_mc_addr *esw_mc;
- u8 *dmac_v;
-
- dmac_v = MLX5_ADDR_OF(fte_match_param, match_value,
- outer_headers.dmac_47_16);
-
- /* UNICAST FULL MATCH */
- if (!is_multicast_ether_addr(dmac_v))
- return NULL;
-
- /* MULTICAST FULL MATCH */
- esw_mc = l2addr_hash_find(hash, dmac_v, struct esw_mc_addr);
-
- return esw_mc ? esw_mc->uplink_rule : NULL;
-}
-
-static struct mlx5_flow_rule *alloc_fr(void *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
- u32 action,
- u32 flow_tag)
-{
- struct mlx5_flow_rule *fr = kzalloc(sizeof(*fr), GFP_KERNEL);
-
- if (!fr)
- return NULL;
-
- fr->match_criteria = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- fr->match_value = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!fr->match_criteria || !fr->match_value) {
- kfree(fr->match_criteria);
- kfree(fr->match_value);
- kfree(fr);
- return NULL;
- }
-
- memcpy(fr->match_criteria, match_criteria, MLX5_ST_SZ_BYTES(fte_match_param));
- memcpy(fr->match_value, match_value, MLX5_ST_SZ_BYTES(fte_match_param));
- fr->match_criteria_enable = match_criteria_enable;
- fr->flow_tag = flow_tag;
- fr->action = action;
-
- mutex_init(&fr->mutex);
- INIT_LIST_HEAD(&fr->dest_list);
- atomic_set(&fr->refcount, 0);
- fr->ft = ft;
- return fr;
-}
-
-static void deref_fr(struct mlx5_flow_rule *fr)
-{
- if (!atomic_dec_and_test(&fr->refcount))
- return;
-
- kfree(fr->match_criteria);
- kfree(fr->match_value);
- kfree(fr);
-}
-
-static struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_eswitch *esw,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
- u32 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_rule *fr;
- int err;
-
- fr = find_fr(esw, match_criteria_enable, match_value);
- fr = fr ? fr : alloc_fr(esw->fdb_table.fdb, match_criteria_enable, match_criteria,
- match_value, action, flow_tag);
- if (!fr)
- return NULL;
-
- atomic_inc(&fr->refcount);
-
- err = mlx5_flow_rule_add_dest(fr, dest);
- if (err) {
- deref_fr(fr);
- return NULL;
- }
-
- return fr;
-}
-
-static void mlx5_del_flow_rule(struct mlx5_flow_rule *fr, u32 vport)
-{
- struct mlx5_flow_destination dest;
-
- dest.vport_num = vport;
- mlx5_flow_rule_del_dest(fr, &dest);
- deref_fr(fr);
-}
-
/* E-Switch FDB */
static struct mlx5_flow_rule *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
@@ -569,7 +355,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
flow_rule =
- mlx5_add_flow_rule(esw,
+ mlx5_add_flow_rule(esw->fdb_table.fdb,
match_header,
match_c,
match_v,
@@ -589,33 +375,61 @@ out:
static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_table_group g;
+ struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int table_size;
+ u32 *flow_group_in;
u8 *dmac;
+ int err = 0;
esw_debug(dev, "Create FDB log_max_size(%d)\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- memset(&g, 0, sizeof(g));
- /* UC MC Full match rules*/
- g.log_sz = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
- g.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- dmac = MLX5_ADDR_OF(fte_match_param, g.match_criteria,
- outer_headers.dmac_47_16);
- /* Match criteria mask */
- memset(dmac, 0xff, 6);
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ return -ENOMEM;
+ }
- fdb = mlx5_create_flow_table(dev, 0,
- MLX5_FLOW_TABLE_TYPE_ESWITCH,
- 1, &g);
- if (fdb)
- esw_debug(dev, "ESW: FDB Table created fdb->id %d\n", mlx5_get_flow_table_id(fdb));
- else
- esw_warn(dev, "ESW: Failed to create FDB Table\n");
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+ memset(flow_group_in, 0, inlen);
+
+ table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ fdb = mlx5_create_flow_table(root_ns, 0, table_size);
+ if (IS_ERR_OR_NULL(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create FDB Table err %d\n", err);
+ goto out;
+ }
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ eth_broadcast_addr(dmac);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR_OR_NULL(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create flow group err(%d)\n", err);
+ goto out;
+ }
+
+ esw->fdb_table.addr_grp = g;
esw->fdb_table.fdb = fdb;
- return fdb ? 0 : -ENOMEM;
+out:
+ kvfree(flow_group_in);
+ if (err && !IS_ERR_OR_NULL(fdb))
+ mlx5_destroy_flow_table(fdb);
+ return err;
}
static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
@@ -623,10 +437,11 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
if (!esw->fdb_table.fdb)
return;
- esw_debug(esw->dev, "Destroy FDB Table fdb(%d)\n",
- mlx5_get_flow_table_id(esw->fdb_table.fdb));
+ esw_debug(esw->dev, "Destroy FDB Table\n");
+ mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.fdb);
esw->fdb_table.fdb = NULL;
+ esw->fdb_table.addr_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
@@ -689,7 +504,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
del_l2_table_entry(esw->dev, esw_uc->table_index);
if (vaddr->flow_rule)
- mlx5_del_flow_rule(vaddr->flow_rule, vport);
+ mlx5_del_flow_rule(vaddr->flow_rule);
vaddr->flow_rule = NULL;
l2addr_hash_del(esw_uc);
@@ -750,14 +565,14 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw_mc->uplink_rule);
if (vaddr->flow_rule)
- mlx5_del_flow_rule(vaddr->flow_rule, vport);
+ mlx5_del_flow_rule(vaddr->flow_rule);
vaddr->flow_rule = NULL;
if (--esw_mc->refcnt)
return 0;
if (esw_mc->uplink_rule)
- mlx5_del_flow_rule(esw_mc->uplink_rule, UPLINK_VPORT);
+ mlx5_del_flow_rule(esw_mc->uplink_rule);
l2addr_hash_del(esw_mc);
return 0;
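
The eswitch side simplifies the same way: rule refcounting and destination-list replay move into fs_core, so mlx5_del_flow_rule() now takes only the rule handle (no vport argument) and esw_fdb_set_vport_rule() reduces to a single mlx5_add_flow_rule() call. A sketch of the resulting per-vport forwarding entry; MLX5_FLOW_DESTINATION_TYPE_VPORT and the destination field names are assumed from the new include/linux/mlx5/fs.h, and the fte_match_param buffers are elided:

/* Sketch: FDB entry forwarding a dmac to a vport with the new API. */
struct mlx5_flow_destination dest = {
	.type	   = MLX5_FLOW_DESTINATION_TYPE_VPORT,	/* assumed name */
	.vport_num = vport,
};
struct mlx5_flow_rule *flow_rule;

flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb,
			       MLX5_MATCH_OUTER_HEADERS,
			       match_c, match_v,
			       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
			       MLX5_FS_DEFAULT_FLOW_TAG, &dest);
/* ... later ... */
mlx5_del_flow_rule(flow_rule);	/* rule handle only, no vport */
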
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 02ff3eade026..3416a428f70f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -88,20 +88,6 @@ struct l2addr_node {
kfree(ptr); \
})
-struct mlx5_flow_rule {
- void *ft;
- u32 fi;
- u8 match_criteria_enable;
- u32 *match_criteria;
- u32 *match_value;
- u32 action;
- u32 flow_tag;
- bool valid;
- atomic_t refcount;
- struct mutex mutex; /* protect flow rule updates */
- struct list_head dest_list;
-};
-
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@@ -126,6 +112,7 @@ struct mlx5_l2_table {
struct mlx5_eswitch_fdb {
void *fdb;
+ struct mlx5_flow_group *addr_grp;
};
struct mlx5_eswitch {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
deleted file mode 100644
index ca90b9bc3b95..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/flow_table.h>
-#include "mlx5_core.h"
-
-struct mlx5_ftg {
- struct mlx5_flow_table_group g;
- u32 id;
- u32 start_ix;
-};
-
-struct mlx5_flow_table {
- struct mlx5_core_dev *dev;
- u8 level;
- u8 type;
- u32 id;
- struct mutex mutex; /* sync bitmap alloc */
- u16 num_groups;
- struct mlx5_ftg *group;
- unsigned long *bitmap;
- u32 size;
-};
-
-static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
- u32 flow_index, void *flow_context)
-{
- u32 out[MLX5_ST_SZ_DW(set_fte_out)];
- u32 *in;
- void *in_flow_context;
- int fcdls =
- MLX5_GET(flow_context, flow_context, destination_list_size) *
- MLX5_ST_SZ_BYTES(dest_format_struct);
- int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
- int err;
-
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
- return -ENOMEM;
- }
-
- MLX5_SET(set_fte_in, in, table_type, ft->type);
- MLX5_SET(set_fte_in, in, table_id, ft->id);
- MLX5_SET(set_fte_in, in, flow_index, flow_index);
- MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
-
- in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
- memcpy(in_flow_context, flow_context,
- MLX5_ST_SZ_BYTES(flow_context) + fcdls);
-
- MLX5_SET(flow_context, in_flow_context, group_id,
- ft->group[group_ix].id);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
- sizeof(out));
- kvfree(in);
-
- return err;
-}
-
-static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
-{
- u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
- MLX5_SET_DFTEI(in, table_type, ft->type);
- MLX5_SET_DFTEI(in, table_id, ft->id);
- MLX5_SET_DFTEI(in, flow_index, flow_index);
- MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
-
- mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
- MLX5_SET_DFGI(in, table_type, ft->type);
- MLX5_SET_DFGI(in, table_id, ft->id);
- MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
- MLX5_SET_DFGI(in, group_id, ft->group[i].id);
- mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
-{
- u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
- u32 *in;
- void *in_match_criteria;
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table_group *g = &ft->group[i].g;
- u32 start_ix = ft->group[i].start_ix;
- u32 end_ix = start_ix + (1 << g->log_sz) - 1;
- int err;
-
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
- return -ENOMEM;
- }
- in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
- match_criteria);
-
- memset(out, 0, sizeof(out));
-
-#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
- MLX5_SET_CFGI(in, table_type, ft->type);
- MLX5_SET_CFGI(in, table_id, ft->id);
- MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
- MLX5_SET_CFGI(in, start_flow_index, start_ix);
- MLX5_SET_CFGI(in, end_flow_index, end_ix);
- MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
-
- memcpy(in_match_criteria, g->match_criteria,
- MLX5_ST_SZ_BYTES(fte_match_param));
-
- err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
- sizeof(out));
- if (!err)
- ft->group[i].id = MLX5_GET(create_flow_group_out, out,
- group_id);
-
- kvfree(in);
-
- return err;
-}
-
-static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
-{
- int i;
-
- for (i = 0; i < ft->num_groups; i++)
- mlx5_destroy_flow_group_cmd(ft, i);
-}
-
-static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
-{
- int err;
- int i;
-
- for (i = 0; i < ft->num_groups; i++) {
- err = mlx5_create_flow_group_cmd(ft, i);
- if (err)
- goto err_destroy_flow_table_groups;
- }
-
- return 0;
-
-err_destroy_flow_table_groups:
- for (i--; i >= 0; i--)
- mlx5_destroy_flow_group_cmd(ft, i);
-
- return err;
-}
-
-static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
-{
- u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
- u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
- int err;
-
- memset(in, 0, sizeof(in));
-
- MLX5_SET(create_flow_table_in, in, table_type, ft->type);
- MLX5_SET(create_flow_table_in, in, level, ft->level);
- MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
-
- MLX5_SET(create_flow_table_in, in, opcode,
- MLX5_CMD_OP_CREATE_FLOW_TABLE);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
- sizeof(out));
- if (err)
- return err;
-
- ft->id = MLX5_GET(create_flow_table_out, out, table_id);
-
- return 0;
-}
-
-static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
-
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
- MLX5_SET_DFTI(in, table_type, ft->type);
- MLX5_SET_DFTI(in, table_id, ft->id);
- MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
-
- mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
- u32 *match_criteria, int *group_ix)
-{
- void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
- outer_headers);
- void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
- misc_parameters);
- void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
- inner_headers);
- int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
- int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
- int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
- int i;
-
- for (i = 0; i < ft->num_groups; i++) {
- struct mlx5_flow_table_group *g = &ft->group[i].g;
- void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
- g->match_criteria,
- outer_headers);
- void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
- g->match_criteria,
- misc_parameters);
- void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
- g->match_criteria,
- inner_headers);
-
- if (g->match_criteria_enable != match_criteria_enable)
- continue;
-
- if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
- if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
- continue;
-
- if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
- if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
- continue;
-
- if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
- if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
- continue;
-
- *group_ix = i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
-{
- struct mlx5_ftg *g = &ft->group[group_ix];
- int err = 0;
-
- mutex_lock(&ft->mutex);
-
- *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
- if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
- err = -ENOSPC;
- else
- __set_bit(*ix, ft->bitmap);
-
- mutex_unlock(&ft->mutex);
-
- return err;
-}
-
-static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
-{
- __clear_bit(ix, ft->bitmap);
-}
-
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
- void *match_criteria, void *flow_context,
- u32 *flow_index)
-{
- struct mlx5_flow_table *ft = flow_table;
- int group_ix;
- int err;
-
- err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
- &group_ix);
- if (err) {
- mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
- return err;
- }
-
- err = alloc_flow_index(ft, group_ix, flow_index);
- if (err) {
- mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
- return err;
- }
-
- return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
-}
-EXPORT_SYMBOL(mlx5_add_flow_table_entry);
-
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
-{
- struct mlx5_flow_table *ft = flow_table;
-
- mlx5_del_flow_entry_cmd(ft, flow_index);
- mlx5_free_flow_index(ft, flow_index);
-}
-EXPORT_SYMBOL(mlx5_del_flow_table_entry);
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
- u16 num_groups,
- struct mlx5_flow_table_group *group)
-{
- struct mlx5_flow_table *ft;
- u32 start_ix = 0;
- u32 ft_size = 0;
- void *gr;
- void *bm;
- int err;
- int i;
-
- for (i = 0; i < num_groups; i++)
- ft_size += (1 << group[i].log_sz);
-
- ft = kzalloc(sizeof(*ft), GFP_KERNEL);
- gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
- bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
- if (!ft || !gr || !bm)
- goto err_free_ft;
-
- ft->group = gr;
- ft->bitmap = bm;
- ft->num_groups = num_groups;
- ft->level = level;
- ft->type = table_type;
- ft->size = ft_size;
- ft->dev = dev;
- mutex_init(&ft->mutex);
-
- for (i = 0; i < ft->num_groups; i++) {
- memcpy(&ft->group[i].g, &group[i], sizeof(*group));
- ft->group[i].start_ix = start_ix;
- start_ix += 1 << group[i].log_sz;
- }
-
- err = mlx5_create_flow_table_cmd(ft);
- if (err)
- goto err_free_ft;
-
- err = mlx5_create_flow_table_groups(ft);
- if (err)
- goto err_destroy_flow_table_cmd;
-
- return ft;
-
-err_destroy_flow_table_cmd:
- mlx5_destroy_flow_table_cmd(ft);
-
-err_free_ft:
- mlx5_core_warn(dev, "failed to alloc flow table\n");
- kfree(bm);
- kfree(gr);
- kfree(ft);
-
- return NULL;
-}
-EXPORT_SYMBOL(mlx5_create_flow_table);
-
-void mlx5_destroy_flow_table(void *flow_table)
-{
- struct mlx5_flow_table *ft = flow_table;
-
- mlx5_destroy_flow_table_groups(ft);
- mlx5_destroy_flow_table_cmd(ft);
- kfree(ft->bitmap);
- kfree(ft->group);
- kfree(ft);
-}
-EXPORT_SYMBOL(mlx5_destroy_flow_table);
-
-u32 mlx5_get_flow_table_id(void *flow_table)
-{
- struct mlx5_flow_table *ft = flow_table;
-
- return ft->id;
-}
-EXPORT_SYMBOL(mlx5_get_flow_table_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
new file mode 100644
index 000000000000..5096f4f336bd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+#include "fs_core.h"
+#include "fs_cmd.h"
+#include "mlx5_core.h"
+
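+/* Thin wrappers around the flow steering firmware commands. A caller
+ * (fs_core.c in this patch) is expected to pair them: create_flow_table/
+ * destroy_flow_table, create_flow_group/destroy_flow_group, and
+ * create_fte (or update_fte) / delete_fte.
+ */
+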
+int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ enum fs_flow_table_type type, unsigned int level,
+ unsigned int log_size, unsigned int *table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(create_flow_table_in, in, opcode,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+ MLX5_SET(create_flow_table_in, in, table_type, type);
+ MLX5_SET(create_flow_table_in, in, level, level);
+ MLX5_SET(create_flow_table_in, in, log_size, log_size);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+
+ if (!err)
+ *table_id = MLX5_GET(create_flow_table_out, out,
+ table_id);
+ return err;
+}
+
+int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(destroy_flow_table_in, in, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ unsigned int *group_id)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+ int err;
+
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(create_flow_group_in, in, opcode,
+ MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET(create_flow_group_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+
+ err = mlx5_cmd_exec_check_status(dev, in,
+ inlen, out,
+ sizeof(out));
+ if (!err)
+ *group_id = MLX5_GET(create_flow_group_out, out,
+ group_id);
+
+ return err;
+}
+
+int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(destroy_flow_group_in, in, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
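+/* Shared SET_FLOW_TABLE_ENTRY builder: op_mod 0 creates a new entry,
+ * op_mod 1 (together with modify_enable_mask) updates an existing one,
+ * as done by mlx5_cmd_create_fte() and mlx5_cmd_update_fte() below.
+ * The inbox is a set_fte_in header followed by one dest_format_struct
+ * per destination, which is why inlen scales with fte->dests_size.
+ */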
+static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
+ int opmod, int modify_mask,
+ struct mlx5_flow_table *ft,
+			    unsigned int group_id,
+ struct fs_fte *fte)
+{
+ unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
+ fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ struct mlx5_flow_rule *dst;
+ void *in_flow_context;
+ void *in_match_value;
+ void *in_dests;
+ u32 *in;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, op_mod, opmod);
+ MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, fte->index);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+ MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
+ MLX5_SET(flow_context, in_flow_context, action, fte->action);
+ MLX5_SET(flow_context, in_flow_context, destination_list_size,
+ fte->dests_size);
+ in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
+ match_value);
+ memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
+
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ unsigned int id;
+
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ dst->dest_attr.type);
+ if (dst->dest_attr.type ==
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ id = dst->dest_attr.ft->id;
+ else
+ id = dst->dest_attr.tir_num;
+ MLX5_SET(dest_format_struct, in_dests, destination_id, id);
+ in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ }
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
+ sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+			unsigned int group_id,
+ struct fs_fte *fte)
+{
+ return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
+}
+
+int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+			unsigned int group_id,
+ struct fs_fte *fte)
+{
+ int opmod;
+ int modify_mask;
+ int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
+ flow_table_properties_nic_receive.
+ flow_modify_en);
+ if (!atomic_mod_cap)
+ return -ENOTSUPP;
+ opmod = 1;
+ modify_mask = 1 <<
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
+
+ return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
+}
+
+int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int index)
+{
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, ft->type);
+ MLX5_SET(delete_fte_in, in, table_id, ft->id);
+ MLX5_SET(delete_fte_in, in, flow_index, index);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+
+ return err;
+}
diff --git a/include/linux/mlx5/flow_table.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 0f2a15cf3317..f39304ede186 100644
--- a/include/linux/mlx5/flow_table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -30,34 +30,36 @@
* SOFTWARE.
*/
-#ifndef MLX5_FLOW_TABLE_H
-#define MLX5_FLOW_TABLE_H
-
-#include <linux/mlx5/driver.h>
-
-struct mlx5_flow_table_group {
- u8 log_sz;
- u8 match_criteria_enable;
- u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
-};
-
-struct mlx5_flow_destination {
- enum mlx5_flow_destination_type type;
- union {
- u32 tir_num;
- void *ft;
- u32 vport_num;
- };
-};
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
- u16 num_groups,
- struct mlx5_flow_table_group *group);
-void mlx5_destroy_flow_table(void *flow_table);
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
- void *match_criteria, void *flow_context,
- u32 *flow_index);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
-u32 mlx5_get_flow_table_id(void *flow_table);
-
-#endif /* MLX5_FLOW_TABLE_H */
+#ifndef _MLX5_FS_CMD_
+#define _MLX5_FS_CMD_
+
+int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ enum fs_flow_table_type type, unsigned int level,
+ unsigned int log_size, unsigned int *table_id);
+
+int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft);
+
+int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ u32 *in, unsigned int *group_id);
+
+int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id);
+
+int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+			unsigned int group_id,
+ struct fs_fte *fte);
+
+int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+			unsigned int group_id,
+ struct fs_fte *fte);
+
+int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ unsigned int index);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
new file mode 100644
index 000000000000..f7d62fe595f6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -0,0 +1,1047 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mutex.h>
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+
+#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
+ sizeof(struct init_tree_node))
+
+#define INIT_PRIO(min_level_val, max_ft_val,\
+ start_level_val, ...) {.type = FS_TYPE_PRIO,\
+ .min_ft_level = min_level_val,\
+ .start_level = start_level_val,\
+ .max_ft = max_ft_val,\
+ .children = (struct init_tree_node[]) {__VA_ARGS__},\
+ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
+}
+
+#define ADD_PRIO(min_level_val, max_ft_val, start_level_val, ...)\
+ INIT_PRIO(min_level_val, max_ft_val, start_level_val,\
+		  __VA_ARGS__)
+
+#define ADD_FT_PRIO(max_ft_val, start_level_val, ...)\
+ INIT_PRIO(0, max_ft_val, start_level_val,\
+		  __VA_ARGS__)
+
+#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+ .children = (struct init_tree_node[]) {__VA_ARGS__},\
+ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
+}
+
+#define KERNEL_START_LEVEL 0
+#define KERNEL_P0_START_LEVEL KERNEL_START_LEVEL
+#define KERNEL_MAX_FT 2
+#define KERNEL_MIN_LEVEL 2
+static struct init_tree_node {
+ enum fs_node_type type;
+ struct init_tree_node *children;
+ int ar_size;
+ int min_ft_level;
+ int prio;
+ int max_ft;
+ int start_level;
+} root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 1,
+ .children = (struct init_tree_node[]) {
+		ADD_PRIO(KERNEL_MIN_LEVEL, KERNEL_MAX_FT,
+ KERNEL_START_LEVEL,
+ ADD_NS(ADD_FT_PRIO(KERNEL_MAX_FT,
+ KERNEL_P0_START_LEVEL))),
+ }
+};
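+
+/* The static tree above describes the default NIC RX hierarchy: one
+ * kernel priority (requiring at least KERNEL_MIN_LEVEL flow table
+ * levels), holding a single namespace with one flow table priority of
+ * at most KERNEL_MAX_FT tables, starting at KERNEL_P0_START_LEVEL.
+ */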
+
+static void del_rule(struct fs_node *node);
+static void del_flow_table(struct fs_node *node);
+static void del_flow_group(struct fs_node *node);
+static void del_fte(struct fs_node *node);
+
+static void tree_init_node(struct fs_node *node,
+ unsigned int refcount,
+ void (*remove_func)(struct fs_node *))
+{
+ atomic_set(&node->refcount, refcount);
+ INIT_LIST_HEAD(&node->list);
+ INIT_LIST_HEAD(&node->children);
+ mutex_init(&node->lock);
+ node->remove_func = remove_func;
+}
+
+static void tree_add_node(struct fs_node *node, struct fs_node *parent)
+{
+ if (parent)
+ atomic_inc(&parent->refcount);
+ node->parent = parent;
+
+	/* A node without a parent is its own root */
+ if (!parent)
+ node->root = node;
+ else
+ node->root = parent->root;
+}
+
+static void tree_get_node(struct fs_node *node)
+{
+ atomic_inc(&node->refcount);
+}
+
+static void nested_lock_ref_node(struct fs_node *node)
+{
+ if (node) {
+ mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING);
+ atomic_inc(&node->refcount);
+ }
+}
+
+static void lock_ref_node(struct fs_node *node)
+{
+ if (node) {
+ mutex_lock(&node->lock);
+ atomic_inc(&node->refcount);
+ }
+}
+
+static void unlock_ref_node(struct fs_node *node)
+{
+ if (node) {
+ atomic_dec(&node->refcount);
+ mutex_unlock(&node->lock);
+ }
+}
+
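+/* Drop one reference. When the last reference goes away the node is
+ * unlinked from its parent, its remove_func() callback runs and the
+ * node is freed; the reference the node held on its parent (taken in
+ * tree_add_node()) is then dropped recursively.
+ */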
+static void tree_put_node(struct fs_node *node)
+{
+ struct fs_node *parent_node = node->parent;
+
+ lock_ref_node(parent_node);
+ if (atomic_dec_and_test(&node->refcount)) {
+ if (parent_node)
+ list_del_init(&node->list);
+ if (node->remove_func)
+ node->remove_func(node);
+ kfree(node);
+ node = NULL;
+ }
+ unlock_ref_node(parent_node);
+ if (!node && parent_node)
+ tree_put_node(parent_node);
+}
+
+static int tree_remove_node(struct fs_node *node)
+{
+ if (atomic_read(&node->refcount) > 1)
+ return -EPERM;
+ tree_put_node(node);
+ return 0;
+}
+
+static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
+ unsigned int prio)
+{
+ struct fs_prio *iter_prio;
+
+ fs_for_each_prio(iter_prio, ns) {
+ if (iter_prio->prio == prio)
+ return iter_prio;
+ }
+
+ return NULL;
+}
+
+static unsigned int find_next_free_level(struct fs_prio *prio)
+{
+ if (!list_empty(&prio->node.children)) {
+ struct mlx5_flow_table *ft;
+
+ ft = list_last_entry(&prio->node.children,
+ struct mlx5_flow_table,
+ node.list);
+ return ft->level + 1;
+ }
+ return prio->start_level;
+}
+
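+/* Byte-wise comparison under a mask. Illustrative example: with a mask
+ * byte of 0xf0, values 0x12 and 0x1f compare equal, since only the high
+ * nibble participates: (0x12 & 0xf0) == (0x1f & 0xf0).
+ */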
+static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++, mask++, val1++, val2++)
+ if ((*((u8 *)val1) & (*(u8 *)mask)) !=
+ ((*(u8 *)val2) & (*(u8 *)mask)))
+ return false;
+
+ return true;
+}
+
+static bool compare_match_value(struct mlx5_flow_group_mask *mask,
+ void *fte_param1, void *fte_param2)
+{
+ if (mask->match_criteria_enable &
+ 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
+ void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+ fte_param1, outer_headers);
+ void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+ fte_param2, outer_headers);
+ void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+ mask->match_criteria, outer_headers);
+
+ if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+ MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
+ return false;
+ }
+
+ if (mask->match_criteria_enable &
+ 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
+ void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+ fte_param1, misc_parameters);
+ void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+ fte_param2, misc_parameters);
+ void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+ mask->match_criteria, misc_parameters);
+
+ if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+ MLX5_ST_SZ_BYTES(fte_match_set_misc)))
+ return false;
+ }
+
+ if (mask->match_criteria_enable &
+ 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
+ void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+ fte_param1, inner_headers);
+ void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+ fte_param2, inner_headers);
+ void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+ mask->match_criteria, inner_headers);
+
+ if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+ MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
+ return false;
+ }
+ return true;
+}
+
+static bool compare_match_criteria(u8 match_criteria_enable1,
+ u8 match_criteria_enable2,
+ void *mask1, void *mask2)
+{
+ return match_criteria_enable1 == match_criteria_enable2 &&
+ !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
+}
+
+static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
+{
+ struct fs_node *root;
+ struct mlx5_flow_namespace *ns;
+
+ root = node->root;
+
+ if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
+		pr_warn("mlx5: flow steering node is not in tree or is corrupted\n");
+ return NULL;
+ }
+
+ ns = container_of(root, struct mlx5_flow_namespace, node);
+ return container_of(ns, struct mlx5_flow_root_namespace, ns);
+}
+
+static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root = find_root(node);
+
+ if (root)
+ return root->dev;
+ return NULL;
+}
+
+static void del_flow_table(struct fs_node *node)
+{
+ struct mlx5_flow_table *ft;
+ struct mlx5_core_dev *dev;
+ struct fs_prio *prio;
+ int err;
+
+ fs_get_obj(ft, node);
+ dev = get_dev(&ft->node);
+
+ err = mlx5_cmd_destroy_flow_table(dev, ft);
+ if (err)
+ pr_warn("flow steering can't destroy ft\n");
+ fs_get_obj(prio, ft->node.parent);
+ prio->num_ft--;
+}
+
+static void del_rule(struct fs_node *node)
+{
+	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_table *ft;
+	struct mlx5_flow_group *fg;
+	struct fs_fte *fte;
+	struct mlx5_core_dev *dev = get_dev(node);
+	int err;
+
+	fs_get_obj(rule, node);
+	fs_get_obj(fte, rule->node.parent);
+	fs_get_obj(fg, fte->node.parent);
+	fs_get_obj(ft, fg->node.parent);
+	list_del(&rule->node.list);
+	fte->dests_size--;
+	if (fte->dests_size) {
+		err = mlx5_cmd_update_fte(dev, ft,
+					  fg->id, fte);
+		if (err)
+			pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
+				__func__, fg->id, fte->index);
+	}
+}
+
+static void del_fte(struct fs_node *node)
+{
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_core_dev *dev;
+ struct fs_fte *fte;
+ int err;
+
+ fs_get_obj(fte, node);
+ fs_get_obj(fg, fte->node.parent);
+ fs_get_obj(ft, fg->node.parent);
+
+ dev = get_dev(&ft->node);
+ err = mlx5_cmd_delete_fte(dev, ft,
+ fte->index);
+ if (err)
+ pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+
+ fte->status = 0;
+ fg->num_ftes--;
+}
+
+static void del_flow_group(struct fs_node *node)
+{
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_table *ft;
+ struct mlx5_core_dev *dev;
+
+ fs_get_obj(fg, node);
+ fs_get_obj(ft, fg->node.parent);
+ dev = get_dev(&ft->node);
+
+ if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+ pr_warn("flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
+}
+
+static struct fs_fte *alloc_fte(u8 action,
+ u32 flow_tag,
+ u32 *match_value,
+ unsigned int index)
+{
+ struct fs_fte *fte;
+
+ fte = kzalloc(sizeof(*fte), GFP_KERNEL);
+ if (!fte)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(fte->val, match_value, sizeof(fte->val));
+ fte->node.type = FS_TYPE_FLOW_ENTRY;
+ fte->flow_tag = flow_tag;
+ fte->index = index;
+ fte->action = action;
+
+ return fte;
+}
+
+static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
+{
+ struct mlx5_flow_group *fg;
+ void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ create_fg_in, match_criteria);
+ u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
+ create_fg_in,
+ match_criteria_enable);
+ fg = kzalloc(sizeof(*fg), GFP_KERNEL);
+ if (!fg)
+ return ERR_PTR(-ENOMEM);
+
+ fg->mask.match_criteria_enable = match_criteria_enable;
+ memcpy(&fg->mask.match_criteria, match_criteria,
+ sizeof(fg->mask.match_criteria));
+ fg->node.type = FS_TYPE_FLOW_GROUP;
+ fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
+ start_flow_index);
+ fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
+ end_flow_index) - fg->start_index + 1;
+ return fg;
+}
+
+static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+ enum fs_flow_table_type table_type)
+{
+ struct mlx5_flow_table *ft;
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ if (!ft)
+ return NULL;
+
+ ft->level = level;
+ ft->node.type = FS_TYPE_FLOW_TABLE;
+ ft->type = table_type;
+ ft->max_fte = max_fte;
+
+ return ft;
+}
+
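+/* Tables are stacked inside a priority: each new table is placed one
+ * level below the previous one (see find_next_free_level()), and the
+ * requested number of entries is rounded up to a power of two before it
+ * is handed to the firmware as a log size.
+ */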
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int max_fte)
+{
+ struct mlx5_flow_table *ft;
+ int err;
+ int log_table_sz;
+ struct mlx5_flow_root_namespace *root =
+ find_root(&ns->node);
+ struct fs_prio *fs_prio = NULL;
+
+ if (!root) {
+ pr_err("mlx5: flow steering failed to find root of namespace\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ fs_prio = find_prio(ns, prio);
+ if (!fs_prio)
+ return ERR_PTR(-EINVAL);
+
+ lock_ref_node(&fs_prio->node);
+ if (fs_prio->num_ft == fs_prio->max_ft) {
+ err = -ENOSPC;
+ goto unlock_prio;
+ }
+
+ ft = alloc_flow_table(find_next_free_level(fs_prio),
+ roundup_pow_of_two(max_fte),
+ root->table_type);
+ if (!ft) {
+ err = -ENOMEM;
+ goto unlock_prio;
+ }
+
+ tree_init_node(&ft->node, 1, del_flow_table);
+ log_table_sz = ilog2(ft->max_fte);
+ err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+ log_table_sz, &ft->id);
+ if (err)
+ goto free_ft;
+
+ tree_add_node(&ft->node, &fs_prio->node);
+ list_add_tail(&ft->node.list, &fs_prio->node.children);
+ fs_prio->num_ft++;
+ unlock_ref_node(&fs_prio->node);
+
+ return ft;
+
+free_ft:
+ kfree(ft);
+unlock_prio:
+ unlock_ref_node(&fs_prio->node);
+ return ERR_PTR(err);
+}
+
+struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
+ u32 *fg_in)
+{
+ struct mlx5_flow_group *fg;
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ fg = alloc_flow_group(fg_in);
+ if (IS_ERR(fg))
+ return fg;
+
+ lock_ref_node(&ft->node);
+ err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+ if (err) {
+ kfree(fg);
+ unlock_ref_node(&ft->node);
+ return ERR_PTR(err);
+ }
+ /* Add node to tree */
+ tree_init_node(&fg->node, 1, del_flow_group);
+ tree_add_node(&fg->node, &ft->node);
+ /* Add node to group list */
+ list_add(&fg->node.list, ft->node.children.prev);
+ unlock_ref_node(&ft->node);
+
+ return fg;
+}
+
+static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_rule *rule;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ rule->node.type = FS_TYPE_FLOW_DEST;
+ memcpy(&rule->dest_attr, dest, sizeof(*dest));
+
+ return rule;
+}
+
+/* fte should not be deleted while calling this function */
+static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
+ struct mlx5_flow_group *fg,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_rule *rule;
+ int err;
+
+ rule = alloc_rule(dest);
+ if (!rule)
+ return ERR_PTR(-ENOMEM);
+
+ fs_get_obj(ft, fg->node.parent);
+	/* Append dest to the tail of the fte's dests list */
+ tree_init_node(&rule->node, 1, del_rule);
+ list_add_tail(&rule->node.list, &fte->node.children);
+ fte->dests_size++;
+ if (fte->dests_size == 1)
+ err = mlx5_cmd_create_fte(get_dev(&ft->node),
+ ft, fg->id, fte);
+ else
+ err = mlx5_cmd_update_fte(get_dev(&ft->node),
+ ft, fg->id, fte);
+ if (err)
+ goto free_rule;
+
+ fte->status |= FS_FTE_STATUS_EXISTING;
+
+ return rule;
+
+free_rule:
+ list_del(&rule->node.list);
+ kfree(rule);
+ fte->dests_size--;
+ return ERR_PTR(err);
+}
+
+/* Assumes fg is locked */
+static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
+ struct list_head **prev)
+{
+ struct fs_fte *fte;
+ unsigned int start = fg->start_index;
+
+ if (prev)
+ *prev = &fg->node.children;
+
+	/* the list is assumed to be sorted by index */
+ fs_for_each_fte(fte, fg) {
+ if (fte->index != start)
+ return start;
+ start++;
+ if (prev)
+ *prev = &fte->node.list;
+ }
+
+ return start;
+}
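+
+/* Illustrative example for the above: in a group whose start_index is 8
+ * and whose existing FTEs occupy indices 8 and 9, the returned index is
+ * 10 and *prev points at the FTE with index 9, keeping the list sorted.
+ */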
+
+/* prev is output, prev->next = new_fte */
+static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
+ u32 *match_value,
+ u8 action,
+ u32 flow_tag,
+ struct list_head **prev)
+{
+	int index;
+
+	index = get_free_fte_index(fg, prev);
+	return alloc_fte(action, flow_tag, match_value, index);
+}
+
+/* Assuming the parent flow table of fg is locked */
+static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+ u32 *match_value,
+ u8 action,
+ u32 flow_tag,
+ struct mlx5_flow_destination *dest)
+{
+ struct fs_fte *fte;
+ struct mlx5_flow_rule *rule;
+ struct mlx5_flow_table *ft;
+ struct list_head *prev;
+
+ lock_ref_node(&fg->node);
+ fs_for_each_fte(fte, fg) {
+ nested_lock_ref_node(&fte->node);
+ if (compare_match_value(&fg->mask, match_value, &fte->val) &&
+ action == fte->action && flow_tag == fte->flow_tag) {
+ rule = add_rule_fte(fte, fg, dest);
+ unlock_ref_node(&fte->node);
+ if (IS_ERR(rule))
+ goto unlock_fg;
+ else
+ goto add_rule;
+ }
+ unlock_ref_node(&fte->node);
+ }
+ fs_get_obj(ft, fg->node.parent);
+ if (fg->num_ftes >= fg->max_ftes) {
+ rule = ERR_PTR(-ENOSPC);
+ goto unlock_fg;
+ }
+
+ fte = create_fte(fg, match_value, action, flow_tag, &prev);
+ if (IS_ERR(fte)) {
+		rule = ERR_CAST(fte);
+ goto unlock_fg;
+ }
+ tree_init_node(&fte->node, 0, del_fte);
+ rule = add_rule_fte(fte, fg, dest);
+ if (IS_ERR(rule)) {
+ kfree(fte);
+ goto unlock_fg;
+ }
+
+ fg->num_ftes++;
+
+ tree_add_node(&fte->node, &fg->node);
+ list_add(&fte->node.list, prev);
+add_rule:
+ tree_add_node(&rule->node, &fte->node);
+unlock_fg:
+ unlock_ref_node(&fg->node);
+ return rule;
+}
+
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ u32 *match_criteria,
+ u32 *match_value,
+ u32 action,
+ u32 flow_tag,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_rule *rule = ERR_PTR(-EINVAL);
+
+ tree_get_node(&ft->node);
+ lock_ref_node(&ft->node);
+ fs_for_each_fg(g, ft)
+ if (compare_match_criteria(g->mask.match_criteria_enable,
+ match_criteria_enable,
+ g->mask.match_criteria,
+ match_criteria)) {
+ unlock_ref_node(&ft->node);
+ rule = add_rule_fg(g, match_value,
+ action, flow_tag, dest);
+ goto put;
+ }
+ unlock_ref_node(&ft->node);
+put:
+ tree_put_node(&ft->node);
+ return rule;
+}
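+
+/* Usage sketch (hypothetical caller; the TIR destination and action are
+ * illustrative, error handling omitted):
+ *
+ *	struct mlx5_flow_destination dest = {
+ *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
+ *		.tir_num = tirn,
+ *	};
+ *	struct mlx5_flow_rule *rule;
+ *
+ *	rule = mlx5_add_flow_rule(ft, match_criteria_enable,
+ *				  match_criteria, match_value,
+ *				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ *				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ */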
+
+void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
+{
+ tree_remove_node(&rule->node);
+}
+
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
+{
+ if (tree_remove_node(&ft->node))
+ mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
+ ft->id);
+
+ return 0;
+}
+
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
+{
+ if (tree_remove_node(&fg->node))
+ mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
+ fg->id);
+}
+
+struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type)
+{
+ struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
+ int prio;
+	struct fs_prio *fs_prio;
+ struct mlx5_flow_namespace *ns;
+
+ if (!root_ns)
+ return NULL;
+
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ prio = 0;
+ break;
+ case MLX5_FLOW_NAMESPACE_FDB:
+ if (dev->priv.fdb_root_ns)
+ return &dev->priv.fdb_root_ns->ns;
+ else
+ return NULL;
+ default:
+ return NULL;
+ }
+
+ fs_prio = find_prio(&root_ns->ns, prio);
+ if (!fs_prio)
+ return NULL;
+
+ ns = list_first_entry(&fs_prio->node.children,
+ typeof(*ns),
+ node.list);
+
+ return ns;
+}
+
+static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
+				      unsigned int prio, int max_ft,
+ int start_level)
+{
+ struct fs_prio *fs_prio;
+
+ fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
+ if (!fs_prio)
+ return ERR_PTR(-ENOMEM);
+
+ fs_prio->node.type = FS_TYPE_PRIO;
+ tree_init_node(&fs_prio->node, 1, NULL);
+ tree_add_node(&fs_prio->node, &ns->node);
+ fs_prio->max_ft = max_ft;
+ fs_prio->prio = prio;
+ fs_prio->start_level = start_level;
+ list_add_tail(&fs_prio->node.list, &ns->node.children);
+
+ return fs_prio;
+}
+
+static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
+ *ns)
+{
+ ns->node.type = FS_TYPE_NAMESPACE;
+
+ return ns;
+}
+
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
+{
+ struct mlx5_flow_namespace *ns;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return ERR_PTR(-ENOMEM);
+
+ fs_init_namespace(ns);
+ tree_init_node(&ns->node, 1, NULL);
+ tree_add_node(&ns->node, &prio->node);
+ list_add_tail(&ns->node.list, &prio->node.children);
+
+ return ns;
+}
+
+static int init_root_tree_recursive(int max_ft_level, struct init_tree_node *init_node,
+ struct fs_node *fs_parent_node,
+ struct init_tree_node *init_parent_node,
+ int index)
+{
+ struct mlx5_flow_namespace *fs_ns;
+ struct fs_prio *fs_prio;
+ struct fs_node *base;
+ int i;
+ int err;
+
+ if (init_node->type == FS_TYPE_PRIO) {
+ if (init_node->min_ft_level > max_ft_level)
+ return -ENOTSUPP;
+
+ fs_get_obj(fs_ns, fs_parent_node);
+ fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft,
+ init_node->start_level);
+ if (IS_ERR(fs_prio))
+ return PTR_ERR(fs_prio);
+ base = &fs_prio->node;
+ } else if (init_node->type == FS_TYPE_NAMESPACE) {
+ fs_get_obj(fs_prio, fs_parent_node);
+ fs_ns = fs_create_namespace(fs_prio);
+ if (IS_ERR(fs_ns))
+ return PTR_ERR(fs_ns);
+ base = &fs_ns->node;
+ } else {
+ return -EINVAL;
+ }
+ for (i = 0; i < init_node->ar_size; i++) {
+ err = init_root_tree_recursive(max_ft_level,
+ &init_node->children[i], base,
+ init_node, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int init_root_tree(int max_ft_level, struct init_tree_node *init_node,
+ struct fs_node *fs_parent_node)
+{
+ int i;
+ struct mlx5_flow_namespace *fs_ns;
+ int err;
+
+ fs_get_obj(fs_ns, fs_parent_node);
+ for (i = 0; i < init_node->ar_size; i++) {
+ err = init_root_tree_recursive(max_ft_level,
+ &init_node->children[i],
+ &fs_ns->node,
+ init_node, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
+ enum fs_flow_table_type
+ table_type)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_namespace *ns;
+
+ /* Create the root namespace */
+ root_ns = mlx5_vzalloc(sizeof(*root_ns));
+ if (!root_ns)
+ return NULL;
+
+ root_ns->dev = dev;
+ root_ns->table_type = table_type;
+
+ ns = &root_ns->ns;
+ fs_init_namespace(ns);
+ tree_init_node(&ns->node, 1, NULL);
+ tree_add_node(&ns->node, NULL);
+
+ return root_ns;
+}
+
+static int init_root_ns(struct mlx5_core_dev *dev)
+{
+ int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
+ flow_table_properties_nic_receive.
+ max_ft_level);
+
+ dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX);
+ if (IS_ERR_OR_NULL(dev->priv.root_ns))
+ goto cleanup;
+
+ if (init_root_tree(max_ft_level, &root_fs, &dev->priv.root_ns->ns.node))
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ mlx5_cleanup_fs(dev);
+ return -ENOMEM;
+}
+
+static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
+ struct mlx5_flow_root_namespace *root_ns)
+{
+ struct fs_node *prio;
+
+ if (!root_ns)
+ return;
+
+ if (!list_empty(&root_ns->ns.node.children)) {
+ prio = list_first_entry(&root_ns->ns.node.children,
+ struct fs_node,
+ list);
+ if (tree_remove_node(prio))
+ mlx5_core_warn(dev,
+ "Flow steering priority wasn't destroyed, refcount > 1\n");
+ }
+ if (tree_remove_node(&root_ns->ns.node))
+ mlx5_core_warn(dev,
+ "Flow steering namespace wasn't destroyed, refcount > 1\n");
+}
+
+static void cleanup_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
+ struct fs_prio *iter_prio;
+
+ if (!MLX5_CAP_GEN(dev, nic_flow_table))
+ return;
+
+ if (!root_ns)
+ return;
+
+ /* stage 1 */
+ fs_for_each_prio(iter_prio, &root_ns->ns) {
+ struct fs_node *node;
+ struct mlx5_flow_namespace *iter_ns;
+
+ fs_for_each_ns_or_ft(node, iter_prio) {
+ if (node->type == FS_TYPE_FLOW_TABLE)
+ continue;
+ fs_get_obj(iter_ns, node);
+ while (!list_empty(&iter_ns->node.children)) {
+ struct fs_prio *obj_iter_prio2;
+ struct fs_node *iter_prio2 =
+ list_first_entry(&iter_ns->node.children,
+ struct fs_node,
+ list);
+
+ fs_get_obj(obj_iter_prio2, iter_prio2);
+ if (tree_remove_node(iter_prio2)) {
+ mlx5_core_warn(dev,
+ "Priority %d wasn't destroyed, refcount > 1\n",
+ obj_iter_prio2->prio);
+ return;
+ }
+ }
+ }
+ }
+
+ /* stage 2 */
+ fs_for_each_prio(iter_prio, &root_ns->ns) {
+ while (!list_empty(&iter_prio->node.children)) {
+ struct fs_node *iter_ns =
+ list_first_entry(&iter_prio->node.children,
+ struct fs_node,
+ list);
+ if (tree_remove_node(iter_ns)) {
+ mlx5_core_warn(dev,
+ "Namespace wasn't destroyed, refcount > 1\n");
+ return;
+ }
+ }
+ }
+
+ /* stage 3 */
+ while (!list_empty(&root_ns->ns.node.children)) {
+ struct fs_prio *obj_prio_node;
+ struct fs_node *prio_node =
+ list_first_entry(&root_ns->ns.node.children,
+ struct fs_node,
+ list);
+
+ fs_get_obj(obj_prio_node, prio_node);
+ if (tree_remove_node(prio_node)) {
+ mlx5_core_warn(dev,
+ "Priority %d wasn't destroyed, refcount > 1\n",
+ obj_prio_node->prio);
+ return;
+ }
+ }
+
+ if (tree_remove_node(&root_ns->ns.node)) {
+ mlx5_core_warn(dev,
+ "root namespace wasn't destroyed, refcount > 1\n");
+ return;
+ }
+
+ dev->priv.root_ns = NULL;
+}
+
+void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+{
+ cleanup_root_ns(dev);
+ cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+}
+
+static int init_fdb_root_ns(struct mlx5_core_dev *dev)
+{
+ struct fs_prio *prio;
+
+ dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB);
+ if (!dev->priv.fdb_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1, 0);
+	if (IS_ERR(prio)) {
+		cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+		return PTR_ERR(prio);
+	}
+
+	return 0;
+}
+
+int mlx5_init_fs(struct mlx5_core_dev *dev)
+{
+ int err = 0;
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ err = init_root_ns(dev);
+ if (err)
+ return err;
+ }
+ if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+ err = init_fdb_root_ns(dev);
+ if (err)
+ cleanup_root_ns(dev);
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
new file mode 100644
index 000000000000..4ebb97fd5544
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_CORE_
+#define _MLX5_FS_CORE_
+
+#include <linux/mlx5/fs.h>
+
+enum fs_node_type {
+ FS_TYPE_NAMESPACE,
+ FS_TYPE_PRIO,
+ FS_TYPE_FLOW_TABLE,
+ FS_TYPE_FLOW_GROUP,
+ FS_TYPE_FLOW_ENTRY,
+ FS_TYPE_FLOW_DEST
+};
+
+enum fs_flow_table_type {
+ FS_FT_NIC_RX = 0x0,
+	FS_FT_FDB = 0x4,
+};
+
+enum fs_fte_status {
+ FS_FTE_STATUS_EXISTING = 1UL << 0,
+};
+
+struct fs_node {
+ struct list_head list;
+ struct list_head children;
+ enum fs_node_type type;
+ struct fs_node *parent;
+ struct fs_node *root;
+ /* lock the node for writing and traversing */
+ struct mutex lock;
+ atomic_t refcount;
+ void (*remove_func)(struct fs_node *);
+};
+
+struct mlx5_flow_rule {
+ struct fs_node node;
+ struct mlx5_flow_destination dest_attr;
+};
+
+/* Type of children is mlx5_flow_group */
+struct mlx5_flow_table {
+ struct fs_node node;
+ u32 id;
+ unsigned int max_fte;
+ unsigned int level;
+ enum fs_flow_table_type type;
+};
+
+/* Type of children is mlx5_flow_rule */
+struct fs_fte {
+ struct fs_node node;
+ u32 val[MLX5_ST_SZ_DW(fte_match_param)];
+ u32 dests_size;
+ u32 flow_tag;
+ u32 index;
+ u32 action;
+ enum fs_fte_status status;
+};
+
+/* Type of children is mlx5_flow_table/namespace */
+struct fs_prio {
+ struct fs_node node;
+ unsigned int max_ft;
+ unsigned int start_level;
+ unsigned int prio;
+ unsigned int num_ft;
+};
+
+/* Type of children is fs_prio */
+struct mlx5_flow_namespace {
+ /* parent == NULL => root ns */
+ struct fs_node node;
+};
+
+struct mlx5_flow_group_mask {
+ u8 match_criteria_enable;
+ u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+/* Type of children is fs_fte */
+struct mlx5_flow_group {
+ struct fs_node node;
+ struct mlx5_flow_group_mask mask;
+ u32 start_index;
+ u32 max_ftes;
+ u32 num_ftes;
+ u32 id;
+};
+
+struct mlx5_flow_root_namespace {
+ struct mlx5_flow_namespace ns;
+ enum fs_flow_table_type table_type;
+ struct mlx5_core_dev *dev;
+};
+
+int mlx5_init_fs(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+
+#define fs_get_obj(v, _node) { v = container_of((_node), typeof(*v), node); }
+
+#define fs_list_for_each_entry(pos, root) \
+ list_for_each_entry(pos, root, node.list)
+
+#define fs_for_each_ns_or_ft_reverse(pos, prio) \
+ list_for_each_entry_reverse(pos, &(prio)->node.children, list)
+
+#define fs_for_each_ns_or_ft(pos, prio) \
+ list_for_each_entry(pos, (&(prio)->node.children), list)
+
+#define fs_for_each_prio(pos, ns) \
+ fs_list_for_each_entry(pos, &(ns)->node.children)
+
+#define fs_for_each_fg(pos, ft) \
+ fs_list_for_each_entry(pos, &(ft)->node.children)
+
+#define fs_for_each_fte(pos, fg) \
+ fs_list_for_each_entry(pos, &(fg)->node.children)
+
+#define fs_for_each_dst(pos, fte) \
+ fs_list_for_each_entry(pos, &(fte)->node.children)
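+
+/* Illustrative use of the iterators above (caller must hold the
+ * appropriate node locks):
+ *
+ *	struct fs_prio *prio;
+ *
+ *	fs_for_each_prio(prio, ns)
+ *		pr_debug("prio %u: %u of %u tables used\n",
+ *			 prio->prio, prio->num_ft, prio->max_ft);
+ */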
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 1c9f9a54a873..aa1ab4702385 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -173,7 +173,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (MLX5_CAP_GEN(dev, vport_group_manager)) {
+ if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
HCA_CAP_OPMOD_GET_CUR);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c6de3240f76f..789882b7b711 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -49,6 +49,7 @@
#include <linux/delay.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
+#include "fs_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
@@ -1055,6 +1056,11 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_srq_table(dev);
mlx5_init_mr_table(dev);
+ err = mlx5_init_fs(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init flow steering\n");
+ goto err_fs;
+ }
#ifdef CONFIG_MLX5_CORE_EN
err = mlx5_eswitch_init(dev);
if (err) {
@@ -1093,6 +1099,8 @@ err_sriov:
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
err_reg_dev:
+ mlx5_cleanup_fs(dev);
+err_fs:
mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -1165,6 +1173,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
+ mlx5_cleanup_fs(dev);
mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index bee7da822dfe..ea6a137fd76c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -65,6 +65,9 @@ do { \
(__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define mlx5_core_info(__dev, format, ...) \
+ dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
+
enum {
MLX5_CMD_DATA, /* print command payload only */
MLX5_CMD_TIME, /* print command execution time */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ac098b6b97bf..2fd7019f69db 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -502,6 +502,8 @@ struct mlx5_priv {
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
unsigned long pci_dev_data;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_root_namespace *fdb_root_ns;
};
enum mlx5_device_state {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
new file mode 100644
index 000000000000..bc7ad019afde
--- /dev/null
+++ b/include/linux/mlx5/fs.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_
+#define _MLX5_FS_
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+
+enum mlx5_flow_namespace_type {
+ MLX5_FLOW_NAMESPACE_KERNEL,
+ MLX5_FLOW_NAMESPACE_FDB,
+};
+
+struct mlx5_flow_table;
+struct mlx5_flow_group;
+struct mlx5_flow_rule;
+struct mlx5_flow_namespace;
+
+struct mlx5_flow_destination {
+ enum mlx5_flow_destination_type type;
+ union {
+ u32 tir_num;
+ struct mlx5_flow_table *ft;
+ u32 vport_num;
+ };
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_table *
+mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries);
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
+
+/* The in buffer is a create_flow_group_in inbox; the caller must set:
+ * start_flow_index
+ * end_flow_index
+ * match_criteria_enable
+ * match_criteria
+ */
+struct mlx5_flow_group *
+mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+
+/* Single destination per rule.
+ * Group ID is implied by the match criteria.
+ */
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ u32 *match_criteria,
+ u32 *match_value,
+ u32 action,
+ u32 flow_tag,
+ struct mlx5_flow_destination *dest);
+void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
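+
+/* A minimal sketch of the intended call flow (sizes and the group inbox
+ * setup are illustrative; error handling omitted):
+ *
+ *	struct mlx5_flow_namespace *ns =
+ *		mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
+ *	struct mlx5_flow_table *ft = mlx5_create_flow_table(ns, 0, 1024);
+ *	struct mlx5_flow_group *fg = mlx5_create_flow_group(ft, in);
+ *
+ *	... add rules with mlx5_add_flow_rule(), delete them, then ...
+ *
+ *	mlx5_destroy_flow_group(fg);
+ *	mlx5_destroy_flow_table(ft);
+ */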
+
+#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f5d94495758a..131a2737cfa3 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -256,25 +256,27 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
- u8 reserved_0[0x1f];
+ u8 reserved_0[0x2];
+ u8 flow_modify_en[0x1];
+ u8 reserved_1[0x1c];
- u8 reserved_1[0x2];
+ u8 reserved_2[0x2];
u8 log_max_ft_size[0x6];
- u8 reserved_2[0x10];
+ u8 reserved_3[0x10];
u8 max_ft_level[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_4[0x20];
- u8 reserved_4[0x18];
+ u8 reserved_5[0x18];
u8 log_max_ft_num[0x8];
- u8 reserved_5[0x18];
+ u8 reserved_6[0x18];
u8 log_max_destination[0x8];
- u8 reserved_6[0x18];
+ u8 reserved_7[0x18];
u8 log_max_flow[0x8];
- u8 reserved_7[0x40];
+ u8 reserved_8[0x40];
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
@@ -2843,6 +2845,13 @@ struct mlx5_ifc_set_hca_cap_in_bits {
union mlx5_ifc_hca_cap_union_bits capability;
};
+enum {
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
+};
+
struct mlx5_ifc_set_fte_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -2867,11 +2876,14 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_4[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x40];
+ u8 reserved_5[0x18];
+ u8 modify_enable_mask[0x8];
+
+ u8 reserved_6[0x20];
u8 flow_index[0x20];
- u8 reserved_6[0xe0];
+ u8 reserved_7[0xe0];
struct mlx5_ifc_flow_context_bits flow_context;
};