27 files changed, 1563 insertions, 149 deletions
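The series below wires the TC police action into mlx5 hardware flow meters: a new ASO (Advanced Steering Operation) send queue programs flow-meter objects, a per-eswitch meter pool hands them out, and a "post meter" table acts on the packet color the meter writes. As a minimal sketch (not part of the patch, and assuming only the kernel-internal declarations from en/tc/meter.h introduced below; example_attach_meter is a hypothetical name), a consumer of the new meter pool would look roughly like this:

#include "en/tc/meter.h"

/* Hypothetical consumer of the meter pool added by this series. */
static int example_attach_meter(struct mlx5_core_dev *mdev)
{
	struct mlx5e_flow_meter_params params = {
		.mode  = MLX5_RATE_LIMIT_BPS,
		.index = 1,		/* keyed by the police action's hw_index */
		.rate  = 80000000,	/* BPS mode carries bits/s (bytes/s << 3) */
		.burst = 65536,
	};
	struct mlx5e_flow_meter_handle *meter;

	/* Looks up (or allocates) the meter for this index, takes a
	 * reference, and reprograms rate/burst via an ASO WQE if they
	 * changed.  Fails with -EOPNOTSUPP unless the device is in
	 * switchdev mode.
	 */
	meter = mlx5e_tc_meter_get(mdev, &params);
	if (IS_ERR(meter))
		return PTR_ERR(meter);

	/* ... point the flow's destination at the post-meter table,
	 * as mlx5e_tc_add_flow_meter() in en_tc.c does ...
	 */

	mlx5e_tc_meter_put(meter);	/* drop the reference again */
	return 0;
}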
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9ea867a45764..5dadc2fce7ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
 		lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
 		diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
-		fw_reset.o qos.o lib/tout.o
+		fw_reset.o qos.o lib/tout.o lib/aso.o
 
 #
 # Netdev basic
@@ -45,7 +45,8 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
 					esw/indir_table.o en/tc_tun_encap.o \
 					en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
 					en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
-					en/tc/post_act.o en/tc/int_port.o
+					en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
+					en/tc/post_meter.o
 
 mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
@@ -53,7 +54,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
 					en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
 					en/tc/act/mirred.o en/tc/act/mirred_nic.o \
 					en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
-					en/tc/act/redirect_ingress.o
+					en/tc/act/redirect_ingress.o en/tc/act/police.o
 
 ifneq ($(CONFIG_MLX5_TC_CT),)
 	mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 2755c25ba324..305fde62a78d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -30,7 +30,7 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
 	NULL, /* FLOW_ACTION_WAKE, */
 	NULL, /* FLOW_ACTION_QUEUE, */
 	&mlx5e_tc_act_sample,
-	NULL, /* FLOW_ACTION_POLICE, */
+	&mlx5e_tc_act_police,
 	&mlx5e_tc_act_ct,
 	NULL, /* FLOW_ACTION_CT_METADATA, */
 	&mlx5e_tc_act_mpls_push,
@@ -106,8 +106,8 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
 {
 	memset(parse_state, 0, sizeof(*parse_state));
 	parse_state->flow = flow;
-	parse_state->num_actions = flow_action->num_entries;
 	parse_state->extack = extack;
+	parse_state->flow_action = flow_action;
 }
 
 void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index f34714c5ddd4..095ff8ef80e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -13,7 +13,7 @@ struct mlx5_flow_attr;
 
 struct mlx5e_tc_act_parse_state {
-	unsigned int num_actions;
+	struct flow_action *flow_action;
 	struct mlx5e_tc_flow *flow;
 	struct netlink_ext_ack *extack;
 	u32 actions;
@@ -76,6 +76,7 @@ extern struct mlx5e_tc_act mlx5e_tc_act_ct;
 extern struct mlx5e_tc_act mlx5e_tc_act_sample;
 extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
 extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+extern struct mlx5e_tc_act mlx5e_tc_act_police;
 
 struct mlx5e_tc_act *
 mlx5e_tc_act_get(enum flow_action_id act_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
new file mode 100644
index 000000000000..ab32fe6a2e57
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
+			  const struct flow_action_entry *act,
+			  int act_index,
+			  struct mlx5_flow_attr *attr)
+{
+	if (mlx5e_policer_validate(parse_state->flow_action, act,
+				   parse_state->extack))
+		return false;
+
+	return !!mlx5e_get_flow_meters(parse_state->flow->priv->mdev);
+}
+
+static int
+tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
+		    const struct flow_action_entry *act,
+		    struct mlx5e_priv *priv,
+		    struct mlx5_flow_attr *attr)
+{
+	struct mlx5e_flow_meter_params *params;
+
+	params = &attr->meter_attr.params;
+	params->index = act->hw_index;
+	if (act->police.rate_bytes_ps) {
+		params->mode = MLX5_RATE_LIMIT_BPS;
+		/* change rate to bits per second */
+		params->rate = act->police.rate_bytes_ps << 3;
+		params->burst = act->police.burst;
+	} else if (act->police.rate_pkt_ps) {
+		params->mode = MLX5_RATE_LIMIT_PPS;
+		params->rate = act->police.rate_pkt_ps;
+		params->burst = act->police.burst_pkt;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
+	attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+
+	return 0;
+}
+
+static bool
+tc_act_is_multi_table_act_police(struct mlx5e_priv *priv,
+				 const struct flow_action_entry *act,
+				 struct mlx5_flow_attr *attr)
+{
+	return true;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_police = {
+	.can_offload = tc_act_can_offload_police,
+	.parse_action = tc_act_parse_police,
+	.is_multi_table_act = tc_act_is_multi_table_act_police,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index a7d9eab19e4a..53b270f652b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -12,7 +12,7 @@ tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
 {
 	struct netlink_ext_ack *extack = parse_state->extack;
 
-	if (parse_state->num_actions != 1) {
+	if (parse_state->flow_action->num_entries != 1) {
 		NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
 		return false;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
new file mode 100644
index 000000000000..28962b2134c7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
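+/* Note (illustrative, not part of the original patch): the units here are
+ * pre-normalized by tc_act_parse_police() above.  In BPS mode the police
+ * action's rate_bytes_ps is shifted left by 3, so params->rate reaches this
+ * file in bits per second; in PPS mode rate/burst stay in packets, and
+ * mlx5e_tc_meter_modify() below rescales them (rate <<= 10, burst <<= 7)
+ * because the hardware accounts each packet as 128 bytes (1024 bits).
+ */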
+
+#include "lib/aso.h"
+#include "en/tc/post_act.h"
+#include "meter.h"
+#include "en/tc_priv.h"
+#include "post_meter.h"
+
+#define MLX5_START_COLOR_SHIFT 28
+#define MLX5_METER_MODE_SHIFT 24
+#define MLX5_CBS_EXP_SHIFT 24
+#define MLX5_CBS_MAN_SHIFT 16
+#define MLX5_CIR_EXP_SHIFT 8
+
+/* cir = 8*(10^9)*cir_mantissa/(2^cir_exponent) bits/s */
+#define MLX5_CONST_CIR 8000000000ULL
+#define MLX5_CALC_CIR(m, e) ((MLX5_CONST_CIR * (m)) >> (e))
+#define MLX5_MAX_CIR ((MLX5_CONST_CIR * 0x100) - 1)
+
+/* cbs = cbs_mantissa*2^cbs_exponent */
+#define MLX5_CALC_CBS(m, e) ((m) << (e))
+#define MLX5_MAX_CBS ((0x100ULL << 0x1F) - 1)
+#define MLX5_MAX_HW_CBS 0x7FFFFFFF
+
+struct mlx5e_flow_meter_aso_obj {
+	struct list_head entry;
+	int base_id;
+	int total_meters;
+
+	unsigned long meters_map[0]; /* must be at the end of this struct */
+};
+
+struct mlx5e_flow_meters {
+	enum mlx5_flow_namespace_type ns_type;
+	struct mlx5_aso *aso;
+	struct mutex aso_lock; /* Protects aso operations */
+	int log_granularity;
+	u32 pdn;
+
+	DECLARE_HASHTABLE(hashtbl, 8);
+
+	struct mutex sync_lock; /* protect flow meter operations */
+	struct list_head partial_list;
+	struct list_head full_list;
+
+	struct mlx5_core_dev *mdev;
+	struct mlx5e_post_act *post_act;
+
+	struct mlx5e_post_meter_priv *post_meter;
+};
+
+static void
+mlx5e_flow_meter_cir_calc(u64 cir, u8 *man, u8 *exp)
+{
+	s64 _cir, _delta, delta = S64_MAX;
+	u8 e, _man = 0, _exp = 0;
+	u64 m;
+
+	for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+		m = cir << e;
+		if ((s64)m < 0) /* overflow */
+			break;
+		m /= MLX5_CONST_CIR;
+		if (m > 0xFF) /* man width 8 bit */
+			continue;
+		_cir = MLX5_CALC_CIR(m, e);
+		_delta = cir - _cir;
+		if (_delta < delta) {
+			_man = m;
+			_exp = e;
+			if (!_delta)
+				goto found;
+			delta = _delta;
+		}
+	}
+
+found:
+	*man = _man;
+	*exp = _exp;
+}
+
+static void
+mlx5e_flow_meter_cbs_calc(u64 cbs, u8 *man, u8 *exp)
+{
+	s64 _cbs, _delta, delta = S64_MAX;
+	u8 e, _man = 0, _exp = 0;
+	u64 m;
+
+	for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+		m = cbs >> e;
+		if (m > 0xFF) /* man width 8 bit */
+			continue;
+		_cbs = MLX5_CALC_CBS(m, e);
+		_delta = cbs - _cbs;
+		if (_delta < delta) {
+			_man = m;
+			_exp = e;
+			if (!_delta)
+				goto found;
+			delta = _delta;
+		}
+	}
+
+found:
+	*man = _man;
+	*exp = _exp;
+}
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+		      struct mlx5e_flow_meter_handle *meter,
+		      struct mlx5e_flow_meter_params *meter_params)
+{
+	struct mlx5_wqe_aso_ctrl_seg *aso_ctrl;
+	struct mlx5_wqe_aso_data_seg *aso_data;
+	struct mlx5e_flow_meters *flow_meters;
+	u8 cir_man, cir_exp, cbs_man, cbs_exp;
+	struct mlx5_aso_wqe *aso_wqe;
+	struct mlx5_aso *aso;
+	u64 rate, burst;
+	u8 ds_cnt;
+	int err;
+
+	rate = meter_params->rate;
+	burst = meter_params->burst;
+
+	/* HW treats each packet as 128 bytes in PPS mode */
+	if (meter_params->mode == MLX5_RATE_LIMIT_PPS) {
+		rate <<= 10;
+		burst <<= 7;
+	}
+
+	if (!rate || rate > MLX5_MAX_CIR || !burst || burst > MLX5_MAX_CBS)
+		return -EINVAL;
+
+	/* HW has limitation of total 31 bits for cbs */
+	if (burst > MLX5_MAX_HW_CBS) {
+		mlx5_core_warn(mdev,
+			       "burst(%lld) is too large, use HW allowed value(%d)\n",
+			       burst, MLX5_MAX_HW_CBS);
+		burst = MLX5_MAX_HW_CBS;
+	}
+
+	mlx5_core_dbg(mdev, "meter mode=%d\n", meter_params->mode);
+	mlx5e_flow_meter_cir_calc(rate, &cir_man, &cir_exp);
+	mlx5_core_dbg(mdev, "rate=%lld, cir=%lld, exp=%d, man=%d\n",
+		      rate, MLX5_CALC_CIR(cir_man, cir_exp), cir_exp, cir_man);
+	mlx5e_flow_meter_cbs_calc(burst, &cbs_man, &cbs_exp);
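+	/* Worked example (illustrative, not part of the original patch):
+	 * for a CIR of 1 Gbit/s the search above settles on mantissa = 1,
+	 * exponent = 3, since 8 * 10^9 * 1 / 2^3 = 10^9 bits/s exactly;
+	 * CBS is encoded the same way, as cbs_mantissa * 2^cbs_exponent.
+	 */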
+	mlx5_core_dbg(mdev, "burst=%lld, cbs=%lld, exp=%d, man=%d\n",
+		      burst, MLX5_CALC_CBS((u64)cbs_man, cbs_exp), cbs_exp, cbs_man);
+
+	if (!cir_man || !cbs_man)
+		return -EINVAL;
+
+	flow_meters = meter->flow_meters;
+	aso = flow_meters->aso;
+
+	mutex_lock(&flow_meters->aso_lock);
+	aso_wqe = mlx5_aso_get_wqe(aso);
+	ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_DS);
+	mlx5_aso_build_wqe(aso, ds_cnt, aso_wqe, meter->obj_id,
+			   MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
+
+	aso_ctrl = &aso_wqe->aso_ctrl;
+	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+	aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
+	aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
+					  MLX5_ASO_ALWAYS_TRUE << 4;
+	aso_ctrl->data_offset_condition_operand = MLX5_ASO_LOGICAL_OR << 6;
+	aso_ctrl->data_mask = cpu_to_be64(0x80FFFFFFULL << (meter->idx ? 0 : 32));
+
+	aso_data = (struct mlx5_wqe_aso_data_seg *)(aso_wqe + 1);
+	memset(aso_data, 0, sizeof(*aso_data));
+	aso_data->bytewise_data[meter->idx * 8] = cpu_to_be32((0x1 << 31) | /* valid */
+					(MLX5_FLOW_METER_COLOR_GREEN << MLX5_START_COLOR_SHIFT));
+	if (meter_params->mode == MLX5_RATE_LIMIT_PPS)
+		aso_data->bytewise_data[meter->idx * 8] |=
+			cpu_to_be32(MLX5_FLOW_METER_MODE_NUM_PACKETS << MLX5_METER_MODE_SHIFT);
+	else
+		aso_data->bytewise_data[meter->idx * 8] |=
+			cpu_to_be32(MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH << MLX5_METER_MODE_SHIFT);
+
+	aso_data->bytewise_data[meter->idx * 8 + 2] = cpu_to_be32((cbs_exp << MLX5_CBS_EXP_SHIFT) |
+								  (cbs_man << MLX5_CBS_MAN_SHIFT) |
+								  (cir_exp << MLX5_CIR_EXP_SHIFT) |
+								  cir_man);
+
+	mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
+
+	/* With newer FW, the wait for the first ASO WQE is more than 2us, put the wait 10ms. */
+	err = mlx5_aso_poll_cq(aso, true, 10);
+	mutex_unlock(&flow_meters->aso_lock);
+
+	return err;
+}
+
+static int
+mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, int *obj_id)
+{
+	u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+	struct mlx5_core_dev *mdev = flow_meters->mdev;
+	void *obj;
+	int err;
+
+	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+		 MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+	MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+
+	obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
+	MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
+
+	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+	if (!err) {
+		*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+		mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) created\n", *obj_id);
+	}
+
+	return err;
+}
+
+static void
+mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
+{
+	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+		 MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+	mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) destroyed\n", obj_id);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
+{
+	struct mlx5_core_dev *mdev = flow_meters->mdev;
+	struct mlx5e_flow_meter_aso_obj *meters_obj;
+	struct mlx5e_flow_meter_handle *meter;
+	int err, pos, total;
+	int id;
+
+	meter = kzalloc(sizeof(*meter), GFP_KERNEL);
+	if (!meter)
+		return ERR_PTR(-ENOMEM);
+
+	meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
+					      struct mlx5e_flow_meter_aso_obj,
+					      entry);
+	/* 2 meters in one object */
+	total = 1 << (flow_meters->log_granularity + 1);
+	if (!meters_obj) {
+		err = mlx5e_flow_meter_create_aso_obj(flow_meters, &id);
+		if (err) {
+			mlx5_core_err(mdev, "Failed to create flow meter ASO object\n");
+			goto err_create;
+		}
+
+		meters_obj = kzalloc(sizeof(*meters_obj) + BITS_TO_BYTES(total),
+				     GFP_KERNEL);
+		if (!meters_obj) {
+			err = -ENOMEM;
+			goto err_mem;
+		}
+
+		meters_obj->base_id = id;
+		meters_obj->total_meters = total;
+		list_add(&meters_obj->entry, &flow_meters->partial_list);
+		pos = 0;
+	} else {
+		pos = find_first_zero_bit(meters_obj->meters_map, total);
+		if (bitmap_weight(meters_obj->meters_map, total) == total - 1) {
+			list_del(&meters_obj->entry);
+			list_add(&meters_obj->entry, &flow_meters->full_list);
+		}
+	}
+
+	bitmap_set(meters_obj->meters_map, pos, 1);
+	meter->flow_meters = flow_meters;
+	meter->meters_obj = meters_obj;
+	meter->obj_id = meters_obj->base_id + pos / 2;
+	meter->idx = pos % 2;
+
+	mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
+		      meter->obj_id, meter->idx);
+
+	return meter;
+
+err_mem:
+	mlx5e_flow_meter_destroy_aso_obj(mdev, id);
+err_create:
+	kfree(meter);
+	return ERR_PTR(err);
+}
+
+static void
+__mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
+{
+	struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+	struct mlx5_core_dev *mdev = flow_meters->mdev;
+	struct mlx5e_flow_meter_aso_obj *meters_obj;
+	int n, pos;
+
+	meters_obj = meter->meters_obj;
+	pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
+	bitmap_clear(meters_obj->meters_map, pos, 1);
+	n = bitmap_weight(meters_obj->meters_map, meters_obj->total_meters);
+	if (n == 0) {
+		list_del(&meters_obj->entry);
+		mlx5e_flow_meter_destroy_aso_obj(mdev, meters_obj->base_id);
+		kfree(meters_obj);
+	} else if (n == meters_obj->total_meters - 1) {
+		list_del(&meters_obj->entry);
+		list_add(&meters_obj->entry, &flow_meters->partial_list);
+	}
+
+	mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
+		      meter->obj_id, meter->idx);
+	kfree(meter);
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+	struct mlx5e_flow_meters *flow_meters;
+	struct mlx5e_flow_meter_handle *meter;
+	int err;
+
+	flow_meters = mlx5e_get_flow_meters(mdev);
+	if (!flow_meters)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	mutex_lock(&flow_meters->sync_lock);
+	hash_for_each_possible(flow_meters->hashtbl, meter, hlist, params->index)
+		if (meter->params.index == params->index)
+			goto add_ref;
+
+	meter = __mlx5e_flow_meter_alloc(flow_meters);
+	if (IS_ERR(meter)) {
+		err = PTR_ERR(meter);
+		goto err_alloc;
+	}
+
+	hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
+	meter->params.index = params->index;
+
+add_ref:
+	meter->refcnt++;
+
+	if (meter->params.mode != params->mode || meter->params.rate != params->rate ||
+	    meter->params.burst != params->burst) {
+		err = mlx5e_tc_meter_modify(mdev, meter, params);
+		if (err)
+			goto err_update;
+
+		meter->params.mode = params->mode;
+		meter->params.rate = params->rate;
+		meter->params.burst = params->burst;
+	}
+
+	mutex_unlock(&flow_meters->sync_lock);
+	return meter;
+
+err_update:
+	if (--meter->refcnt == 0) {
+		hash_del(&meter->hlist);
+		__mlx5e_flow_meter_free(meter);
+	}
+err_alloc:
+	mutex_unlock(&flow_meters->sync_lock);
+	return ERR_PTR(err);
+}
+
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+	struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+
+	mutex_lock(&flow_meters->sync_lock);
+	if (--meter->refcnt == 0) {
+		hash_del(&meter->hlist);
+		__mlx5e_flow_meter_free(meter);
+	}
+	mutex_unlock(&flow_meters->sync_lock);
+}
+
+struct mlx5_flow_table *
+mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters)
+{
+	return mlx5e_post_meter_get_ft(flow_meters->post_meter);
+}
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+		       enum mlx5_flow_namespace_type ns_type,
+		       struct mlx5e_post_act *post_act)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_flow_meters *flow_meters;
+	int err;
+
+	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+	      MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (IS_ERR_OR_NULL(post_act)) {
+		netdev_dbg(priv->netdev,
+			   "flow meter offload is not supported, post action is missing\n");
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	flow_meters = kzalloc(sizeof(*flow_meters), GFP_KERNEL);
+	if (!flow_meters)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5_core_alloc_pd(mdev, &flow_meters->pdn);
+	if (err) {
+		mlx5_core_err(mdev, "Failed to alloc pd for flow meter aso, err=%d\n", err);
+		goto err_out;
+	}
+
+	flow_meters->aso = mlx5_aso_create(mdev, flow_meters->pdn);
+	if (IS_ERR(flow_meters->aso)) {
+		mlx5_core_warn(mdev, "Failed to create aso wqe for flow meter\n");
+		err = PTR_ERR(flow_meters->aso);
+		goto err_sq;
+	}
+
+	flow_meters->post_meter = mlx5e_post_meter_init(priv, ns_type, post_act);
+	if (IS_ERR(flow_meters->post_meter)) {
+		err = PTR_ERR(flow_meters->post_meter);
+		goto err_post_meter;
+	}
+
+	mutex_init(&flow_meters->sync_lock);
+	INIT_LIST_HEAD(&flow_meters->partial_list);
+	INIT_LIST_HEAD(&flow_meters->full_list);
+
+	flow_meters->ns_type = ns_type;
+	flow_meters->mdev = mdev;
+	flow_meters->post_act = post_act;
+	mutex_init(&flow_meters->aso_lock);
+	flow_meters->log_granularity = min_t(int, 6,
+					     MLX5_CAP_QOS(mdev, log_meter_aso_max_alloc));
+
+	return flow_meters;
+
+err_post_meter:
+	mlx5_aso_destroy(flow_meters->aso);
+err_sq:
+	mlx5_core_dealloc_pd(mdev, flow_meters->pdn);
+err_out:
+	kfree(flow_meters);
+	return ERR_PTR(err);
+}
+
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters)
+{
+	if (IS_ERR_OR_NULL(flow_meters))
+		return;
+
+	mlx5e_post_meter_cleanup(flow_meters->post_meter);
+	mlx5_aso_destroy(flow_meters->aso);
+	mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn);
+
+	kfree(flow_meters);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
new file mode 100644
index 000000000000..78885db5dc7d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/ + +#ifndef __MLX5_EN_FLOW_METER_H__ +#define __MLX5_EN_FLOW_METER_H__ + +struct mlx5e_flow_meter_aso_obj; +struct mlx5e_flow_meters; +struct mlx5_flow_attr; + +enum mlx5e_flow_meter_mode { + MLX5_RATE_LIMIT_BPS, + MLX5_RATE_LIMIT_PPS, +}; + +struct mlx5e_flow_meter_params { + enum mlx5e_flow_meter_mode mode; + /* police action index */ + u32 index; + u64 rate; + u64 burst; +}; + +struct mlx5e_flow_meter_handle { + struct mlx5e_flow_meters *flow_meters; + struct mlx5e_flow_meter_aso_obj *meters_obj; + u32 obj_id; + u8 idx; + + int refcnt; + struct hlist_node hlist; + struct mlx5e_flow_meter_params params; +}; + +struct mlx5e_meter_attr { + struct mlx5e_flow_meter_params params; + struct mlx5e_flow_meter_handle *meter; +}; + +int +mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev, + struct mlx5e_flow_meter_handle *meter, + struct mlx5e_flow_meter_params *meter_params); + +struct mlx5e_flow_meter_handle * +mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params); +void +mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter); + +struct mlx5_flow_table * +mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters); + +struct mlx5e_flow_meters * +mlx5e_flow_meters_init(struct mlx5e_priv *priv, + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_action); +void +mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters); + +#endif /* __MLX5_EN_FLOW_METER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c index dea137dd744b..2093cc2b0d48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -22,9 +22,9 @@ struct mlx5e_post_act_handle { u32 id; }; -#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen) -#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0) -#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX +#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG) +#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG) +#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK struct mlx5e_post_act * mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c new file mode 100644 index 000000000000..efa20356764e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
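+/* Overview (illustrative, not part of the original patch): this file builds
+ * the two-entry "post meter" table that metered flows are forwarded to.
+ * The flow meter ASO writes the packet color into metadata register C5;
+ * the two rules created below match on that color, drop RED packets and
+ * forward GREEN packets to the post-action table, where the remainder of
+ * the TC action list resumes.
+ */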
+ +#include "en/tc_priv.h" +#include "post_meter.h" +#include "en/tc/post_act.h" + +#define MLX5_PACKET_COLOR_BITS MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG) +#define MLX5_PACKET_COLOR_MASK MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG) + +struct mlx5e_post_meter_priv { + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + struct mlx5_flow_handle *fwd_green_rule; + struct mlx5_flow_handle *drop_red_rule; +}; + +struct mlx5_flow_table * +mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter) +{ + return post_meter->ft; +} + +static int +mlx5e_post_meter_table_create(struct mlx5e_priv *priv, + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_meter_priv *post_meter) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *root_ns; + + root_ns = mlx5_get_flow_namespace(priv->mdev, ns_type); + if (!root_ns) { + mlx5_core_warn(priv->mdev, "Failed to get namespace for flow meter\n"); + return -EOPNOTSUPP; + } + + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft_attr.prio = FDB_SLOW_PATH; + ft_attr.max_fte = 2; + ft_attr.level = 1; + + post_meter->ft = mlx5_create_flow_table(root_ns, &ft_attr); + if (IS_ERR(post_meter->ft)) { + mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n"); + return PTR_ERR(post_meter->ft); + } + + return 0; +} + +static int +mlx5e_post_meter_fg_create(struct mlx5e_priv *priv, + struct mlx5e_post_meter_priv *post_meter) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + void *misc2, *match_criteria; + u32 *flow_group_in; + int err = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS_2); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, + match_criteria); + misc2 = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_5, MLX5_PACKET_COLOR_MASK); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + + post_meter->fg = mlx5_create_flow_group(post_meter->ft, flow_group_in); + if (IS_ERR(post_meter->fg)) { + mlx5_core_warn(priv->mdev, "Failed to create post_meter flow group\n"); + err = PTR_ERR(post_meter->fg); + } + + kvfree(flow_group_in); + return err; +} + +static int +mlx5e_post_meter_rules_create(struct mlx5e_priv *priv, + struct mlx5e_post_meter_priv *post_meter, + struct mlx5e_post_act *post_act) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG, + MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + + rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, NULL, 0); + if (IS_ERR(rule)) { + mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n"); + err = PTR_ERR(rule); + goto err_red; + } + post_meter->drop_red_rule = rule; + + mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG, + MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = mlx5e_tc_post_act_get_ft(post_act); + + rule = mlx5_add_flow_rules(post_meter->ft, 
spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n"); + err = PTR_ERR(rule); + goto err_green; + } + post_meter->fwd_green_rule = rule; + + kvfree(spec); + return 0; + +err_green: + mlx5_del_flow_rules(post_meter->drop_red_rule); +err_red: + kvfree(spec); + return err; +} + +static void +mlx5e_post_meter_rules_destroy(struct mlx5e_post_meter_priv *post_meter) +{ + mlx5_del_flow_rules(post_meter->drop_red_rule); + mlx5_del_flow_rules(post_meter->fwd_green_rule); +} + +static void +mlx5e_post_meter_fg_destroy(struct mlx5e_post_meter_priv *post_meter) +{ + mlx5_destroy_flow_group(post_meter->fg); +} + +static void +mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter) +{ + mlx5_destroy_flow_table(post_meter->ft); +} + +struct mlx5e_post_meter_priv * +mlx5e_post_meter_init(struct mlx5e_priv *priv, + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act) +{ + struct mlx5e_post_meter_priv *post_meter; + int err; + + post_meter = kzalloc(sizeof(*post_meter), GFP_KERNEL); + if (!post_meter) + return ERR_PTR(-ENOMEM); + + err = mlx5e_post_meter_table_create(priv, ns_type, post_meter); + if (err) + goto err_ft; + + err = mlx5e_post_meter_fg_create(priv, post_meter); + if (err) + goto err_fg; + + err = mlx5e_post_meter_rules_create(priv, post_meter, post_act); + if (err) + goto err_rules; + + return post_meter; + +err_rules: + mlx5e_post_meter_fg_destroy(post_meter); +err_fg: + mlx5e_post_meter_table_destroy(post_meter); +err_ft: + kfree(post_meter); + return ERR_PTR(err); +} + +void +mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter) +{ + mlx5e_post_meter_rules_destroy(post_meter); + mlx5e_post_meter_fg_destroy(post_meter); + mlx5e_post_meter_table_destroy(post_meter); + kfree(post_meter); +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h new file mode 100644 index 000000000000..c74f3cbd810d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef __MLX5_EN_POST_METER_H__ +#define __MLX5_EN_POST_METER_H__ + +#define packet_color_to_reg { \ + .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5, \ + .moffset = 0, \ + .mlen = 8, \ + .soffset = MLX5_BYTE_OFF(fte_match_param, \ + misc_parameters_2.metadata_reg_c_5), \ +} + +struct mlx5e_post_meter_priv; + +struct mlx5_flow_table * +mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter); + +struct mlx5e_post_meter_priv * +mlx5e_post_meter_init(struct mlx5e_priv *priv, + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act); +void +mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter); + +#endif /* __MLX5_EN_POST_METER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 25f51f80a9b4..af959fadbecf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -36,8 +36,8 @@ #define MLX5_CT_STATE_RELATED_BIT BIT(5) #define MLX5_CT_STATE_INVALID_BIT BIT(6) -#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen) -#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0) +#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG) +#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG) /* Statically allocate modify actions for * ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 00a3ba862afb..5bbd6b92840f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -62,10 +62,11 @@ struct mlx5_ct_attr { misc_parameters_2.metadata_reg_c_4),\ } +/* 8 LSB of metadata C5 are reserved for packet color */ #define fteid_to_reg_ct {\ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\ - .moffset = 0,\ - .mlen = 32,\ + .moffset = 8,\ + .mlen = 24,\ .soffset = MLX5_BYTE_OFF(fte_match_param,\ misc_parameters_2.metadata_reg_c_5),\ } @@ -84,10 +85,8 @@ struct mlx5_ct_attr { .mlen = ESW_ZONE_ID_BITS,\ } -#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen) -#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset) -#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen) -#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0) +#define MLX5_CT_ZONE_BITS MLX5_REG_MAPPING_MBITS(ZONE_TO_REG) +#define MLX5_CT_ZONE_MASK MLX5_REG_MAPPING_MASK(ZONE_TO_REG) #if IS_ENABLED(CONFIG_MLX5_TC_CT) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 3b74a6fd5c43..d2bdfd6872bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -203,7 +203,13 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow); struct mlx5e_tc_int_port_priv * mlx5e_get_int_port_priv(struct mlx5e_priv *priv); +struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev); + void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec); void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec); +int mlx5e_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack); + #endif /* __MLX5_EN_TC_PRIV_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h deleted file mode 100644 index e4eeb2ba21c7..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ -/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ - -#ifndef __MLX5_IPSEC_STEERING_H__ -#define __MLX5_IPSEC_STEERING_H__ - -#include "en.h" -#include "ipsec.h" -#include "ipsec_offload.h" -#include "en/fs.h" - -void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); -int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); -int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 ipsec_obj_id, - struct mlx5e_ipsec_rule *ipsec_rule); -void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - struct mlx5e_ipsec_rule *ipsec_rule); -#endif /* __MLX5_IPSEC_STEERING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index adf5cc6a7b8c..dec183ccd4ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -62,6 +62,7 @@ struct mlx5_tc_int_port_priv; struct mlx5e_rep_bond; struct mlx5e_tc_tun_encap; struct mlx5e_post_act; +struct mlx5e_flow_meters; struct mlx5_rep_uplink_priv { /* indirect block callbacks are invoked on bind/unbind events @@ -97,6 +98,8 @@ struct mlx5_rep_uplink_priv { /* OVS internal port support */ struct mlx5e_tc_int_port_priv *int_port_priv; + + struct mlx5e_flow_meters *flow_meters; }; struct mlx5e_rep_priv { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 34bf11cdf90f..5596d561a07f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -59,6 +59,7 @@ #include "en/tc_tun_encap.h" #include "en/tc/sample.h" #include "en/tc/act/act.h" +#include "en/tc/post_meter.h" #include "lib/devcom.h" #include "lib/geneve.h" #include "lib/fs_chains.h" @@ -104,6 +105,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { .mlen = 16, }, [NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct, + [PACKET_COLOR_TO_REG] = packet_color_to_reg, }; /* To avoid false lock dependency warning set the tc_ht lock @@ -240,6 +242,30 @@ mlx5e_get_int_port_priv(struct mlx5e_priv *priv) return NULL; } +struct mlx5e_flow_meters * +mlx5e_get_flow_meters(struct mlx5_core_dev *dev) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5e_priv *priv; + + if (is_mdev_switchdev_mode(dev)) { + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &uplink_rpriv->uplink_priv; + priv = netdev_priv(uplink_rpriv->netdev); + if (!uplink_priv->flow_meters) + uplink_priv->flow_meters = + mlx5e_flow_meters_init(priv, + MLX5_FLOW_NAMESPACE_FDB, + uplink_priv->post_act); + if (!IS_ERR(uplink_priv->flow_meters)) + return uplink_priv->flow_meters; + } + + return NULL; +} + static struct mlx5_tc_ct_priv * get_ct_priv(struct mlx5e_priv *priv) { @@ -319,12 +345,39 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv, mlx5e_del_offloaded_nic_rule(priv, rule, attr); } +static bool +is_flow_meter_action(struct mlx5_flow_attr *attr) +{ + return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) && + (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)); +} + 
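+/* Note (illustrative, not part of the original patch): tc_act_parse_police()
+ * sets MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO together with exe_aso_type =
+ * MLX5_EXE_ASO_FLOW_METER; is_flow_meter_action() above keys on exactly that
+ * pair, and mlx5e_tc_add_flow_meter() below resolves the meter handle and
+ * points attr->dest_ft at the post-meter table before the rule is offloaded.
+ */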
+static int +mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_flow_meter_handle *meter; + + meter = mlx5e_tc_meter_get(priv->mdev, &attr->meter_attr.params); + if (IS_ERR(meter)) { + mlx5_core_err(priv->mdev, "Failed to get flow meter\n"); + return PTR_ERR(meter); + } + + attr->meter_attr.meter = meter; + attr->dest_ft = mlx5e_tc_meter_get_post_meter_ft(meter->flow_meters); + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + + return 0; +} + struct mlx5_flow_handle * mlx5e_tc_rule_offload(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int err; if (attr->flags & MLX5_ATTR_FLAG_CT) { struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = @@ -341,6 +394,12 @@ mlx5e_tc_rule_offload(struct mlx5e_priv *priv, if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr); + if (is_flow_meter_action(attr)) { + err = mlx5e_tc_add_flow_meter(priv, attr); + if (err) + return ERR_PTR(err); + } + return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); } @@ -367,6 +426,9 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv, } mlx5_eswitch_del_offloaded_rule(esw, rule, attr); + + if (attr->meter_attr.meter) + mlx5e_tc_meter_put(attr->meter_attr.meter); } int @@ -4519,9 +4581,9 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate, return err; } -static int mlx5e_policer_validate(const struct flow_action *action, - const struct flow_action_entry *act, - struct netlink_ext_ack *extack) +int mlx5e_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) { if (act->police.exceed.act_id != FLOW_ACTION_DROP) { NL_SET_ERR_MSG_MOD(extack, @@ -4956,6 +5018,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv); mlx5_tc_ct_clean(uplink_priv->ct_priv); + mlx5e_flow_meters_cleanup(uplink_priv->flow_meters); mlx5e_tc_post_act_destroy(uplink_priv->post_act); } @@ -5061,7 +5124,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, tc_skb_ext->chain = chain; - zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) & + zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) & ESW_ZONE_ID_MASK; if (!mlx5e_tc_ct_restore_flow(tc->ct, skb, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index e2a1250aeca1..517f2252b5ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -39,6 +39,7 @@ #include "en/tc_ct.h" #include "en/tc_tun.h" #include "en/tc/int_port.h" +#include "en/tc/meter.h" #include "en_rep.h" #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff @@ -71,6 +72,7 @@ struct mlx5_flow_attr { struct mlx5_modify_hdr *modify_hdr; struct mlx5_ct_attr ct_attr; struct mlx5e_sample_attr sample_attr; + struct mlx5e_meter_attr meter_attr; struct mlx5e_tc_flow_parse_attr *parse_attr; u32 chain; u16 prio; @@ -83,6 +85,7 @@ struct mlx5_flow_attr { u8 tun_ip_version; int tunnel_id; /* mapped tunnel id */ u32 flags; + u32 exe_aso_type; struct list_head list; struct mlx5e_post_act_handle *post_act_handle; struct { @@ -229,6 +232,7 @@ enum mlx5e_tc_attr_to_reg { FTEID_TO_REG, NIC_CHAIN_TO_REG, NIC_ZONE_RESTORE_TO_REG, + PACKET_COLOR_TO_REG, }; struct mlx5e_tc_attr_to_reg_mapping { @@ -241,6 +245,10 @@ struct 
mlx5e_tc_attr_to_reg_mapping { extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; +#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset) +#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen) +#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0)) + bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, struct net_device *out_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 719ef26d23c0..b938632f89ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1152,8 +1152,6 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) { const u32 *out; - WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE); - if (num_vfs < 0) return; @@ -1186,6 +1184,9 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw) int total_vports; int err; + if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED) + return 0; + total_vports = mlx5_eswitch_get_total_vports(dev); if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { @@ -1203,6 +1204,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw) } else { esw_warn(dev, "ingress ACL is not supported by FW\n"); } + esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED; return 0; err: @@ -1215,6 +1217,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw) { struct mlx5_core_dev *dev = esw->dev; + esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED; if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) mlx5_fs_ingress_acls_cleanup(dev); if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) @@ -1224,7 +1227,6 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw) /** * mlx5_eswitch_enable_locked - Enable eswitch * @esw: Pointer to eswitch - * @mode: Eswitch mode to enable * @num_vfs: Enable eswitch for given number of VFs. This is optional. * Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS. * Caller should pass num_vfs > 0 when enabling eswitch for @@ -1238,7 +1240,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw) * mode. If num_vfs >=0 is provided, it setup VF related eswitch vports. * It returns 0 on success or error code on failure. */ -int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) +int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs) { int err; @@ -1257,9 +1259,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) mlx5_eswitch_update_num_of_vfs(esw, num_vfs); - esw->mode = mode; - - if (mode == MLX5_ESWITCH_LEGACY) { + if (esw->mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { mlx5_rescan_drivers(esw->dev); @@ -1269,22 +1269,19 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) if (err) goto abort; + esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED; + mlx5_eswitch_event_handlers_register(esw); esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", - mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", + esw->mode == MLX5_ESWITCH_LEGACY ? 
"LEGACY" : "OFFLOADS", esw->esw_funcs.num_vfs, esw->enabled_vports); - mlx5_esw_mode_change_notify(esw, mode); + mlx5_esw_mode_change_notify(esw, esw->mode); return 0; abort: - esw->mode = MLX5_ESWITCH_NONE; - - if (mode == MLX5_ESWITCH_OFFLOADS) - mlx5_rescan_drivers(esw->dev); - mlx5_esw_acls_ns_cleanup(esw); return err; } @@ -1305,14 +1302,14 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) if (!mlx5_esw_allowed(esw)) return 0; - toggle_lag = esw->mode == MLX5_ESWITCH_NONE; + toggle_lag = !mlx5_esw_is_fdb_created(esw); if (toggle_lag) mlx5_lag_disable_change(esw->dev); down_write(&esw->mode_lock); - if (esw->mode == MLX5_ESWITCH_NONE) { - ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); + if (!mlx5_esw_is_fdb_created(esw)) { + ret = mlx5_eswitch_enable_locked(esw, num_vfs); } else { enum mlx5_eswitch_vport_event vport_events; @@ -1330,55 +1327,79 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) return ret; } -void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) +/* When disabling sriov, free driver level resources. */ +void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) { - struct devlink *devlink = priv_to_devlink(esw->dev); - int old_mode; - - lockdep_assert_held_write(&esw->mode_lock); - - if (esw->mode == MLX5_ESWITCH_NONE) + if (!mlx5_esw_allowed(esw)) return; - esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", + down_write(&esw->mode_lock); + /* If driver is unloaded, this function is called twice by remove_one() + * and mlx5_unload(). Prevent the second call. + */ + if (!esw->esw_funcs.num_vfs && !clear_vf) + goto unlock; + + esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n", esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", esw->esw_funcs.num_vfs, esw->enabled_vports); - /* Notify eswitch users that it is exiting from current mode. - * So that it can do necessary cleanup before the eswitch is disabled. + mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); + if (clear_vf) + mlx5_eswitch_clear_vf_vports_info(esw); + /* If disabling sriov in switchdev mode, free meta rules here + * because it depends on num_vfs. */ - mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE); + if (esw->mode == MLX5_ESWITCH_OFFLOADS) { + struct devlink *devlink = priv_to_devlink(esw->dev); - mlx5_eswitch_event_handlers_unregister(esw); + esw_offloads_del_send_to_vport_meta_rules(esw); + devlink_rate_nodes_destroy(devlink); + } - if (esw->mode == MLX5_ESWITCH_LEGACY) - esw_legacy_disable(esw); - else if (esw->mode == MLX5_ESWITCH_OFFLOADS) - esw_offloads_disable(esw); + esw->esw_funcs.num_vfs = 0; - old_mode = esw->mode; - esw->mode = MLX5_ESWITCH_NONE; +unlock: + up_write(&esw->mode_lock); +} - if (old_mode == MLX5_ESWITCH_OFFLOADS) - mlx5_rescan_drivers(esw->dev); +/* Free resources for corresponding eswitch mode. It is called by devlink + * when changing eswitch mode or modprobe when unloading driver. + */ +void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw) +{ + struct devlink *devlink = priv_to_devlink(esw->dev); + + /* Notify eswitch users that it is exiting from current mode. + * So that it can do necessary cleanup before the eswitch is disabled. + */ + mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY); - devlink_rate_nodes_destroy(devlink); + mlx5_eswitch_event_handlers_unregister(esw); + esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", + esw->mode == MLX5_ESWITCH_LEGACY ? 
"LEGACY" : "OFFLOADS", + esw->esw_funcs.num_vfs, esw->enabled_vports); + + esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED; + if (esw->mode == MLX5_ESWITCH_OFFLOADS) + esw_offloads_disable(esw); + else if (esw->mode == MLX5_ESWITCH_LEGACY) + esw_legacy_disable(esw); mlx5_esw_acls_ns_cleanup(esw); - if (clear_vf) - mlx5_eswitch_clear_vf_vports_info(esw); + if (esw->mode == MLX5_ESWITCH_OFFLOADS) + devlink_rate_nodes_destroy(devlink); } -void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) +void mlx5_eswitch_disable(struct mlx5_eswitch *esw) { if (!mlx5_esw_allowed(esw)) return; mlx5_lag_disable_change(esw->dev); down_write(&esw->mode_lock); - mlx5_eswitch_disable_locked(esw, clear_vf); - esw->esw_funcs.num_vfs = 0; + mlx5_eswitch_disable_locked(esw); up_write(&esw->mode_lock); mlx5_lag_enable_change(esw->dev); } @@ -1573,7 +1594,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) refcount_set(&esw->qos.refcnt, 0); esw->enabled_vports = 0; - esw->mode = MLX5_ESWITCH_NONE; + esw->mode = MLX5_ESWITCH_LEGACY; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) @@ -1875,7 +1896,7 @@ u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw = dev->priv.eswitch; - return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE; + return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY; } EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); @@ -1995,8 +2016,6 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw) */ void mlx5_esw_unlock(struct mlx5_eswitch *esw) { - if (!mlx5_esw_allowed(esw)) - return; up_write(&esw->mode_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 2754a732914d..c19604b06a2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -282,10 +282,15 @@ struct mlx5_esw_functions { enum { MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0), MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1), + MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2), }; struct mlx5_esw_bridge_offloads; +enum { + MLX5_ESW_FDB_CREATED = BIT(0), +}; + struct mlx5_eswitch { struct mlx5_core_dev *dev; struct mlx5_nb nb; @@ -337,6 +342,7 @@ void esw_offloads_disable(struct mlx5_eswitch *esw); int esw_offloads_enable(struct mlx5_eswitch *esw); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); +void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw); bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw); int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable); @@ -350,10 +356,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); #define MLX5_ESWITCH_IGNORE_NUM_VFS (-1) -int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs); +int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs); int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); -void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf); -void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf); +void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf); +void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw); +void mlx5_eswitch_disable(struct mlx5_eswitch *esw); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u16 vport, const u8 *mac); int 
mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, @@ -575,6 +582,11 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index) return dl_port_index & 0xffff; } +static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw) +{ + return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED; +} + /* TODO: This mlx5e_tc function shouldn't be called by eswitch */ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); @@ -719,7 +731,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw); static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; } -static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {} +static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {} +static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } static inline int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 2ce3728576d1..e224ec7005a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1040,6 +1040,15 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw) mlx5_del_flow_rules(flows[i]); kvfree(flows); + /* If changing eswitch mode from switchdev to legacy, but num_vfs is not 0, + * meta rules could be freed again. So set it to NULL. 
+ */ + esw->fdb_table.offloads.send_to_vport_meta_rules = NULL; +} + +void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw) +{ + mlx5_eswitch_del_send_to_vport_meta_rules(esw); } static int @@ -2034,7 +2043,7 @@ static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) if (!MLX5_CAP_GEN(dev, vport_group_manager)) return -EOPNOTSUPP; - if (esw->mode == MLX5_ESWITCH_NONE) + if (!mlx5_esw_is_fdb_created(esw)) return -EOPNOTSUPP; switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { @@ -2170,18 +2179,18 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, { int err, err1; - mlx5_eswitch_disable_locked(esw, false); - err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, - esw->dev->priv.sriov.num_vfs); + esw->mode = MLX5_ESWITCH_OFFLOADS; + err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to offloads"); - err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, - MLX5_ESWITCH_IGNORE_NUM_VFS); + esw->mode = MLX5_ESWITCH_LEGACY; + err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); if (err1) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch back to legacy"); } + mlx5_rescan_drivers(esw->dev); } if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { if (mlx5_eswitch_inline_mode_get(esw, @@ -2894,7 +2903,7 @@ int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable) int err = 0; down_write(&esw->mode_lock); - if (esw->mode != MLX5_ESWITCH_NONE) { + if (mlx5_esw_is_fdb_created(esw)) { err = -EBUSY; goto done; } @@ -3229,13 +3238,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, { int err, err1; - mlx5_eswitch_disable_locked(esw, false); - err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, - MLX5_ESWITCH_IGNORE_NUM_VFS); + esw->mode = MLX5_ESWITCH_LEGACY; + err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); - err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, - MLX5_ESWITCH_IGNORE_NUM_VFS); + esw->mode = MLX5_ESWITCH_OFFLOADS; + err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); if (err1) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch back to offloads"); @@ -3334,15 +3342,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) return 0; } -static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw) -{ - /* devlink commands in NONE eswitch mode are currently supported only - * on ECPF. - */ - return (esw->mode == MLX5_ESWITCH_NONE && - !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0; -} - /* FIXME: devl_unlock() followed by devl_lock() inside driver callback * is never correct and prone to races. It's a transitional workaround, * never repeat this pattern. 
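The eswitch rework in these hunks removes the MLX5_ESWITCH_NONE state, so a devlink mode change becomes "tear down the current FDB, flip esw->mode, bring the FDB back up". A minimal sketch of that sequence (not part of the patch; the wrapper name is hypothetical, only helpers from this patch are used, and callers are assumed to hold esw->mode_lock as mlx5_devlink_eswitch_mode_set() does):

static int example_switch_eswitch_mode(struct mlx5_eswitch *esw, bool to_switchdev)
{
	/* Free the resources of the currently active mode. */
	mlx5_eswitch_disable_locked(esw);

	/* esw->mode is now assigned directly; there is no NONE state left. */
	esw->mode = to_switchdev ? MLX5_ESWITCH_OFFLOADS : MLX5_ESWITCH_LEGACY;

	/* Recreate the FDB for the new mode, keeping the current VF count. */
	return mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
}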
@@ -3399,6 +3398,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (cur_mlx5_mode == mlx5_mode) goto unlock; + mlx5_eswitch_disable_locked(esw); if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { if (mlx5_devlink_trap_get_num_active(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, @@ -3409,6 +3409,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, err = esw_offloads_start(esw, extack); } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { err = esw_offloads_stop(esw, extack); + mlx5_rescan_drivers(esw->dev); } else { err = -EINVAL; } @@ -3431,12 +3432,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return PTR_ERR(esw); mlx5_eswtich_mode_callback_enter(devlink, esw); - err = eswitch_devlink_esw_mode_check(esw); - if (err) - goto unlock; - err = esw_mode_to_devlink(esw->mode, mode); -unlock: mlx5_eswtich_mode_callback_exit(devlink, esw); return err; } @@ -3485,9 +3481,6 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, return PTR_ERR(esw); mlx5_eswtich_mode_callback_enter(devlink, esw); - err = eswitch_devlink_esw_mode_check(esw); - if (err) - goto out; switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: @@ -3539,12 +3532,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) return PTR_ERR(esw); mlx5_eswtich_mode_callback_enter(devlink, esw); - err = eswitch_devlink_esw_mode_check(esw); - if (err) - goto unlock; - err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); -unlock: mlx5_eswtich_mode_callback_exit(devlink, esw); return err; } @@ -3555,16 +3543,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw; - int err; + int err = 0; esw = mlx5_devlink_eswitch_get(devlink); if (IS_ERR(esw)) return PTR_ERR(esw); mlx5_eswtich_mode_callback_enter(devlink, esw); - err = eswitch_devlink_esw_mode_check(esw); - if (err) - goto unlock; if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || @@ -3615,21 +3600,15 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, enum devlink_eswitch_encap_mode *encap) { struct mlx5_eswitch *esw; - int err; esw = mlx5_devlink_eswitch_get(devlink); if (IS_ERR(esw)) return PTR_ERR(esw); mlx5_eswtich_mode_callback_enter(devlink, esw); - err = eswitch_devlink_esw_mode_check(esw); - if (err) - goto unlock; - *encap = esw->offloads.encap; -unlock: mlx5_eswtich_mode_callback_exit(devlink, esw); - return err; + return 0; } static bool diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 2a8fc547eb37..641505d2c0c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -632,6 +632,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) { #ifdef CONFIG_MLX5_ESWITCH + struct mlx5_core_dev *dev; u8 mode; #endif int i; @@ -641,11 +642,11 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) return false; #ifdef CONFIG_MLX5_ESWITCH - mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev); - - if (mode != MLX5_ESWITCH_NONE && mode != MLX5_ESWITCH_OFFLOADS) + dev = ldev->pf[MLX5_LAG_P1].dev; + if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev)) return false; + mode = mlx5_eswitch_mode(dev); for (i = 0; i < ldev->ports; i++) if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode) 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 2a8fc547eb37..641505d2c0c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -632,6 +632,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
 {
 #ifdef CONFIG_MLX5_ESWITCH
+	struct mlx5_core_dev *dev;
 	u8 mode;
 #endif
 	int i;
@@ -641,11 +642,11 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
 		return false;
 
 #ifdef CONFIG_MLX5_ESWITCH
-	mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev);
-
-	if (mode != MLX5_ESWITCH_NONE && mode != MLX5_ESWITCH_OFFLOADS)
+	dev = ldev->pf[MLX5_LAG_P1].dev;
+	if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev))
 		return false;
 
+	mode = mlx5_eswitch_mode(dev);
 	for (i = 0; i < ldev->ports; i++)
 		if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
 			return false;
@@ -760,8 +761,7 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
 
 #ifdef CONFIG_MLX5_ESWITCH
 	for (i = 0; i < ldev->ports; i++)
-		roce_lag = roce_lag &&
-			   ldev->pf[i].dev->priv.eswitch->mode == MLX5_ESWITCH_NONE;
+		roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
 #endif
 
 	return roce_lag;
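Read together, the two mlx5_lag_check_prereq() hunks above leave the eswitch prerequisite with roughly this shape (a condensed sketch, not the full function; the bond-state checks are elided):

/* Condensed sketch: with SR-IOV enabled on the first port, LAG requires
 * switchdev mode, and all ports must then report the same eswitch mode.
 */
static bool lag_prereq_sketch(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
        int i;

        if (mlx5_sriov_is_enabled(dev) && !is_mdev_switchdev_mode(dev))
                return false;

        for (i = 0; i < ldev->ports; i++)
                if (mlx5_eswitch_mode(ldev->pf[i].dev) != mlx5_eswitch_mode(dev))
                        return false;

        return true;
}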
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
new file mode 100644
index 000000000000..21e14507ff5c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/transobj.h>
+#include "aso.h"
+#include "wq.h"
+
+struct mlx5_aso_cq {
+	/* data path - accessed per cqe */
+	struct mlx5_cqwq wq;
+
+	/* data path - accessed per napi poll */
+	struct mlx5_core_cq mcq;
+
+	/* control */
+	struct mlx5_core_dev *mdev;
+	struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5_aso {
+	/* data path */
+	u16 cc;
+	u16 pc;
+
+	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+	struct mlx5_aso_cq cq;
+
+	/* read only */
+	struct mlx5_wq_cyc wq;
+	void __iomem *uar_map;
+	u32 sqn;
+
+	/* control path */
+	struct mlx5_wq_ctrl wq_ctrl;
+
+} ____cacheline_aligned_in_smp;
+
+static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
+{
+	mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
+			     void *cqc_data, struct mlx5_aso_cq *cq)
+{
+	struct mlx5_core_cq *mcq = &cq->mcq;
+	struct mlx5_wq_param param;
+	int err;
+	u32 i;
+
+	param.buf_numa_node = numa_node;
+	param.db_numa_node = numa_node;
+
+	err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+	if (err)
+		return err;
+
+	mcq->cqe_sz = 64;
+	mcq->set_ci_db = cq->wq_ctrl.db.db;
+	mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+		cqe->op_own = 0xf1;
+	}
+
+	cq->mdev = mdev;
+
+	return 0;
+}
+
+static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+{
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+	struct mlx5_core_dev *mdev = cq->mdev;
+	struct mlx5_core_cq *mcq = &cq->mcq;
+	void *in, *cqc;
+	int inlen, eqn;
+	int err;
+
+	err = mlx5_vector2eqn(mdev, 0, &eqn);
+	if (err)
+		return err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		sizeof(u64) * cq->wq_ctrl.buf.npages;
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+	MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+		 MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
+{
+	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+	mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
+			      struct mlx5_aso_cq *cq)
+{
+	void *cqc_data;
+	int err;
+
+	cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+	if (!cqc_data)
+		return -ENOMEM;
+
+	MLX5_SET(cqc, cqc_data, log_cq_size, 1);
+	MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+		MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
+
+	err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
+	if (err) {
+		mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
+		goto err_out;
+	}
+
+	err = create_aso_cq(cq, cqc_data);
+	if (err) {
+		mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
+		goto err_free_cq;
+	}
+
+	kvfree(cqc_data);
+	return 0;
+
+err_free_cq:
+	mlx5_aso_free_cq(cq);
+err_out:
+	kvfree(cqc_data);
+	return err;
+}
+
+static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
+			     void *sqc_data, struct mlx5_aso *sq)
+{
+	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+	struct mlx5_wq_cyc *wq = &sq->wq;
+	struct mlx5_wq_param param;
+	int err;
+
+	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+
+	param.db_numa_node = numa_node;
+	param.buf_numa_node = numa_node;
+	err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+	if (err)
+		return err;
+	wq->db = &wq->db[MLX5_SND_DBR];
+
+	return 0;
+}
+
+static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
+			 void *sqc_data, struct mlx5_aso *sq)
+{
+	void *in, *sqc, *wq;
+	int inlen, err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+		sizeof(u64) * sq->wq_ctrl.buf.npages;
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+	wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+		 MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+	void *in, *sqc;
+	int inlen, err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+	err = mlx5_core_modify_sq(mdev, sqn, in);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+				  void *sqc_data, struct mlx5_aso *sq)
+{
+	int err;
+
+	err = create_aso_sq(mdev, pdn, sqc_data, sq);
+	if (err)
+		return err;
+
+	err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
+	if (err)
+		mlx5_core_destroy_sq(mdev, sq->sqn);
+
+	return err;
+}
+
+static void mlx5_aso_free_sq(struct mlx5_aso *sq)
+{
+	mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
+{
+	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+	mlx5_aso_free_sq(sq);
+}
+
+static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
+			      u32 pdn, struct mlx5_aso *sq)
+{
+	void *sqc_data, *wq;
+	int err;
+
+	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+	if (!sqc_data)
+		return -ENOMEM;
+
+	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+	MLX5_SET(wq, wq, pd, pdn);
+	MLX5_SET(wq, wq, log_wq_sz, 1);
+
+	err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
+	if (err) {
+		mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
+		goto err_out;
+	}
+
+	err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
+	if (err) {
+		mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
+		goto err_free_asosq;
+	}
+
+	mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);
+
+	kvfree(sqc_data);
+	return 0;
+
+err_free_asosq:
+	mlx5_aso_free_sq(sq);
+err_out:
+	kvfree(sqc_data);
+	return err;
+}
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
+{
+	int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+	struct mlx5_aso *aso;
+	int err;
+
+	aso = kzalloc(sizeof(*aso), GFP_KERNEL);
+	if (!aso)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
+	if (err)
+		goto err_cq;
+
+	err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
+	if (err)
+		goto err_sq;
+
+	return aso;
+
+err_sq:
+	mlx5_aso_destroy_cq(&aso->cq);
+err_cq:
+	kfree(aso);
+	return ERR_PTR(err);
+}
+
+void mlx5_aso_destroy(struct mlx5_aso *aso)
+{
+	if (IS_ERR_OR_NULL(aso))
+		return;
+
+	mlx5_aso_destroy_sq(aso);
+	mlx5_aso_destroy_cq(&aso->cq);
+	kfree(aso);
+}
+
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+			struct mlx5_aso_wqe *aso_wqe,
+			u32 obj_id, u32 opc_mode)
+{
+	struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;
+
+	cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
+					     (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+					     MLX5_OPCODE_ACCESS_ASO);
+	cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
+	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+	cseg->general_id = cpu_to_be32(obj_id);
+}
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+{
+	u16 pi;
+
+	pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
+	return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+}
+
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+		       struct mlx5_wqe_ctrl_seg *doorbell_cseg)
+{
+	doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+	/* ensure wqe is visible to device before updating doorbell record */
+	dma_wmb();
+
+	if (with_data)
+		aso->pc += MLX5_ASO_WQEBBS_DATA;
+	else
+		aso->pc += MLX5_ASO_WQEBBS;
+	*aso->wq.db = cpu_to_be32(aso->pc);
+
+	/* ensure doorbell record is visible to device before ringing the
+	 * doorbell
+	 */
+	wmb();
+
+	mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);
+
+	/* Ensure doorbell is written on uar_page before poll_cq */
+	WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
+{
+	struct mlx5_aso_cq *cq = &aso->cq;
+	struct mlx5_cqe64 *cqe;
+	unsigned long expires;
+
+	cqe = mlx5_cqwq_get_cqe(&cq->wq);
+
+	expires = jiffies + msecs_to_jiffies(interval_ms);
+	while (!cqe && time_is_after_jiffies(expires)) {
+		usleep_range(2, 10);
+		cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	}
+
+	if (!cqe)
+		return -ETIMEDOUT;
+
+	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+	 * otherwise a cq overrun may occur
+	 */
+	mlx5_cqwq_pop(&cq->wq);
+
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+		struct mlx5_err_cqe *err_cqe;
+
+		mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
+			      get_cqe_opcode(cqe));
+
+		err_cqe = (struct mlx5_err_cqe *)cqe;
+		mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
+			      err_cqe->vendor_err_synd);
+		mlx5_core_err(cq->mdev, "syndrome=%x\n",
+			      err_cqe->syndrome);
+		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+			       16, 1, err_cqe,
+			       sizeof(*err_cqe), false);
+	}
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	if (with_data)
+		aso->cc += MLX5_ASO_WQEBBS_DATA;
+	else
+		aso->cc += MLX5_ASO_WQEBBS;
+
+	return 0;
+}
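A quick size check helps read the pc/cc accounting in mlx5_aso_post_wqe() and mlx5_aso_poll_cq() above. Assuming the segment layouts declared in lib/aso.h below pack with no padding (16-byte ctrl segment plus 48-byte ASO ctrl segment, optionally followed by a 64-byte data segment) and MLX5_SEND_WQE_BB = 64, a plain ASO WQE occupies one basic block and the data-bearing variant two, which is exactly what the counters advance by:

/* Illustrative only; assumes kernel context with lib/aso.h and
 * <linux/mlx5/qp.h> included.
 */
static_assert(sizeof(struct mlx5_aso_wqe) == MLX5_SEND_WQE_BB);
static_assert(sizeof(struct mlx5_aso_wqe_data) == 2 * MLX5_SEND_WQE_BB);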
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
new file mode 100644
index 000000000000..b3bbf284fe71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_ASO_H__
+#define __MLX5_LIB_ASO_H__
+
+#include <linux/mlx5/qp.h>
+#include "mlx5_core.h"
+
+#define MLX5_ASO_WQEBBS \
+	(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
+#define MLX5_ASO_WQEBBS_DATA \
+	(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+
+struct mlx5_wqe_aso_ctrl_seg {
+	__be32 va_h;
+	__be32 va_l; /* include read_enable */
+	__be32 l_key;
+	u8 data_mask_mode;
+	u8 condition_1_0_operand;
+	u8 condition_1_0_offset;
+	u8 data_offset_condition_operand;
+	__be32 condition_0_data;
+	__be32 condition_0_mask;
+	__be32 condition_1_data;
+	__be32 condition_1_mask;
+	__be64 bitwise_data;
+	__be64 data_mask;
+};
+
+struct mlx5_wqe_aso_data_seg {
+	__be32 bytewise_data[16];
+};
+
+struct mlx5_aso_wqe {
+	struct mlx5_wqe_ctrl_seg ctrl;
+	struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+};
+
+struct mlx5_aso_wqe_data {
+	struct mlx5_wqe_ctrl_seg ctrl;
+	struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+	struct mlx5_wqe_aso_data_seg aso_data;
+};
+
+enum {
+	MLX5_ASO_LOGICAL_AND,
+	MLX5_ASO_LOGICAL_OR,
+};
+
+enum {
+	MLX5_ASO_ALWAYS_FALSE,
+	MLX5_ASO_ALWAYS_TRUE,
+	MLX5_ASO_EQUAL,
+	MLX5_ASO_NOT_EQUAL,
+	MLX5_ASO_GREATER_OR_EQUAL,
+	MLX5_ASO_LESSER_OR_EQUAL,
+	MLX5_ASO_LESSER,
+	MLX5_ASO_GREATER,
+	MLX5_ASO_CYCLIC_GREATER,
+	MLX5_ASO_CYCLIC_LESSER,
+};
+
+enum {
+	MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
+	MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
+	MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
+};
+
+enum {
+	MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+};
+
+struct mlx5_aso;
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+			struct mlx5_aso_wqe *aso_wqe,
+			u32 obj_id, u32 opc_mode);
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+		       struct mlx5_wqe_ctrl_seg *doorbell_cseg);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
+void mlx5_aso_destroy(struct mlx5_aso *aso);
+#endif /* __MLX5_LIB_ASO_H__ */
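Putting the API above together, a caller such as the new flow-meter code would drive one ASO operation roughly as follows. This is a minimal hedged sketch, not driver code: aso_sketch_exec() is hypothetical, obj_id and pdn come from the caller, error handling is trimmed, and the 10 ms poll budget is arbitrary.

static int aso_sketch_exec(struct mlx5_core_dev *mdev, u32 pdn, u32 obj_id)
{
        struct mlx5_aso_wqe *wqe;
        struct mlx5_aso *aso;
        int err;

        aso = mlx5_aso_create(mdev, pdn);       /* CQ + SQ moved to RDY */
        if (IS_ERR(aso))
                return PTR_ERR(aso);

        wqe = mlx5_aso_get_wqe(aso);
        mlx5_aso_build_wqe(aso, DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS),
                           wqe, obj_id, MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
        /* caller-specific: fill wqe->aso_ctrl (mask mode, conditions, ...) */

        mlx5_aso_post_wqe(aso, false, &wqe->ctrl);      /* rings the doorbell */
        err = mlx5_aso_poll_cq(aso, false, 10);         /* busy-poll, 10 ms cap */

        mlx5_aso_destroy(aso);
        return err;
}

Note that with_data must match between post and poll (false here, since no mlx5_wqe_aso_data_seg is appended), so pc and cc advance by the same WQEBB count.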
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2078d9f03a5f..a9e51c1b7738 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1250,6 +1250,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
 {
 	mlx5_sf_dev_table_destroy(dev);
 	mlx5_sriov_detach(dev);
+	mlx5_eswitch_disable(dev->priv.eswitch);
 	mlx5_lag_remove_mdev(dev);
 	mlx5_ec_cleanup(dev);
 	mlx5_sf_hw_table_destroy(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 3be659cd91f1..7d955a4d9f14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -501,7 +501,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
 	case MLX5_ESWITCH_OFFLOADS:
 		mlx5_sf_table_enable(table);
 		break;
-	case MLX5_ESWITCH_NONE:
+	case MLX5_ESWITCH_LEGACY:
 		mlx5_sf_table_disable(table);
 		break;
 	default:
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2935614f6fa9..5757cd6e1819 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -145,8 +145,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
 		sriov->vfs_ctx[vf].enabled = 0;
 	}
 
-	if (MLX5_ESWITCH_MANAGER(dev))
-		mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
+	mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
 
 	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 8b18fe9771f9..e2701ed0200e 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -12,7 +12,6 @@
 #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
 
 enum {
-	MLX5_ESWITCH_NONE,
 	MLX5_ESWITCH_LEGACY,
 	MLX5_ESWITCH_OFFLOADS
 };
@@ -153,7 +152,7 @@ struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
 
 static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
 {
-	return MLX5_ESWITCH_NONE;
+	return MLX5_ESWITCH_LEGACY;
 }
 
 static inline enum devlink_eswitch_encap_mode
@@ -198,6 +197,11 @@ static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
 #endif /* CONFIG_MLX5_ESWITCH */
 
+static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev)
+{
+	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY;
+}
+
 static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
 {
 	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
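With MLX5_ESWITCH_NONE gone, every device reports a concrete eswitch mode, including the !CONFIG_MLX5_ESWITCH stub, which now returns MLX5_ESWITCH_LEGACY. Callers can therefore branch on the two predicates without a third state; a trivial hedged example (log_esw_mode() is hypothetical):

static void log_esw_mode(struct mlx5_core_dev *dev)
{
        if (is_mdev_switchdev_mode(dev))
                mlx5_core_dbg(dev, "eswitch in switchdev (offloads) mode\n");
        else if (is_mdev_legacy_mode(dev))
                mlx5_core_dbg(dev, "eswitch in legacy mode\n");
        /* no other state is possible */
}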