Merge tag 'mlx5-updates-2020-02-27' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-02-27

mlx5 misc updates and minor cleanups:

1) Use per vport tables for mirroring
2) Improve log messages for SW steering (DR)
3) Add devlink fdb_large_groups parameter
4) E-Switch, Allow goto earlier chain
5) Don't allow forwarding between uplink representors
6) Add support for devlink-port in non-representors mode
7) Minor misc cleanups
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

commit 549da33801 by David S. Miller <davem@davemloft.net>, 2020-02-28 11:59:53 -08:00
30 changed files with 519 additions and 129 deletions

View file

@@ -37,6 +37,12 @@ parameters.
* ``smfs`` Software managed flow steering. In SMFS mode, the HW
steering entities are created and managed through the driver without
firmware intervention.
* - ``fdb_large_groups``
- u32
- driverinit
- Control the number of large groups (size > 1) in the FDB table.
* The default value is 15, and the range is between 1 and 1024.
The ``mlx5`` driver supports reloading via ``DEVLINK_CMD_RELOAD``
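
As a reference for the semantics documented above, here is a minimal, self-contained C sketch of the parameter's behaviour: a u32 value validated against the 1-1024 range, with a fall-back to the default of 15 when nothing valid was configured. The names and the userspace framing are hypothetical; the real driver-side code is in the devlink.c hunks further down.

    #include <stdio.h>
    #include <stdint.h>

    #define FDB_LARGE_GROUPS_DEFAULT 15

    static int fdb_large_groups_validate(uint32_t val)
    {
        return val >= 1 && val <= 1024; /* out-of-range values are rejected */
    }

    static uint32_t fdb_large_groups_resolve(int have_user_value, uint32_t val)
    {
        /* Fall back to the default when no valid value was configured. */
        if (have_user_value && fdb_large_groups_validate(val))
            return val;
        return FDB_LARGE_GROUPS_DEFAULT;
    }

    int main(void)
    {
        printf("%u\n", fdb_large_groups_resolve(0, 0));    /* 15: nothing set */
        printf("%u\n", fdb_large_groups_resolve(1, 64));   /* 64: valid value */
        printf("%u\n", fdb_large_groups_resolve(1, 4096)); /* 15: rejected */
        return 0;
    }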

View file

@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
#
# Netdev extra

View file

@@ -190,11 +190,6 @@ static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
return 0;
}
enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
};
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -210,14 +205,38 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
return 0;
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
int group_num = val.vu32;
if (group_num < 1 || group_num > 1024) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported group number, supported range is 1-1024");
return -EOPNOTSUPP;
}
return 0;
}
#endif
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
mlx5_devlink_fs_mode_validate),
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
#ifdef CONFIG_MLX5_ESWITCH
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
"fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL,
mlx5_devlink_large_group_num_validate),
#endif
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -230,13 +249,20 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
else
strcpy(value.vstr, "smfs");
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
value);
value.vbool = MLX5_CAP_GEN(dev, roce);
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
value);
#ifdef CONFIG_MLX5_ESWITCH
value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
devlink_param_driverinit_value_set(devlink,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
value);
#endif
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)

View file

@@ -6,6 +6,12 @@
#include <net/devlink.h>
enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
};
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);

View file

@@ -880,6 +880,7 @@ struct mlx5e_priv {
#endif
struct devlink_health_reporter *tx_reporter;
struct devlink_health_reporter *rx_reporter;
struct devlink_port dl_phy_port;
struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent stats_agent;

View file

@@ -0,0 +1,38 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include "en/devlink.h"
int mlx5e_devlink_phy_port_register(struct net_device *dev)
{
struct mlx5e_priv *priv;
struct devlink *devlink;
int err;
priv = netdev_priv(dev);
devlink = priv_to_devlink(priv->mdev);
devlink_port_attrs_set(&priv->dl_phy_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
PCI_FUNC(priv->mdev->pdev->devfn),
false, 0,
NULL, 0);
err = devlink_port_register(devlink, &priv->dl_phy_port, 1);
if (err)
return err;
devlink_port_type_eth_set(&priv->dl_phy_port, dev);
return 0;
}
void mlx5e_devlink_phy_port_unregister(struct mlx5e_priv *priv)
{
devlink_port_unregister(&priv->dl_phy_port);
}
struct devlink_port *mlx5e_get_devlink_phy_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return &priv->dl_phy_port;
}

View file

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5E_EN_DEVLINK_H
#define __MLX5E_EN_DEVLINK_H
#include <net/devlink.h>
#include "en.h"
int mlx5e_devlink_phy_port_register(struct net_device *dev);
void mlx5e_devlink_phy_port_unregister(struct mlx5e_priv *priv);
struct devlink_port *mlx5e_get_devlink_phy_port(struct net_device *dev);
#endif

View file

@@ -63,6 +63,7 @@
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
#include "lib/mlx5.h"
@@ -4605,6 +4606,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
#endif
.ndo_get_devlink_port = mlx5e_get_devlink_phy_port,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -5471,11 +5473,19 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
goto err_detach;
}
err = mlx5e_devlink_phy_port_register(netdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_devlink_phy_port_register failed, %d\n", err);
goto err_unregister_netdev;
}
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
return priv;
err_unregister_netdev:
unregister_netdev(netdev);
err_detach:
mlx5e_detach(mdev, priv);
err_destroy_netdev:
@@ -5497,6 +5507,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
#endif
mlx5e_devlink_phy_port_unregister(priv);
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);

View file

@@ -192,7 +192,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
if (err) {
pr_warn("vport %d error %d reading stats\n", rep->vport, err);
netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
rep->vport, err);
return;
}
@@ -1422,7 +1423,7 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1435,7 +1436,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
.ndo_get_devlink_port = mlx5e_get_devlink_port,
.ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1448,7 +1449,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_start_xmit = mlx5e_xmit,
.ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
.ndo_setup_tc = mlx5e_rep_setup_tc,
.ndo_get_devlink_port = mlx5e_get_devlink_port,
.ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1464,6 +1465,11 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_set_features = mlx5e_set_features,
};
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
{
return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
}
bool mlx5e_eswitch_rep(struct net_device *netdev)
{
if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
@@ -2026,8 +2032,9 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
&mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
rep->vport);
kfree(rpriv);
return -EINVAL;
}
@@ -2045,29 +2052,32 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
pr_warn("Failed to attach representor netdev for vport %d\n",
rep->vport);
netdev_warn(netdev,
"Failed to attach representor netdev for vport %d\n",
rep->vport);
goto err_destroy_mdev_resources;
}
err = mlx5e_rep_neigh_init(rpriv);
if (err) {
pr_warn("Failed to initialized neighbours handling for vport %d\n",
rep->vport);
netdev_warn(netdev,
"Failed to initialized neighbours handling for vport %d\n",
rep->vport);
goto err_detach_netdev;
}
err = register_devlink_port(dev, rpriv);
if (err) {
esw_warn(dev, "Failed to register devlink port %d\n",
rep->vport);
netdev_warn(netdev, "Failed to register devlink port %d\n",
rep->vport);
goto err_neigh_cleanup;
}
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
netdev_warn(netdev,
"Failed to register representor netdev for vport %d\n",
rep->vport);
goto err_devlink_cleanup;
}

View file

@@ -200,6 +200,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
bool mlx5e_eswitch_rep(struct net_device *netdev);
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }

View file

@@ -1076,17 +1076,17 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *slow_attr)
struct mlx5_flow_spec *spec)
{
struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr.split_count = 0;
slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
if (!IS_ERR(rule))
flow_flag_set(flow, SLOW);
@@ -1095,14 +1095,15 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_esw_flow_attr *slow_attr)
struct mlx5e_tc_flow *flow)
{
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->split_count = 0;
slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
struct mlx5_esw_flow_attr slow_attr;
memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr.split_count = 0;
slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1173,7 +1174,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int out_index;
if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
NL_SET_ERR_MSG_MOD(extack,
"E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}
@@ -1184,13 +1186,15 @@
*/
max_chain = mlx5_esw_chains_get_chain_range(esw);
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range");
return -EOPNOTSUPP;
}
max_prio = mlx5_esw_chains_get_prio_range(esw);
if (attr->prio > max_prio) {
NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range");
return -EOPNOTSUPP;
}
@@ -1237,14 +1241,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
if (!encap_valid) {
/* continue with goto slow path rule instead */
struct mlx5_esw_flow_attr slow_attr;
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
} else {
if (!encap_valid)
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
}
if (IS_ERR(flow->rule[0]))
return PTR_ERR(flow->rule[0]);
@@ -1272,7 +1272,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5_esw_flow_attr slow_attr;
int out_index;
if (flow_flag_test(flow, NOT_READY)) {
@@ -1283,7 +1282,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
mlx5e_tc_unoffload_from_slow_path(esw, flow);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
@@ -1312,7 +1311,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr slow_attr, *esw_attr;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
@@ -1365,7 +1364,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
continue;
}
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
mlx5e_tc_unoffload_from_slow_path(esw, flow);
flow->rule[0] = rule;
/* was unset when slow path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -1377,7 +1376,6 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
@@ -1389,7 +1387,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
/* mark the flow's encap dest as non-valid */
flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
@@ -2560,7 +2558,6 @@ static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
@@ -2836,8 +2833,7 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
hdrs, NULL);
err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -2899,7 +2895,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
parse_attr, hdrs, extack);
hdrs, extack);
if (err)
return err;
@@ -3343,7 +3339,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
parse_attr, hdrs, extack);
hdrs, extack);
if (err)
return err;
@@ -3381,8 +3377,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
pr_err("can't support more than %d output ports, can't offload forwarding\n",
attr->out_count);
netdev_warn(priv->netdev,
"can't support more than %d output ports, can't offload forwarding\n",
attr->out_count);
return -EOPNOTSUPP;
}
@@ -3405,6 +3402,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
struct net_device *uplink_upper;
struct mlx5e_rep_priv *rep_priv;
if (is_duplicated_output_device(priv->netdev,
out_dev,
@@ -3440,11 +3438,29 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
/* Don't allow forwarding between uplink ports.
*
* The input vport was stored in esw_attr->in_rep.
* In the LAG case, *priv* is the private data of
* the uplink, which may not be the input vport.
*/
rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
if (mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
mlx5e_eswitch_uplink_rep(out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are both uplink, can't offload forwarding");
pr_err("devices %s %s are both uplink, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
}
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
netdev_warn(priv->netdev,
"devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name,
out_dev->name);
return -EOPNOTSUPP;
}
@@ -3463,8 +3479,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
netdev_warn(priv->netdev,
"devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name,
out_dev->name);
return -EINVAL;
}
}
@@ -3516,12 +3534,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
return -EOPNOTSUPP;
}
if (dest_chain <= attr->chain) {
NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
if (!mlx5_esw_chains_backwards_supported(esw) &&
dest_chain <= attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Goto earlier chain isn't supported");
return -EOPNOTSUPP;
}
if (dest_chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
NL_SET_ERR_MSG_MOD(extack,
"Requested destination chain is out of supported range");
return -EOPNOTSUPP;
}
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -3571,7 +3592,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (attr->dest_chain) {
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
NL_SET_ERR_MSG_MOD(extack,
"Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -3579,7 +3601,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!(attr->action &
(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
NL_SET_ERR_MSG_MOD(extack,
"Rule must have at least one forward/drop action");
return -EOPNOTSUPP;
}

View file

@@ -39,6 +39,7 @@
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
#include "ecpf.h"
enum {
@@ -2006,6 +2007,25 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
esw_disable_vport(esw, vport);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
struct devlink *devlink = priv_to_devlink(esw->dev);
union devlink_param_value val;
int err;
err = devlink_param_driverinit_value_get(devlink,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
&val);
if (!err) {
esw->params.large_group_num = val.vu32;
} else {
esw_warn(esw->dev,
"Devlink can't get param fdb_large_groups, uses default (%d).\n",
ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
}
}
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
int err;
@@ -2022,6 +2042,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
mlx5_eswitch_get_devlink_param(esw);
esw_create_tsar(esw);
esw->mode = mode;

View file

@@ -55,6 +55,8 @@
#ifdef CONFIG_MLX5_ESWITCH
#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
#define MLX5_MAX_UC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
@@ -183,6 +185,12 @@ struct mlx5_eswitch_fdb {
int vlan_push_pop_refcount;
struct mlx5_esw_chains_priv *esw_chains_priv;
struct {
DECLARE_HASHTABLE(table, 8);
/* Protects vports.table */
struct mutex lock;
} vports;
} offloads;
};
u32 flags;
@@ -255,6 +263,9 @@ struct mlx5_eswitch {
u16 manager_vport;
u16 first_host_vport;
struct mlx5_esw_functions esw_funcs;
struct {
u32 large_group_num;
} params;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -623,6 +634,9 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }

View file

@@ -50,6 +50,181 @@
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
/* Per vport tables */
#define MLX5_ESW_VPORT_TABLE_SIZE 128
/* This struct is used as a key to the hash table, and it must be packed
* so that the hash result is consistent.
*/
struct mlx5_vport_key {
u32 chain;
u16 prio;
u16 vport;
u16 vhca_id;
} __packed;
struct mlx5_vport_table {
struct hlist_node hlist;
struct mlx5_flow_table *fdb;
u32 num_rules;
struct mlx5_vport_key key;
};
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *fdb;
ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
ft_attr.prio = FDB_PER_VPORT;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
PTR_ERR(fdb));
}
return fdb;
}
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr,
struct mlx5_vport_key *key)
{
key->vport = attr->in_rep->vport;
key->chain = attr->chain;
key->prio = attr->prio;
key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
return jhash(key, sizeof(*key), 0);
}
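
A note on the __packed annotation above: jhash() mixes the raw bytes of the key, so any compiler-inserted padding inside struct mlx5_vport_key would be hashed along with the fields, and the content of padding bytes is indeterminate unless the struct is zeroed first. A small standalone C sketch of what packing changes (sizes assume a typical ABI; these struct names are hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    struct key_padded {           /* same fields as mlx5_vport_key, unpacked */
        uint32_t chain;
        uint16_t prio;
        uint16_t vport;
        uint16_t vhca_id;
    };                            /* usually 12 bytes: 10 of data + 2 of padding */

    struct key_packed {
        uint32_t chain;
        uint16_t prio;
        uint16_t vport;
        uint16_t vhca_id;
    } __attribute__((packed));    /* exactly 10 bytes, no hidden padding */

    int main(void)
    {
        /* Hashing sizeof(struct key_padded) raw bytes would include the
         * trailing padding; packing removes the padding entirely. */
        printf("padded: %zu bytes, packed: %zu bytes\n",
               sizeof(struct key_padded), sizeof(struct key_packed));
        return 0;
    }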
/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
struct mlx5_vport_table *e;
hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
if (!memcmp(&e->key, skey, sizeof(*skey)))
return e;
return NULL;
}
static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
struct mlx5_vport_table *e;
struct mlx5_vport_key key;
u32 hkey;
mutex_lock(&esw->fdb_table.offloads.vports.lock);
hkey = flow_attr_to_vport_key(esw, attr, &key);
e = esw_vport_tbl_lookup(esw, &key, hkey);
if (!e || --e->num_rules)
goto out;
hash_del(&e->hlist);
mlx5_destroy_flow_table(e->fdb);
kfree(e);
out:
mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *fdb;
struct mlx5_vport_table *e;
struct mlx5_vport_key skey;
u32 hkey;
mutex_lock(&esw->fdb_table.offloads.vports.lock);
hkey = flow_attr_to_vport_key(esw, attr, &skey);
e = esw_vport_tbl_lookup(esw, &skey, hkey);
if (e) {
e->num_rules++;
goto out;
}
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e) {
fdb = ERR_PTR(-ENOMEM);
goto err_alloc;
}
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!ns) {
esw_warn(dev, "Failed to get FDB namespace\n");
fdb = ERR_PTR(-ENOENT);
goto err_ns;
}
fdb = esw_vport_tbl_create(esw, ns);
if (IS_ERR(fdb))
goto err_ns;
e->fdb = fdb;
e->num_rules = 1;
e->key = skey;
hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
mutex_unlock(&esw->fdb_table.offloads.vports.lock);
return e->fdb;
err_ns:
kfree(e);
err_alloc:
mutex_unlock(&esw->fdb_table.offloads.vports.lock);
return fdb;
}
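
esw_vport_tbl_get() and esw_vport_tbl_put() above implement the common lookup-or-create pattern: a mutex-protected table maps a key to a refcounted entry, the first get creates the underlying object, and the last put tears it down. A self-contained userspace sketch of the same pattern, with a linked list standing in for the kernel hashtable and all names hypothetical:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
        struct entry *next;
        int key;
        unsigned int refcount;
        /* ... the cached object would live here ... */
    };

    static struct entry *cache;
    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct entry *cache_get(int key)
    {
        struct entry *e;

        pthread_mutex_lock(&cache_lock);
        for (e = cache; e; e = e->next)
            if (e->key == key)
                break;
        if (e) {
            e->refcount++;          /* existing entry: just take a ref */
        } else {
            e = calloc(1, sizeof(*e));
            if (e) {
                e->key = key;       /* first user: create and insert */
                e->refcount = 1;
                e->next = cache;
                cache = e;
            }
        }
        pthread_mutex_unlock(&cache_lock);
        return e;
    }

    static void cache_put(int key)
    {
        struct entry *e, **p;

        pthread_mutex_lock(&cache_lock);
        for (p = &cache; (e = *p); p = &e->next) {
            if (e->key == key) {
                if (--e->refcount == 0) {   /* last user destroys */
                    *p = e->next;
                    free(e);
                }
                break;
            }
        }
        pthread_mutex_unlock(&cache_lock);
    }

    int main(void)
    {
        cache_get(1);
        cache_get(1);   /* second get only bumps the refcount */
        cache_put(1);
        cache_put(1);   /* entry is destroyed here */
        printf("cache is %s\n", cache ? "non-empty" : "empty");
        return 0;
    }

The kernel variant keys on {chain, prio, vport, vhca_id} and destroys the per-vport flow table on the last put.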
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
struct mlx5_esw_flow_attr attr = {};
struct mlx5_eswitch_rep rep = {};
struct mlx5_flow_table *fdb;
struct mlx5_vport *vport;
int i;
attr.prio = 1;
attr.in_rep = &rep;
mlx5_esw_for_all_vports(esw, i, vport) {
attr.in_rep->vport = vport->vport;
fdb = esw_vport_tbl_get(esw, &attr);
if (!fdb)
goto out;
}
return 0;
out:
mlx5_esw_vport_tbl_put(esw);
return PTR_ERR(fdb);
}
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
struct mlx5_esw_flow_attr attr = {};
struct mlx5_eswitch_rep rep = {};
struct mlx5_vport *vport;
int i;
attr.prio = 1;
attr.in_rep = &rep;
mlx5_esw_for_all_vports(esw, i, vport) {
attr.in_rep->vport = vport->vport;
esw_vport_tbl_put(esw, &attr);
}
}
/* End: Per vport tables */
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
@@ -191,8 +366,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
i++;
}
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
if (attr->inner_match_level != MLX5_MATCH_NONE)
@@ -201,8 +374,13 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
!!split);
if (split) {
fdb = esw_vport_tbl_get(esw, attr);
} else {
fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
0);
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
}
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
@@ -221,7 +399,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
if (split)
esw_vport_tbl_put(esw, attr);
else
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
@@ -247,7 +428,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
goto err_get_fast;
}
fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
fwd_fdb = esw_vport_tbl_get(esw, attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -285,7 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
esw_vport_tbl_put(esw, attr);
err_get_fwd:
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
@@ -312,11 +493,14 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
esw_vport_tbl_put(esw, attr);
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
} else {
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
!!split);
if (split)
esw_vport_tbl_put(esw, attr);
else
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
0);
if (attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
}
@@ -1923,6 +2107,9 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_fg_err;
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
return 0;
create_fg_err:
@@ -1939,6 +2126,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);

View file

@@ -21,8 +21,6 @@
#define fdb_ignore_flow_level_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
#define ESW_OFFLOADS_NUM_GROUPS 4
/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
* and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
* for each flow table pool. We can allocate up to 16M of each pool,
@@ -99,6 +97,11 @@ bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
}
bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
{
return fdb_ignore_flow_level_supported(esw);
}
u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
@@ -234,7 +237,7 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
}
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev,
@@ -637,7 +640,7 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
esw_debug(dev,
"Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
max_flow_counter, esw->params.large_group_num, fdb_max);
mlx5_esw_chains_init_sz_pool(esw);
@@ -704,12 +707,9 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw)
/* Open level 1 for split rules now if prios isn't supported */
if (!mlx5_esw_chains_prios_supported(esw)) {
ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
err = mlx5_esw_vport_tbl_get(esw);
if (err)
goto level_1_err;
}
}
return 0;
@@ -725,7 +725,7 @@ static void
mlx5_esw_chains_close(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
mlx5_esw_chains_put_table(esw, 0, 1, 1);
mlx5_esw_vport_tbl_put(esw);
mlx5_esw_chains_put_table(esw, 0, 1, 0);
mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
}

View file

@@ -6,6 +6,8 @@
bool
mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
bool
mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
u32
mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
u32

View file

@@ -181,7 +181,7 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
const struct mlx5_flow_spec *spec)
{
u32 port_mask, port_value;
u16 port_mask, port_value;
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return spec->flow_context.flow_source ==
@@ -191,7 +191,7 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
misc_parameters.source_port);
port_value = MLX5_GET(fte_match_param, spec->match_value,
misc_parameters.source_port);
return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
return (port_mask & port_value) == MLX5_VPORT_UPLINK;
}
bool

View file

@@ -2700,6 +2700,17 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
goto out_err;
}
/* We put this priority last, knowing that nothing will get here
* unless explicitly forwarded to. This is possible because the
* slow path tables have catch-all rules and nothing gets past
* those tables.
*/
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
}
set_prio_attrs(steering->fdb_root_ns);
return 0;
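
The comment above is the key to the new FDB_PER_VPORT level: priorities are walked in order and lookup stops at the first hit, and because the slow-path tables contain catch-all rules, a level placed after them can only be reached by an explicit forward. A toy C model of that ordering (priority names abbreviated, everything hypothetical):

    #include <stdio.h>

    enum prio { TC_OFFLOAD, FT_OFFLOAD, SLOW_PATH, PER_VPORT, NUM_PRIOS };

    static int match(enum prio p, int pkt)
    {
        if (p == SLOW_PATH)
            return 1;    /* slow path tables hold catch-all rules */
        return p == TC_OFFLOAD && pkt == 42;
    }

    static enum prio lookup(int pkt)
    {
        for (enum prio p = TC_OFFLOAD; p < NUM_PRIOS; p++)
            if (match(p, pkt))
                return p;    /* first hit wins; PER_VPORT is never reached
                              * by fall-through, only by explicit goto */
        return NUM_PRIOS;
    }

    int main(void)
    {
        printf("pkt 42 -> prio %d\n", lookup(42)); /* TC_OFFLOAD */
        printf("pkt 7  -> prio %d\n", lookup(7));  /* SLOW_PATH, not PER_VPORT */
        return 0;
    }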

View file

@@ -672,7 +672,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
dest_action = action;
if (!action->dest_tbl.is_fw_tbl) {
if (action->dest_tbl.tbl->dmn != dmn) {
mlx5dr_dbg(dmn,
mlx5dr_err(dmn,
"Destination table belongs to a different domain\n");
goto out_invalid_arg;
}
@@ -703,7 +703,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->dest_tbl.fw_tbl.rx_icm_addr =
output.sw_owner_icm_root_0;
} else {
mlx5dr_dbg(dmn,
mlx5dr_err(dmn,
"Failed mlx5_cmd_query_flow_table ret: %d\n",
ret);
return ret;
@@ -772,7 +772,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
/* Check action duplication */
if (++action_type_set[action_type] > max_actions_type) {
mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n",
mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n",
action_type, max_actions_type);
goto out_invalid_arg;
}
@@ -781,7 +781,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
if (dr_action_validate_and_get_next_state(action_domain,
action_type,
&state)) {
mlx5dr_dbg(dmn, "Invalid action sequence provided\n");
mlx5dr_err(dmn, "Invalid action sequence provided\n");
return -EOPNOTSUPP;
}
}
@@ -797,7 +797,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
rx_rule && recalc_cs_required && dest_action) {
ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
if (ret) {
mlx5dr_dbg(dmn,
mlx5dr_err(dmn,
"Failed to handle checksum recalculation err %d\n",
ret);
return ret;

View file

@@ -59,7 +59,7 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
return ret;
}
@@ -192,7 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
ret = dr_domain_query_vports(dmn);
if (ret) {
mlx5dr_dbg(dmn, "Failed to query vports caps\n");
mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
goto free_vports_caps;
}
@@ -213,7 +213,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
int ret;
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
return -EOPNOTSUPP;
}
@@ -257,7 +257,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
if (!vport_cap) {
mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
mlx5dr_err(dmn, "Failed to get esw manager vport\n");
return -ENOENT;
}
@@ -268,7 +268,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
break;
default:
mlx5dr_dbg(dmn, "Invalid domain\n");
mlx5dr_err(dmn, "Invalid domain\n");
ret = -EINVAL;
break;
}
@@ -300,7 +300,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
mutex_init(&dmn->mutex);
if (dr_domain_caps_init(mdev, dmn)) {
mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
mlx5dr_err(dmn, "Failed init domain, no caps\n");
goto free_domain;
}
@@ -348,8 +348,11 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
mutex_lock(&dmn->mutex);
ret = mlx5dr_send_ring_force_drain(dmn);
mutex_unlock(&dmn->mutex);
if (ret)
if (ret) {
mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
flags, ret);
return ret;
}
}
if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)

View file

@@ -468,7 +468,7 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
if (err) {
dr_icm_chill_buckets_abort(pool, bucket, buckets);
mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
mlx5dr_err(pool->dmn, "Sync_steering failed\n");
chunk = NULL;
goto out;
}

View file

@@ -388,14 +388,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
if (idx == 0) {
mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n");
mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
return -EINVAL;
}
/* Check that all mask fields were consumed */
for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
if (((u8 *)&mask)[i] != 0) {
mlx5dr_info(dmn, "Mask contains unsupported parameters\n");
mlx5dr_err(dmn, "Mask contains unsupported parameters\n");
return -EOPNOTSUPP;
}
}
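
The loop above relies on a consume-then-verify scheme: each ste builder zeroes the mask bits it was able to handle, so any byte still nonzero afterwards identifies an unsupported match field. The same idea in a standalone C sketch (field names hypothetical):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct match_mask {           /* stands in for mlx5dr_match_param */
        uint32_t src_ip;
        uint32_t dst_ip;
        uint16_t vlan_id;         /* pretend no builder handles this field */
    };

    static int all_consumed(const struct match_mask *m)
    {
        const uint8_t *p = (const uint8_t *)m;
        size_t i;

        for (i = 0; i < sizeof(*m); i++)
            if (p[i])
                return 0;         /* leftover bits: unsupported parameter */
        return 1;
    }

    int main(void)
    {
        struct match_mask m;

        memset(&m, 0, sizeof(m)); /* start from an all-zero mask */
        m.src_ip = 0xffffffff;
        m.vlan_id = 0xfff;

        m.src_ip = 0;             /* the src_ip builder consumed its bits */
        printf("mask %s\n", all_consumed(&m) ?
               "fully consumed" : "contains unsupported parameters");
        return 0;
    }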
@@ -563,7 +563,7 @@ static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
if (!nic_matcher->ste_builder) {
mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
return -EINVAL;
}
@@ -634,13 +634,13 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
int ret;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
mlx5dr_info(dmn, "Invalid match criteria attribute\n");
mlx5dr_err(dmn, "Invalid match criteria attribute\n");
return -EINVAL;
}
if (mask) {
if (mask->match_sz > sizeof(struct mlx5dr_match_param)) {
mlx5dr_info(dmn, "Invalid match size attribute\n");
mlx5dr_err(dmn, "Invalid match size attribute\n");
return -EINVAL;
}
mlx5dr_ste_copy_param(matcher->match_criteria,
@@ -671,7 +671,7 @@ struct mlx5dr_matcher *
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *tbl,
u16 priority,
u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask)
{

View file

@@ -826,8 +826,8 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
ste_location, send_ste_list);
if (!new_htbl) {
mlx5dr_htbl_put(cur_htbl);
mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
cur_htbl->chunk_size);
mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
cur_htbl->chunk_size);
} else {
cur_htbl = new_htbl;
}
@@ -877,7 +877,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
if (!value_size ||
(value_size > sizeof(struct mlx5dr_match_param) ||
(value_size % sizeof(u32)))) {
mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
return false;
}
@@ -888,7 +888,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->outer), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
return false;
}
}
@@ -898,7 +898,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
return false;
}
}
@@ -908,7 +908,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->inner), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
return false;
}
}
@@ -918,7 +918,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc2), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
return false;
}
}
@@ -928,7 +928,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc3), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
return false;
}
}
@@ -1221,7 +1221,7 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
dr_rule_remove_action_members(rule);
free_rule:
kfree(rule);
mlx5dr_info(dmn, "Failed creating rule\n");
mlx5dr_err(dmn, "Failed creating rule\n");
return NULL;
}

View file

@@ -136,7 +136,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
&dr_qp->wq_ctrl);
if (err) {
mlx5_core_info(mdev, "Can't create QP WQ\n");
mlx5_core_warn(mdev, "Can't create QP WQ\n");
goto err_wq;
}
@@ -651,8 +651,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
/* Init */
ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
if (ret)
if (ret) {
mlx5dr_err(dmn, "Failed modify QP rst2init\n");
return ret;
}
/* RTR */
ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
@@ -667,8 +669,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
if (ret)
if (ret) {
mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
return ret;
}
/* RTS */
rts_attr.timeout = 14;
@@ -676,8 +680,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
rts_attr.rnr_retry = 7;
ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
if (ret)
if (ret) {
mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
return ret;
}
return 0;
}
@@ -861,6 +867,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
cq_size = QUEUE_SIZE + 1;
dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
if (!dmn->send_ring->cq) {
mlx5dr_err(dmn, "Failed creating CQ\n");
ret = -ENOMEM;
goto free_send_ring;
}
@@ -872,6 +879,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
if (!dmn->send_ring->qp) {
mlx5dr_err(dmn, "Failed creating QP\n");
ret = -ENOMEM;
goto clean_cq;
}

View file

@@ -728,7 +728,7 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
{
if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
return -EINVAL;
}
}

View file

@@ -128,16 +128,20 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
DR_CHUNK_SIZE_1,
MLX5DR_STE_LU_TYPE_DONT_CARE,
0);
if (!nic_tbl->s_anchor)
if (!nic_tbl->s_anchor) {
mlx5dr_err(dmn, "Failed allocating htbl\n");
return -ENOMEM;
}
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_dmn->default_icm_addr;
ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
nic_tbl->s_anchor,
&info, true);
if (ret)
if (ret) {
mlx5dr_err(dmn, "Failed int and send htbl\n");
goto free_s_anchor;
}
mlx5dr_htbl_get(nic_tbl->s_anchor);

View file

@@ -705,7 +705,7 @@ struct mlx5dr_matcher {
struct mlx5dr_matcher_rx_tx rx;
struct mlx5dr_matcher_rx_tx tx;
struct list_head matcher_list;
u16 prio;
u32 prio;
struct mlx5dr_match_param mask;
u8 match_criteria;
refcount_t refcount;

View file

@@ -140,7 +140,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_group *fg)
{
struct mlx5dr_matcher *matcher;
u16 priority = MLX5_GET(create_flow_group_in, in,
u32 priority = MLX5_GET(create_flow_group_in, in,
start_flow_index);
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
in,

View file

@@ -59,7 +59,7 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *table,
u16 priority,
u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask);
@@ -151,7 +151,7 @@ mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; }
static inline struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *table,
u16 priority,
u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask) { return NULL; }

View file

@@ -84,6 +84,7 @@ enum {
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
FDB_SLOW_PATH,
FDB_PER_VPORT,
};
struct mlx5_pkt_reformat;