Merge tag 'mlx5-updates-2021-10-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

mlx5-updates-2021-10-18

Maor Gottlieb says:
========================
Use hash to select the affinity port in VF LAG

The current VF LAG architecture is based on associating a QP with a port.
A QP must be created after LAG is enabled to allow association with a non-native port.
VM packets taking the slow path to the eSwitch manager (SW path or hairpin) are transmitted
through a different QP than the VM's, so different packets of the same flow might
egress from different physical ports.

This patch set solves the issue by basing the port selection on the hash function
defined by the bond.

When the device is moved to VF LAG mode, the driver creates TTC (traffic type classifier) flow
tables in order to classify each packet and steer it to the relevant hash function, similar to
what is done in the mlx5 RSS implementation.
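
For illustration, here is a condensed sketch of the outer TTC setup, based on the new
lag/port_sel.c added further down in this series (the wrapper name is illustrative and error
handling is elided); each enabled traffic type is steered to its own per-type port selection
table, described next:

static int lag_create_outer_ttc_sketch(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};
	int tt;

	/* Classification is done in the new PORT_SEL steering namespace */
	ttc_params.ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
	ttc_params.ft_attr.level = MLX5_LAG_FT_LEVEL_TTC;

	/* Every enabled traffic type is forwarded to its per-type
	 * port selection flow table (built around a match definer).
	 */
	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params.dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params.dests[tt].ft = port_sel->outer.definers[tt]->ft;
	}

	port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
	return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
}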

Each rule in the TTC table forwards the packet to a port selection flow table, which has one hash
split flow group containing two "catch all" flow table entries. Each entry points to the
corresponding uplink port, as shown below:

		-------------------
		| FT              |
TTC rule ->	|     ----------- |
		|   FG|   FTE --|-|-----> uplink of port #1
		|     |   FTE --|-|-----> uplink of port #2
		|     ----------- |
		-------------------
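
A condensed sketch of how one such port selection table is built, again based on the new
lag/port_sel.c below (wrapper name illustrative, IS_ERR() checks and cleanup elided); the flow
group is created with type HASH_SPLIT and bound to a match definer, and the two catch-all
entries forward to the uplinks of the two ports:

static int lag_create_port_sel_table_sketch(struct mlx5_lag *ldev,
					    struct mlx5_flow_definer *definer,
					    u8 port1, u8 port2)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_flow_table_attr ft_attr = {
		.max_fte = MLX5_MAX_PORTS,	/* one catch-all FTE per port */
		.level	 = MLX5_LAG_FT_LEVEL_DEFINER,
	};
	struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	u32 *in;
	int i;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
	ft = mlx5_create_flow_table(ns, &ft_attr);

	/* The hash split flow group is bound to the match definer */
	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;
	MLX5_SET(create_flow_group_in, in, group_type,
		 MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
	MLX5_SET(create_flow_group_in, in, match_definer_id,
		 mlx5_get_match_definer_id(definer));
	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
	fg = mlx5_create_flow_group(ft, in);
	kvfree(in);

	/* Two catch-all FTEs, each forwarding to one uplink */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		u8 affinity = i == 0 ? port1 : port2;

		dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
						  vhca_id);
		rules[i] = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
	}
	return 0;
}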

A hash split flow group is a flow group created with type HASH_SPLIT and associated with a match
definer. The match definer defines the fields that are included in the hash calculation.

The driver creates the match definer according to the xmit hash policy of the bond driver.
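
For example, with an xmit hash policy of layer3+4, IPv4 TCP/UDP traffic is hashed on the 4-tuple;
a trimmed sketch of the corresponding definer, using format 22 and the helpers introduced below
(wrapper name illustrative, only this one traffic type shown):

static struct mlx5_flow_definer *
lag_create_l34_ipv4_definer_sketch(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_definer *definer;
	u32 *mask;

	mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer, match_mask), GFP_KERNEL);
	if (!mask)
		return ERR_PTR(-ENOMEM);

	/* Layer3+4 policy, IPv4 TCP/UDP traffic: hash on the 4-tuple,
	 * using match definer format 22 (outer headers).
	 */
	MLX5_SET_TO_ONES(match_definer_format_22, mask, outer_l4_sport);
	MLX5_SET_TO_ONES(match_definer_format_22, mask, outer_l4_dport);
	MLX5_SET_TO_ONES(match_definer_format_22, mask, outer_ip_src_addr);
	MLX5_SET_TO_ONES(match_definer_format_22, mask, outer_ip_dest_addr);

	definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					    22, mask);
	kvfree(mask);
	return definer;
}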

Patches overview:
========================

Minor E-Switch updates:
- Patch #12, dynamic allocation of the dest array
- Patch #13, increase the number of forward destinations to 32

Signed-off-by: David S. Miller <davem@davemloft.net>

@ -14,7 +14,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
fw_reset.o qos.o lib/tout.o
@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o \
en/mapping.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \


@ -235,6 +235,9 @@ const char *parse_fs_dst(struct trace_seq *p,
const char *ret = trace_seq_buffer_ptr(p);
switch (dst->type) {
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
trace_seq_printf(p, "uplink\n");
break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
trace_seq_printf(p, "vport=%u\n", dst->vport.num);
break;


@ -433,7 +433,7 @@ enum mlx5_flow_match_level {
};
/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2
#define MLX5_MAX_FLOW_FWD_VPORTS 32
enum {
MLX5_ESW_DEST_ENCAP = BIT(0),


@ -482,12 +482,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = !!(esw_attr->split_count);
struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_destination *dest;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int i = 0;
@ -495,6 +495,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
@ -574,6 +578,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
else
atomic64_inc(&esw->offloads.num_flows);
kfree(dest);
return rule;
err_add_rule:
@ -584,6 +589,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
err_esw_get:
esw_cleanup_dests(esw, attr);
err_create_goto_table:
kfree(dest);
return rule;
}
@ -592,16 +598,20 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_destination *dest;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i, err = 0;
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
@ -654,6 +664,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
atomic64_inc(&esw->offloads.num_flows);
kfree(dest);
return rule;
err_chain_src_rewrite:
esw_put_dest_tables_loop(esw, attr, 0, i);
@ -661,6 +672,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
kfree(dest);
return rule;
}


@ -185,6 +185,20 @@ static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}
static int
mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
int definer_id)
{
return 0;
}
static int
mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask)
{
return 0;
}
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
@ -563,8 +577,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
id = dst->dest_attr.vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(dst->dest_attr.vport.flags &
@ -572,6 +586,12 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
/* destination_id is reserved */
id = 0;
break;
}
id = dst->dest_attr.vport.num;
if (extended_dest &&
dst->dest_attr.vport.pkt_reformat) {
MLX5_SET(dest_format_struct, in_dests,
@ -909,6 +929,45 @@ static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
int definer_id)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_OBJ_TYPE_MATCH_DEFINER);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask)
{
u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
void *ptr;
int err;
MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
MLX5_OBJ_TYPE_MATCH_DEFINER);
ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
MLX5_SET(match_definer, ptr, format_id, format_id);
ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
@ -923,6 +982,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
.create_match_definer = mlx5_cmd_create_match_definer,
.destroy_match_definer = mlx5_cmd_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@ -942,6 +1003,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
.create_match_definer = mlx5_cmd_stub_create_match_definer,
.destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@ -969,6 +1032,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
case FS_FT_PORT_SEL:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();


@ -97,6 +97,10 @@ struct mlx5_flow_cmds {
int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
int (*create_match_definer)(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask);
int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
int definer_id);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);


@ -2191,6 +2191,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
return NULL;
case MLX5_FLOW_NAMESPACE_PORT_SEL:
if (steering->port_sel_root_ns)
return &steering->port_sel_root_ns->ns;
return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@ -2596,6 +2600,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
@ -2634,6 +2639,21 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return PTR_ERR_OR_ZERO(prio);
}
#define PORT_SEL_NUM_LEVELS 3
static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
if (!steering->port_sel_root_ns)
return -ENOMEM;
/* Create single prio */
prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
PORT_SEL_NUM_LEVELS);
return PTR_ERR_OR_ZERO(prio);
}
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@ -3020,6 +3040,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
err = init_port_sel_root_ns(steering);
if (err)
goto err;
}
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
@ -3224,6 +3250,52 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
{
return definer->id;
}
struct mlx5_flow_definer *
mlx5_create_match_definer(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type ns_type, u16 format_id,
u32 *match_mask)
{
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_definer *definer;
int id;
root = get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
definer = kzalloc(sizeof(*definer), GFP_KERNEL);
if (!definer)
return ERR_PTR(-ENOMEM);
definer->ns_type = ns_type;
id = root->cmds->create_match_definer(root, format_id, match_mask);
if (id < 0) {
mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
kfree(definer);
return ERR_PTR(id);
}
definer->id = id;
return definer;
}
void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
struct mlx5_flow_definer *definer)
{
struct mlx5_flow_root_namespace *root;
root = get_root_namespace(dev, definer->ns_type);
if (WARN_ON(!root))
return;
root->cmds->destroy_match_definer(root, definer->id);
kfree(definer);
}
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{


@ -49,6 +49,11 @@
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
struct mlx5_flow_definer {
enum mlx5_flow_namespace_type ns_type;
u32 id;
};
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
union {
@ -97,7 +102,8 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
FS_FT_MAX_TYPE = FS_FT_RDMA_TX,
FS_FT_PORT_SEL = 0X9,
FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
};
enum fs_flow_table_op_mod {
@ -129,6 +135,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
};
@ -341,7 +348,8 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
(BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
(BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
)
#endif


@ -149,6 +149,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (err)
return err;
if (MLX5_CAP_GEN(dev, port_selection_cap)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, hca_cap_2)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
if (err)


@ -38,7 +38,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
#include "lag_mp.h"
#include "mp.h"
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@ -47,16 +47,21 @@
static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2, bool shared_fdb)
u8 remap_port2, bool shared_fdb, u8 flags)
{
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
} else {
MLX5_SET(lagc, lag_ctx, port_select_mode,
MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
}
return mlx5_cmd_exec_in(dev, create_lag, in);
}
@ -199,6 +204,15 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
*port1 = 2;
}
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@ -211,39 +225,56 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
if (err)
err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
if (err) {
mlx5_core_err(dev0,
"Failed to modify LAG (%d)\n",
err);
return;
}
ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
}
}
static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
struct lag_tracker *tracker, u8 *flags)
{
bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
if (roce_lag ||
!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
return;
*flags |= MLX5_LAG_FLAG_HASH_BASED;
}
static char *get_str_port_sel_mode(u8 flags)
{
if (flags & MLX5_LAG_FLAG_HASH_BASED)
return "hash";
return "queue_affinity";
}
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
bool shared_fdb)
bool shared_fdb, u8 flags)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
&ldev->v2p_map[MLX5_LAG_P2]);
mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d",
mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s",
ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
shared_fdb);
shared_fdb, get_str_port_sel_mode(flags));
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2], shared_fdb);
ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@ -279,16 +310,32 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
err = mlx5_create_lag(ldev, tracker, shared_fdb);
mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
&ldev->v2p_map[MLX5_LAG_P2]);
mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
if (flags & MLX5_LAG_FLAG_HASH_BASED) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG port selection(%d)\n",
err);
return err;
}
}
err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
if (err) {
if (roce_lag) {
if (flags & MLX5_LAG_FLAG_HASH_BASED)
mlx5_lag_port_sel_destroy(ldev);
if (roce_lag)
mlx5_core_err(dev0,
"Failed to activate RoCE LAG\n");
} else {
else
mlx5_core_err(dev0,
"Failed to activate VF LAG\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
return err;
}
@ -302,6 +349,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
u8 flags = ldev->flags;
int err;
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
@ -324,6 +372,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
} else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
mlx5_lag_port_sel_destroy(ldev);
}
return err;
@ -588,8 +638,10 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
if (!(bond_status & 0x3))
return 0;
if (lag_upper_info)
if (lag_upper_info) {
tracker->tx_type = lag_upper_info->tx_type;
tracker->hash_type = lag_upper_info->hash_type;
}
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves


@ -5,7 +5,8 @@
#define __MLX5_LAG_H__
#include "mlx5_core.h"
#include "lag_mp.h"
#include "mp.h"
#include "port_sel.h"
enum {
MLX5_LAG_P1,
@ -17,10 +18,12 @@ enum {
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
MLX5_LAG_FLAG_READY = 1 << 3,
MLX5_LAG_FLAG_HASH_BASED = 1 << 4,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
MLX5_LAG_FLAG_MULTIPATH)
MLX5_LAG_FLAG_MULTIPATH | \
MLX5_LAG_FLAG_HASH_BASED)
struct lag_func {
struct mlx5_core_dev *dev;
@ -32,6 +35,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
enum netdev_lag_hash hash_type;
};
/* LAG data of a ConnectX card.
@ -49,6 +53,7 @@ struct mlx5_lag {
struct delayed_work bond_work;
struct notifier_block nb;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
};
static inline struct mlx5_lag *


@ -3,8 +3,8 @@
#include <linux/netdevice.h>
#include <net/nexthop.h>
#include "lag.h"
#include "lag_mp.h"
#include "lag/lag.h"
#include "lag/mp.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "lib/mlx5.h"


@ -0,0 +1,611 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
#include <linux/netdevice.h>
#include "lag.h"
enum {
MLX5_LAG_FT_LEVEL_TTC,
MLX5_LAG_FT_LEVEL_INNER_TTC,
MLX5_LAG_FT_LEVEL_DEFINER,
};
static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_definer *definer)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
u32 *in;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return ERR_PTR(-ENOMEM);
MLX5_SET(create_flow_group_in, in, match_definer_id,
mlx5_get_match_definer_id(definer));
MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
MLX5_SET(create_flow_group_in, in, group_type,
MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
fg = mlx5_create_flow_group(ft, in);
kvfree(in);
return fg;
}
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 port1, u8 port2)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
int err, i;
ft_attr.max_fte = MLX5_MAX_PORTS;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
if (!ns) {
mlx5_core_warn(dev, "Failed to get port selection namespace\n");
return -EOPNOTSUPP;
}
lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(lag_definer->ft)) {
mlx5_core_warn(dev, "Failed to create port selection table\n");
return PTR_ERR(lag_definer->ft);
}
lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
lag_definer->definer);
if (IS_ERR(lag_definer->fg)) {
err = PTR_ERR(lag_definer->fg);
goto destroy_ft;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
u8 affinity = i == 0 ? port1 : port2;
dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
vhca_id);
lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
NULL, &flow_act,
&dest, 1);
if (IS_ERR(lag_definer->rules[i])) {
err = PTR_ERR(lag_definer->rules[i]);
while (i--)
mlx5_del_flow_rules(lag_definer->rules[i]);
goto destroy_fg;
}
}
return 0;
destroy_fg:
mlx5_destroy_flow_group(lag_definer->fg);
destroy_ft:
mlx5_destroy_flow_table(lag_definer->ft);
return err;
}
static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
enum mlx5_traffic_types tt)
{
int format_id;
u8 *ipv6;
switch (tt) {
case MLX5_TT_IPV4_UDP:
case MLX5_TT_IPV4_TCP:
format_id = 23;
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_l4_sport);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_l4_dport);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_ip_src_addr);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_ip_dest_addr);
break;
case MLX5_TT_IPV4:
format_id = 23;
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_l3_type);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_dmac_47_16);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_dmac_15_0);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_smac_47_16);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_smac_15_0);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_ip_src_addr);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_ip_dest_addr);
break;
case MLX5_TT_IPV6_TCP:
case MLX5_TT_IPV6_UDP:
format_id = 31;
MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
inner_l4_sport);
MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
inner_l4_dport);
ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
inner_ip_dest_addr);
memset(ipv6, 0xff, 16);
ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
inner_ip_src_addr);
memset(ipv6, 0xff, 16);
break;
case MLX5_TT_IPV6:
format_id = 32;
ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
inner_ip_dest_addr);
memset(ipv6, 0xff, 16);
ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
inner_ip_src_addr);
memset(ipv6, 0xff, 16);
MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
inner_dmac_47_16);
MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
inner_dmac_15_0);
MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
inner_smac_47_16);
MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
inner_smac_15_0);
break;
default:
format_id = 23;
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_l3_type);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_dmac_47_16);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_dmac_15_0);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_smac_47_16);
MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
inner_smac_15_0);
break;
}
return format_id;
}
static int mlx5_lag_set_definer(u32 *match_definer_mask,
enum mlx5_traffic_types tt, bool tunnel,
enum netdev_lag_hash hash)
{
int format_id;
u8 *ipv6;
if (tunnel)
return mlx5_lag_set_definer_inner(match_definer_mask, tt);
switch (tt) {
case MLX5_TT_IPV4_UDP:
case MLX5_TT_IPV4_TCP:
format_id = 22;
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_l4_sport);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_l4_dport);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_ip_src_addr);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_ip_dest_addr);
break;
case MLX5_TT_IPV4:
format_id = 22;
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_l3_type);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_dmac_47_16);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_dmac_15_0);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_smac_47_16);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_smac_15_0);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_ip_src_addr);
MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
outer_ip_dest_addr);
break;
case MLX5_TT_IPV6_TCP:
case MLX5_TT_IPV6_UDP:
format_id = 29;
MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
outer_l4_sport);
MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
outer_l4_dport);
ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
outer_ip_dest_addr);
memset(ipv6, 0xff, 16);
ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
outer_ip_src_addr);
memset(ipv6, 0xff, 16);
break;
case MLX5_TT_IPV6:
format_id = 30;
ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
outer_ip_dest_addr);
memset(ipv6, 0xff, 16);
ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
outer_ip_src_addr);
memset(ipv6, 0xff, 16);
MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
outer_dmac_47_16);
MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
outer_dmac_15_0);
MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
outer_smac_47_16);
MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
outer_smac_15_0);
break;
default:
format_id = 0;
MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
outer_smac_47_16);
MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
outer_smac_15_0);
if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
MLX5_SET_TO_ONES(match_definer_format_0,
match_definer_mask,
outer_first_vlan_vid);
break;
}
MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
outer_ethertype);
MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
outer_dmac_47_16);
MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
outer_dmac_15_0);
break;
}
return format_id;
}
static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 port1,
u8 port2)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_definer *lag_definer;
u32 *match_definer_mask;
int format_id, err;
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
return ERR_PTR(-ENOMEM);
match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
match_mask),
GFP_KERNEL);
if (!match_definer_mask) {
err = -ENOMEM;
goto free_lag_definer;
}
format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
lag_definer->definer =
mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
format_id, match_definer_mask);
if (IS_ERR(lag_definer->definer)) {
err = PTR_ERR(lag_definer->definer);
goto free_mask;
}
err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
if (err)
goto destroy_match_definer;
kvfree(match_definer_mask);
return lag_definer;
destroy_match_definer:
mlx5_destroy_match_definer(dev, lag_definer->definer);
free_mask:
kvfree(match_definer_mask);
free_lag_definer:
kfree(lag_definer);
return ERR_PTR(err);
}
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
mlx5_del_flow_rules(lag_definer->rules[i]);
mlx5_destroy_flow_group(lag_definer->fg);
mlx5_destroy_flow_table(lag_definer->ft);
mlx5_destroy_match_definer(dev, lag_definer->definer);
kfree(lag_definer);
}
static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int tt;
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
if (port_sel->outer.definers[tt])
mlx5_lag_destroy_definer(ldev,
port_sel->outer.definers[tt]);
if (port_sel->inner.definers[tt])
mlx5_lag_destroy_definer(ldev,
port_sel->inner.definers[tt]);
}
}
static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type,
u8 port1, u8 port2)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_lag_definer *lag_definer;
int tt, err;
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
false, port1, port2);
if (IS_ERR(lag_definer)) {
err = PTR_ERR(lag_definer);
goto destroy_definers;
}
port_sel->outer.definers[tt] = lag_definer;
if (!port_sel->tunnel)
continue;
lag_definer =
mlx5_lag_create_definer(ldev, hash_type, tt,
true, port1, port2);
if (IS_ERR(lag_definer)) {
err = PTR_ERR(lag_definer);
goto destroy_definers;
}
port_sel->inner.definers[tt] = lag_definer;
}
return 0;
destroy_definers:
mlx5_lag_destroy_definers(ldev);
return err;
}
static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
enum netdev_lag_hash hash)
{
port_sel->tunnel = false;
switch (hash) {
case NETDEV_LAG_HASH_E34:
port_sel->tunnel = true;
fallthrough;
case NETDEV_LAG_HASH_L34:
set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
set_bit(MLX5_TT_IPV4, port_sel->tt_map);
set_bit(MLX5_TT_IPV6, port_sel->tt_map);
set_bit(MLX5_TT_ANY, port_sel->tt_map);
break;
case NETDEV_LAG_HASH_E23:
port_sel->tunnel = true;
fallthrough;
case NETDEV_LAG_HASH_L23:
set_bit(MLX5_TT_IPV4, port_sel->tt_map);
set_bit(MLX5_TT_IPV6, port_sel->tt_map);
set_bit(MLX5_TT_ANY, port_sel->tt_map);
break;
default:
set_bit(MLX5_TT_ANY, port_sel->tt_map);
break;
}
}
#define SET_IGNORE_DESTS_BITS(tt_map, dests) \
do { \
int idx; \
\
for_each_clear_bit(idx, tt_map, MLX5_NUM_TT) \
set_bit(idx, dests); \
} while (0)
static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
ttc_params->ns = mlx5_get_flow_namespace(dev,
MLX5_FLOW_NAMESPACE_PORT_SEL);
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
ttc_params->dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
}
SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
}
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
ttc_params->ns = mlx5_get_flow_namespace(dev,
MLX5_FLOW_NAMESPACE_PORT_SEL);
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
ttc_params->dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
}
SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
ttc_params->inner_ttc = port_sel->tunnel;
if (!port_sel->tunnel)
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
mlx5_get_ttc_flow_table(port_sel->inner.ttc);
}
}
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
if (IS_ERR(port_sel->outer.ttc))
return PTR_ERR(port_sel->outer.ttc);
return 0;
}
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
if (IS_ERR(port_sel->inner.ttc))
return PTR_ERR(port_sel->inner.ttc);
return 0;
}
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type, u8 port1, u8 port2)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err;
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, port1, port2);
if (err)
return err;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
if (err)
goto destroy_definers;
}
err = mlx5_lag_create_ttc_table(ldev);
if (err)
goto destroy_inner;
return 0;
destroy_inner:
if (port_sel->tunnel)
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
return err;
}
static int
mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
struct mlx5_lag_definer **definers,
u8 port1, u8 port2)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_destination dest = {};
int err;
int tt;
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
struct mlx5_flow_handle **rules = definers[tt]->rules;
if (ldev->v2p_map[MLX5_LAG_P1] != port1) {
dest.vport.vhca_id =
MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id);
err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
&dest, NULL);
if (err)
return err;
}
if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
dest.vport.vhca_id =
MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id);
err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2],
&dest, NULL);
if (err)
return err;
}
}
return 0;
}
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err;
err = mlx5_lag_modify_definers_destinations(ldev,
port_sel->outer.definers,
port1, port2);
if (err)
return err;
if (!port_sel->tunnel)
return 0;
return mlx5_lag_modify_definers_destinations(ldev,
port_sel->inner.definers,
port1, port2);
}
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
mlx5_destroy_ttc_table(port_sel->outer.ttc);
if (port_sel->tunnel)
mlx5_destroy_ttc_table(port_sel->inner.ttc);
mlx5_lag_destroy_definers(ldev);
}


@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
#ifndef __MLX5_LAG_FS_H__
#define __MLX5_LAG_FS_H__
#include "lib/fs_ttc.h"
struct mlx5_lag_definer {
struct mlx5_flow_definer *definer;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
};
struct mlx5_lag_ttc {
struct mlx5_ttc_table *ttc;
struct mlx5_lag_definer *definers[MLX5_NUM_TT];
};
struct mlx5_lag_port_sel {
DECLARE_BITMAP(tt_map, MLX5_NUM_TT);
bool tunnel;
struct mlx5_lag_ttc outer;
struct mlx5_lag_ttc inner;
};
#ifdef CONFIG_MLX5_ESWITCH
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2);
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type, u8 port1,
u8 port2);
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type,
u8 port1, u8 port2)
{
return 0;
}
static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1,
u8 port2)
{
return 0;
}
static inline void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_LAG_FS_H__ */


@ -247,6 +247,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@ -266,6 +268,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
if (!mlx5_tunnel_proto_supported_rx(dev,
ttc_tunnel_rules[tt].proto))
continue;
if (test_bit(tt, params->ignore_tunnel_dests))
continue;
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,


@ -43,7 +43,9 @@ struct ttc_params {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
bool inner_ttc;
DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};


@ -1416,6 +1416,7 @@ static const int types[] = {
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)


@ -625,6 +625,19 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n
mlx5dr_action_destroy(modify_hdr->action.dr_action);
}
static int
mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
int definer_id)
{
return -EOPNOTSUPP;
}
static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask)
{
return -EOPNOTSUPP;
}
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
@ -727,6 +740,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
.create_match_definer = mlx5_cmd_dr_create_match_definer,
.destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,


@ -1185,6 +1185,7 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@ -1342,6 +1343,20 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
MLX5_GET(port_selection_cap, \
mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
MLX5_GET(port_selection_cap, \
mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)


@ -83,6 +83,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
MLX5_FLOW_NAMESPACE_PORT_SEL,
};
enum {
@ -97,6 +98,7 @@ enum {
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@ -257,6 +259,13 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
struct mlx5_flow_definer *
mlx5_create_match_definer(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type ns_type, u16 format_id,
u32 *match_mask);
void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
struct mlx5_flow_definer *definer);
int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
struct mlx5_pkt_reformat_params {
int type;


@ -94,6 +94,7 @@ enum {
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@ -767,6 +768,18 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_at_20c0[0x5f40];
};
struct mlx5_ifc_port_selection_cap_bits {
u8 reserved_at_0[0x10];
u8 port_select_flow_table[0x1];
u8 reserved_at_11[0xf];
u8 reserved_at_20[0x1e0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
u8 reserved_at_400[0x7c00];
};
enum {
MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
@ -1515,7 +1528,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uar_4k[0x1];
u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
u8 reserved_at_248[0x2];
u8 port_selection_cap[0x1];
u8 reserved_at_248[0x1];
u8 umem_uid_0[0x1];
u8 reserved_at_250[0x5];
u8 log_pg_sz[0x8];
@ -1718,7 +1732,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
u8 reserved_at_6e0[0x10];
u8 max_num_match_definer[0x10];
u8 sf_base_id[0x10];
u8 flex_parser_id_gtpu_dw_2[0x4];
@ -1733,7 +1747,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_760[0x20];
u8 vhca_tunnel_commands[0x40];
u8 reserved_at_7c0[0x40];
u8 match_definer_format_supported[0x40];
};
struct mlx5_ifc_cmd_hca_cap_2_bits {
@ -1752,6 +1766,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
@ -3164,6 +3179,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
@ -5652,6 +5668,236 @@ struct mlx5_ifc_query_fte_in_bits {
u8 reserved_at_120[0xe0];
};
struct mlx5_ifc_match_definer_format_0_bits {
u8 reserved_at_0[0x100];
u8 metadata_reg_c_0[0x20];
u8 metadata_reg_c_1[0x20];
u8 outer_dmac_47_16[0x20];
u8 outer_dmac_15_0[0x10];
u8 outer_ethertype[0x10];
u8 reserved_at_180[0x1];
u8 sx_sniffer[0x1];
u8 functional_lb[0x1];
u8 outer_ip_frag[0x1];
u8 outer_qp_type[0x2];
u8 outer_encap_type[0x2];
u8 port_number[0x2];
u8 outer_l3_type[0x2];
u8 outer_l4_type[0x2];
u8 outer_first_vlan_type[0x2];
u8 outer_first_vlan_prio[0x3];
u8 outer_first_vlan_cfi[0x1];
u8 outer_first_vlan_vid[0xc];
u8 outer_l4_type_ext[0x4];
u8 reserved_at_1a4[0x2];
u8 outer_ipsec_layer[0x2];
u8 outer_l2_type[0x2];
u8 force_lb[0x1];
u8 outer_l2_ok[0x1];
u8 outer_l3_ok[0x1];
u8 outer_l4_ok[0x1];
u8 outer_second_vlan_type[0x2];
u8 outer_second_vlan_prio[0x3];
u8 outer_second_vlan_cfi[0x1];
u8 outer_second_vlan_vid[0xc];
u8 outer_smac_47_16[0x20];
u8 outer_smac_15_0[0x10];
u8 inner_ipv4_checksum_ok[0x1];
u8 inner_l4_checksum_ok[0x1];
u8 outer_ipv4_checksum_ok[0x1];
u8 outer_l4_checksum_ok[0x1];
u8 inner_l3_ok[0x1];
u8 inner_l4_ok[0x1];
u8 outer_l3_ok_duplicate[0x1];
u8 outer_l4_ok_duplicate[0x1];
u8 outer_tcp_cwr[0x1];
u8 outer_tcp_ece[0x1];
u8 outer_tcp_urg[0x1];
u8 outer_tcp_ack[0x1];
u8 outer_tcp_psh[0x1];
u8 outer_tcp_rst[0x1];
u8 outer_tcp_syn[0x1];
u8 outer_tcp_fin[0x1];
};
struct mlx5_ifc_match_definer_format_22_bits {
u8 reserved_at_0[0x100];
u8 outer_ip_src_addr[0x20];
u8 outer_ip_dest_addr[0x20];
u8 outer_l4_sport[0x10];
u8 outer_l4_dport[0x10];
u8 reserved_at_160[0x1];
u8 sx_sniffer[0x1];
u8 functional_lb[0x1];
u8 outer_ip_frag[0x1];
u8 outer_qp_type[0x2];
u8 outer_encap_type[0x2];
u8 port_number[0x2];
u8 outer_l3_type[0x2];
u8 outer_l4_type[0x2];
u8 outer_first_vlan_type[0x2];
u8 outer_first_vlan_prio[0x3];
u8 outer_first_vlan_cfi[0x1];
u8 outer_first_vlan_vid[0xc];
u8 metadata_reg_c_0[0x20];
u8 outer_dmac_47_16[0x20];
u8 outer_smac_47_16[0x20];
u8 outer_smac_15_0[0x10];
u8 outer_dmac_15_0[0x10];
};
struct mlx5_ifc_match_definer_format_23_bits {
u8 reserved_at_0[0x100];
u8 inner_ip_src_addr[0x20];
u8 inner_ip_dest_addr[0x20];
u8 inner_l4_sport[0x10];
u8 inner_l4_dport[0x10];
u8 reserved_at_160[0x1];
u8 sx_sniffer[0x1];
u8 functional_lb[0x1];
u8 inner_ip_frag[0x1];
u8 inner_qp_type[0x2];
u8 inner_encap_type[0x2];
u8 port_number[0x2];
u8 inner_l3_type[0x2];
u8 inner_l4_type[0x2];
u8 inner_first_vlan_type[0x2];
u8 inner_first_vlan_prio[0x3];
u8 inner_first_vlan_cfi[0x1];
u8 inner_first_vlan_vid[0xc];
u8 tunnel_header_0[0x20];
u8 inner_dmac_47_16[0x20];
u8 inner_smac_47_16[0x20];
u8 inner_smac_15_0[0x10];
u8 inner_dmac_15_0[0x10];
};
struct mlx5_ifc_match_definer_format_29_bits {
u8 reserved_at_0[0xc0];
u8 outer_ip_dest_addr[0x80];
u8 outer_ip_src_addr[0x80];
u8 outer_l4_sport[0x10];
u8 outer_l4_dport[0x10];
u8 reserved_at_1e0[0x20];
};
struct mlx5_ifc_match_definer_format_30_bits {
u8 reserved_at_0[0xa0];
u8 outer_ip_dest_addr[0x80];
u8 outer_ip_src_addr[0x80];
u8 outer_dmac_47_16[0x20];
u8 outer_smac_47_16[0x20];
u8 outer_smac_15_0[0x10];
u8 outer_dmac_15_0[0x10];
};
struct mlx5_ifc_match_definer_format_31_bits {
u8 reserved_at_0[0xc0];
u8 inner_ip_dest_addr[0x80];
u8 inner_ip_src_addr[0x80];
u8 inner_l4_sport[0x10];
u8 inner_l4_dport[0x10];
u8 reserved_at_1e0[0x20];
};
struct mlx5_ifc_match_definer_format_32_bits {
u8 reserved_at_0[0xa0];
u8 inner_ip_dest_addr[0x80];
u8 inner_ip_src_addr[0x80];
u8 inner_dmac_47_16[0x20];
u8 inner_smac_47_16[0x20];
u8 inner_smac_15_0[0x10];
u8 inner_dmac_15_0[0x10];
};
struct mlx5_ifc_match_definer_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x40];
u8 reserved_at_80[0x10];
u8 format_id[0x10];
u8 reserved_at_a0[0x160];
u8 match_mask[16][0x20];
};
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 vhca_tunnel_id[0x10];
u8 obj_type[0x10];
u8 obj_id[0x20];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 obj_id[0x20];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_match_definer_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_match_definer_bits obj_context;
};
struct mlx5_ifc_create_match_definer_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@ -8125,6 +8371,11 @@ struct mlx5_ifc_create_flow_group_out_bits {
u8 reserved_at_60[0x20];
};
enum {
MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
};
enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@ -8146,7 +8397,9 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 reserved_at_88[0x4];
u8 group_type[0x4];
u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@ -8161,7 +8414,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 end_flow_index[0x20];
u8 reserved_at_140[0xa0];
u8 reserved_at_140[0x10];
u8 match_definer_id[0x10];
u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@ -10434,9 +10690,16 @@ struct mlx5_ifc_dcbx_param_bits {
u8 reserved_at_a0[0x160];
};
enum {
MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT,
};
struct mlx5_ifc_lagc_bits {
u8 fdb_selection_mode[0x1];
u8 reserved_at_1[0x1c];
u8 reserved_at_1[0x14];
u8 port_select_mode[0x3];
u8 reserved_at_18[0x5];
u8 lag_state[0x3];
u8 reserved_at_20[0x14];
@ -10650,29 +10913,6 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 vhca_tunnel_id[0x10];
u8 obj_type[0x10];
u8 obj_id[0x20];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 obj_id[0x20];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];