Merge tag 'mlx5-updates-2021-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-12-14

Parsing Infrastructure for TC actions:

This series introduces a TC action infrastructure that helps parse
TC actions in a generic way for both FDB and NIC rules.

To make the TC action parsing code easier to maintain, we move the
parsing code into a per-action parser for each TC action type, each in
a separate file, instead of one big switch-case loop duplicated between
the FDB and NIC parsers as before this patchset.

Each TC flow_action->id is represented by a dedicated mlx5e_tc_act
handler, which has callbacks to check whether the specific action is
supported for offload and to parse the specific action.

We move the handling of each case (TC action) into its specific
handler, which is responsible for parsing the action and determining
whether it is supported.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f71f1bcbd8 (committed by David S. Miller, 2021-12-15 14:46:33 +00:00)
25 files changed, 1854 insertions(+), 994 deletions(-)
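The dispatch loop that drives these handlers lives in en_tc.c, whose diff is suppressed below for size. A minimal sketch of what such a loop looks like given the handler interface in act.h follows; the function name and error handling here are illustrative, not the exact upstream code:

static int
parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
		 struct flow_action *flow_action,
		 struct mlx5e_priv *priv,
		 struct mlx5_flow_attr *attr,
		 enum mlx5_flow_namespace_type ns_type)
{
	struct flow_action_entry *act;
	struct mlx5e_tc_act *tc_act;
	int err, i;

	flow_action_for_each(i, act, flow_action) {
		/* NULL means the action has no offload support
		 * in this namespace.
		 */
		tc_act = mlx5e_tc_act_get(act->id, ns_type);
		if (!tc_act) {
			NL_SET_ERR_MSG_MOD(parse_state->extack,
					   "Not implemented offload action");
			return -EOPNOTSUPP;
		}

		if (!tc_act->can_offload(parse_state, act, i))
			return -EOPNOTSUPP;

		err = tc_act->parse_action(parse_state, act, priv, attr);
		if (err)
			return err;
	}

	return 0;
}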

drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -46,6 +46,15 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
en/tc/post_act.o en/tc/int_port.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \
en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
en/tc/act/mirred.o en/tc/act/mirred_nic.o \
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
en/tc/act/redirect_ingress.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -0,0 +1,31 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_accept = {
.can_offload = tc_act_can_offload_accept,
.parse_action = tc_act_parse_accept,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -0,0 +1,103 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
#include "mlx5_core.h"
/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
&mlx5e_tc_act_accept,
&mlx5e_tc_act_drop,
&mlx5e_tc_act_trap,
&mlx5e_tc_act_goto,
&mlx5e_tc_act_mirred,
&mlx5e_tc_act_mirred,
&mlx5e_tc_act_redirect_ingress,
NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
&mlx5e_tc_act_vlan,
&mlx5e_tc_act_vlan,
&mlx5e_tc_act_vlan_mangle,
&mlx5e_tc_act_tun_encap,
&mlx5e_tc_act_tun_decap,
&mlx5e_tc_act_pedit,
&mlx5e_tc_act_pedit,
&mlx5e_tc_act_csum,
NULL, /* FLOW_ACTION_MARK, */
&mlx5e_tc_act_ptype,
NULL, /* FLOW_ACTION_PRIORITY, */
NULL, /* FLOW_ACTION_WAKE, */
NULL, /* FLOW_ACTION_QUEUE, */
&mlx5e_tc_act_sample,
NULL, /* FLOW_ACTION_POLICE, */
&mlx5e_tc_act_ct,
NULL, /* FLOW_ACTION_CT_METADATA, */
&mlx5e_tc_act_mpls_push,
&mlx5e_tc_act_mpls_pop,
};
/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
&mlx5e_tc_act_accept,
&mlx5e_tc_act_drop,
NULL, /* FLOW_ACTION_TRAP, */
&mlx5e_tc_act_goto,
&mlx5e_tc_act_mirred_nic,
NULL, /* FLOW_ACTION_MIRRED, */
NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
NULL, /* FLOW_ACTION_VLAN_PUSH, */
NULL, /* FLOW_ACTION_VLAN_POP, */
NULL, /* FLOW_ACTION_VLAN_MANGLE, */
NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
&mlx5e_tc_act_pedit,
&mlx5e_tc_act_pedit,
&mlx5e_tc_act_csum,
&mlx5e_tc_act_mark,
NULL, /* FLOW_ACTION_PTYPE, */
NULL, /* FLOW_ACTION_PRIORITY, */
NULL, /* FLOW_ACTION_WAKE, */
NULL, /* FLOW_ACTION_QUEUE, */
NULL, /* FLOW_ACTION_SAMPLE, */
NULL, /* FLOW_ACTION_POLICE, */
&mlx5e_tc_act_ct,
};
/**
* mlx5e_tc_act_get() - Get an action parser for an action id.
* @act_id: Flow action id.
* @ns_type: flow namespace type.
*/
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
enum mlx5_flow_namespace_type ns_type)
{
struct mlx5e_tc_act **tc_acts;
tc_acts = ns_type == MLX5_FLOW_NAMESPACE_FDB ? tc_acts_fdb : tc_acts_nic;
return tc_acts[act_id];
}
/**
* mlx5e_tc_act_init_parse_state() - Init a new parse_state.
* @parse_state: Parsing state.
* @flow: mlx5e tc flow being handled.
* @flow_action: flow action to parse.
* @extack: to set an error msg.
*
* The same parse_state should be passed to action parsers
* for tracking the current parsing state.
*/
void
mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_flow *flow,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
memset(parse_state, 0, sizeof(*parse_state));
parse_state->flow = flow;
parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
}

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -0,0 +1,75 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_EN_TC_ACT_H__
#define __MLX5_EN_TC_ACT_H__
#include <net/tc_act/tc_pedit.h>
#include <net/flow_offload.h>
#include <linux/netlink.h>
#include "eswitch.h"
#include "pedit.h"
struct mlx5_flow_attr;
struct mlx5e_tc_act_parse_state {
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
bool encap;
bool decap;
bool mpls_push;
bool ptype_host;
const struct ip_tunnel_info *tun_info;
struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
int if_count;
struct mlx5_tc_ct_priv *ct_priv;
};
struct mlx5e_tc_act {
bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index);
int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
};
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
extern struct mlx5e_tc_act mlx5e_tc_act_trap;
extern struct mlx5e_tc_act mlx5e_tc_act_accept;
extern struct mlx5e_tc_act mlx5e_tc_act_mark;
extern struct mlx5e_tc_act mlx5e_tc_act_goto;
extern struct mlx5e_tc_act mlx5e_tc_act_tun_encap;
extern struct mlx5e_tc_act mlx5e_tc_act_tun_decap;
extern struct mlx5e_tc_act mlx5e_tc_act_csum;
extern struct mlx5e_tc_act mlx5e_tc_act_pedit;
extern struct mlx5e_tc_act mlx5e_tc_act_vlan;
extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle;
extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push;
extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop;
extern struct mlx5e_tc_act mlx5e_tc_act_mirred;
extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic;
extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
enum mlx5_flow_namespace_type ns_type);
void
mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_flow *flow,
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_H__ */
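The header above is the single registration point for handlers. For illustration, wiring in a handler for a hypothetical new action id (FLOW_ACTION_FOO and the foo names below are invented, not part of this series) follows the same three-step pattern visible in these diffs:

/* 1. en/tc/act/foo.c: implement the callbacks and export the handler. */
struct mlx5e_tc_act mlx5e_tc_act_foo = {
	.can_offload = tc_act_can_offload_foo,
	.parse_action = tc_act_parse_foo,
};

/* 2. en/tc/act/act.h: declare it next to the other handlers. */
extern struct mlx5e_tc_act mlx5e_tc_act_foo;

/* 3. en/tc/act/act.c: slot it into tc_acts_fdb[] and/or tc_acts_nic[]
 * at the index matching FLOW_ACTION_FOO in enum flow_action_id, and
 * add en/tc/act/foo.o to the Makefile list.
 */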

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/tc_act/tc_csum.h>
#include "act.h"
#include "en/tc_priv.h"
static bool
csum_offload_supported(struct mlx5e_priv *priv,
u32 action,
u32 update_flags,
struct netlink_ext_ack *extack)
{
u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
TCA_CSUM_UPDATE_FLAG_UDP;
/* The HW recalcs checksums only if re-writing headers */
if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
NL_SET_ERR_MSG_MOD(extack,
"TC csum action is only offloaded with pedit");
netdev_warn(priv->netdev,
"TC csum action is only offloaded with pedit\n");
return false;
}
if (update_flags & ~prot_flags) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload TC csum action for some header/s");
netdev_warn(priv->netdev,
"can't offload TC csum action for some header/s - flags %#x\n",
update_flags);
return false;
}
return true;
}
static bool
tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct mlx5e_tc_flow *flow = parse_state->flow;
return csum_offload_supported(flow->priv, flow->attr->action,
act->csum_flags, parse_state->extack);
}
static int
tc_act_parse_csum(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_csum = {
.can_offload = tc_act_can_offload_csum,
.parse_action = tc_act_parse_csum,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
#include "en/tc_ct.h"
static bool
tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (flow_flag_test(parse_state->flow, SAMPLE)) {
NL_SET_ERR_MSG_MOD(extack,
"Sample action with connection tracking is not supported");
return false;
}
return true;
}
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
int err;
err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
&attr->parse_attr->mod_hdr_acts,
act, parse_state->extack);
if (err)
return err;
flow_flag_set(parse_state->flow, CT);
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -0,0 +1,30 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_drop = {
.can_offload = tc_act_can_offload_drop,
.parse_action = tc_act_parse_drop,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -0,0 +1,122 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
#include "eswitch.h"
static int
validate_goto_chain(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
bool is_esw = mlx5e_is_eswitch_flow(flow);
bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index;
struct mlx5_fs_chains *chains;
struct mlx5_eswitch *esw;
u32 reformat_and_fwd;
u32 max_chain;
esw = priv->mdev->priv.eswitch;
chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
if (ft_flow) {
NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
return -EOPNOTSUPP;
}
if (!mlx5_chains_backwards_supported(chains) &&
dest_chain <= flow->attr->chain) {
NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported");
return -EOPNOTSUPP;
}
if (dest_chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested destination chain is out of supported range");
return -EOPNOTSUPP;
}
if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
!reformat_and_fwd) {
NL_SET_ERR_MSG_MOD(extack,
"Goto chain is not allowed if action has reformat or decap");
return -EOPNOTSUPP;
}
return 0;
}
static bool
tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
if (validate_goto_chain(flow->priv, flow, act, extack))
return false;
return true;
}
static int
tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
return 0;
}
static int
tc_act_post_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
if (!attr->dest_chain)
return 0;
if (parse_state->decap) {
/* It can be supported if we'll create a mapping for
* the tunnel device only (without tunnel), and set
* this tunnel id with this decap flow.
*
* On restore (miss), we'll just set this saved tunnel
* device.
*/
NL_SET_ERR_MSG_MOD(extack, "Decap with goto isn't supported");
netdev_warn(priv->netdev, "Decap with goto isn't supported");
return -EOPNOTSUPP;
}
if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) {
NL_SET_ERR_MSG_MOD(extack, "Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
}
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_goto = {
.can_offload = tc_act_can_offload_goto,
.parse_action = tc_act_parse_goto,
.post_parse = tc_act_post_parse_goto,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
@@ -0,0 +1,35 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en_tc.h"
static bool
tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported");
return false;
}
return true;
}
static int
tc_act_parse_mark(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->nic_attr->flow_tag = act->mark;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_mark = {
.can_offload = tc_act_can_offload_mark,
.parse_action = tc_act_parse_mark,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -0,0 +1,315 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "act.h"
#include "vlan.h"
#include "en/tc_tun_encap.h"
#include "en/tc_priv.h"
#include "en_rep.h"
static bool
same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev)
{
return mlx5e_eswitch_vf_rep(priv->netdev) &&
priv->netdev == out_dev;
}
static int
verify_uplink_forwarding(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rep_priv;
/* Forwarding non encapsulated traffic between
* uplink ports is allowed only if
* termination_table_raw_traffic cap is set.
*
* Input vport was stored attr->in_rep.
* In LAG case, *priv* is the private data of
* uplink which may be not the input vport.
*/
rep_priv = mlx5e_rep_to_rep_priv(attr->esw_attr->in_rep);
if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
mlx5e_eswitch_uplink_rep(out_dev)))
return 0;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
termination_table_raw_traffic)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are both uplink, can't offload forwarding");
pr_err("devices %s %s are both uplink, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
} else if (out_dev != rep_priv->netdev) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not the same uplink, can't offload forwarding");
pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EOPNOTSUPP;
}
return 0;
}
static bool
is_duplicated_output_device(struct net_device *dev,
struct net_device *out_dev,
int *ifindexes, int if_count,
struct netlink_ext_ack *extack)
{
int i;
for (i = 0; i < if_count; i++) {
if (ifindexes[i] == out_dev->ifindex) {
NL_SET_ERR_MSG_MOD(extack, "can't duplicate output to same device");
netdev_err(dev, "can't duplicate output to same device: %s\n",
out_dev->name);
return true;
}
}
return false;
}
static struct net_device *
get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev)
{
struct net_device *fdb_out_dev = out_dev;
struct net_device *uplink_upper;
rcu_read_lock();
uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
if (uplink_upper && netif_is_lag_master(uplink_upper) &&
uplink_upper == out_dev) {
fdb_out_dev = uplink_dev;
} else if (netif_is_lag_master(out_dev)) {
fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
if (fdb_out_dev &&
(!mlx5e_eswitch_rep(fdb_out_dev) ||
!netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
fdb_out_dev = NULL;
}
rcu_read_unlock();
return fdb_out_dev;
}
static bool
tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct net_device *out_dev = act->dev;
struct mlx5e_priv *priv = flow->priv;
struct mlx5_esw_flow_attr *esw_attr;
parse_attr = flow->attr->parse_attr;
esw_attr = flow->attr->esw_attr;
if (!out_dev) {
/* out_dev is NULL when filters with
* non-existing mirred device are replayed to
* the driver.
*/
return false;
}
if (parse_state->mpls_push && !netif_is_bareudp(out_dev)) {
NL_SET_ERR_MSG_MOD(extack, "mpls is supported only through a bareudp device");
return false;
}
if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
/* Ignore forward to self rules generated
* by adding both mlx5 devs to the flow table
* block on a normal nft offload setup.
*/
return false;
}
if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
netdev_warn(priv->netdev,
"can't support more than %d output ports, can't offload forwarding\n",
esw_attr->out_count);
return false;
}
if (parse_state->encap ||
netdev_port_same_parent_id(priv->netdev, out_dev) ||
netif_is_ovs_master(out_dev))
return true;
if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
* *attempt* to install a filter on invalid
* eswitch should not trigger an explicit error
*/
return false;
}
NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding");
netdev_warn(priv->netdev,
"devices %s %s not on same switch HW, can't offload forwarding\n",
netdev_name(priv->netdev),
out_dev->name);
return false;
}
static int
parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct net_device *out_dev = act->dev;
parse_attr->mirred_ifindex[esw_attr->out_count] = out_dev->ifindex;
parse_attr->tun_info[esw_attr->out_count] =
mlx5e_dup_tun_info(parse_state->tun_info);
if (!parse_attr->tun_info[esw_attr->out_count])
return -ENOMEM;
parse_state->encap = false;
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
/* attr->dests[].rep is resolved when we handle encap */
return 0;
}
static int
parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct net_device *out_dev = act->dev;
struct net_device *uplink_dev;
struct mlx5e_priv *out_priv;
struct mlx5_eswitch *esw;
int *ifindexes;
int if_count;
int err;
esw = priv->mdev->priv.eswitch;
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
ifindexes = parse_state->ifindexes;
if_count = parse_state->if_count;
if (is_duplicated_output_device(priv->netdev, out_dev, ifindexes, if_count, extack))
return -EOPNOTSUPP;
parse_state->ifindexes[if_count] = out_dev->ifindex;
parse_state->if_count++;
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
return -ENODEV;
if (is_vlan_dev(out_dev)) {
err = mlx5e_tc_act_vlan_add_push_action(priv, attr, &out_dev, extack);
if (err)
return err;
}
if (is_vlan_dev(parse_attr->filter_dev)) {
err = mlx5e_tc_act_vlan_add_pop_action(priv, attr, extack);
if (err)
return err;
}
if (netif_is_macvlan(out_dev))
out_dev = macvlan_dev_real_dev(out_dev);
err = verify_uplink_forwarding(priv, attr, out_dev, extack);
if (err)
return err;
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
return -EOPNOTSUPP;
}
if (same_vf_reps(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack, "can't forward from a VF to itself");
return -EOPNOTSUPP;
}
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
esw_attr->out_count++;
return 0;
}
static int
parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct net_device *out_dev = act->dev;
int err;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_EGRESS,
&attr->action, esw_attr->out_count);
if (err)
return err;
esw_attr->out_count++;
return 0;
}
static int
tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct net_device *out_dev = act->dev;
int err = -EOPNOTSUPP;
if (parse_state->encap)
err = parse_mirred_encap(parse_state, act, attr);
else if (netdev_port_same_parent_id(priv->netdev, out_dev))
err = parse_mirred(parse_state, act, priv, attr);
else if (netif_is_ovs_master(out_dev))
err = parse_mirred_ovs_master(parse_state, act, priv, attr);
if (err)
return err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_mirred = {
.can_offload = tc_act_can_offload_mirred,
.parse_action = tc_act_parse_mirred,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -0,0 +1,51 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct net_device *out_dev = act->dev;
struct mlx5e_priv *priv = flow->priv;
if (act->id != FLOW_ACTION_REDIRECT)
return false;
if (priv->netdev->netdev_ops != out_dev->netdev_ops ||
!mlx5e_same_hw_devs(priv, netdev_priv(out_dev))) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
netdev_warn(priv->netdev,
"devices %s %s not on same switch HW, can't offload forwarding\n",
netdev_name(priv->netdev),
out_dev->name);
return false;
}
return true;
}
static int
tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
flow_flag_set(parse_state->flow, HAIRPIN);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = {
.can_offload = tc_act_can_offload_mirred_nic,
.parse_action = tc_act_parse_mirred_nic,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -0,0 +1,86 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <net/bareudp.h>
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_priv *priv = parse_state->flow->priv;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l2_to_l3_tunnel) ||
act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
NL_SET_ERR_MSG_MOD(extack, "mpls push is supported only for mpls_uc protocol");
return false;
}
return true;
}
static int
tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
parse_state->mpls_push = true;
return 0;
}
static bool
tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct net_device *filter_dev;
filter_dev = flow->attr->parse_attr->filter_dev;
/* we only support mpls pop if it is the first action
* and the filter net device is bareudp. Subsequent
* actions can be pedit and the last can be mirred
* egress redirect.
*/
if (act_index) {
NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
return false;
}
if (!netif_is_bareudp(filter_dev)) {
NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices");
return false;
}
return true;
}
static int
tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_mpls_push = {
.can_offload = tc_act_can_offload_mpls_push,
.parse_action = tc_act_parse_mpls_push,
};
struct mlx5e_tc_act mlx5e_tc_act_mpls_pop = {
.can_offload = tc_act_can_offload_mpls_pop,
.parse_action = tc_act_parse_mpls_pop,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -0,0 +1,165 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/if_vlan.h>
#include "act.h"
#include "pedit.h"
#include "en/tc_priv.h"
#include "en/mod_hdr.h"
static int pedit_header_offsets[] = {
[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int
set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
u32 *curr_pmask, *curr_pval;
curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
if (*curr_pmask & mask) { /* disallow acting twice on the same location */
NL_SET_ERR_MSG_MOD(extack,
"curr_pmask and new mask same. Acting twice on same location");
goto out_err;
}
*curr_pmask |= mask;
*curr_pval |= (val & mask);
return 0;
out_err:
return -EOPNOTSUPP;
}
static int
parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
u8 htype = act->mangle.htype;
int err = -EOPNOTSUPP;
u32 mask, val, offset;
if (htype == FLOW_ACT_MANGLE_UNSPEC) {
NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
goto out_err;
}
if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
NL_SET_ERR_MSG_MOD(extack, "The pedit offload action is not supported");
goto out_err;
}
mask = act->mangle.mask;
val = act->mangle.val;
offset = act->mangle.offset;
err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
if (err)
goto out_err;
hdrs[cmd].pedits++;
return 0;
out_err:
return err;
}
static int
parse_pedit_to_reformat(const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
u32 mask, val, offset;
u32 *p;
if (act->id != FLOW_ACTION_MANGLE) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
return -EOPNOTSUPP;
}
if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
return -EOPNOTSUPP;
}
mask = ~act->mangle.mask;
val = act->mangle.val;
offset = act->mangle.offset;
p = (u32 *)&parse_attr->eth;
*(p + (offset >> 2)) |= (val & mask);
return 0;
}
int
mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
return parse_pedit_to_reformat(act, parse_attr, extack);
return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack);
}
static bool
tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5e_tc_flow *flow = parse_state->flow;
enum mlx5_flow_namespace_type ns_type;
int err;
ns_type = mlx5e_get_flow_namespace(flow);
err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type,
attr->parse_attr, parse_state->hdrs,
flow, parse_state->extack);
if (err)
return err;
if (flow_flag_test(flow, L3_TO_L2_DECAP))
goto out;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
esw_attr->split_count = esw_attr->out_count;
out:
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_pedit = {
.can_offload = tc_act_can_offload_pedit,
.parse_action = tc_act_parse_pedit,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_EN_TC_ACT_PEDIT_H__
#define __MLX5_EN_TC_ACT_PEDIT_H__
#include "en_tc.h"
struct pedit_headers {
struct ethhdr eth;
struct vlan_hdr vlan;
struct iphdr ip4;
struct ipv6hdr ip6;
struct tcphdr tcp;
struct udphdr udp;
};
struct pedit_headers_action {
struct pedit_headers vals;
struct pedit_headers masks;
u32 pedits;
};
int
mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
@@ -0,0 +1,35 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (act->ptype != PACKET_HOST) {
NL_SET_ERR_MSG_MOD(extack, "skbedit ptype is only supported with type host");
return -EOPNOTSUPP;
}
parse_state->ptype_host = true;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_ptype = {
.can_offload = tc_act_can_offload_ptype,
.parse_action = tc_act_parse_ptype,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -0,0 +1,79 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct net_device *out_dev = act->dev;
struct mlx5_esw_flow_attr *esw_attr;
parse_attr = flow->attr->parse_attr;
esw_attr = flow->attr->esw_attr;
if (!out_dev)
return false;
if (!netif_is_ovs_master(out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to ingress is supported only for OVS internal ports");
return false;
}
if (netif_is_ovs_master(parse_attr->filter_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to ingress is not supported from internal port");
return false;
}
if (!parse_state->ptype_host) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to int port ingress requires ptype=host action");
return false;
}
if (esw_attr->out_count) {
NL_SET_ERR_MSG_MOD(extack,
"redirect to int port ingress is supported only as single destination");
return false;
}
return true;
}
static int
tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct net_device *out_dev = act->dev;
int err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
&attr->action, esw_attr->out_count);
if (err)
return err;
esw_attr->out_count++;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress = {
.can_offload = tc_act_can_offload_redirect_ingress,
.parse_action = tc_act_parse_redirect_ingress,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
@@ -0,0 +1,51 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <net/psample.h>
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (flow_flag_test(parse_state->flow, CT)) {
NL_SET_ERR_MSG_MOD(extack,
"Sample action with connection tracking is not supported");
return false;
}
return true;
}
static int
tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_sample_attr *sample_attr;
sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
if (!sample_attr)
return -ENOMEM;
sample_attr->rate = act->sample.rate;
sample_attr->group_num = act->sample.psample_group->group_num;
if (act->sample.truncate)
sample_attr->trunc_size = act->sample.trunc_size;
attr->sample_attr = sample_attr;
flow_flag_set(parse_state->flow, SAMPLE);
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_sample = {
.can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -0,0 +1,38 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (parse_state->num_actions != 1) {
NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
return false;
}
return true;
}
static int
tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_trap = {
.can_offload = tc_act_can_offload_trap,
.parse_action = tc_act_parse_trap,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc_tun_encap.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
if (!act->tunnel) {
NL_SET_ERR_MSG_MOD(parse_state->extack,
"Zero tunnel attributes is not supported");
return false;
}
return true;
}
static int
tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
parse_state->tun_info = act->tunnel;
parse_state->encap = true;
return 0;
}
static bool
tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
parse_state->decap = true;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_tun_encap = {
.can_offload = tc_act_can_offload_tun_encap,
.parse_action = tc_act_parse_tun_encap,
};
struct mlx5e_tc_act mlx5e_tc_act_tun_decap = {
.can_offload = tc_act_can_offload_tun_decap,
.parse_action = tc_act_parse_tun_decap,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -0,0 +1,218 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/if_vlan.h>
#include "act.h"
#include "vlan.h"
#include "en/tc_priv.h"
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
const struct flow_action_entry prio_tag_act = {
.vlan.vid = 0,
.vlan.prio =
MLX5_GET(fte_match_set_lyr_2_4,
mlx5e_get_match_headers_value(*action,
&parse_attr->spec),
first_prio) &
MLX5_GET(fte_match_set_lyr_2_4,
mlx5e_get_match_headers_criteria(*action,
&parse_attr->spec),
first_prio),
};
return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
&prio_tag_act, parse_attr, hdrs, action,
extack);
}
static int
parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
u32 *action,
struct netlink_ext_ack *extack)
{
u8 vlan_idx = attr->total_vlan;
if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
return -EOPNOTSUPP;
}
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
MLX5_FS_VLAN_DEPTH)) {
NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
} else {
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
}
break;
case FLOW_ACTION_VLAN_PUSH:
attr->vlan_vid[vlan_idx] = act->vlan.vid;
attr->vlan_prio[vlan_idx] = act->vlan.prio;
attr->vlan_proto[vlan_idx] = act->vlan.proto;
if (!attr->vlan_proto[vlan_idx])
attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
MLX5_FS_VLAN_DEPTH)) {
NL_SET_ERR_MSG_MOD(extack,
"vlan push action is not supported for vlan depth > 1");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
(act->vlan.proto != htons(ETH_P_8021Q) ||
act->vlan.prio)) {
NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
return -EOPNOTSUPP;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
}
attr->total_vlan = vlan_idx + 1;
return 0;
}
int
mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
struct netlink_ext_ack *extack)
{
struct net_device *vlan_dev = *out_dev;
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_PUSH,
.vlan.vid = vlan_dev_vlan_id(vlan_dev),
.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
.vlan.prio = 0,
};
int err;
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
if (err)
return err;
rcu_read_lock();
*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
rcu_read_unlock();
if (!*out_dev)
return -ENODEV;
if (is_vlan_dev(*out_dev))
err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);
return err;
}
int
mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
};
int nest_level, err = 0;
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
extack);
if (err)
return err;
}
return err;
}
static bool
tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int err;
if (act->id == FLOW_ACTION_VLAN_PUSH &&
(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
/* Replace vlan pop+push with vlan modify */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
attr->parse_attr, parse_state->hdrs,
&attr->action, parse_state->extack);
} else {
err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
parse_state->extack);
}
if (err)
return err;
esw_attr->split_count = esw_attr->out_count;
return 0;
}
static int
tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct pedit_headers_action *hdrs = parse_state->hdrs;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
/* For prio tag mode, replace vlan pop with rewrite vlan prio
* tag rewrite.
*/
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
&attr->action, extack);
if (err)
return err;
}
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_vlan = {
.can_offload = tc_act_can_offload_vlan,
.parse_action = tc_act_parse_vlan,
.post_parse = tc_act_post_parse_vlan,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_EN_TC_ACT_VLAN_H__
#define __MLX5_EN_TC_ACT_VLAN_H__
#include <net/flow_offload.h>
#include "en/tc_priv.h"
struct pedit_headers_action;
int
mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
struct netlink_ext_ack *extack);
int
mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack);
int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */

drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -0,0 +1,87 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/if_vlan.h>
#include "act.h"
#include "vlan.h"
#include "en/tc_priv.h"
struct pedit_headers_action;
int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
u16 mask16 = VLAN_VID_MASK;
u16 val16 = act->vlan.vid & VLAN_VID_MASK;
const struct flow_action_entry pedit_act = {
.id = FLOW_ACTION_MANGLE,
.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
};
u8 match_prio_mask, match_prio_val;
void *headers_c, *headers_v;
int err;
headers_c = mlx5e_get_match_headers_criteria(*action, &parse_attr->spec);
headers_v = mlx5e_get_match_headers_value(*action, &parse_attr->spec);
if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
NL_SET_ERR_MSG_MOD(extack, "VLAN rewrite action must have VLAN protocol match");
return -EOPNOTSUPP;
}
match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
NL_SET_ERR_MSG_MOD(extack, "Changing VLAN prio is not supported");
return -EOPNOTSUPP;
}
err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
NULL, extack);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
}
static bool
tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index)
{
return true;
}
static int
tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
enum mlx5_flow_namespace_type ns_type;
int err;
ns_type = mlx5e_get_flow_namespace(parse_state->flow);
err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
attr->parse_attr, parse_state->hdrs,
&attr->action, parse_state->extack);
if (err)
return err;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
attr->esw_attr->split_count = attr->esw_attr->out_count;
return 0;
}
struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = {
.can_offload = tc_act_can_offload_vlan_mangle,
.parse_action = tc_act_parse_vlan_mangle,
};

drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -5,11 +5,14 @@
#define __MLX5_EN_TC_PRIV_H__
#include "en_tc.h"
#include "en/tc/act/act.h"
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
#define MLX5E_TC_MAX_SPLITS 1
#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
@@ -37,6 +40,7 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
struct ethhdr eth;
struct mlx5e_tc_act_parse_state parse_state;
};
/* Helper struct for accessing a struct containing list_head array.
@@ -115,7 +119,11 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
@@ -176,4 +184,8 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
#endif /* __MLX5_EN_TC_PRIV_H__ */

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
(diff suppressed because it is too large)

drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -151,7 +151,6 @@ enum {
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);