Merge branch 'nfp-flower-add-Geneve-tunnel-support'

Simon Horman says:

====================
nfp: flower: add Geneve tunnel support

John Hurley says:

This patchset adds support for offloading the encap and decap of Geneve
tunnels to the NFP. In both cases, specifying the well-known port 6081 is
a requirement for rule offload.
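
As a rough illustration of the decap-side gate (a condensed sketch of the
logic added to nfp_flower_calculate_key_layers() in the hunks below, not a
verbatim excerpt), the destination port of the tunnel match must be fully
masked and equal to one of the well-known ports before a key layer is
selected:

	/* Sketch: enc dst port must be exact-matched on a known port.
	 * Geneve additionally requires firmware support; see below.
	 */
	if (mask_enc_ports->dst != cpu_to_be16(~0))
		return -EOPNOTSUPP;

	switch (enc_ports->dst) {
	case htons(NFP_FL_VXLAN_PORT):		/* 4789 */
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		break;
	case htons(NFP_FL_GENEVE_PORT):		/* 6081 */
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		break;
	default:
		return -EOPNOTSUPP;
	}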

Firmware support for Geneve has recently been added, so the patchset
includes reading a fw symbol that defines a bitmap of newly supported
features. Geneve will only be offloaded if the fw supports it. The new
symbol is added in fw r5646.
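
Concretely (a condensed sketch of the nfp_flower_init() and
nfp_flower_calculate_key_layers() hunks below, not a verbatim excerpt),
the driver reads the symbol once at init time, keeps the bitmap in its
private data, and the Geneve paths test the relevant bit:

	/* Init: read the extra-features bitmap; a read failure simply
	 * means an older fw with no extra features.
	 */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	app_priv->flower_ext_feats = err ? 0 : features;

	/* Later, before offloading a Geneve rule: */
	if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
		return -EOPNOTSUPP;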

Geneve option fields are not supported as either a match or an action
due to their current exclusion from TC flower. Because Geneve (as both
a match and an action) behaves the same as other UDP tunnels such as
VXLAN, generic functions are created that handle both Geneve and
VXLAN. It is anticipated that these functions will be modified to
support options in future patches.
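
On the encap side this takes the shape of a single helper that maps the
tunnel-set action's destination port to a tunnel type, so VXLAN and
Geneve share the same set-tunnel action code. Shown here consolidated
from the hunk below that introduces it (the rendered diff interleaves
old and new lines):

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):		/* 4789 */
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):		/* 6081 */
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}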

The removal of an unused variable 'tun_dst_mask' is included as a separate
patch here. This does not affect functionality.

Also included are modifications to the test framework to check that the
new encap and decap features are functioning correctly.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-12-19 14:52:13 -05:00
commit f39a5c01c3
6 changed files with 151 additions and 81 deletions

View File

@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
return tun_type == NFP_FL_TUNNEL_VXLAN;
if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
return tun_type == NFP_FL_TUNNEL_GENEVE;
return false;
}
@@ -136,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
return 0;
}
static bool nfp_fl_supported_tun_port(const struct tc_action *action)
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
const struct tc_action *action)
{
struct ip_tunnel_info *tun = tcf_tunnel_info(action);
struct nfp_flower_priv *priv = app->priv;
return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
switch (tun->key.tp_dst) {
case htons(NFP_FL_VXLAN_PORT):
return NFP_FL_TUNNEL_VXLAN;
case htons(NFP_FL_GENEVE_PORT):
if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
return NFP_FL_TUNNEL_GENEVE;
/* FALLTHROUGH */
default:
return NFP_FL_TUNNEL_NONE;
}
}
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
@@ -165,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
}
static int
nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
const struct tc_action *action,
struct nfp_fl_pre_tunnel *pre_tun)
nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
const struct tc_action *action,
struct nfp_fl_pre_tunnel *pre_tun,
enum nfp_flower_tun_type tun_type)
{
struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
size_t act_size = sizeof(struct nfp_fl_set_vxlan);
u32 tmp_set_vxlan_type_index = 0;
size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
if (vxlan->options_len) {
/* Do not support options e.g. vxlan gpe. */
if (ip_tun->options_len)
return -EOPNOTSUPP;
}
set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
tmp_set_vxlan_type_index |=
FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
tmp_set_ip_tun_type_index |=
FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
set_vxlan->tun_id = vxlan->key.tun_id;
set_vxlan->tun_flags = vxlan->key.tun_flags;
set_vxlan->ipv4_ttl = vxlan->key.ttl;
set_vxlan->ipv4_tos = vxlan->key.tos;
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
set_tun->tun_id = ip_tun->key.tun_id;
/* Complete pre_tunnel action. */
pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
return 0;
}
@@ -433,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -482,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
} else if (is_tcf_tunnel_set(a)) {
struct nfp_repr *repr = netdev_priv(netdev);
*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
*tun_type = NFP_FL_TUNNEL_VXLAN;
pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
set_tun = (void *)&nfp_fl->action_data[*a_len];
err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
if (err)
return err;
*a_len += sizeof(struct nfp_fl_set_vxlan);
*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
} else if (is_tcf_tunnel_release(a)) {
/* Tunnel decap is handled by default so accept action. */
return 0;

View File

@@ -41,7 +41,7 @@
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
#define NFP_FLOWER_LAYER_META BIT(0)
#define NFP_FLOWER_LAYER_EXT_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
#define NFP_FLOWER_LAYER_MAC BIT(2)
#define NFP_FLOWER_LAYER_TP BIT(3)
@@ -50,6 +50,8 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
@@ -105,6 +107,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
NFP_FL_TUNNEL_VXLAN = 2,
NFP_FL_TUNNEL_GENEVE = 4,
};
struct nfp_fl_act_head {
@@ -170,16 +173,13 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
struct nfp_fl_set_vxlan {
struct nfp_fl_set_ipv4_udp_tun {
struct nfp_fl_act_head head;
__be16 reserved;
__be64 tun_id;
__be64 tun_id __packed;
__be32 tun_type_index;
__be16 tun_flags;
u8 ipv4_ttl;
u8 ipv4_tos;
__be32 extra[2];
} __packed;
__be32 extra[3];
};
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
@@ -198,6 +198,18 @@ struct nfp_flower_meta_tci {
__be16 tci;
};
/* Extended metadata for additional key_layers (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | nfp_flow_key_layer2 |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_ext_meta {
__be32 nfp_flow_key_layer2;
};
/* Port details (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -296,7 +308,7 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
/* Flow Frame VXLAN --> Tunnel details (4W/16B)
/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -305,22 +317,17 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | tun_flags | tos | ttl |
* | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | gpe_flags | Reserved | Next Protocol |
* | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | VNI | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_vxlan {
struct nfp_flower_ipv4_udp_tun {
__be32 ip_src;
__be32 ip_dst;
__be16 tun_flags;
u8 tos;
u8 ttl;
u8 gpe_flags;
u8 reserved[2];
u8 nxt_proto;
__be32 reserved[2];
__be32 tun_id;
};

View File

@@ -381,7 +381,7 @@ static int nfp_flower_init(struct nfp_app *app)
{
const struct nfp_pf *pf = app->pf;
struct nfp_flower_priv *app_priv;
u64 version;
u64 version, features;
int err;
if (!pf->eth_tbl) {
@@ -424,6 +424,14 @@ static int nfp_flower_init(struct nfp_app *app)
if (err)
goto err_free_app_priv;
/* Extract the extra features supported by the firmware. */
features = nfp_rtsym_read_le(app->pf->rtbl,
"_abi_flower_extra_features", &err);
if (err)
app_priv->flower_ext_feats = 0;
else
app_priv->flower_ext_feats = features;
return 0;
err_free_app_priv:

View File

@@ -34,6 +34,8 @@
#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1
#include "cmsg.h"
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/time64.h>
@@ -58,6 +60,10 @@ struct nfp_app;
#define NFP_FL_MASK_ID_LOCATION 1
#define NFP_FL_VXLAN_PORT 4789
#define NFP_FL_GENEVE_PORT 6081
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -77,6 +83,7 @@ struct nfp_fl_stats_id {
* @nn: Pointer to vNIC
* @mask_id_seed: Seed used for mask hash table
* @flower_version: HW version of flower
* @flower_ext_feats: Bitmap of extra features the HW supports
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
@@ -101,6 +108,7 @@ struct nfp_flower_priv {
struct nfp_net *nn;
u32 mask_id_seed;
u64 flower_version;
u64 flower_ext_feats;
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -172,7 +180,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow);
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type);
int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow);

View File

@@ -67,6 +67,12 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
}
}
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
bool mask_version, enum nfp_flower_tun_type tun_type)
@@ -216,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
}
static void
nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
struct tc_cls_flower_offload *flow,
bool mask_version, __be32 *tun_dst)
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
struct tc_cls_flower_offload *flow,
bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_ipv4_addrs *vxlan_ips;
struct flow_dissector_key_ipv4_addrs *tun_ips;
struct flow_dissector_key_keyid *vni;
/* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
memset(frame, 0, sizeof(struct nfp_flower_vxlan));
memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -240,31 +245,26 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
vxlan_ips =
tun_ips =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
target);
frame->ip_src = vxlan_ips->src;
frame->ip_dst = vxlan_ips->dst;
*tun_dst = vxlan_ips->dst;
frame->ip_src = tun_ips->src;
frame->ip_dst = tun_ips->dst;
}
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
__be32 tun_dst, tun_dst_mask = 0;
struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
tun_type = NFP_FL_TUNNEL_VXLAN;
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -280,6 +280,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
/* Populate Extended Metadata if Required. */
if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
key_ls->key_layer_two);
nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
key_ls->key_layer_two);
ext += sizeof(struct nfp_flower_ext_meta);
msk += sizeof(struct nfp_flower_ext_meta);
}
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
nfp_repr_get_port_id(netdev),
@@ -341,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
__be32 tun_dst;
/* Populate Exact VXLAN Data. */
nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
flow, false, &tun_dst);
nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
/* Populate Mask VXLAN Data. */
nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
flow, true, &tun_dst_mask);
ext += sizeof(struct nfp_flower_vxlan);
msk += sizeof(struct nfp_flower_vxlan);
nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
if (nfp_netdev_is_nfp_repr(netdev)) {

View File

@@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
}
static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
nfp_flower_calculate_key_layers(struct nfp_app *app,
struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
bool egress)
bool egress,
enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
struct nfp_flower_priv *priv = app->priv;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -197,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
FLOW_DISSECTOR_KEY_ENC_PORTS,
flow->key);
if (mask_enc_ports->dst != cpu_to_be16(~0) ||
enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
if (mask_enc_ports->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_vxlan);
switch (enc_ports->dst) {
case htons(NFP_FL_VXLAN_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
break;
case htons(NFP_FL_GENEVE_PORT):
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
return -EOPNOTSUPP;
*tun_type = NFP_FL_TUNNEL_GENEVE;
key_layer |= NFP_FLOWER_LAYER_EXT_META;
key_size += sizeof(struct nfp_flower_ext_meta);
key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
break;
default:
return -EOPNOTSUPP;
}
} else if (egress) {
/* Reject non tunnel matches offloaded to egress repr. */
return -EOPNOTSUPP;
@@ -330,6 +348,7 @@ static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
@@ -339,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
&tun_type);
if (err)
goto err_free_key_ls;
@@ -349,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_key_ls;
}
err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
tun_type);
if (err)
goto err_destroy_flow;