mlx5-fixes-2021-03-31

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmBk16kACgkQSD+KveBX
 +j7rvwf+LVsoKh1XxZ6JziSR46CpBnl++5SMdJKifCR9h4GqfYYLSDMihekMgVcL
 p6/ey1W0VseYrZD5mDK674fVTjXmh0dAkqmlF/X9mtxQztlNHOmw1rhw5Mc+h8Cf
 YaBA1Y7ne+pZtTir+r5fAMPlUOWubDI2ftpzgK0e+o+Am+FFm4sl9bkZtYvMNwfN
 Gn14e39V0tb4+inyeAcYndWISD09FTVbPydr79hlPl0It/3e3SFN6PaHfNbnWUpm
 jwEXvk8MEsEa3bc3Ihsh8rSQQuD9Ch+sboxyCX748TDwVQsDH3+IMUCmWH+ZsbWk
 hlI2S7M5f3yaQ3j9jdh5/WOgExtrwQ==
 =BUdu
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2021-03-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-03-31

This series introduces some fixes to the mlx5 driver.
Please pull and let me know if there is any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9dc22c0d04
David S. Miller, 2021-03-31 14:54:32 -07:00
20 changed files with 226 additions and 121 deletions

@@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
 }
 
 enum {
-	MLX5_INTERFACE_PROTOCOL_ETH_REP,
 	MLX5_INTERFACE_PROTOCOL_ETH,
+	MLX5_INTERFACE_PROTOCOL_ETH_REP,
 
+	MLX5_INTERFACE_PROTOCOL_IB,
 	MLX5_INTERFACE_PROTOCOL_IB_REP,
 	MLX5_INTERFACE_PROTOCOL_MPIB,
-	MLX5_INTERFACE_PROTOCOL_IB,
 
 	MLX5_INTERFACE_PROTOCOL_VNET,
 };

@@ -516,6 +516,7 @@ struct mlx5e_icosq {
 	struct mlx5_wq_cyc wq;
 	void __iomem *uar_map;
 	u32 sqn;
+	u16 reserved_room;
 	unsigned long state;
 
 	/* control path */

@@ -185,6 +185,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
 	return !!(entry->tuple_nat_node.next);
 }
 
+static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+		       u32 *labels, u32 *id)
+{
+	if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+		*id = 0;
+		return 0;
+	}
+
+	if (mapping_add(ct_priv->labels_mapping, labels, id))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+	if (id)
+		mapping_remove(ct_priv->labels_mapping, id);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -436,7 +458,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 	mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
 	mlx5e_mod_hdr_detach(ct_priv->dev,
 			     ct_priv->mod_hdr_tbl, zone_rule->mh);
-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 	kfree(attr);
 }
 
@@ -639,8 +661,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 	if (!meta)
 		return -EOPNOTSUPP;
 
-	err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
-			  &attr->ct_attr.ct_labels_id);
+	err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
+				     &attr->ct_attr.ct_labels_id);
 	if (err)
 		return -EOPNOTSUPP;
 
 	if (nat) {
@@ -677,7 +699,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 
 err_mapping:
 	dealloc_mod_hdr_actions(&mod_acts);
-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 
 	return err;
 }
@@ -745,7 +767,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
 	mlx5e_mod_hdr_detach(ct_priv->dev,
 			     ct_priv->mod_hdr_tbl, zone_rule->mh);
-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
 	kfree(attr);
 err_attr:
@@ -1197,7 +1219,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
 	if (!priv || !ct_attr->ct_labels_id)
 		return;
 
-	mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+	mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
 }
 
 int
@@ -1280,7 +1302,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
 		ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
 		ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
 		ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
-		if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+		if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
 			return -EOPNOTSUPP;
 
 		mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
 					    MLX5_CT_LABELS_MASK);

@@ -21,6 +21,11 @@ enum {
 	MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
 };
 
+struct mlx5e_encap_key {
+	const struct ip_tunnel_key *ip_tun_key;
+	struct mlx5e_tc_tunnel *tc_tunnel;
+};
+
 struct mlx5e_tc_tunnel {
 	int tunnel_type;
 	enum mlx5_flow_match_level match_level;
@@ -44,6 +49,8 @@ struct mlx5e_tc_tunnel {
 			       struct flow_cls_offload *f,
 			       void *headers_c,
 			       void *headers_v);
+	bool (*encap_info_equal)(struct mlx5e_encap_key *a,
+				 struct mlx5e_encap_key *b);
 };
 
 extern struct mlx5e_tc_tunnel vxlan_tunnel;
@@ -101,6 +108,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
 				 void *headers_c,
 				 void *headers_v);
 
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+					   struct mlx5e_encap_key *b);
+
 #endif /* CONFIG_MLX5_ESWITCH */
 
 #endif //__MLX5_EN_TC_TUNNEL_H__

@@ -476,16 +476,11 @@ void mlx5e_detach_decap(struct mlx5e_priv *priv,
 	mlx5e_decap_dealloc(priv, d);
 }
 
-struct encap_key {
-	const struct ip_tunnel_key *ip_tun_key;
-	struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static int cmp_encap_info(struct encap_key *a,
-			  struct encap_key *b)
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+					   struct mlx5e_encap_key *b)
 {
-	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
-	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 &&
+	       a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
 }
 
 static int cmp_decap_info(struct mlx5e_decap_key *a,
@@ -494,7 +489,7 @@ static int cmp_decap_info(struct mlx5e_decap_key *a,
 	return memcmp(&a->key, &b->key, sizeof(b->key));
 }
 
-static int hash_encap_info(struct encap_key *key)
+static int hash_encap_info(struct mlx5e_encap_key *key)
 {
 	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
 		     key->tc_tunnel->tunnel_type);
@@ -516,18 +511,18 @@ static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
 }
 
 static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key,
 		uintptr_t hash_key)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5e_encap_key e_key;
 	struct mlx5e_encap_entry *e;
-	struct encap_key e_key;
 
 	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
 				   encap_hlist, hash_key) {
 		e_key.ip_tun_key = &e->tun_info->key;
 		e_key.tc_tunnel = e->tunnel;
-		if (!cmp_encap_info(&e_key, key) &&
+		if (e->tunnel->encap_info_equal(&e_key, key) &&
 		    mlx5e_encap_take(e))
 			return e;
 	}
@@ -694,8 +689,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	struct mlx5_flow_attr *attr = flow->attr;
 	const struct ip_tunnel_info *tun_info;
 	unsigned long tbl_time_before = 0;
-	struct encap_key key;
 	struct mlx5e_encap_entry *e;
+	struct mlx5e_encap_key key;
 	bool entry_created = false;
 	unsigned short family;
 	uintptr_t hash_key;

@@ -329,6 +329,34 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
 	return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
 }
 
+static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
+						 struct mlx5e_encap_key *b)
+{
+	struct ip_tunnel_info *a_info;
+	struct ip_tunnel_info *b_info;
+	bool a_has_opts, b_has_opts;
+
+	if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
+		return false;
+
+	a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+	b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+
+	/* keys are equal when both don't have any options attached */
+	if (!a_has_opts && !b_has_opts)
+		return true;
+
+	if (a_has_opts != b_has_opts)
+		return false;
+
+	/* geneve options stored in memory next to ip_tunnel_info struct */
+	a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
+	b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
+
+	return a_info->options_len == b_info->options_len &&
+	       memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0;
+}
+
 struct mlx5e_tc_tunnel geneve_tunnel = {
 	.tunnel_type = MLX5E_TC_TUNNEL_TYPE_GENEVE,
 	.match_level = MLX5_MATCH_L4,
@@ -338,4 +366,5 @@ struct mlx5e_tc_tunnel geneve_tunnel = {
 	.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_geneve,
 	.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_geneve,
 	.parse_tunnel = mlx5e_tc_tun_parse_geneve,
+	.encap_info_equal = mlx5e_tc_tun_encap_info_equal_geneve,
 };

@@ -94,4 +94,5 @@ struct mlx5e_tc_tunnel gre_tunnel = {
 	.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_gretap,
 	.parse_udp_ports = NULL,
 	.parse_tunnel = mlx5e_tc_tun_parse_gretap,
+	.encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
 };

@@ -131,4 +131,5 @@ struct mlx5e_tc_tunnel mplsoudp_tunnel = {
 	.generate_ip_tun_hdr = generate_ip_tun_hdr,
 	.parse_udp_ports = parse_udp_ports,
 	.parse_tunnel = parse_tunnel,
+	.encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
 };

@@ -150,4 +150,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
 	.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
 	.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
 	.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
+	.encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
 };

@@ -441,4 +441,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
 	return wqe_size * 2 - 1;
 }
 
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+	u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+
+	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
+}
+
 #endif

@@ -46,7 +46,8 @@ struct mlx5e_ktls_offload_context_rx {
 	struct tls12_crypto_info_aes_gcm_128 crypto_info;
 	struct accel_rule rule;
 	struct sock *sk;
-	struct mlx5e_rq_stats *stats;
+	struct mlx5e_rq_stats *rq_stats;
+	struct mlx5e_tls_sw_stats *sw_stats;
 	struct completion add_ctx;
 	u32 tirn;
 	u32 key_id;
@@ -137,11 +138,10 @@ post_static_params(struct mlx5e_icosq *sq,
 {
 	struct mlx5e_set_tls_static_params_wqe *wqe;
 	struct mlx5e_icosq_wqe_info wi;
-	u16 pi, num_wqebbs, room;
+	u16 pi, num_wqebbs;
 
 	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
-	room = mlx5e_stop_room_for_wqe(num_wqebbs);
-	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
 		return ERR_PTR(-ENOSPC);
 
 	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -168,11 +168,10 @@ post_progress_params(struct mlx5e_icosq *sq,
 {
 	struct mlx5e_set_tls_progress_params_wqe *wqe;
 	struct mlx5e_icosq_wqe_info wi;
-	u16 pi, num_wqebbs, room;
+	u16 pi, num_wqebbs;
 
 	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
-	room = mlx5e_stop_room_for_wqe(num_wqebbs);
-	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
 		return ERR_PTR(-ENOSPC);
 
 	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -218,7 +217,7 @@ unlock:
 	return err;
 
 err_out:
-	priv_rx->stats->tls_resync_req_skip++;
+	priv_rx->rq_stats->tls_resync_req_skip++;
 	err = PTR_ERR(cseg);
 	complete(&priv_rx->add_ctx);
 	goto unlock;
@@ -277,17 +276,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
 	buf->priv_rx = priv_rx;
 
-	BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
-
 	spin_lock_bh(&sq->channel->async_icosq_lock);
 
-	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
 		spin_unlock_bh(&sq->channel->async_icosq_lock);
 		err = -ENOSPC;
 		goto err_dma_unmap;
 	}
 
-	pi = mlx5e_icosq_get_next_pi(sq, 1);
+	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
 	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
 
 #define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
@@ -307,7 +304,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
 	wi = (struct mlx5e_icosq_wqe_info) {
 		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
-		.num_wqebbs = 1,
+		.num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
 		.tls_get_params.buf = buf,
 	};
 	icosq_fill_wi(sq, pi, &wi);
@@ -322,7 +319,7 @@ err_dma_unmap:
 err_free:
 	kfree(buf);
 err_out:
-	priv_rx->stats->tls_resync_req_skip++;
+	priv_rx->rq_stats->tls_resync_req_skip++;
 	return err;
 }
@@ -378,13 +375,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 
 	cseg = post_static_params(sq, priv_rx);
 	if (IS_ERR(cseg)) {
-		priv_rx->stats->tls_resync_res_skip++;
+		priv_rx->rq_stats->tls_resync_res_skip++;
 		err = PTR_ERR(cseg);
 		goto unlock;
 	}
 
 	/* Do not increment priv_rx refcnt, CQE handling is empty */
 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
-	priv_rx->stats->tls_resync_res_ok++;
+	priv_rx->rq_stats->tls_resync_res_ok++;
 
 unlock:
 	spin_unlock_bh(&c->async_icosq_lock);
@@ -420,13 +417,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
 	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
 	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
 	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
-		priv_rx->stats->tls_resync_req_skip++;
+		priv_rx->rq_stats->tls_resync_req_skip++;
 		goto out;
 	}
 
 	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
 	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
-	priv_rx->stats->tls_resync_req_end++;
+	priv_rx->rq_stats->tls_resync_req_end++;
 
 out:
 	mlx5e_ktls_priv_rx_put(priv_rx);
 	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
@@ -609,7 +606,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	priv_rx->rxq = rxq;
 	priv_rx->sk = sk;
 
-	priv_rx->stats = &priv->channel_stats[rxq].rq;
+	priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+	priv_rx->sw_stats = &priv->tls->sw_stats;
 	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
 	rqtn = priv->direct_tir[rxq].rqt.rqtn;
@@ -630,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	if (err)
 		goto err_post_wqes;
 
-	priv_rx->stats->tls_ctx++;
+	atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);
 
 	return 0;
@@ -666,7 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
 	if (cancel_work_sync(&resync->work))
 		mlx5e_ktls_priv_rx_put(priv_rx);
 
-	priv_rx->stats->tls_del++;
+	atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
 	if (priv_rx->rule.rule)
 		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2019 Mellanox Technologies.
 
+#include "en_accel/tls.h"
 #include "en_accel/ktls_txrx.h"
 #include "en_accel/ktls_utils.h"
 
@@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
 struct mlx5e_ktls_offload_context_tx {
 	struct tls_offload_context_tx *tx_ctx;
 	struct tls12_crypto_info_aes_gcm_128 crypto_info;
+	struct mlx5e_tls_sw_stats *sw_stats;
 	u32 expected_seq;
 	u32 tisn;
 	u32 key_id;
@@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 	if (err)
 		goto err_create_key;
 
+	priv_tx->sw_stats = &priv->tls->sw_stats;
 	priv_tx->expected_seq = start_offload_tcp_sn;
 	priv_tx->crypto_info =
 		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 		goto err_create_tis;
 
 	priv_tx->ctx_post_pending = true;
+	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
 
 	return 0;
 
@@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
 	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
 		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
-		stats->tls_ctx++;
 	}
 
 	seq = ntohl(tcp_hdr(skb)->seq);

@@ -41,10 +41,13 @@
 #include "en.h"
 
 struct mlx5e_tls_sw_stats {
+	atomic64_t tx_tls_ctx;
 	atomic64_t tx_tls_drop_metadata;
 	atomic64_t tx_tls_drop_resync_alloc;
 	atomic64_t tx_tls_drop_no_sync_data;
 	atomic64_t tx_tls_drop_bypass_required;
+	atomic64_t rx_tls_ctx;
+	atomic64_t rx_tls_del;
 	atomic64_t rx_tls_drop_resync_request;
 	atomic64_t rx_tls_resync_request;
 	atomic64_t rx_tls_resync_reply;
@@ -45,49 +45,60 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
 };
 
+static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
+};
+
 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
 	atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
 
-#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
-
-static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
 {
-	return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+	if (!priv->tls)
+		return NULL;
+	if (mlx5_accel_is_ktls_device(priv->mdev))
+		return mlx5e_ktls_sw_stats_desc;
+	return mlx5e_tls_sw_stats_desc;
 }
 
 int mlx5e_tls_get_count(struct mlx5e_priv *priv)
 {
-	if (!is_tls_atomic_stats(priv))
+	if (!priv->tls)
 		return 0;
-
-	return NUM_TLS_SW_COUNTERS;
+	if (mlx5_accel_is_ktls_device(priv->mdev))
+		return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
+	return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
 }
 
 int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
 {
-	unsigned int i, idx = 0;
+	const struct counter_desc *stats_desc;
+	unsigned int i, n, idx = 0;
 
-	if (!is_tls_atomic_stats(priv))
-		return 0;
+	stats_desc = get_tls_atomic_stats(priv);
+	n = mlx5e_tls_get_count(priv);
 
-	for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+	for (i = 0; i < n; i++)
 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       mlx5e_tls_sw_stats_desc[i].format);
+		       stats_desc[i].format);
 
-	return NUM_TLS_SW_COUNTERS;
+	return n;
 }
 
 int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
 {
-	int i, idx = 0;
+	const struct counter_desc *stats_desc;
+	unsigned int i, n, idx = 0;
 
-	if (!is_tls_atomic_stats(priv))
-		return 0;
+	stats_desc = get_tls_atomic_stats(priv);
+	n = mlx5e_tls_get_count(priv);
 
-	for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+	for (i = 0; i < n; i++)
 		data[idx++] =
 			MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
-						mlx5e_tls_sw_stats_desc, i);
+						stats_desc, i);
 
-	return NUM_TLS_SW_COUNTERS;
+	return n;
 }

@@ -758,11 +758,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
 	return 0;
 }
 
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
-						   u32 eth_proto_cap,
-						   u8 connector_type, bool ext)
+static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
+						   struct ethtool_link_ksettings *link_ksettings,
+						   u32 eth_proto_cap, u8 connector_type)
 {
-	if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+	if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
 		if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
 				   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
 				   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -898,9 +898,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
 		[MLX5E_PORT_OTHER]              = PORT_OTHER,
 	};
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
+static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
 {
-	if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+	if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
 		return ptys2connector_type[connector_type];
 
 	if (eth_proto &
@@ -1001,11 +1001,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 			       data_rate_oper, link_ksettings);
 
 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-
-	link_ksettings->base.port = get_connector_port(eth_proto_oper,
-						       connector_type, ext);
-	ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-					       connector_type, ext);
+	connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
+			 connector_type : MLX5E_PORT_UNKNOWN;
+	link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
+	ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
+					       connector_type);
 	get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
 	if (an_status == MLX5_AN_COMPLETE)

@@ -1091,6 +1091,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	sq->channel = c;
 	sq->uar_map = mdev->mlx5e_res.bfreg.map;
+	sq->reserved_room = param->stop_room;
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -2350,6 +2351,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
 	mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
 }
 
+static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
+					  struct mlx5e_params *params,
+					  u8 log_wq_size,
+					  struct mlx5e_sq_param *param)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	mlx5e_build_sq_param_common(priv, param);
+
+	/* async_icosq is used by XSK only if xdp_prog is active */
+	if (params->xdp_prog)
+		param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+	mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
+}
+
 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
 			     struct mlx5e_params *params,
 			     struct mlx5e_sq_param *param)
@@ -2398,7 +2417,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
 	mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
 	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
 	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
-	mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
+	mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
 }
 
 int mlx5e_open_channels(struct mlx5e_priv *priv,

@@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
@@ -180,8 +179,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
@@ -342,8 +339,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
 	s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
 	s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
-	s->rx_tls_ctx += rq_stats->tls_ctx;
-	s->rx_tls_del += rq_stats->tls_del;
 	s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
 	s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
 	s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
@@ -390,7 +385,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
 	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
 	s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
-	s->tx_tls_ctx += sq_stats->tls_ctx;
 	s->tx_tls_ooo += sq_stats->tls_ooo;
 	s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
 	s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
@@ -1622,8 +1616,6 @@ static const struct counter_desc rq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
@@ -1650,7 +1642,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
@@ -1776,7 +1767,6 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },

@@ -191,7 +191,6 @@ struct mlx5e_sw_stats {
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tx_tls_encrypted_packets;
 	u64 tx_tls_encrypted_bytes;
-	u64 tx_tls_ctx;
 	u64 tx_tls_ooo;
 	u64 tx_tls_dump_packets;
 	u64 tx_tls_dump_bytes;
@@ -202,8 +201,6 @@ struct mlx5e_sw_stats {
 	u64 rx_tls_decrypted_packets;
 	u64 rx_tls_decrypted_bytes;
-	u64 rx_tls_ctx;
-	u64 rx_tls_del;
 	u64 rx_tls_resync_req_pkt;
 	u64 rx_tls_resync_req_start;
 	u64 rx_tls_resync_req_end;
@@ -334,8 +331,6 @@ struct mlx5e_rq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tls_decrypted_packets;
 	u64 tls_decrypted_bytes;
-	u64 tls_ctx;
-	u64 tls_del;
 	u64 tls_resync_req_pkt;
 	u64 tls_resync_req_start;
 	u64 tls_resync_req_end;
@@ -364,7 +359,6 @@ struct mlx5e_sq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tls_encrypted_packets;
 	u64 tls_encrypted_bytes;
-	u64 tls_ctx;
 	u64 tls_ooo;
 	u64 tls_dump_packets;
 	u64 tls_dump_bytes;

@@ -931,13 +931,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
 	mutex_unlock(&table->lock);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#define MLX5_MAX_ASYNC_EQS 4
+#else
+#define MLX5_MAX_ASYNC_EQS 3
+#endif
+
 int mlx5_eq_table_create(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+		      MLX5_CAP_GEN(dev, max_num_eqs) :
+		      1 << MLX5_CAP_GEN(dev, log_max_eq);
 	int err;
 
 	eq_table->num_comp_eqs =
-		mlx5_irq_get_num_comp(eq_table->irq_table);
+		min_t(int,
+		      mlx5_irq_get_num_comp(eq_table->irq_table),
+		      num_eqs - MLX5_MAX_ASYNC_EQS);
 
 	err = create_async_eqs(dev);
 	if (err) {

@@ -537,6 +537,14 @@ esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *
 	return i;
 }
 
+static bool
+esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
+{
+	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
+	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
+}
+
 static int
 esw_setup_dests(struct mlx5_flow_destination *dest,
 		struct mlx5_flow_act *flow_act,
@@ -550,9 +558,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 	int err = 0;
 
 	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
-	    MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
-	    mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+	    esw_src_port_rewrite_supported(esw))
 		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
 
 	if (attr->dest_ft) {
@@ -1716,36 +1722,40 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
 	}
 	esw->fdb_table.offloads.send_to_vport_grp = g;
 
-	/* meta send to vport */
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-		 MLX5_MATCH_MISC_PARAMETERS_2);
+	if (esw_src_port_rewrite_supported(esw)) {
+		/* meta send to vport */
+		memset(flow_group_in, 0, inlen);
+		MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+			 MLX5_MATCH_MISC_PARAMETERS_2);
 
-	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+		match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 
-	MLX5_SET(fte_match_param, match_criteria,
-		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
-	MLX5_SET(fte_match_param, match_criteria,
-		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+		MLX5_SET(fte_match_param, match_criteria,
+			 misc_parameters_2.metadata_reg_c_0,
+			 mlx5_eswitch_get_vport_metadata_mask());
+		MLX5_SET(fte_match_param, match_criteria,
+			 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
 
-	num_vfs = esw->esw_funcs.num_vfs;
-	if (num_vfs) {
-		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
-		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
-		ix += num_vfs;
+		num_vfs = esw->esw_funcs.num_vfs;
+		if (num_vfs) {
+			MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+			MLX5_SET(create_flow_group_in, flow_group_in,
+				 end_flow_index, ix + num_vfs - 1);
+			ix += num_vfs;
 
-		g = mlx5_create_flow_group(fdb, flow_group_in);
-		if (IS_ERR(g)) {
-			err = PTR_ERR(g);
-			esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
-				 err);
-			goto send_vport_meta_err;
+			g = mlx5_create_flow_group(fdb, flow_group_in);
+			if (IS_ERR(g)) {
+				err = PTR_ERR(g);
+				esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+					 err);
+				goto send_vport_meta_err;
+			}
+			esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+			err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+			if (err)
+				goto meta_rule_err;
 		}
-		esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
-		err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
-		if (err)
-			goto meta_rule_err;
 	}
 
 	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {