mlx5-fixes-2021-09-30

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmFWJy0ACgkQSD+KveBX
 +j7wJQgAqlx3rBiJGYgtnEr/HHzBSgZXkIICGfoQHfiOORoNTjpq46Csxuiu2Fg1
 JvxB1Dahoht/8Vfcm+GooKKDRlkUXx8vYhQF3BSQ7oboVGkWZ1a6ZuEUSfW3rrag
 oBkSCuVcn6EwVBFF0NvAi9ARd2G5GAYsew9yQHnXVQzp9ZWqzZcMMsp3pdhQvKLu
 CL0SlY0uZhXHyHNl5Gz79xQDPwSyXbVlhhEK9lIaPi5tcqA3X1Y7ZTqP/ouDjZBj
 /VLCIYJZ40471c4a2YVLEXiVZu0E0BDgOLlEWRiHIkTrQf+gjcjOCOUT1CkMOuid
 F7Ny4xnVaCzCIx8V5i3FXAOfcOeOIA==
 =TCu2
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2021-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-09-30

This series introduces some fixes to the mlx5 driver.
Please pull and let me know if there are any problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit ea2dd331bf by David S. Miller, 2021-10-01 11:19:40 +01:00
14 changed files with 194 additions and 105 deletions

drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -250,6 +250,7 @@ struct mlx5e_params {
 	struct {
 		u16 mode;
 		u8 num_tc;
+		struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
 	} mqprio;
 	bool rx_cqe_compress_def;
 	bool tunneled_offload_en;
@@ -843,6 +844,7 @@ struct mlx5e_priv {
 	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
 	struct mlx5e_channel_stats trap_stats;
 	struct mlx5e_ptp_stats ptp_stats;
+	u16 stats_nch;
 	u16 max_nch;
 	u8 max_opened_tc;
 	bool tx_ptp_opened;
@@ -1099,12 +1101,6 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
 				 struct ethtool_pauseparam *pauseparam);
 
 /* mlx5e generic netdev management API */
-static inline unsigned int
-mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
-{
-	return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
-}
-
 static inline bool
 mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
 {
@@ -1113,11 +1109,13 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
 }
 
 int mlx5e_priv_init(struct mlx5e_priv *priv,
+		    const struct mlx5e_profile *profile,
 		    struct net_device *netdev,
 		    struct mlx5_core_dev *mdev);
 void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
 struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+		    unsigned int txqs, unsigned int rxqs);
 int mlx5e_attach_netdev(struct mlx5e_priv *priv);
 void mlx5e_detach_netdev(struct mlx5e_priv *priv);
 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);

drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c

@@ -35,7 +35,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
 {
 	int ch, i = 0;
 
-	for (ch = 0; ch < priv->max_nch; ch++) {
+	for (ch = 0; ch < priv->stats_nch; ch++) {
 		void *buf = data + i;
 
 		if (WARN_ON_ONCE(buf +
@@ -51,7 +51,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
 static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv)
 {
 	return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) *
-		priv->max_nch);
+		priv->stats_nch);
 }
 
 static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
@@ -100,7 +100,7 @@ static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent,
 	sagent = &priv->stats_agent;
 
 	block->version = MLX5_HV_VHCA_STATS_VERSION;
-	block->rings = priv->max_nch;
+	block->rings = priv->stats_nch;
 
 	if (!block->command) {
 		cancel_delayed_work_sync(&priv->stats_agent.work);

drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c

@@ -13,8 +13,6 @@ struct mlx5e_ptp_fs {
 	bool valid;
 };
 
-#define MLX5E_PTP_CHANNEL_IX 0
-
 struct mlx5e_ptp_params {
 	struct mlx5e_params params;
 	struct mlx5e_sq_param txq_sq_param;
@@ -509,6 +507,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
 	rq->mdev = mdev;
 	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->stats = &c->priv->ptp_stats.rq;
+	rq->ix = MLX5E_PTP_CHANNEL_IX;
 	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
 	err = mlx5e_rq_set_handlers(rq, params, false);
 	if (err)

drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h

@@ -8,6 +8,8 @@
 #include "en_stats.h"
 #include <linux/ptp_classify.h>
 
+#define MLX5E_PTP_CHANNEL_IX 0
+
 struct mlx5e_ptpsq {
 	struct mlx5e_txqsq txqsq;
 	struct mlx5e_cq ts_cq;

drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c

@@ -2034,6 +2034,17 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
 	}
 
 	new_params = priv->channels.params;
+	/* Don't allow enabling TX-port-TS if MQPRIO mode channel offload is
+	 * active, since it defines explicitly which TC accepts the packet.
+	 * This conflicts with TX-port-TS hijacking the PTP traffic to a specific
+	 * HW TX-queue.
+	 */
+	if (enable && new_params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+		netdev_err(priv->netdev,
+			   "%s: MQPRIO mode channel offload is active, cannot set the TX-port-TS\n",
+			   __func__);
+		return -EINVAL;
+	}
 	MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);
 	/* No need to verify SQ stop room as
 	 * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -2264,7 +2264,7 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
 }
 
 static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
-				struct tc_mqprio_qopt_offload *mqprio)
+				struct netdev_tc_txq *tc_to_txq)
 {
 	int tc, err;
@@ -2282,11 +2282,8 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
 	for (tc = 0; tc < ntc; tc++) {
 		u16 count, offset;
 
-		/* For DCB mode, map netdev TCs to offset 0
-		 * We have our own UP to TXQ mapping for QoS
-		 */
-		count = mqprio ? mqprio->qopt.count[tc] : nch;
-		offset = mqprio ? mqprio->qopt.offset[tc] : 0;
+		count = tc_to_txq[tc].count;
+		offset = tc_to_txq[tc].offset;
 		netdev_set_tc_queue(netdev, tc, count, offset);
 	}
@@ -2315,19 +2312,24 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
 
 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 {
+	struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
 	struct net_device *netdev = priv->netdev;
 	int old_num_txqs, old_ntc;
 	int num_rxqs, nch, ntc;
 	int err;
+	int i;
 
 	old_num_txqs = netdev->real_num_tx_queues;
 	old_ntc = netdev->num_tc ? : 1;
+	for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
+		old_tc_to_txq[i] = netdev->tc_to_txq[i];
 
 	nch = priv->channels.params.num_channels;
-	ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+	ntc = priv->channels.params.mqprio.num_tc;
 	num_rxqs = nch * priv->profile->rq_groups;
+	tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
 
-	err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL);
+	err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
 	if (err)
 		goto err_out;
 
 	err = mlx5e_update_tx_netdev_queues(priv);
@@ -2350,11 +2352,14 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 	WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
 
 err_tcs:
-	mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL);
+	WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
+					  old_tc_to_txq));
err_out:
 	return err;
 }
 
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
+
 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
 					   struct mlx5e_params *params)
 {
@@ -2861,6 +2866,58 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 	return 0;
 }
 
+static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+						 int ntc, int nch)
+{
+	int tc;
+
+	memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
+
+	/* Map netdev TCs to offset 0.
+	 * We have our own UP to TXQ mapping for DCB mode of QoS
+	 */
+	for (tc = 0; tc < ntc; tc++) {
+		tc_to_txq[tc] = (struct netdev_tc_txq) {
+			.count = nch,
+			.offset = 0,
+		};
+	}
+}
+
+static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+					 struct tc_mqprio_qopt *qopt)
+{
+	int tc;
+
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+		tc_to_txq[tc] = (struct netdev_tc_txq) {
+			.count = qopt->count[tc],
+			.offset = qopt->offset[tc],
+		};
+	}
+}
+
+static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
+{
+	params->mqprio.mode = TC_MQPRIO_MODE_DCB;
+	params->mqprio.num_tc = num_tc;
+	mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
+					     params->num_channels);
+}
+
+static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
+					    struct tc_mqprio_qopt *qopt)
+{
+	params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
+	params->mqprio.num_tc = qopt->num_tc;
+	mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+}
+
+static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
+{
+	mlx5e_params_mqprio_dcb_set(params, 1);
+}
+
 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
 				     struct tc_mqprio_qopt *mqprio)
 {
@@ -2874,8 +2931,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
 		return -EINVAL;
 
 	new_params = priv->channels.params;
-	new_params.mqprio.mode = TC_MQPRIO_MODE_DCB;
-	new_params.mqprio.num_tc = tc ? tc : 1;
+	mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
 
 	err = mlx5e_safe_switch_params(priv, &new_params,
 				       mlx5e_num_channels_changed_ctx, NULL, true);
@@ -2889,9 +2945,17 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
 					 struct tc_mqprio_qopt_offload *mqprio)
 {
 	struct net_device *netdev = priv->netdev;
+	struct mlx5e_ptp *ptp_channel;
 	int agg_count = 0;
 	int i;
 
+	ptp_channel = priv->channels.ptp;
+	if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
+		netdev_err(netdev,
+			   "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
+		return -EINVAL;
+	}
+
 	if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
 	    mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
 		return -EINVAL;
@@ -2926,25 +2990,12 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
 	return 0;
 }
 
-static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx)
-{
-	struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx;
-	struct net_device *netdev = priv->netdev;
-	u8 num_tc;
-
-	if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
-		return -EINVAL;
-
-	num_tc = priv->channels.params.mqprio.num_tc;
-	mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio);
-
-	return 0;
-}
-
 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
 					 struct tc_mqprio_qopt_offload *mqprio)
 {
+	mlx5e_fp_preactivate preactivate;
 	struct mlx5e_params new_params;
+	bool nch_changed;
 	int err;
 
 	err = mlx5e_mqprio_channel_validate(priv, mqprio);
@@ -2952,12 +3003,12 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
 		return err;
 
 	new_params = priv->channels.params;
-	new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
-	new_params.mqprio.num_tc = mqprio->qopt.num_tc;
-
-	err = mlx5e_safe_switch_params(priv, &new_params,
-				       mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true);
+	mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
 
-	return err;
+	nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
+	preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
+		mlx5e_update_netdev_queues_ctx;
+	return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
 }
 
 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3065,7 +3116,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
 {
 	int i;
 
-	for (i = 0; i < priv->max_nch; i++) {
+	for (i = 0; i < priv->stats_nch; i++) {
 		struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
 		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
 		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -4186,13 +4237,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u8 rx_cq_period_mode;
 
-	priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
-
 	params->sw_mtu = mtu;
 	params->hard_mtu = MLX5E_ETH_HARD_MTU;
 	params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
 				     priv->max_nch);
-	params->mqprio.num_tc = 1;
+	mlx5e_params_mqprio_reset(params);
 
 	/* Set an initial non-zero value, so that mlx5e_select_queue won't
 	 * divide by zero if called before first activating channels.
@@ -4682,8 +4731,35 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
 	.rx_ptp_support = true,
 };
 
+static unsigned int
+mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
+		   const struct mlx5e_profile *profile)
+{
+	unsigned int max_nch, tmp;
+
+	/* core resources */
+	max_nch = mlx5e_get_max_num_channels(mdev);
+
+	/* netdev rx queues */
+	tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+	max_nch = min_t(unsigned int, max_nch, tmp);
+
+	/* netdev tx queues */
+	tmp = netdev->num_tx_queues;
+	if (mlx5_qos_is_supported(mdev))
+		tmp -= mlx5e_qos_max_leaf_nodes(mdev);
+	if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+		tmp -= profile->max_tc;
+	tmp = tmp / profile->max_tc;
+	max_nch = min_t(unsigned int, max_nch, tmp);
+
+	return max_nch;
+}
+
 /* mlx5e generic netdev management API (move to en_common.c) */
 int mlx5e_priv_init(struct mlx5e_priv *priv,
+		    const struct mlx5e_profile *profile,
 		    struct net_device *netdev,
 		    struct mlx5_core_dev *mdev)
 {
@@ -4691,6 +4767,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
 	priv->mdev = mdev;
 	priv->netdev = netdev;
 	priv->msglevel = MLX5E_MSG_LEVEL;
+	priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
+	priv->stats_nch = priv->max_nch;
 	priv->max_opened_tc = 1;
 
 	if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -4734,7 +4812,8 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 }
 
 struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+		    unsigned int txqs, unsigned int rxqs)
 {
 	struct net_device *netdev;
 	int err;
@@ -4745,7 +4824,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int
 		return NULL;
 	}
 
-	err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
+	err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
 	if (err) {
 		mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
 		goto err_free_netdev;
@@ -4787,7 +4866,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
 	/* max number of channels may have changed */
-	max_nch = mlx5e_get_max_num_channels(priv->mdev);
+	max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
 	if (priv->channels.params.num_channels > max_nch) {
 		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
 		/* Reducing the number of channels - RXFH has to be reset, and
@@ -4795,7 +4874,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 		 */
 		priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
 		priv->channels.params.num_channels = max_nch;
+		if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+			mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
+			mlx5e_params_mqprio_reset(&priv->channels.params);
+		}
 	}
+	if (max_nch != priv->max_nch) {
+		mlx5_core_warn(priv->mdev,
+			       "MLX5E: Updating max number of channels from %u to %u\n",
+			       priv->max_nch, max_nch);
+		priv->max_nch = max_nch;
+	}
+
 	/* 1. Set the real number of queues in the kernel the first time.
 	 * 2. Set our default XPS cpumask.
 	 * 3. Build the RQT.
@@ -4860,7 +4950,7 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int err;
 
-	err = mlx5e_priv_init(priv, netdev, mdev);
+	err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
 	if (err) {
 		mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
 		return err;
@@ -4886,20 +4976,12 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
 int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
 				const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
-	unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
 	const struct mlx5e_profile *orig_profile = priv->profile;
 	struct net_device *netdev = priv->netdev;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	void *orig_ppriv = priv->ppriv;
 	int err, rollback_err;
 
-	/* sanity */
-	if (new_max_nch != priv->max_nch) {
-		netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
-			    __func__);
-		return -EINVAL;
-	}
-
 	/* cleanup old profile */
 	mlx5e_detach_netdev(priv);
 	priv->profile->cleanup(priv);
@@ -4995,7 +5077,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
 	nch = mlx5e_get_max_num_channels(mdev);
 	txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
 	rxqs = nch * profile->rq_groups;
-	netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
+	netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
 	if (!netdev) {
 		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
 		return -ENOMEM;

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -596,7 +596,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
 					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
 					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
-	priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
 	params = &priv->channels.params;
 
 	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -1169,7 +1168,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	nch = mlx5e_get_max_num_channels(dev);
 	txqs = nch * profile->max_tc;
 	rxqs = nch * profile->rq_groups;
-	netdev = mlx5e_create_netdev(dev, txqs, rxqs);
+	netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
 	if (!netdev) {
 		mlx5_core_warn(dev,
 			       "Failed to create representor netdev for vport %d\n",

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -1001,14 +1001,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		goto csum_unnecessary;
 
 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-		u8 ipproto = get_ip_proto(skb, network_depth, proto);
-
-		if (unlikely(ipproto == IPPROTO_SCTP))
+		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
 			goto csum_unnecessary;
 
-		if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
-			goto csum_none;
-
 		stats->csum_complete++;
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);

drivers/net/ethernet/mellanox/mlx5/core/en_stats.c

@@ -34,6 +34,7 @@
 #include "en.h"
 #include "en_accel/tls.h"
 #include "en_accel/en_accel.h"
+#include "en/ptp.h"
 
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
 {
@@ -450,7 +451,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 
 	memset(s, 0, sizeof(*s));
 
-	for (i = 0; i < priv->max_nch; i++) {
+	for (i = 0; i < priv->stats_nch; i++) {
 		struct mlx5e_channel_stats *channel_stats =
 			&priv->channel_stats[i];
 		int j;
@@ -2076,7 +2077,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
 	if (priv->rx_ptp_opened) {
 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
-				ptp_rq_stats_desc[i].format);
+				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
 	}
 	return idx;
 }
@@ -2119,7 +2120,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
 {
-	int max_nch = priv->max_nch;
+	int max_nch = priv->stats_nch;
 
 	return (NUM_RQ_STATS * max_nch) +
 	       (NUM_CH_STATS * max_nch) +
@@ -2133,7 +2134,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
 {
 	bool is_xsk = priv->xsk.ever_used;
-	int max_nch = priv->max_nch;
+	int max_nch = priv->stats_nch;
 	int i, j, tc;
 
 	for (i = 0; i < max_nch; i++)
@@ -2175,7 +2176,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
 {
 	bool is_xsk = priv->xsk.ever_used;
-	int max_nch = priv->max_nch;
+	int max_nch = priv->stats_nch;
 	int i, j, tc;
 
 	for (i = 0; i < max_nch; i++)

drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c

@@ -79,12 +79,16 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 	int dest_num = 0;
 	int err = 0;
 
-	if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+	if (vport->egress.legacy.drop_counter) {
+		drop_counter = vport->egress.legacy.drop_counter;
+	} else if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
 		drop_counter = mlx5_fc_create(esw->dev, false);
-		if (IS_ERR(drop_counter))
+		if (IS_ERR(drop_counter)) {
 			esw_warn(esw->dev,
 				 "vport[%d] configure egress drop rule counter err(%ld)\n",
 				 vport->vport, PTR_ERR(drop_counter));
+			drop_counter = NULL;
+		}
 		vport->egress.legacy.drop_counter = drop_counter;
 	}
 
@@ -123,7 +127,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
 
 	/* Attach egress drop flow counter */
-	if (!IS_ERR_OR_NULL(drop_counter)) {
+	if (drop_counter) {
 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
 		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 		drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
@@ -162,7 +166,7 @@ void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw,
 	esw_acl_egress_table_destroy(vport);
 
 clean_drop_counter:
-	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) {
+	if (vport->egress.legacy.drop_counter) {
 		mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
 		vport->egress.legacy.drop_counter = NULL;
 	}

drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c

@@ -160,7 +160,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 
 	esw_acl_ingress_lgcy_rules_destroy(vport);
 
-	if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+	if (vport->ingress.legacy.drop_counter) {
+		counter = vport->ingress.legacy.drop_counter;
+	} else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
 		counter = mlx5_fc_create(esw->dev, false);
 		if (IS_ERR(counter)) {
 			esw_warn(esw->dev,

drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c

@@ -113,7 +113,7 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
 	struct mlx5e_sw_stats s = { 0 };
 	int i, j;
 
-	for (i = 0; i < priv->max_nch; i++) {
+	for (i = 0; i < priv->stats_nch; i++) {
 		struct mlx5e_channel_stats *channel_stats;
 		struct mlx5e_rq_stats *rq_stats;
 
@@ -711,7 +711,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
 		goto destroy_ht;
 	}
 
-	err = mlx5e_priv_init(epriv, netdev, mdev);
+	err = mlx5e_priv_init(epriv, prof, netdev, mdev);
 	if (err)
 		goto destroy_mdev_resources;

drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c

@@ -448,22 +448,20 @@ static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
 	return cycles_now + cycles_delta;
 }
 
-static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
-				      s64 sec, u32 nsec)
+static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
 {
-	struct timespec64 ts;
+	struct timespec64 ts = {};
 	s64 target_ns;
 
 	ts.tv_sec = sec;
-	ts.tv_nsec = nsec;
 	target_ns = timespec64_to_ns(&ts);
 
 	return find_target_cycles(mdev, target_ns);
 }
 
-static u64 perout_conf_real_time(s64 sec, u32 nsec)
+static u64 perout_conf_real_time(s64 sec)
 {
-	return (u64)nsec | (u64)sec << 32;
+	return (u64)sec << 32;
 }
 
 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
@@ -474,6 +472,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 		container_of(ptp, struct mlx5_clock, ptp_info);
 	struct mlx5_core_dev *mdev =
 		container_of(clock, struct mlx5_core_dev, clock);
+	bool rt_mode = mlx5_real_time_mode(mdev);
 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
 	struct timespec64 ts;
 	u32 field_select = 0;
@@ -501,8 +500,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 	if (on) {
-		bool rt_mode = mlx5_real_time_mode(mdev);
-		u32 nsec;
-		s64 sec;
+		s64 sec = rq->perout.start.sec;
+
+		if (rq->perout.start.nsec)
+			return -EINVAL;
 
 		pin_mode = MLX5_PIN_MODE_OUT;
 		pattern = MLX5_OUT_PATTERN_PERIODIC;
@@ -513,14 +514,11 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 		if ((ns >> 1) != 500000000LL)
 			return -EINVAL;
 
-		nsec = rq->perout.start.nsec;
-		sec = rq->perout.start.sec;
-
 		if (rt_mode && sec > U32_MAX)
 			return -EINVAL;
 
-		time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
-				       perout_conf_internal_timer(mdev, sec, nsec);
+		time_stamp = rt_mode ? perout_conf_real_time(sec) :
+				       perout_conf_internal_timer(mdev, sec);
 
 		field_select |= MLX5_MTPPS_FS_PIN_MODE |
 				MLX5_MTPPS_FS_PATTERN |
@@ -538,6 +536,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 	if (err)
 		return err;
 
+	if (rt_mode)
+		return 0;
+
 	return mlx5_set_mtppse(mdev, pin, 0,
 			       MLX5_EVENT_MODE_REPETETIVE & on);
 }
@@ -705,20 +706,14 @@ static void ts_next_sec(struct timespec64 *ts)
 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
 					struct mlx5_clock *clock)
 {
-	bool rt_mode = mlx5_real_time_mode(mdev);
 	struct timespec64 ts;
 	s64 target_ns;
 
-	if (rt_mode)
-		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
-	else
-		mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
-
+	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
 	ts_next_sec(&ts);
 	target_ns = timespec64_to_ns(&ts);
 
-	return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
-			 find_target_cycles(mdev, target_ns);
+	return find_target_cycles(mdev, target_ns);
 }
 
 static int mlx5_pps_event(struct notifier_block *nb,

drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c

@@ -13,8 +13,8 @@
 #endif
 
 #define MLX5_MAX_IRQ_NAME (32)
-/* max irq_index is 255. three chars */
-#define MLX5_MAX_IRQ_IDX_CHARS (3)
+/* max irq_index is 2047, so four chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (4)
 
 #define MLX5_SFS_PER_CTRL_IRQ 64
 #define MLX5_IRQ_CTRL_SF_MAX 8
@@ -633,8 +633,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
 {
 	if (table->sf_comp_pool)
-		return table->sf_comp_pool->xa_num_irqs.max -
-			table->sf_comp_pool->xa_num_irqs.min + 1;
+		return min_t(int, num_online_cpus(),
+			     table->sf_comp_pool->xa_num_irqs.max -
+			     table->sf_comp_pool->xa_num_irqs.min + 1);
 	else
 		return mlx5_irq_table_get_num_comp(table);
 }