net: stmmac: Offload queueMaxSDU from tc-taprio

Add support for configuring queueMaxSDU. As the DWMAC IPs don't support
a queueMaxSDU table, handle this in software. The maximum 802.3 frame
size that is allowed to be transmitted by any queue is queueMaxSDU +
16 bytes (i.e. 6 bytes SA + 6 bytes DA + 4 bytes FCS).

Inspired by the Intel i225 driver.

Signed-off-by: Rohan G Thomas <rohan.g.thomas@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Rohan G Thomas on 2024-01-27 12:04:41 +08:00; committed by David S. Miller.
parent 57bf3dd2fe
commit c5c3e1bfc9
4 changed files with 49 additions and 0 deletions
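
For illustration only (not something the patch adds): a small userspace
sketch of how queueMaxSDU relates to the limit the driver stores per Tx
queue (compared against skb->len, which carries the Ethernet header but
no FCS) and to the largest 802.3 frame on the wire. The constants mirror
<linux/if_ether.h>; the queueMaxSDU value of 1500 is an arbitrary example.

#include <stdio.h>

#define ETH_ALEN    6   /* DA or SA length */
#define ETH_TLEN    2   /* EtherType/Length field */
#define ETH_HLEN    14  /* DA + SA + EtherType */
#define ETH_FCS_LEN 4

int main(void)
{
        unsigned int queue_max_sdu = 1500; /* example queueMaxSDU for one TC */

        /* Limit stored by the driver and checked against skb->len */
        unsigned int skb_limit = queue_max_sdu + ETH_HLEN - ETH_TLEN;
        /* Largest on-wire frame, as stated in the commit message */
        unsigned int wire_limit = queue_max_sdu + 2 * ETH_ALEN + ETH_FCS_LEN;

        printf("max skb->len:   %u bytes\n", skb_limit);  /* 1512 */
        printf("max wire frame: %u bytes\n", wire_limit); /* 1516 */
        return 0;
}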

drivers/net/ethernet/stmicro/stmmac/common.h

@@ -202,6 +202,7 @@ struct stmmac_extra_stats {
         unsigned long mtl_est_hlbf;
         unsigned long mtl_est_btre;
         unsigned long mtl_est_btrlm;
+        unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
         /* per queue statistics */
         struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
         struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];

drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -2507,6 +2507,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
                         break;
 
+                if (priv->plat->est && priv->plat->est->enable &&
+                    priv->plat->est->max_sdu[queue] &&
+                    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+                        priv->xstats.max_sdu_txq_drop[queue]++;
+                        continue;
+                }
+
                 if (likely(priv->extend_desc))
                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -4498,6 +4505,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                         return stmmac_tso_xmit(skb, dev);
         }
 
+        if (priv->plat->est && priv->plat->est->enable &&
+            priv->plat->est->max_sdu[queue] &&
+            skb->len > priv->plat->est->max_sdu[queue]) {
+                priv->xstats.max_sdu_txq_drop[queue]++;
+                goto max_sdu_err;
+        }
+
         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@ -4715,6 +4729,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 dma_map_err:
         netdev_err(priv->dev, "Tx DMA map failed\n");
+max_sdu_err:
         dev_kfree_skb(skb);
         priv->xstats.tx_dropped++;
         return NETDEV_TX_OK;
@@ -4871,6 +4886,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
                 return STMMAC_XDP_CONSUMED;
 
+        if (priv->plat->est && priv->plat->est->enable &&
+            priv->plat->est->max_sdu[queue] &&
+            xdpf->len > priv->plat->est->max_sdu[queue]) {
+                priv->xstats.max_sdu_txq_drop[queue]++;
+                return STMMAC_XDP_CONSUMED;
+        }
+
         if (likely(priv->extend_desc))
                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
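
The same three-part test (EST present, EST enabled, non-zero per-queue
max_sdu) is open-coded in all three transmit paths above. Purely as an
illustration of the pattern, and not part of this patch, it could be
captured in a helper along these lines (stmmac_est_frame_too_big() is a
hypothetical name):

/* Hypothetical helper, not in the patch: true when EST is active, the
 * queue has a queueMaxSDU-derived limit and the frame exceeds it.
 */
static bool stmmac_est_frame_too_big(struct stmmac_priv *priv, u32 queue,
                                     unsigned int len)
{
        struct stmmac_est *est = priv->plat->est;

        return est && est->enable && est->max_sdu[queue] &&
               len > est->max_sdu[queue];
}

Each caller would then bump priv->xstats.max_sdu_txq_drop[queue] and drop
the frame, exactly as the hunks above do.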

drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c

@@ -915,6 +915,28 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
         return time;
 }
 
+static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
+                                     struct tc_taprio_qopt_offload *qopt)
+{
+        struct plat_stmmacenet_data *plat = priv->plat;
+        u32 num_tc = qopt->mqprio.qopt.num_tc;
+        u32 offset, count, i, j;
+
+        /* QueueMaxSDU received from the driver corresponds to the Linux traffic
+         * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
+         */
+        for (i = 0; i < num_tc; i++) {
+                if (!qopt->max_sdu[i])
+                        continue;
+
+                offset = qopt->mqprio.qopt.offset[i];
+                count = qopt->mqprio.qopt.count[i];
+
+                for (j = offset; j < offset + count; j++)
+                        plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+        }
+}
+
 static int tc_setup_taprio(struct stmmac_priv *priv,
                            struct tc_taprio_qopt_offload *qopt)
 {
@@ -1045,6 +1067,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
         priv->plat->est->ter = qopt->cycle_time_extension;
 
+        tc_taprio_map_maxsdu_txq(priv, qopt);
+
         if (fpe && !priv->dma_cap.fpesel) {
                 mutex_unlock(&priv->plat->est->lock);
                 return -EOPNOTSUPP;
@@ -1126,6 +1150,7 @@ static int tc_query_caps(struct stmmac_priv *priv,
                         return -EOPNOTSUPP;
 
                 caps->gate_mask_per_txq = true;
+                caps->supports_queue_max_sdu = true;
 
                 return 0;
         }
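
The hunk above adds tc_taprio_map_maxsdu_txq(), which translates the
per-traffic-class queueMaxSDU values delivered by taprio into per-Tx-queue
limits. To make the mapping concrete, here is a standalone sketch (not
driver code) for a hypothetical mqprio layout; the queue counts, offsets
and queueMaxSDU values are invented for the example.

#include <stdio.h>

#define ETH_HLEN   14
#define ETH_TLEN   2
#define NUM_QUEUES 4

int main(void)
{
        /* Hypothetical layout: TC0 -> queues 0-1, TC1 -> queue 2, TC2 -> queue 3 */
        unsigned int num_tc = 3;
        unsigned int offset[] = { 0, 2, 3 };
        unsigned int count[]  = { 2, 1, 1 };
        /* queueMaxSDU per TC; 0 means "no limit" and is skipped */
        unsigned int max_sdu[] = { 0, 1000, 200 };
        unsigned int queue_limit[NUM_QUEUES] = { 0 };
        unsigned int i, j, q;

        for (i = 0; i < num_tc; i++) {
                if (!max_sdu[i])
                        continue;

                for (j = offset[i]; j < offset[i] + count[i]; j++)
                        queue_limit[j] = max_sdu[i] + ETH_HLEN - ETH_TLEN;
        }

        for (q = 0; q < NUM_QUEUES; q++)
                printf("queue %u: max skb->len %u%s\n", q, queue_limit[q],
                       queue_limit[q] ? "" : " (unlimited)");
        return 0;
}

Queues left at zero keep the default behaviour (no SW queueMaxSDU check),
matching the !qopt->max_sdu[i] test in the driver.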

include/linux/stmmac.h

@@ -127,6 +127,7 @@ struct stmmac_est {
         u32 gcl_unaligned[EST_GCL];
         u32 gcl[EST_GCL];
         u32 gcl_size;
+        u32 max_sdu[MTL_MAX_TX_QUEUES];
 };
 
 struct stmmac_rxq_cfg {