bnxt_en: Modify TX ring indexing logic.

Change the TX ring logic so that the index increments unbounded and
is masked only when needed.

Modify the existing macros so that the index is not masked.  Add a
new macro RING_TX() to mask it only when needed to get the index of
txr->tx_buf_ring[].
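
For illustration only (not part of the patch): a minimal user-space
sketch of the same pattern, where the indices grow without bound and a
power-of-two mask is applied only at the point where the ring array is
indexed.  The names below (RING, RING_MASK, buf) are invented for the
example and merely stand in for RING_TX(), bp->tx_ring_mask and
txr->tx_buf_ring[].

	/* Sketch: unbounded indices, masked only on array access.
	 * Assumes the ring size is a power of two so (size - 1) is a valid mask.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 8			/* power of two */
	#define RING_MASK (RING_SIZE - 1)
	#define RING(idx) ((idx) & RING_MASK)	/* stands in for RING_TX(bp, idx) */

	int main(void)
	{
		int buf[RING_SIZE];
		uint16_t prod = 0, cons;

		for (int i = 0; i < 20; i++)
			buf[RING(prod++)] = i;	/* producer index increments unbounded */

		cons = prod - RING_SIZE;	/* the last RING_SIZE entries are valid */
		while (cons != prod)		/* both indices are raw here, so they compare directly */
			printf("%d\n", buf[RING(cons++)]);
		return 0;
	}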

Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20231120234405.194542-11-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 6d1add9553 (parent b9e0c47ee2)
Michael Chan, 2023-11-20 15:44:02 -08:00, committed by Jakub Kicinski
3 changed files with 21 additions and 20 deletions

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -432,9 +432,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
-txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
-tx_buf = &txr->tx_buf_ring[prod];
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->skb = skb;
tx_buf->nr_frags = last_frag;
@@ -522,7 +522,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
prod = NEXT_TX(prod);
tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
-txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
memcpy(txbd, tx_push1, sizeof(*txbd));
prod = NEXT_TX(prod);
tx_push->doorbell =
@@ -569,7 +569,7 @@ normal_tx:
prod = NEXT_TX(prod);
txbd1 = (struct tx_bd_ext *)
-&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
txbd1->tx_bd_hsize_lflags = lflags;
if (skb_is_gso(skb)) {
@@ -610,7 +610,7 @@ normal_tx:
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
-txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
@@ -619,7 +619,7 @@ normal_tx:
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_dma_error;
-tx_buf = &txr->tx_buf_ring[prod];
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -668,7 +668,7 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
-tx_buf = &txr->tx_buf_ring[prod];
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), DMA_TO_DEVICE);
prod = NEXT_TX(prod);
@@ -676,7 +676,7 @@ tx_dma_error:
/* unmap remaining mapped pages */
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
-tx_buf = &txr->tx_buf_ring[prod];
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_TO_DEVICE);
@@ -702,12 +702,12 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 cons = txr->tx_cons;
int tx_pkts = 0;
-while (cons != hw_cons) {
+while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
int j, last;
-tx_buf = &txr->tx_buf_ring[cons];
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
cons = NEXT_TX(cons);
skb = tx_buf->skb;
tx_buf->skb = NULL;
@@ -731,7 +731,7 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
for (j = 0; j < last; j++) {
cons = NEXT_TX(cons);
-tx_buf = &txr->tx_buf_ring[cons];
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
dma_unmap_page(
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h

@@ -689,7 +689,7 @@ struct nqe_cn {
#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
-#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_RING(bp, x) (((x) & (bp)->tx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
@@ -720,7 +720,8 @@ struct nqe_cn {
#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
-#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+#define RING_TX(bp, idx) ((idx) & (bp)->tx_ring_mask)
+#define NEXT_TX(idx) ((idx) + 1)
#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
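
Editorial sketch (not from the patch) of how the adjusted macros decompose
an unbounded index: the ring mask is applied first, then the descriptor
page and the offset within that page are derived.  The constants below
(4K pages, 16-byte TX descriptors, a 1024-entry ring) are assumed for
the example only.

	#include <stdio.h>

	#define PG_SHIFT	12				/* assumed 4K descriptor pages */
	#define BD_SHIFT	4				/* 16-byte TX descriptors */
	#define DESC_CNT	(1 << (PG_SHIFT - BD_SHIFT))	/* 256 descriptors per page */
	#define RING_MASK	1023				/* assumed 1024-entry ring */

	int main(void)
	{
		unsigned int prod = 1500;			/* unbounded software index */
		unsigned int ring = prod & RING_MASK;		/* like RING_TX():  476 */
		unsigned int page = ring >> (PG_SHIFT - BD_SHIFT);	/* like TX_RING(): 1 */
		unsigned int idx  = ring & (DESC_CNT - 1);	/* like TX_IDX():  220 */

		printf("ring %u -> page %u, idx %u\n", ring, page, idx);
		return 0;
	}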

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c

@@ -42,12 +42,12 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
/* fill up the first buffer */
prod = txr->tx_prod;
-tx_buf = &txr->tx_buf_ring[prod];
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->nr_frags = num_frags;
if (xdp)
tx_buf->page = virt_to_head_page(xdp->data);
-txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
flags = (len << TX_BD_LEN_SHIFT) |
((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[len >> 9];
@@ -67,10 +67,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
WRITE_ONCE(txr->tx_prod, prod);
/* first fill up the first buffer */
-frag_tx_buf = &txr->tx_buf_ring[prod];
+frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
frag_tx_buf->page = skb_frag_page(frag);
-txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag);
frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
@@ -139,8 +139,8 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (!budget)
return;
-while (tx_cons != tx_hw_cons) {
-tx_buf = &txr->tx_buf_ring[tx_cons];
+while (RING_TX(bp, tx_cons) != tx_hw_cons) {
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
if (tx_buf->action == XDP_REDIRECT) {
struct pci_dev *pdev = bp->pdev;
@@ -160,7 +160,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
frags = tx_buf->nr_frags;
for (j = 0; j < frags; j++) {
tx_cons = NEXT_TX(tx_cons);
-tx_buf = &txr->tx_buf_ring[tx_cons];
+tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
}
} else {