eth: bnxt: fix counting packets discarded due to OOM and netpoll

[ Upstream commit 7301177307 ]

I added OOM and netpoll discard counters, naively assuming that
the cpr pointer is pointing to a common completion ring.
Turns out that is usually *a* completion ring but not *the*
completion ring which bnapi->cp_ring points to. bnapi->cp_ring
is where the stats are read from, so we end up reporting 0
through ethtool -S and qstat even though the drop events have happened.
Make 100% sure we're recording statistics in the correct structure.

Fixes: 907fd4a294 ("bnxt: count discards due to memory allocation errors")
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20240424002148.3937059-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
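
The fix hinges on the relationship between the two structures involved.
A minimal sketch of the layout (abbreviated; the real definitions in
bnxt.h carry many more fields):

	struct bnxt_cp_ring_info {
		struct bnxt_napi	*bnapi;		/* back-pointer to the owning NAPI context */
		struct bnxt_sw_stats	sw_stats;	/* per-ring software counters */
		/* ... */
	};

	struct bnxt_napi {
		struct bnxt_cp_ring_info	cp_ring;	/* stats are read from here */
		/* ... */
	};

On chips that split RX and TX completions onto separate sub-rings, the
cpr argument handed to these functions may point at one of the sub-rings
rather than at bnapi->cp_ring, so cpr->sw_stats is not the instance that
ethtool -S reports; cpr->bnapi->cp_ring.sw_stats always is.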

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1735,7 +1735,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
 		if (!skb) {
 			bnxt_abort_tpa(cpr, idx, agg_bufs);
-			cpr->sw_stats.rx.rx_oom_discards += 1;
+			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
 			return NULL;
 		}
 	} else {
@@ -1745,7 +1745,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
 		if (!new_data) {
 			bnxt_abort_tpa(cpr, idx, agg_bufs);
-			cpr->sw_stats.rx.rx_oom_discards += 1;
+			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
 			return NULL;
 		}
 
@ -1761,7 +1761,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) { if (!skb) {
skb_free_frag(data); skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs); bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1; cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL; return NULL;
} }
skb_reserve(skb, bp->rx_offset); skb_reserve(skb, bp->rx_offset);
@@ -1772,7 +1772,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
 		if (!skb) {
 			/* Page reuse already handled by bnxt_rx_pages(). */
-			cpr->sw_stats.rx.rx_oom_discards += 1;
+			cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
 			return NULL;
 		}
 	}
@@ -2051,11 +2051,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 			u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
 							     cp_cons, agg_bufs,
 							     false);
-			if (!frag_len) {
-				cpr->sw_stats.rx.rx_oom_discards += 1;
-				rc = -ENOMEM;
-				goto next_rx;
-			}
+			if (!frag_len)
+				goto oom_next_rx;
 		}
 		xdp_active = true;
 	}
@@ -2078,9 +2075,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 				else
 					bnxt_xdp_buff_frags_free(rxr, &xdp);
 			}
-			cpr->sw_stats.rx.rx_oom_discards += 1;
-			rc = -ENOMEM;
-			goto next_rx;
+			goto oom_next_rx;
 		}
 	} else {
 		u32 payload;
@@ -2091,29 +2086,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 			payload = 0;
 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
 				      payload | len);
-		if (!skb) {
-			cpr->sw_stats.rx.rx_oom_discards += 1;
-			rc = -ENOMEM;
-			goto next_rx;
-		}
+		if (!skb)
+			goto oom_next_rx;
 	}
 
 	if (agg_bufs) {
 		if (!xdp_active) {
 			skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
-			if (!skb) {
-				cpr->sw_stats.rx.rx_oom_discards += 1;
-				rc = -ENOMEM;
-				goto next_rx;
-			}
+			if (!skb)
+				goto oom_next_rx;
 		} else {
 			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
 			if (!skb) {
 				/* we should be able to free the old skb here */
 				bnxt_xdp_buff_frags_free(rxr, &xdp);
-				cpr->sw_stats.rx.rx_oom_discards += 1;
-				rc = -ENOMEM;
-				goto next_rx;
+				goto oom_next_rx;
 			}
 		}
 	}
@@ -2191,6 +2178,11 @@ next_rx_no_prod_no_len:
 	*raw_cons = tmp_raw_cons;
 
 	return rc;
+
+oom_next_rx:
+	cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+	rc = -ENOMEM;
+	goto next_rx;
 }
 
 /* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2237,7 +2229,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
 	}
 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
 	if (rc && rc != -EBUSY)
-		cpr->sw_stats.rx.rx_netpoll_discards += 1;
+		cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
 
 	return rc;
 }
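
Why the old increments were invisible: when the driver aggregates its
software stats it walks only each NAPI's cp_ring, so counts stored in a
sub-ring's own sw_stats never enter the sum. A simplified sketch of that
read side (total_rx_oom_discards is an illustrative accumulator, not a
field in the driver):

	for (i = 0; i < bp->cp_nr_rings; i++) {
		/* only the per-NAPI ring is visited, never the sub-rings */
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;

		total_rx_oom_discards += cpr->sw_stats.rx.rx_oom_discards;
	}

Routing every increment through cpr->bnapi->cp_ring.sw_stats, as this
patch does, guarantees the drops land in the structure this loop reads.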