bnxt_en: Fix page pool logic for page size >= 64K

The RXBD length field on all bnxt chips is 16-bit and so we cannot
support a full page when the native page size is 64K or greater.
The non-XDP (non-page-pool) code path has logic to handle this, but the
XDP page pool code path does not.  Add the missing
logic to use page_pool_dev_alloc_frag() to allocate 32K chunks if
the page size is 64K or greater.
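
As a rough illustration of the allocation pattern being added (a
simplified sketch, not the driver code itself; pool setup is abbreviated
and only the page_pool calls are real API), a pool created with
PP_FLAG_PAGE_FRAG can hand out BNXT_RX_PAGE_SIZE (32K) fragments of a
single 64K page via page_pool_dev_alloc_frag(), which also reports the
fragment's offset within the page so the DMA mapping can cover just that
32K chunk:

	struct page_pool_params pp = { 0 };
	struct page_pool *pool;
	unsigned int offset = 0;
	struct page *page;

	/* ... other pp fields (dev, pool_size, etc.) set as usual ... */
	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
		pp.flags |= PP_FLAG_PAGE_FRAG;	/* allow sub-page fragments */
	pool = page_pool_create(&pp);		/* error handling omitted */

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
		/* carve a 32K fragment; 'offset' is set to its position in the page */
		page = page_pool_dev_alloc_frag(pool, &offset, BNXT_RX_PAGE_SIZE);
	else
		/* page size and RX buffer size match; take the whole page */
		page = page_pool_dev_alloc_pages(pool);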

Fixes: 9f4b28301c ("bnxt: XDP multibuffer enablement")
Link: https://lore.kernel.org/netdev/20230728231829.235716-2-michael.chan@broadcom.com/
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20230731142043.58855-2-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 files changed, 29 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -699,17 +699,24 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 					 struct bnxt_rx_ring_info *rxr,
+					 unsigned int *offset,
 					 gfp_t gfp)
 {
 	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
-	page = page_pool_dev_alloc_pages(rxr->page_pool);
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
+						BNXT_RX_PAGE_SIZE);
+	} else {
+		page = page_pool_dev_alloc_pages(rxr->page_pool);
+		*offset = 0;
+	}
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
-				      DMA_ATTR_WEAK_ORDERING);
+	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
+				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 	if (dma_mapping_error(dev, *mapping)) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
@@ -749,15 +756,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	dma_addr_t mapping;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
+		unsigned int offset;
 		struct page *page =
-			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
 		if (!page)
 			return -ENOMEM;
 
 		mapping += bp->rx_dma_offset;
 		rx_buf->data = page;
-		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
 	} else {
 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
 
@@ -817,7 +825,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	unsigned int offset = 0;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
-		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
 		if (!page)
 			return -ENOMEM;
@@ -964,15 +972,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
-	skb = build_skb(page_address(page), PAGE_SIZE);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
 	skb_mark_for_recycle(skb);
-	skb_reserve(skb, bp->rx_dma_offset);
+	skb_reserve(skb, bp->rx_offset);
 	__skb_put(skb, len);
 
 	return skb;
@@ -998,8 +1006,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1012,7 +1020,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
 	skb_mark_for_recycle(skb);
 	off = (void *)data_ptr - page_address(page);
-	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
 	       payload + NET_IP_ALIGN);
@@ -1143,7 +1151,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 
 	skb->data_len += total_frag_len;
 	skb->len += total_frag_len;
-	skb->truesize += PAGE_SIZE * agg_bufs;
+	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
 	return skb;
 }
@@ -2945,8 +2953,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		rx_buf->data = NULL;
 		if (BNXT_RX_PAGE_MODE(bp)) {
 			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
-					     bp->rx_dir,
+			dma_unmap_page_attrs(&pdev->dev, mapping,
+					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
 					     DMA_ATTR_WEAK_ORDERING);
 			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
@@ -3215,6 +3223,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
 	pp.dma_dir = DMA_BIDIRECTIONAL;
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
+		pp.flags |= PP_FLAG_PAGE_FRAG;
 
 	rxr->page_pool = page_pool_create(&pp);
 	if (IS_ERR(rxr->page_pool)) {

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -186,8 +186,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			u16 cons, u8 *data_ptr, unsigned int len,
 			struct xdp_buff *xdp)
 {
+	u32 buflen = BNXT_RX_PAGE_SIZE;
 	struct bnxt_sw_rx_bd *rx_buf;
-	u32 buflen = PAGE_SIZE;
 	struct pci_dev *pdev;
 	dma_addr_t mapping;
 	u32 offset;
@@ -303,7 +303,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		rx_buf = &rxr->rx_buf_ring[cons];
 		mapping = rx_buf->mapping - bp->rx_dma_offset;
 		dma_unmap_page_attrs(&pdev->dev, mapping,
-				     PAGE_SIZE, bp->rx_dir,
+				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
 				     DMA_ATTR_WEAK_ORDERING);
 
 		/* if we are unable to allocate a new buffer, abort and reuse */
@@ -486,7 +486,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
 	}
 	xdp_update_skb_shared_info(skb, num_frags,
 				   sinfo->xdp_frags_size,
-				   PAGE_SIZE * sinfo->nr_frags,
+				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
 				   xdp_buff_is_frag_pfmemalloc(xdp));
 	return skb;
 }