net: mana: Reuse XDP dropped page

Reuse the dropped page in the RX path to save page allocation
overhead.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Haiyang Zhang 2022-01-28 18:03:38 -08:00 committed by David S. Miller
parent d356abb95b
commit a6bf5703f1
2 changed files with 14 additions and 2 deletions

View file

@@ -310,6 +310,7 @@ struct mana_rxq {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
struct page *xdp_save_page;
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.

View file

@@ -1059,7 +1059,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
u64_stats_update_end(&rx_stats->syncp);
drop:
free_page((unsigned long)buf_va);
WARN_ON_ONCE(rxq->xdp_save_page);
rxq->xdp_save_page = virt_to_page(buf_va);
++ndev->stats.rx_dropped;
return;
@@ -1116,7 +1118,13 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
new_page = alloc_page(GFP_ATOMIC);
/* Reuse XDP dropped page if available */
if (rxq->xdp_save_page) {
new_page = rxq->xdp_save_page;
rxq->xdp_save_page = NULL;
} else {
new_page = alloc_page(GFP_ATOMIC);
}
if (new_page) {
da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
@@ -1403,6 +1411,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
mana_deinit_cq(apc, &rxq->rx_cq);
if (rxq->xdp_save_page)
__free_page(rxq->xdp_save_page);
for (i = 0; i < rxq->num_rx_buf; i++) {
rx_oob = &rxq->rx_oobs[i];