diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5ca9906d7c6a..7bb02a9da2a6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -548,6 +548,7 @@ enum {
 enum fec_txbuf_type {
 	FEC_TXBUF_T_SKB,
 	FEC_TXBUF_T_XDP_NDO,
+	FEC_TXBUF_T_XDP_TX,
 };
 
 struct fec_tx_buffer {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3bd0bf03aedb..9bba3d7c949c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -69,6 +69,7 @@
 #include <soc/imx/cpuidle.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 #include <asm/cacheflush.h>
 
@@ -76,6 +77,9 @@
 
 static void set_multicast_list(struct net_device *ndev);
 static void fec_enet_itr_coal_set(struct net_device *ndev);
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+				int cpu, struct xdp_buff *xdp,
+				u32 dma_sync_len);
 
 #define DRIVER_NAME	"fec"
 
@@ -961,7 +965,8 @@ static void fec_enet_bd_init(struct net_device *dev)
 				txq->tx_buf[i].skb = NULL;
 			}
 		} else {
-			if (bdp->cbd_bufaddr)
+			if (bdp->cbd_bufaddr &&
+			    txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO)
 				dma_unmap_single(&fep->pdev->dev,
 						 fec32_to_cpu(bdp->cbd_bufaddr),
 						 fec16_to_cpu(bdp->cbd_datlen),
@@ -1424,13 +1429,14 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			break;
 
 		xdpf = txq->tx_buf[index].xdp;
-		if (bdp->cbd_bufaddr)
+		if (bdp->cbd_bufaddr &&
+		    txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO)
 			dma_unmap_single(&fep->pdev->dev,
 					 fec32_to_cpu(bdp->cbd_bufaddr),
 					 fec16_to_cpu(bdp->cbd_datlen),
 					 DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!xdpf) {
+		if (unlikely(!xdpf)) {
 			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
 			goto tx_buf_done;
 		}
@@ -1483,7 +1489,16 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			/* Free the sk buffer associated with this last transmit */
 			dev_kfree_skb_any(skb);
 		} else {
-			xdp_return_frame(xdpf);
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+				xdp_return_frame_rx_napi(xdpf);
+			} else { /* recycle pages of XDP_TX frames */
+				struct page *page = virt_to_head_page(xdpf->data);
+
+				/* The dma_sync_size = 0 as XDP_TX has already
+				 * synced DMA for_device.
+				 */
+				page_pool_put_page(page->pp, page, 0, true);
+			}
 
 			txq->tx_buf[index].xdp = NULL;
 			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
@@ -1542,7 +1557,7 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 
 static u32
 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
-		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
 {
 	unsigned int sync, len = xdp->data_end - xdp->data;
 	u32 ret = FEC_ENET_XDP_PASS;
@@ -1552,8 +1567,10 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
 
 	act = bpf_prog_run_xdp(prog, xdp);
 
-	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
-	sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+	/* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
+	 * max len CPU touch
+	 */
+	sync = xdp->data_end - xdp->data;
 	sync = max(sync, len);
 
 	switch (act) {
@@ -1574,11 +1591,19 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
 		}
 		break;
 
-	default:
-		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
-		fallthrough;
-
 	case XDP_TX:
+		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
+		if (unlikely(err)) {
+			ret = FEC_ENET_XDP_CONSUMED;
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(rxq->page_pool, page, sync, true);
+			trace_xdp_exception(fep->netdev, prog, act);
+		} else {
+			ret = FEC_ENET_XDP_TX;
+		}
+		break;
+
+	default:
 		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
 		fallthrough;
 
@@ -1620,6 +1645,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
 	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
 	u32 data_start = FEC_ENET_XDP_HEADROOM;
+	int cpu = smp_processor_id();
 	struct xdp_buff xdp;
 	struct page *page;
 	u32 sub_len = 4;
@@ -1698,7 +1724,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		/* subtract 16bit shift and FCS */
 		xdp_prepare_buff(&xdp, page_address(page),
 				 data_start, pkt_len - sub_len, false);
-		ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+		ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
 		xdp_result |= ret;
 		if (ret != FEC_ENET_XDP_PASS)
 			goto rx_processing_done;
@@ -3767,7 +3793,8 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
 
 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 				   struct fec_enet_priv_tx_q *txq,
-				   struct xdp_frame *frame)
+				   struct xdp_frame *frame,
+				   u32 dma_sync_len, bool ndo_xmit)
 {
 	unsigned int index, status, estatus;
 	struct bufdesc *bdp;
@@ -3787,10 +3814,24 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 
 	index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-	dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
-				  frame->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&fep->pdev->dev, dma_addr))
-		return -ENOMEM;
+	if (ndo_xmit) {
+		dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
+					  frame->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+			return -ENOMEM;
+
+		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+	} else {
+		struct page *page = virt_to_page(frame->data);
+
+		dma_addr = page_pool_get_dma_addr(page) + sizeof(*frame) +
+			   frame->headroom;
+		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
+					   dma_sync_len, DMA_BIDIRECTIONAL);
+		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
+	}
+
+	txq->tx_buf[index].xdp = frame;
 
 	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
 	if (fep->bufdesc_ex)
@@ -3809,9 +3850,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
-	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
-	txq->tx_buf[index].xdp = frame;
-
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
 	 */
@@ -3837,6 +3875,33 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	return 0;
 }
 
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+				int cpu, struct xdp_buff *xdp,
+				u32 dma_sync_len)
+{
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+	struct fec_enet_priv_tx_q *txq;
+	struct netdev_queue *nq;
+	int queue, ret;
+
+	if (unlikely(!xdpf))
+		return -EFAULT;
+
+	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+	txq = fep->tx_queue[queue];
+	nq = netdev_get_tx_queue(fep->netdev, queue);
+
+	__netif_tx_lock(nq, cpu);
+
+	/* Avoid tx timeout as XDP shares the queue with kernel stack */
+	txq_trans_cond_update(nq);
+	ret = fec_enet_txq_xmit_frame(fep, txq, xdpf, dma_sync_len, false);
+
+	__netif_tx_unlock(nq);
+
+	return ret;
+}
+
 static int fec_enet_xdp_xmit(struct net_device *dev,
 			     int num_frames,
 			     struct xdp_frame **frames,
@@ -3859,7 +3924,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 	/* Avoid tx timeout as XDP shares the queue with kernel stack */
 	txq_trans_cond_update(nq);
 	for (i = 0; i < num_frames; i++) {
-		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
+		if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
 			break;
 		sent_frames++;
 	}
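
For reference, a minimal XDP program that exercises the new XDP_TX path
(an illustrative sketch, not part of this patch; the program and object
file names are made up). Attached to the FEC interface with, for
example, "ip link set dev eth0 xdpdrv obj xdp_tx_reflect.o sec xdp", it
reflects every received frame back out the same port, so each packet
goes through fec_enet_xdp_tx_xmit() and its page is recycled by the
page pool on TX completion:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_reflect(struct xdp_md *ctx)
{
	/* Bounce the frame back out the ingress port; with this patch
	 * the driver syncs and re-uses the page-pool page instead of
	 * calling dma_map_single() as the ndo_xdp_xmit() path does.
	 */
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";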