Merge branch 'skb_frag_fill_page_desc'

Yunsheng Lin says:

====================
net: introduce skb_frag_fill_page_desc()

Most users call __skb_frag_set_page()/skb_frag_off_set()/
skb_frag_size_set() together to fill the page descriptor of a
skb frag. It makes little sense to call __skb_frag_set_page()
without also calling skb_frag_off_set(), as the offset may
depend on whether the page is a head page or a tail page, so
add skb_frag_fill_page_desc() to fill the whole page descriptor
of a skb frag in one call.
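
As an illustration, the conversion applied throughout this series
replaces the old three-call pattern with a single call. A minimal
sketch, with an illustrative function and variable names that are
not taken verbatim from any one driver in the series:

#include <linux/skbuff.h>

/* Add one page-backed fragment to a shared info area using the
 * new single-call helper.
 */
static void example_add_frag(struct skb_shared_info *shinfo,
			     struct page *page, unsigned int off,
			     unsigned int len)
{
	skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags++];

	/* Previously this took three separate calls:
	 *	__skb_frag_set_page(frag, page);
	 *	skb_frag_off_set(frag, off);
	 *	skb_frag_size_set(frag, len);
	 */
	skb_frag_fill_page_desc(frag, page, off, len);
}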

In the future, we can make sure the page in the frag is the
head page of a compound page or a base page. If it is not, we
may warn about that, convert the tail page to its head page and
update the offset accordingly. Whenever such a warning fires,
the caller can then be fixed to fill the head page into the
frag. Once all callers are fixed, the warning and the
conversion can be removed.
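
A hedged sketch of what that future check could look like. This is
hypothetical and not part of this series; the helper name and the
WARN_ON_ONCE() placement are illustrative only (assumes
<linux/skbuff.h> and <linux/mm.h>):

/* Hypothetical variant: warn on tail pages and fold them back
 * into their head page, adjusting the offset to match.
 */
static inline void skb_frag_fill_page_desc_checked(skb_frag_t *frag,
						   struct page *page,
						   int off, int size)
{
	if (WARN_ON_ONCE(PageTail(page))) {
		/* Compute the byte offset of the tail page within
		 * its compound page before switching to the head.
		 */
		off += (page - compound_head(page)) * PAGE_SIZE;
		page = compound_head(page);
	}
	skb_frag_fill_page_desc(frag, page, off, size);
}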

In this way, we can remove the compound_head() call, or the use
of page_ref_*(), in cases like the below:
https://elixir.bootlin.com/linux/latest/source/net/core/page_pool.c#L881
https://elixir.bootlin.com/linux/latest/source/include/linux/skbuff.h#L3383

It may also make it easier to convert the net stack to use folios.

V1: repost with all the ack/review tags included.
RFC: remove a local variable as pointed out by Simon.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2023-05-13 19:47:56 +01:00
commit d5e7d19683
20 changed files with 65 additions and 109 deletions

@@ -532,10 +532,10 @@ static bool aq_add_rx_fragment(struct device *dev,
buff_->rxdata.pg_off,
buff_->len,
DMA_FROM_DEVICE);
skb_frag_off_set(frag, buff_->rxdata.pg_off);
skb_frag_size_set(frag, buff_->len);
sinfo->xdp_frags_size += buff_->len;
__skb_frag_set_page(frag, buff_->rxdata.page);
skb_frag_fill_page_desc(frag, buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len);
buff_->is_cleaned = 1;

@@ -2955,7 +2955,6 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
shinfo = skb_shinfo(skb);
shinfo->nr_frags--;
page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
cons_rx_pg->page = page;
dev_kfree_skb(skb);

@@ -1085,9 +1085,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
skb_frag_off_set(frag, cons_rx_buf->offset);
skb_frag_size_set(frag, frag_len);
__skb_frag_set_page(frag, cons_rx_buf->page);
skb_frag_fill_page_desc(frag, cons_rx_buf->page,
cons_rx_buf->offset, frag_len);
shinfo->nr_frags = i + 1;
__clear_bit(cons, rxr->rx_agg_bmap);
@@ -1103,10 +1102,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
xdp_buff_set_frag_pfmemalloc(xdp);
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
unsigned int nr_frags;
nr_frags = --shinfo->nr_frags;
__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
--shinfo->nr_frags;
cons_rx_buf->page = page;
/* Update prod since possibly some pages have been

@@ -2184,9 +2184,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
len -= offset;
rx_frag += nr_frags;
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
skb_frag_size_set(rx_frag, len);
skb_frag_fill_page_desc(rx_frag, sd->pg_chunk.page,
sd->pg_chunk.offset + offset, len);
skb->len += len;
skb->data_len += len;

@@ -2343,11 +2343,10 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
hdr_len = ETH_HLEN;
memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
skb_frag_set_page(skb, 0, page_info->page);
skb_frag_off_set(&skb_shinfo(skb)->frags[0],
page_info->page_offset + hdr_len);
skb_frag_size_set(&skb_shinfo(skb)->frags[0],
curr_frag_len - hdr_len);
skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[0],
page_info->page,
page_info->page_offset + hdr_len,
curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -2369,16 +2368,17 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
if (page_info->page_offset == 0) {
/* Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
skb_frag_off_set(&skb_shinfo(skb)->frags[j],
page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
page_info->page,
page_info->page_offset,
curr_frag_len);
skb_shinfo(skb)->nr_frags++;
} else {
put_page(page_info->page);
skb_frag_size_add(&skb_shinfo(skb)->frags[j],
curr_frag_len);
}
skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
skb->truesize += rx_frag_size;
@@ -2451,14 +2451,16 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
if (i == 0 || page_info->page_offset == 0) {
/* First frag or Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
skb_frag_off_set(&skb_shinfo(skb)->frags[j],
page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
page_info->page,
page_info->page_offset,
curr_frag_len);
} else {
put_page(page_info->page);
skb_frag_size_add(&skb_shinfo(skb)->frags[j],
curr_frag_len);
}
skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
memset(page_info, 0, sizeof(*page_info));

@@ -1445,9 +1445,8 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
xdp_buff_set_frag_pfmemalloc(xdp_buff);
frag = &shinfo->frags[shinfo->nr_frags];
skb_frag_off_set(frag, rx_swbd->page_offset);
skb_frag_size_set(frag, size);
__skb_frag_set_page(frag, rx_swbd->page);
skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset,
size);
shinfo->nr_frags++;
}

@@ -323,9 +323,8 @@ static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
if (ref_ok)
ref_ok |= buf->node;
__skb_frag_set_page(frags, buf->page);
skb_frag_off_set(frags, q->buf_offset);
skb_frag_size_set(frags++, frag_len);
skb_frag_fill_page_desc(frags++, buf->page, q->buf_offset,
frag_len);
tot_len -= frag_len;
if (!tot_len)

@@ -2376,9 +2376,8 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
skb_frag_fill_page_desc(frag, page,
pp->rx_offset_correction, data_len);
if (!xdp_buff_has_frags(xdp)) {
sinfo->xdp_frags_size = *size;

@@ -491,9 +491,7 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
}
frag = &sinfo->frags[sinfo->nr_frags++];
__skb_frag_set_page(frag, frag_page->page);
skb_frag_off_set(frag, frag_offset);
skb_frag_size_set(frag, len);
skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);
if (page_is_pfmemalloc(frag_page->page))
xdp_buff_set_frag_pfmemalloc(xdp);

@@ -1998,10 +1998,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb->truesize += hlen - swivel;
skb->len += hlen - swivel;
__skb_frag_set_page(frag, page->buffer);
skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
__skb_frag_ref(frag);
skb_frag_off_set(frag, off);
skb_frag_size_set(frag, hlen - swivel);
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
@@ -2024,10 +2022,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb->len += hlen;
frag++;
__skb_frag_set_page(frag, page->buffer);
skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
__skb_frag_ref(frag);
skb_frag_off_set(frag, 0);
skb_frag_size_set(frag, hlen);
RX_USED_ADD(page, hlen + cp->crc_size);
}

@@ -1272,9 +1272,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
}
frag = &shinfo->frags[shinfo->nr_frags++];
__skb_frag_set_page(frag, page);
skb_frag_off_set(frag, offset);
skb_frag_size_set(frag, len);
skb_frag_fill_page_desc(frag, page, offset, len);
if (page_is_pfmemalloc(page))
xdp_buff_set_frag_pfmemalloc(xdp);

@@ -686,9 +686,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
__skb_frag_set_page(frag, rbi->page);
skb_frag_off_set(frag, 0);
skb_frag_size_set(frag, rcd->len);
skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
skb->data_len += rcd->len;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;

@@ -1128,9 +1128,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
BUG();
offset += len;
__skb_frag_set_page(&frags[i], page);
skb_frag_off_set(&frags[i], 0);
skb_frag_size_set(&frags[i], len);
skb_frag_fill_page_desc(&frags[i], page, 0, len);
}
/* Release all the original (foreign) frags. */

@@ -2411,6 +2411,15 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
return skb_headlen(skb) + __skb_pagelen(skb);
}
static inline void skb_frag_fill_page_desc(skb_frag_t *frag,
struct page *page,
int off, int size)
{
frag->bv_page = page;
frag->bv_offset = off;
skb_frag_size_set(frag, size);
}
static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
int i, struct page *page,
int off, int size)
@@ -2422,9 +2431,7 @@ static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
* that not all callers have unique ownership of the page but rely
* on page_is_pfmemalloc doing the right thing(tm).
*/
frag->bv_page = page;
frag->bv_offset = off;
skb_frag_size_set(frag, size);
skb_frag_fill_page_desc(frag, page, off, size);
}
/**
@@ -3484,32 +3491,6 @@ static inline void skb_frag_page_copy(skb_frag_t *fragto,
fragto->bv_page = fragfrom->bv_page;
}
/**
* __skb_frag_set_page - sets the page contained in a paged fragment
* @frag: the paged fragment
* @page: the page to set
*
* Sets the fragment @frag to contain @page.
*/
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
frag->bv_page = page;
}
/**
* skb_frag_set_page - sets the page contained in a paged fragment of an skb
* @skb: the buffer
* @f: the fragment offset
* @page: the page to set
*
* Sets the @f'th fragment of @skb to contain @page.
*/
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
struct page *page)
{
__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
/**

@@ -1415,11 +1415,10 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
}
frag = &sinfo->frags[sinfo->nr_frags++];
__skb_frag_set_page(frag, page);
data_len = min_t(u32, kattr->test.data_size_in - size,
PAGE_SIZE);
skb_frag_size_set(frag, data_len);
skb_frag_fill_page_desc(frag, page, 0, data_len);
if (copy_from_user(page_address(page), data_in + size,
data_len)) {

@@ -239,9 +239,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
__skb_frag_set_page(frag, page);
skb_frag_off_set(frag, first_offset);
skb_frag_size_set(frag, first_size);
skb_frag_fill_page_desc(frag, page, first_offset, first_size);
memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
/* We dont need to clear skbinfo->nr_frags here */

@@ -2785,14 +2785,17 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
break;
}
get_page(pkt_dev->page);
skb_frag_set_page(skb, i, pkt_dev->page);
skb_frag_off_set(&skb_shinfo(skb)->frags[i], 0);
/*last fragment, fill rest of data*/
if (i == (frags - 1))
skb_frag_size_set(&skb_shinfo(skb)->frags[i],
(datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
pkt_dev->page, 0,
(datalen < PAGE_SIZE ?
datalen : PAGE_SIZE));
else
skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
pkt_dev->page, 0, frag_len);
datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);

@@ -4234,10 +4234,9 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
struct page *page;
page = virt_to_head_page(frag_skb->head);
__skb_frag_set_page(&head_frag, page);
skb_frag_off_set(&head_frag, frag_skb->data -
(unsigned char *)page_address(page));
skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
skb_frag_fill_page_desc(&head_frag, page, frag_skb->data -
(unsigned char *)page_address(page),
skb_headlen(frag_skb));
return head_frag;
}

@@ -268,9 +268,8 @@ static void tls_append_frag(struct tls_record_info *record,
skb_frag_size_add(frag, size);
} else {
++frag;
__skb_frag_set_page(frag, pfrag->page);
skb_frag_off_set(frag, pfrag->offset);
skb_frag_size_set(frag, size);
skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
size);
++record->num_frags;
get_page(pfrag->page);
}
@@ -357,9 +356,8 @@ static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
return -ENOMEM;
frag = &record->frags[0];
__skb_frag_set_page(frag, pfrag->page);
skb_frag_off_set(frag, pfrag->offset);
skb_frag_size_set(frag, prepend_size);
skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
prepend_size);
get_page(pfrag->page);
pfrag->offset += prepend_size;

@@ -74,14 +74,11 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
if (!page)
return -ENOMEM;
__skb_frag_set_page(frag, page);
len = PAGE_SIZE;
if (dlen < len)
len = dlen;
skb_frag_off_set(frag, 0);
skb_frag_size_set(frag, len);
skb_frag_fill_page_desc(frag, page, 0, len);
memcpy(skb_frag_address(frag), scratch, len);
skb->truesize += len;