net: IP6 defrag: use rbtrees in nf_conntrack_reasm.c

Currently, IPv6 defragmentation code drops non-last fragments that
are smaller than 1280 bytes: see
commit 0ed4229b08 ("ipv6: defrag: drop non-last frags smaller than min mtu")

This behavior is not specified in IPv6 RFCs and appears to break
compatibility with some IPv6 implementations, as reported here:
https://www.spinics.net/lists/netdev/msg543846.html

This patch re-uses the common rbtree-based IP defragmentation queueing
and reassembly code for IPv6 defragmentation in nf_conntrack, removing
the 1280-byte restriction.
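
For reference, the flow this patch adopts looks roughly like the sketch
below. The helpers inet_frag_queue_insert(), inet_frag_reasm_prepare()
and inet_frag_reasm_finish() are the real shared ones used in the diff;
the wrapper function name and the exact error codes chosen here are
illustrative only, not kernel source:

	/* Hypothetical sketch of the shared rbtree defrag flow. */
	static int queue_and_maybe_reasm(struct frag_queue *fq,
					 struct sk_buff *skb,
					 int offset, int end)
	{
		struct sk_buff *prev_tail = fq->q.fragments_tail;
		void *reasm_data;
		int err;

		/* Insert into the rbtree of fragment runs; duplicates and
		 * overlaps are detected by the helper (a real caller kills
		 * the whole queue on overlap, per RFC 5722).
		 */
		err = inet_frag_queue_insert(&fq->q, skb, offset, end);
		if (err)
			return err;	/* e.g. IPFRAG_DUP / IPFRAG_OVERLAP */

		fq->q.meat += skb->len;
		if (fq->q.flags != (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) ||
		    fq->q.meat != fq->q.len)
			return -EINPROGRESS;	/* more fragments expected */

		/* Relink the tree into a frag_list hanging off skb, then
		 * fix up lengths, truesize and checksums in one place.
		 */
		reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
		if (!reasm_data)
			return -ENOMEM;
		inet_frag_reasm_finish(&fq->q, skb, reasm_data);
		return 0;	/* skb is now the reassembled datagram */
	}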

Signed-off-by: Peter Oskolkov <posk@google.com>
Reported-by: Tom Herbert <tom@herbertland.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -136,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 }
 #endif
 
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			     struct sk_buff *prev_tail, struct net_device *dev);
+
 static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 {
 	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
@@ -177,9 +180,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
 static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 			     const struct frag_hdr *fhdr, int nhoff)
 {
-	struct sk_buff *prev, *next;
 	unsigned int payload_len;
-	int offset, end;
+	struct net_device *dev;
+	struct sk_buff *prev;
+	int offset, end, err;
 	u8 ecn;
 
 	if (fq->q.flags & INET_FRAG_COMPLETE) {
@@ -254,55 +258,18 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 		goto err;
 	}
 
-	/* Find out which fragments are in front and at the back of us
-	 * in the chain of fragments so far.  We must know where to put
-	 * this fragment, right?
-	 */
-	prev = fq->q.fragments_tail;
-	if (!prev || prev->ip_defrag_offset < offset) {
-		next = NULL;
-		goto found;
-	}
-	prev = NULL;
-	for (next = fq->q.fragments; next != NULL; next = next->next) {
-		if (next->ip_defrag_offset >= offset)
-			break;	/* bingo! */
-		prev = next;
-	}
-
-found:
-	/* RFC5722, Section 4:
-	 *                                  When reassembling an IPv6 datagram, if
-	 *   one or more its constituent fragments is determined to be an
-	 *   overlapping fragment, the entire datagram (and any constituent
-	 *   fragments, including those not yet received) MUST be silently
-	 *   discarded.
-	 */
-
-	/* Check for overlap with preceding fragment. */
-	if (prev &&
-	    (prev->ip_defrag_offset + prev->len) > offset)
-		goto discard_fq;
-
-	/* Look for overlap with succeeding segment. */
-	if (next && next->ip_defrag_offset < end)
-		goto discard_fq;
-
-	/* Note : skb->ip_defrag_offset and skb->dev share the same location */
-	if (skb->dev)
-		fq->iif = skb->dev->ifindex;
+	/* Note : skb->rbnode and skb->dev share the same location. */
+	dev = skb->dev;
 	/* Makes sure compiler wont do silly aliasing games */
 	barrier();
-	skb->ip_defrag_offset = offset;
 
-	/* Insert this fragment in the chain of fragments. */
-	skb->next = next;
-	if (!next)
-		fq->q.fragments_tail = skb;
-	if (prev)
-		prev->next = skb;
-	else
-		fq->q.fragments = skb;
+	prev = fq->q.fragments_tail;
+	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+	if (err)
+		goto insert_error;
+
+	if (dev)
+		fq->iif = dev->ifindex;
 
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
@@ -319,11 +286,25 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->q.flags |= INET_FRAG_FIRST_IN;
 	}
 
-	return 0;
+	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+	    fq->q.meat == fq->q.len) {
+		unsigned long orefdst = skb->_skb_refdst;
+
+		skb->_skb_refdst = 0UL;
+		err = nf_ct_frag6_reasm(fq, skb, prev, dev);
+		skb->_skb_refdst = orefdst;
+		return err;
+	}
+
+	skb_dst_drop(skb);
+	return -EINPROGRESS;
 
-discard_fq:
+insert_error:
+	if (err == IPFRAG_DUP)
+		goto err;
 	inet_frag_kill(&fq->q);
 err:
+	skb_dst_drop(skb);
 	return -EINVAL;
 }
 
@@ -333,147 +314,67 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
  *	It is called with locked fq, and caller must check that
  *	queue is eligible for reassembly i.e. it is not COMPLETE,
  *	the last and the first frames arrived and all the bits are here.
- *
- *	returns true if *prev skb has been transformed into the reassembled
- *	skb, false otherwise.
  */
-static bool
-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_device *dev)
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			     struct sk_buff *prev_tail, struct net_device *dev)
 {
-	struct sk_buff *fp, *head = fq->q.fragments;
-	int    payload_len, delta;
+	void *reasm_data;
+	int payload_len;
 	u8 ecn;
 
 	inet_frag_kill(&fq->q);
 
-	WARN_ON(head == NULL);
-	WARN_ON(head->ip_defrag_offset != 0);
-
 	ecn = ip_frag_ecn_table[fq->ecn];
 	if (unlikely(ecn == 0xff))
-		return false;
+		goto err;
+
+	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+	if (!reasm_data)
+		goto err;
 
-	/* Unfragmented part is taken from the first segment. */
-	payload_len = ((head->data - skb_network_header(head)) -
+	payload_len = ((skb->data - skb_network_header(skb)) -
 		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN) {
 		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
 				    payload_len);
-		return false;
-	}
-
-	delta = - head->truesize;
-
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-		return false;
-
-	delta += head->truesize;
-	if (delta)
-		add_frag_mem_limit(fq->q.net, delta);
-
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		int i, plen = 0;
-
-		clone = alloc_skb(0, GFP_ATOMIC);
-		if (clone == NULL)
-			return false;
-
-		clone->next = head->next;
-		head->next  = clone;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->data_len -= clone->len;
-		head->len -= clone->len;
-		clone->csum = 0;
-		clone->ip_summed = head->ip_summed;
-
-		add_frag_mem_limit(fq->q.net, clone->truesize);
-	}
-
-	/* morph head into last received skb: prev.
-	 *
-	 * This allows callers of ipv6 conntrack defrag to continue
-	 * to use the last skb(frag) passed into the reasm engine.
-	 * The last skb frag 'silently' turns into the full reassembled skb.
-	 *
-	 * Since prev is also part of q->fragments we have to clone it first.
-	 */
-	if (head != prev) {
-		struct sk_buff *iter;
-
-		fp = skb_clone(prev, GFP_ATOMIC);
-		if (!fp)
-			return false;
-
-		fp->next = prev->next;
-
-		iter = head;
-		while (iter) {
-			if (iter->next == prev) {
-				iter->next = fp;
-				break;
-			}
-			iter = iter->next;
-		}
-
-		skb_morph(prev, head);
-		prev->next = head->next;
-		consume_skb(head);
-		head = prev;
+		goto err;
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
 	 * header in order to calculate ICV correctly. */
-	skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
-	memmove(head->head + sizeof(struct frag_hdr), head->head,
-		(head->data - head->head) - sizeof(struct frag_hdr));
-	head->mac_header += sizeof(struct frag_hdr);
-	head->network_header += sizeof(struct frag_hdr);
+	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
+	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+		(skb->data - skb->head) - sizeof(struct frag_hdr));
+	skb->mac_header += sizeof(struct frag_hdr);
+	skb->network_header += sizeof(struct frag_hdr);
 
-	skb_shinfo(head)->frag_list = head->next;
-	skb_reset_transport_header(head);
-	skb_push(head, head->data - skb_network_header(head));
+	skb_reset_transport_header(skb);
 
-	for (fp = head->next; fp; fp = fp->next) {
-		head->data_len += fp->len;
-		head->len += fp->len;
-		if (head->ip_summed != fp->ip_summed)
-			head->ip_summed = CHECKSUM_NONE;
-		else if (head->ip_summed == CHECKSUM_COMPLETE)
-			head->csum = csum_add(head->csum, fp->csum);
-		head->truesize += fp->truesize;
-		fp->sk = NULL;
-	}
-	sub_frag_mem_limit(fq->q.net, head->truesize);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
 
-	head->ignore_df = 1;
-	skb_mark_not_on_list(head);
-	head->dev = dev;
-	head->tstamp = fq->q.stamp;
-	ipv6_hdr(head)->payload_len = htons(payload_len);
-	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
-	IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
+	skb->ignore_df = 1;
+	skb->dev = dev;
+	ipv6_hdr(skb)->payload_len = htons(payload_len);
+	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
 
 	/* Yes, and fold redundant checksum back. 8) */
-	if (head->ip_summed == CHECKSUM_COMPLETE)
-		head->csum = csum_partial(skb_network_header(head),
-					  skb_network_header_len(head),
-					  head->csum);
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_partial(skb_network_header(skb),
+					 skb_network_header_len(skb),
+					 skb->csum);
 
 	fq->q.fragments = NULL;
 	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
+	fq->q.last_run_head = NULL;
 
-	return true;
+	return 0;
+
+err:
+	inet_frag_kill(&fq->q);
+	return -EINVAL;
 }
 
 /*
@@ -542,7 +443,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
 	u16 savethdr = skb->transport_header;
-	struct net_device *dev = skb->dev;
 	int fhoff, nhoff, ret;
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
@@ -565,10 +465,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 	hdr = ipv6_hdr(skb);
 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
 
-	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
-	    fhdr->frag_off & htons(IP6_MF))
-		return -EINVAL;
-
 	skb_orphan(skb);
 	fq = fq_find(net, fhdr->identification, user, hdr,
 		     skb->dev ? skb->dev->ifindex : 0);
@@ -580,31 +476,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 	spin_lock_bh(&fq->q.lock);
 
 	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
-	if (ret < 0) {
-		if (ret == -EPROTO) {
-			skb->transport_header = savethdr;
-			ret = 0;
-		}
-		goto out_unlock;
+	if (ret == -EPROTO) {
+		skb->transport_header = savethdr;
+		ret = 0;
 	}
 
 	/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
 	 * must be returned.
 	 */
-	ret = -EINPROGRESS;
-	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-	    fq->q.meat == fq->q.len) {
-		unsigned long orefdst = skb->_skb_refdst;
-
-		skb->_skb_refdst = 0UL;
-		if (nf_ct_frag6_reasm(fq, skb, dev))
-			ret = 0;
-		skb->_skb_refdst = orefdst;
-	} else {
-		skb_dst_drop(skb);
-	}
-
-out_unlock:
+	if (ret)
+		ret = -EINPROGRESS;
+
 	spin_unlock_bh(&fq->q.lock);
 	inet_frag_put(&fq->q);
 	return ret;
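
After this change, nf_ct_frag6_gather() returns only 0 (skb now holds
the fully reassembled datagram), -EINPROGRESS (the fragment was queued
and the reasm queue owns skb), or another negative error. A netfilter
hook might act on that contract roughly as sketched below; the function
name is hypothetical (the real caller is ipv6_defrag() in
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c):

	/* Hypothetical sketch of a hook consuming the return contract. */
	static unsigned int defrag6_hook_sketch(struct net *net,
						struct sk_buff *skb,
						u32 user)
	{
		int err = nf_ct_frag6_gather(net, skb, user);

		if (err == -EINPROGRESS)
			return NF_STOLEN;	/* queued; engine owns skb */

		return err == 0 ? NF_ACCEPT : NF_DROP;
	}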