mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'tunnel-fixes'
Jesse Gross says:

====================
Tunneling fixes

This series fixes a problem that was reported where encapsulated packets
do not have their encapsulation offload markers stripped off when being
decapsulated. This causes a significant performance drop if the packets
are later retransmitted.

Fixing this revealed two other bugs which are also addressed as
prerequisites:
 * GRO can aggregate packets for multiple layers of encapsulation which
   the stack cannot properly handle.
 * IPIP packets which are combined by GRO are not marked properly with
   their GSO type.

Note that this is based off the net-next tree as the current target for
bug fixes.

v2: No code changes, just additional information in commit messages and
a new cover letter.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 4320f21935
10 changed files with 80 additions and 14 deletions
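The series below does three things: it strips encapsulation offload state when a tunnel header is removed (the new iptunnel_pull_offloads() helper), it refuses to build GRO packets with more than one level of encapsulation (the new encap_mark bit), and it marks GRO'd IPIP/SIT packets with their GSO type. As a quick illustration of the second point, here is a minimal user-space sketch of the guard each tunnel GRO receive handler applies in the patches. This is not kernel code and not part of the series; struct gro_cb and tunnel_gro_receive() are made-up names standing in for NAPI_GRO_CB(skb) and the per-tunnel gro_receive callbacks.

/*
 * Toy model of the encap_mark guard: the first tunnel layer marks the
 * packet, and any deeper tunnel layer sees the mark and flushes instead
 * of aggregating a second level of encapsulation.
 */
#include <stdbool.h>
#include <stdio.h>

struct gro_cb {
        unsigned int encap_mark:1;      /* already went through tunnel GRO */
        unsigned int flush:1;           /* do not aggregate, hand to the stack */
};

/* Returns true if this tunnel layer may aggregate the packet. */
static bool tunnel_gro_receive(struct gro_cb *cb, const char *layer)
{
        if (cb->encap_mark) {
                /* Second (or deeper) level of encapsulation: give up. */
                cb->flush = 1;
                printf("%s: already encapsulated once, flushing\n", layer);
                return false;
        }

        cb->encap_mark = 1;
        printf("%s: first tunnel layer, aggregating\n", layer);
        return true;
}

int main(void)
{
        struct gro_cb cb = { 0 };

        tunnel_gro_receive(&cb, "outer tunnel");        /* aggregates */
        tunnel_gro_receive(&cb, "inner tunnel");        /* flushes */
        return 0;
}

In the actual patches this is the encap_mark check added to ipip_gro_receive(), sit_gro_receive(), gre_gro_receive() and udp_gro_receive() below.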
include/linux/netdevice.h
@@ -2096,8 +2096,8 @@ struct napi_gro_cb {
 	/* This is non-zero if the packet may be of the same flow. */
 	u8	same_flow:1;
 
-	/* Used in udp_gro_receive */
-	u8	udp_mark:1;
+	/* Used in tunnel GRO receive */
+	u8	encap_mark:1;
 
 	/* GRO checksum is valid */
 	u8	csum_valid:1;
include/net/ip_tunnels.h
@@ -305,6 +305,22 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
 
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
 
+static inline int iptunnel_pull_offloads(struct sk_buff *skb)
+{
+	if (skb_is_gso(skb)) {
+		int err;
+
+		err = skb_unclone(skb, GFP_ATOMIC);
+		if (unlikely(err))
+			return err;
+		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
+					       NETIF_F_GSO_SHIFT);
+	}
+
+	skb->encapsulation = 0;
+	return 0;
+}
+
 static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
 {
 	if (pkt_len > 0) {
net/core/dev.c
@@ -4438,7 +4438,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	NAPI_GRO_CB(skb)->same_flow = 0;
 	NAPI_GRO_CB(skb)->flush = 0;
 	NAPI_GRO_CB(skb)->free = 0;
-	NAPI_GRO_CB(skb)->udp_mark = 0;
+	NAPI_GRO_CB(skb)->encap_mark = 0;
 	NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
 	/* Setup for GRO checksum validation */
net/ipv4/af_inet.c
@@ -1380,6 +1380,19 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	return pp;
 }
 
+static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	if (NAPI_GRO_CB(skb)->encap_mark) {
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	NAPI_GRO_CB(skb)->encap_mark = 1;
+
+	return inet_gro_receive(head, skb);
+}
+
 #define SECONDS_PER_DAY	86400
 
 /* inet_current_timestamp - Return IP network timestamp
@@ -1448,6 +1461,13 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
 	return err;
 }
 
+static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	skb->encapsulation = 1;
+	skb_shinfo(skb)->gso_type |= SKB_GSO_IPIP;
+	return inet_gro_complete(skb, nhoff);
+}
+
 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 			 unsigned short type, unsigned char protocol,
 			 struct net *net)
@@ -1675,8 +1695,8 @@ static struct packet_offload ip_packet_offload __read_mostly = {
 static const struct net_offload ipip_offload = {
 	.callbacks = {
 		.gso_segment	= inet_gso_segment,
-		.gro_receive	= inet_gro_receive,
-		.gro_complete	= inet_gro_complete,
+		.gro_receive	= ipip_gro_receive,
+		.gro_complete	= ipip_gro_complete,
 	},
 };
net/ipv4/fou.c
@@ -48,7 +48,7 @@ static inline struct fou *fou_from_sock(struct sock *sk)
 	return sk->sk_user_data;
 }
 
-static void fou_recv_pull(struct sk_buff *skb, size_t len)
+static int fou_recv_pull(struct sk_buff *skb, size_t len)
 {
 	struct iphdr *iph = ip_hdr(skb);
 
@@ -59,6 +59,7 @@ static void fou_recv_pull(struct sk_buff *skb, size_t len)
 	__skb_pull(skb, len);
 	skb_postpull_rcsum(skb, udp_hdr(skb), len);
 	skb_reset_transport_header(skb);
+	return iptunnel_pull_offloads(skb);
 }
 
 static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
@@ -68,9 +69,14 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
 	if (!fou)
 		return 1;
 
-	fou_recv_pull(skb, sizeof(struct udphdr));
+	if (fou_recv_pull(skb, sizeof(struct udphdr)))
+		goto drop;
 
 	return -fou->protocol;
+
+drop:
+	kfree_skb(skb);
+	return 0;
 }
 
 static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
@@ -170,6 +176,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
 	skb_reset_transport_header(skb);
 
+	if (iptunnel_pull_offloads(skb))
+		goto drop;
+
 	return -guehdr->proto_ctype;
 
 drop:
net/ipv4/gre_offload.c
@@ -126,6 +126,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	struct packet_offload *ptype;
 	__be16 type;
 
+	if (NAPI_GRO_CB(skb)->encap_mark)
+		goto out;
+
+	NAPI_GRO_CB(skb)->encap_mark = 1;
+
 	off = skb_gro_offset(skb);
 	hlen = off + sizeof(*greh);
 	greh = skb_gro_header_fast(skb, off);
net/ipv4/ip_tunnel_core.c
@@ -114,7 +114,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
 	skb->vlan_tci = 0;
 	skb_set_queue_mapping(skb, 0);
 	skb_scrub_packet(skb, xnet);
-	return 0;
+
+	return iptunnel_pull_offloads(skb);
 }
 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
 
net/ipv4/udp_offload.c
@@ -311,14 +311,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
 
-	if (NAPI_GRO_CB(skb)->udp_mark ||
+	if (NAPI_GRO_CB(skb)->encap_mark ||
 	    (skb->ip_summed != CHECKSUM_PARTIAL &&
 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 	     !NAPI_GRO_CB(skb)->csum_valid))
 		goto out;
 
-	/* mark that this skb passed once through the udp gro layer */
-	NAPI_GRO_CB(skb)->udp_mark = 1;
+	/* mark that this skb passed once through the tunnel gro layer */
+	NAPI_GRO_CB(skb)->encap_mark = 1;
 
 	rcu_read_lock();
 	uo_priv = rcu_dereference(udp_offload_base);
net/ipv6/ip6_offload.c
@@ -258,6 +258,19 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	return pp;
 }
 
+static struct sk_buff **sit_gro_receive(struct sk_buff **head,
+					struct sk_buff *skb)
+{
+	if (NAPI_GRO_CB(skb)->encap_mark) {
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	NAPI_GRO_CB(skb)->encap_mark = 1;
+
+	return ipv6_gro_receive(head, skb);
+}
+
 static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
 {
 	const struct net_offload *ops;
@@ -302,7 +315,7 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
 static const struct net_offload sit_offload = {
 	.callbacks = {
 		.gso_segment	= ipv6_gso_segment,
-		.gro_receive	= ipv6_gro_receive,
-		.gro_complete	= ipv6_gro_complete,
+		.gro_receive	= sit_gro_receive,
+		.gro_complete	= sit_gro_complete,
 	},
 };
net/ipv6/sit.c
@@ -681,14 +681,16 @@ static int ipip6_rcv(struct sk_buff *skb)
 		skb->mac_header = skb->network_header;
 		skb_reset_network_header(skb);
 		IPCB(skb)->flags = 0;
-		skb->protocol = htons(ETH_P_IPV6);
+		skb->dev = tunnel->dev;
 
 		if (packet_is_spoofed(skb, iph, tunnel)) {
 			tunnel->dev->stats.rx_errors++;
 			goto out;
 		}
 
-		__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+		if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6),
+		    !net_eq(tunnel->net, dev_net(tunnel->dev))))
+			goto out;
 
 		err = IP_ECN_decapsulate(iph, skb);
 		if (unlikely(err)) {