Merge branch 'mpls_gso'

Simon Horman says:

====================
When a non-MPLS packet is received and an MPLS stack is added to it,
it may well be that the original skb is GSO but the NIC used for
transmit does not support GSO of MPLS packets.

The aim of this short series is to provide GSO in software for MPLS packets
whose skbs are GSO.

Changes since v4:

Updated the first patch of the series to use 16 bits for all *_headers
fields rather than just the inner_*_headers fields.

Simon Horman (2):
  net: Use 16bits for *_headers fields of struct skbuff
  MPLS: Add limited GSO support
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2013-05-27 22:51:06 -07:00
commit 51047840e6
15 changed files with 149 additions and 116 deletions
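The cover letter above describes the scenario only in prose. As a minimal, purely illustrative sketch (not code from this series), the snippet below shows roughly how a datapath that pushes an MPLS label entry onto an Ethernet frame could use the fields introduced in the diffs that follow -- skb->inner_protocol, SKB_GSO_MPLS and, indirectly, dev->mpls_features -- so that such a packet can later be segmented in software by mpls_gso_segment(). The helper name example_push_mpls(), the local EXAMPLE_MPLS_HLEN constant and the error handling are assumptions made for illustration only.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

#define EXAMPLE_MPLS_HLEN 4	/* one MPLS label stack entry (local to this sketch) */

/* Hypothetical: push one MPLS label stack entry between the Ethernet and IP
 * headers of @skb and mark the skb so the MPLS GSO offload can handle it.
 * A real implementation would also update skb->csum for CHECKSUM_COMPLETE.
 */
static int example_push_mpls(struct sk_buff *skb, __be32 lse)
{
	if (skb_cow_head(skb, EXAMPLE_MPLS_HLEN))
		return -ENOMEM;

	/* Make room at the head and move the L2 header up over it. */
	skb_push(skb, EXAMPLE_MPLS_HLEN);
	memmove(skb->data, skb->data + EXAMPLE_MPLS_HLEN, skb->mac_len);
	skb_reset_mac_header(skb);

	/* Write the label stack entry just after the L2 header. */
	*(__be32 *)(skb->data + skb->mac_len) = lse;

	/* Remember the encapsulated protocol and switch the outer one to MPLS,
	 * mirroring what mpls_gso_segment() expects to find in inner_protocol.
	 */
	skb->inner_protocol = skb->protocol;
	skb->protocol = htons(ETH_P_MPLS_UC);

	/* Flag GSO skbs so the stack knows they now need MPLS-aware segmentation. */
	if (skb_is_gso(skb))
		skb_shinfo(skb)->gso_type |= SKB_GSO_MPLS;

	return 0;
}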


@@ -43,8 +43,9 @@ enum {
	NETIF_F_FSO_BIT,		/* ... FCoE segmentation */
	NETIF_F_GSO_GRE_BIT,		/* ... GRE with TSO */
	NETIF_F_GSO_UDP_TUNNEL_BIT,	/* ... UDP TUNNEL with TSO */
	NETIF_F_GSO_MPLS_BIT,		/* ... MPLS segmentation */
	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
		NETIF_F_GSO_UDP_TUNNEL_BIT,
		NETIF_F_GSO_MPLS_BIT,

	NETIF_F_FCOE_CRC_BIT,		/* FCoE CRC32 */
	NETIF_F_SCTP_CSUM_BIT,		/* SCTP checksum offload */
@@ -107,6 +108,7 @@ enum {
#define NETIF_F_RXALL __NETIF_F(RXALL)
#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)


@@ -1088,6 +1088,8 @@ struct net_device {
	 * need to set them appropriately.
	 */
	netdev_features_t	hw_enc_features;
	/* mask of features inheritable by MPLS */
	netdev_features_t	mpls_features;

	/* Interface index. Unique device identifier */
	int			ifindex;
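For context on how the new mpls_features mask above is meant to be consumed: register_netdevice() (further down in this diff) seeds it with NETIF_F_SG, and mpls_gso_segment() ANDs it with netif_skb_features() before segmenting the inner packet. A driver whose hardware can still checksum and scatter/gather beneath an MPLS stack could therefore advertise that roughly as sketched below; the function name and the exact feature bits chosen are assumptions, not part of this series.

#include <linux/netdevice.h>

/* Hypothetical driver setup hook: advertise which offloads stay usable
 * when an MPLS stack has been pushed onto the packet.
 */
static void example_setup_mpls_features(struct net_device *dev)
{
	/* Ordinary offloads for plain traffic. */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;

	/* Offloads that remain valid for MPLS-encapsulated traffic; these are
	 * what mpls_gso_segment() is allowed to use on the inner packet.
	 */
	dev->mpls_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
}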


@@ -319,6 +319,8 @@ enum {
	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_UDP_TUNNEL = 1 << 7,

	SKB_GSO_MPLS = 1 << 8,
};
#if BITS_PER_LONG > 32
@@ -389,6 +391,7 @@ typedef unsigned char *sk_buff_data_t;
 * @dropcount: total number of sk_receive_queue overflows
 * @vlan_proto: vlan encapsulation protocol
 * @vlan_tci: vlan tag control information
 * @inner_protocol: Protocol (encapsulation)
 * @inner_transport_header: Inner transport layer header (encapsulation)
 * @inner_network_header: Network layer header (encapsulation)
 * @inner_mac_header: Link layer header (encapsulation)
@@ -509,12 +512,13 @@ struct sk_buff {
		__u32		reserved_tailroom;
	};
	sk_buff_data_t		inner_transport_header;
	sk_buff_data_t		inner_network_header;
	sk_buff_data_t		inner_mac_header;
	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	__be16			inner_protocol;
	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
@@ -1527,7 +1531,6 @@ static inline void skb_reset_mac_len(struct sk_buff *skb)
	skb->mac_len = skb->network_header - skb->mac_header;
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb)
{
@@ -1638,112 +1641,6 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
	skb->mac_header += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb)
{
	return skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb, const int offset)
{
	skb->inner_transport_header = skb->data + offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb, const int offset)
{
	skb->inner_network_header = skb->data + offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb, const int offset)
{
	skb->inner_mac_header = skb->data + offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != NULL;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb, const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_probe_transport_header(struct sk_buff *skb, const int offset_hint)
{


@@ -218,6 +218,7 @@ source "net/batman-adv/Kconfig"
source "net/openvswitch/Kconfig"
source "net/vmw_vsock/Kconfig"
source "net/netlink/Kconfig"
source "net/mpls/Kconfig"
config RPS
	boolean


@@ -70,3 +70,4 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_OPENVSWITCH) += openvswitch/
obj-$(CONFIG_VSOCKETS) += vmw_vsock/
obj-$(CONFIG_NET_MPLS_GSO) += mpls/


@@ -5277,6 +5277,10 @@ int register_netdevice(struct net_device *dev)
	 */
	dev->hw_enc_features |= NETIF_F_SG;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)


@@ -82,6 +82,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
	[NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
	[NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
	[NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
	[NETIF_F_GSO_MPLS_BIT] =         "tx-mpls-segmentation",

	[NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
	[NETIF_F_SCTP_CSUM_BIT] =        "tx-checksum-sctp",


@@ -1295,6 +1295,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
		       SKB_GSO_GRE |
		       SKB_GSO_TCPV6 |
		       SKB_GSO_UDP_TUNNEL |
		       SKB_GSO_MPLS |
		       0)))
		goto out;


@@ -2917,6 +2917,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_TCPV6 |
		       SKB_GSO_GRE |
		       SKB_GSO_MPLS |
		       SKB_GSO_UDP_TUNNEL |
		       0) ||
		     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))


@@ -2381,7 +2381,7 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
	if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
			      SKB_GSO_UDP_TUNNEL |
			      SKB_GSO_GRE) ||
			      SKB_GSO_GRE | SKB_GSO_MPLS) ||
		     !(type & (SKB_GSO_UDP))))
		goto out;


@@ -98,6 +98,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_UDP_TUNNEL |
		       SKB_GSO_MPLS |
		       SKB_GSO_TCPV6 |
		       0)))
		goto out;


@@ -63,7 +63,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
	if (unlikely(type & ~(SKB_GSO_UDP |
			      SKB_GSO_DODGY |
			      SKB_GSO_UDP_TUNNEL |
			      SKB_GSO_GRE) ||
			      SKB_GSO_GRE |
			      SKB_GSO_MPLS) ||
		     !(type & (SKB_GSO_UDP))))
		goto out;

net/mpls/Kconfig (new file, 9 lines)

@@ -0,0 +1,9 @@
#
# MPLS configuration
#
config NET_MPLS_GSO
	tristate "MPLS: GSO support"
	help
	 This is a helper module to allow segmentation of non-MPLS GSO packets
	 that have had MPLS stack entries pushed onto them and thus
	 become MPLS GSO packets.

net/mpls/Makefile (new file, 4 lines)

@@ -0,0 +1,4 @@
#
# Makefile for MPLS.
#
obj-y += mpls_gso.o

net/mpls/mpls_gso.c (new file, 108 lines)

@@ -0,0 +1,108 @@
/*
 * MPLS GSO Support
 *
 * Authors: Simon Horman (horms@verge.net.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Based on: GSO portions of net/ipv4/gre.c
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/err.h>
#include <linux/module.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t mpls_features;
	__be16 mpls_protocol;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE |
				  SKB_GSO_MPLS)))
		goto out;

	/* Setup inner SKB. */
	mpls_protocol = skb->protocol;
	skb->protocol = skb->inner_protocol;

	/* Push back the mac header that skb_mac_gso_segment() has pulled.
	 * It will be re-pulled by the call to skb_mac_gso_segment() below
	 */
	__skb_push(skb, skb->mac_len);

	/* Segment inner packet. */
	mpls_features = skb->dev->mpls_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, mpls_features);

	/* Restore outer protocol. */
	skb->protocol = mpls_protocol;

	/* Re-pull the mac header that the call to skb_mac_gso_segment()
	 * above pulled. It will be re-pushed after returning
	 * skb_mac_gso_segment(), an indirect caller of this function.
	 */
	__skb_push(skb, skb->data - skb_mac_header(skb));

out:
	return segs;
}
static int mpls_gso_send_check(struct sk_buff *skb)
{
	return 0;
}

static struct packet_offload mpls_mc_offload = {
	.type = cpu_to_be16(ETH_P_MPLS_MC),
	.callbacks = {
		.gso_send_check = mpls_gso_send_check,
		.gso_segment    = mpls_gso_segment,
	},
};

static struct packet_offload mpls_uc_offload = {
	.type = cpu_to_be16(ETH_P_MPLS_UC),
	.callbacks = {
		.gso_send_check = mpls_gso_send_check,
		.gso_segment    = mpls_gso_segment,
	},
};
static int __init mpls_gso_init(void)
{
	pr_info("MPLS GSO support\n");

	dev_add_offload(&mpls_uc_offload);
	dev_add_offload(&mpls_mc_offload);

	return 0;
}

static void __exit mpls_gso_exit(void)
{
	dev_remove_offload(&mpls_uc_offload);
	dev_remove_offload(&mpls_mc_offload);
}

module_init(mpls_gso_init);
module_exit(mpls_gso_exit);

MODULE_DESCRIPTION("MPLS GSO support");
MODULE_AUTHOR("Simon Horman (horms@verge.net.au)");
MODULE_LICENSE("GPL");
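To tie the module back to the cover letter: dev_add_offload() registers mpls_gso_segment() for ETH_P_MPLS_UC/MC, and the core transmit path falls back to skb_gso_segment() -- and hence to this handler -- whenever a GSO skb cannot be segmented by the egress device itself. The helper below is only a conceptual restatement of that condition under stated assumptions (the real check lives in net/core/dev.c and is feature-mask based); the function name example_needs_sw_mpls_gso() is an assumption, not an API from this series.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Conceptual sketch: a GSO skb marked SKB_GSO_MPLS needs the software path
 * added by this series when the egress device lacks NETIF_F_GSO_MPLS.
 */
static bool example_needs_sw_mpls_gso(const struct sk_buff *skb,
				       const struct net_device *dev)
{
	return skb_is_gso(skb) &&
	       (skb_shinfo(skb)->gso_type & SKB_GSO_MPLS) &&
	       !(dev->features & NETIF_F_GSO_MPLS);
}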