linux-stable/include/linux/if_vlan.h

/*
 * VLAN		An implementation of 802.1Q VLAN tagging.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_
#ifdef __KERNEL__
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#define VLAN_HLEN	4		/* The additional bytes (on top of the
					 * Ethernet header) that VLAN requires.
					 */
#define VLAN_ETH_ALEN	6		/* Octets in one ethernet addr	 */
#define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */
/**
 *	struct vlan_hdr - vlan header
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};
/**
 *	struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 *	@h_dest: destination ethernet address
 *	@h_source: source ethernet address
 *	@h_vlan_proto: ethernet protocol (always 0x8100)
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};
#include <linux/skbuff.h>
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb_mac_header(skb);
}
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
#define VLAN_TAG_PRESENT VLAN_CFI_MASK
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096
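
/*
 * Example (illustrative only, not part of the upstream header): splitting a
 * host-order TCI into its fields with the masks above.  "vhdr" is a
 * hypothetical struct vlan_hdr pointer.
 *
 *	u16  tci  = ntohs(vhdr->h_vlan_TCI);
 *	u16  vid  = tci & VLAN_VID_MASK;			(0..4095)
 *	u16  prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	(0..7)
 *	bool cfi  = !!(tci & VLAN_CFI_MASK);
 */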
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
/* if this changes, algorithm will have to be reworked because this
 * depends on completely exhausting the VLAN identifier space.  Thus
 * it gives constant time look-up, but in many cases it wastes memory.
 */
#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
struct vlan_group {
	struct net_device	*real_dev; /* The ethernet(like) device
					    * the vlan is attached to.
					    */
	unsigned int		nr_vlans;
	int			killall;
	struct hlist_node	hlist;	/* linked list */
	struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
	struct rcu_head		rcu;
};
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
							u16 vlan_id)
{
	struct net_device **array;
	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
	return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
}
static inline void vlan_group_set_device(struct vlan_group *vg,
					 u16 vlan_id,
					 struct net_device *dev)
{
	struct net_device **array;
	if (!vg)
		return;
	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
}
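
/*
 * Example (illustrative sketch, not part of the upstream header): walking
 * every VLAN device registered in a group, as drivers do when re-programming
 * hardware VLAN filters.  "grp" is assumed to be a valid vlan_group.
 *
 *	u16 vid;
 *	struct net_device *vdev;
 *
 *	for (vid = 0; vid < VLAN_N_VID; vid++) {
 *		vdev = vlan_group_get_device(grp, vid);
 *		if (vdev)
 *			pr_debug("vid %u -> %s\n", vid, vdev->name);
 *	}
 */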
#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
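
/*
 * Example (illustrative sketch): how a driver transmit path might consume the
 * out-of-band tag set by __vlan_hwaccel_put_tag().  The descriptor fields and
 * the TXDESC_INSERT_VLAN flag are hypothetical.
 *
 *	if (vlan_tx_tag_present(skb)) {
 *		desc->vlan_tci = vlan_tx_tag_get(skb);
 *		desc->flags |= TXDESC_INSERT_VLAN;
 *	}
 */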
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Must be invoked with rcu_read_lock or with RTNL. */
static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
					       u16 vlan_id)
{
	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);

	if (grp)
		return vlan_group_get_device(grp, vlan_id);

	return NULL;
}
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
			     u16 vlan_tci, int polling);
extern bool vlan_hwaccel_do_receive(struct sk_buff **skb);
extern gro_result_t
vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		 unsigned int vlan_tci, struct sk_buff *skb);
extern gro_result_t
vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
	       unsigned int vlan_tci);
#else
static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
					       u16 vlan_id)
{
	return NULL;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
				    u16 vlan_tci, int polling)
{
	BUG();
	return NET_XMIT_SUCCESS;
}
static inline bool vlan_hwaccel_do_receive(struct sk_buff **skb)
{
	if ((*skb)->vlan_tci & VLAN_VID_MASK)
		(*skb)->pkt_type = PACKET_OTHERHOST;
	return false;
}
static inline gro_result_t
vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		 unsigned int vlan_tci, struct sk_buff *skb)
{
	return GRO_DROP;
}

static inline gro_result_t
vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
	       unsigned int vlan_tci)
{
	return GRO_DROP;
}
#endif
/**
 * vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration
 * @skb: buffer
 * @grp: vlan group
 * @vlan_tci: VLAN TCI as received from the card
 */
static inline int vlan_hwaccel_rx(struct sk_buff *skb,
				  struct vlan_group *grp,
				  u16 vlan_tci)
{
	return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0);
}
/**
 * vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration
 * @skb: buffer
 * @grp: vlan group
 * @vlan_tci: VLAN TCI as received from the card
 */
static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
					   struct vlan_group *grp,
					   u16 vlan_tci)
{
	return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1);
}
/**
 * __vlan_put_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN) < 0) {
		kfree_skb(skb);
		return NULL;
	}
	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

	/* Move the mac addresses to the beginning of the new header. */
	memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
	skb->mac_header -= VLAN_HLEN;

	/* first, the ethernet type */
	veth->h_vlan_proto = htons(ETH_P_8021Q);

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	skb->protocol = htons(ETH_P_8021Q);

	return skb;
}
/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
						     u16 vlan_tci)
{
	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
	return skb;
}
#define HAVE_VLAN_PUT_TAG
/**
 * vlan_put_tag - inserts VLAN tag according to device features
 * @skb: skbuff to tag
 * @vlan_tci: VLAN TCI to insert
 *
 * Assumes skb->dev is the target that will xmit this frame.
 * Returns a VLAN tagged skb.
 */
static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
	if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
		return __vlan_hwaccel_put_tag(skb, vlan_tci);
	} else {
		return __vlan_put_tag(skb, vlan_tci);
	}
}
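
/*
 * Example (illustrative sketch): tagging a frame before handing it to a lower
 * device, as bonding-style code might do.  "lower_dev" and "tci" are
 * hypothetical; note that on the software path vlan_put_tag() may free the
 * original skb and return NULL, so the result must be checked.
 *
 *	skb = vlan_put_tag(skb, tci);
 *	if (!skb)
 *		return;
 *	skb->dev = lower_dev;
 *	dev_queue_xmit(skb);
 */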
/**
 * __vlan_get_tag - get the VLAN ID that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (veth->h_vlan_proto != htons(ETH_P_8021Q)) {
		return -EINVAL;
	}

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}
/**
 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->vlan_tci
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if @skb->vlan_tci is not set correctly
 */
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
					 u16 *vlan_tci)
{
	if (vlan_tx_tag_present(skb)) {
		*vlan_tci = vlan_tx_tag_get(skb);
		return 0;
	} else {
		*vlan_tci = 0;
		return -EINVAL;
	}
}
#define HAVE_VLAN_GET_TAG
/**
 * vlan_get_tag - get the VLAN ID from the skb
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not VLAN tagged
 */
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
		return __vlan_hwaccel_get_tag(skb, vlan_tci);
	} else {
		return __vlan_get_tag(skb, vlan_tci);
	}
}
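
/*
 * Example (illustrative sketch): reading the tag of a received frame whether
 * the NIC stripped it into skb->vlan_tci or left it in the payload.
 *
 *	u16 tci;
 *
 *	if (vlan_get_tag(skb, &tci) == 0)
 *		pr_debug("tagged, vid %u prio %u\n",
 *			 tci & VLAN_VID_MASK,
 *			 (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
 *	else
 *		pr_debug("untagged frame\n");
 */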
/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
	__be16 protocol = 0;

	if (vlan_tx_tag_present(skb) ||
	     skb->protocol != cpu_to_be16(ETH_P_8021Q))
		protocol = skb->protocol;
	else {
		__be16 proto, *protop;
		protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
						h_vlan_encapsulated_proto),
						sizeof(proto), &proto);
		if (likely(protop))
			protocol = *protop;
	}

	return protocol;
}
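
/*
 * Example (illustrative sketch): using vlan_get_protocol() to pick the real
 * L3 EtherType when setting up transmit checksum offload, independent of any
 * VLAN encapsulation.  "csum_ipv4"/"csum_ipv6" are hypothetical driver flags.
 *
 *	__be16 proto = vlan_get_protocol(skb);
 *
 *	if (proto == htons(ETH_P_IP))
 *		csum_ipv4 = true;
 *	else if (proto == htons(ETH_P_IPV6))
 *		csum_ipv6 = true;
 */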
#endif /* __KERNEL__ */
/* VLAN IOCTLs are found in sockios.h */
/* Passed in vlan_ioctl_args structure to determine behaviour. */
enum vlan_ioctl_cmds {
	ADD_VLAN_CMD,
	DEL_VLAN_CMD,
	SET_VLAN_INGRESS_PRIORITY_CMD,
	SET_VLAN_EGRESS_PRIORITY_CMD,
	GET_VLAN_INGRESS_PRIORITY_CMD,
	GET_VLAN_EGRESS_PRIORITY_CMD,
	SET_VLAN_NAME_TYPE_CMD,
	SET_VLAN_FLAG_CMD,
	GET_VLAN_REALDEV_NAME_CMD, /* If this works, you know it's a VLAN device, btw */
	GET_VLAN_VID_CMD /* Get the VID of this VLAN (specified by name) */
};
enum vlan_flags {
	VLAN_FLAG_REORDER_HDR	= 0x1,
	VLAN_FLAG_GVRP		= 0x2,
	VLAN_FLAG_LOOSE_BINDING	= 0x4,
};
enum vlan_name_types {
	VLAN_NAME_TYPE_PLUS_VID, /* Name will look like:  vlan0005 */
	VLAN_NAME_TYPE_RAW_PLUS_VID, /* name will look like:  eth1.0005 */
	VLAN_NAME_TYPE_PLUS_VID_NO_PAD, /* Name will look like:  vlan5 */
	VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD, /* Name will look like:  eth0.5 */
	VLAN_NAME_TYPE_HIGHEST
};
struct vlan_ioctl_args {
	int cmd; /* Should be one of the vlan_ioctl_cmds enum above. */
	char device1[24];

	union {
		char device2[24];
		int VID;
		unsigned int skb_priority;
		unsigned int name_type;
		unsigned int bind_type;
		unsigned int flag; /* Matches vlan_dev_info flags */
	} u;

	short vlan_qos;
};
#endif /* !(_LINUX_IF_VLAN_H_) */