Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David S. Miller:

 1) Since we do RCU lookups on ipv4 FIB entries, we have to test if the
    entry is dead before returning it to our caller.

 2) openvswitch locking and packet validation fixes from Ansis Atteka,
    Jesse Gross, and Pravin B Shelar.

 3) Fix PM resume locking in IGB driver, from Benjamin Poirier.

 4) Fix VLAN header handling in vhost-net and macvtap, from Basil Gor.

 5) Revert a bogus network namespace isolation change that was causing
    regressions on S390 networking devices.

 6) If bonding decides to process and handle a LACPDU frame, we
    shouldn't bump the rx_dropped counter (see the sketch after this
    list).  From Jiri Bohac.

 7) Fix miscalculation of available TX space in the r8169 driver when
    doing TSO, which can lead to crashes and/or a hung device.  From
    Julien Ducourthial.

 8) SCTP does not validate cached routes properly in all cases, from
    Nicolas Dichtel.

 9) Link status interrupt needs to be handled in ks8851 driver, from
    Stephen Boyd.

10) Use capable(), not cap_raised(), in connector/userns netlink code.
    From Eric W. Biederman via Andrew Morton.

11) Fix pktgen OOPS on module unload, from Eric Dumazet.

12) iwlwifi underestimates SKB truesizes, also from Eric Dumazet.

13) Cure division by zero in SFC driver, from Ben Hutchings.
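
The bonding fix in (6) leans on the rx_handler return-value convention:
a receive hook reports RX_HANDLER_CONSUMED when it took the frame, and
the caller then frees the skb with consume_skb() instead of letting it
fall through to a path that bumps rx_dropped.  A minimal sketch of that
convention, assuming only <linux/netdevice.h>; the names recv_probe and
sketch_handle_frame are illustrative stand-ins for the bonding code in
the diffs below:

	#include <linux/netdevice.h>

	struct bonding;
	struct slave;

	static rx_handler_result_t sketch_handle_frame(struct sk_buff *skb,
			int (*recv_probe)(struct sk_buff *, struct bonding *,
					  struct slave *),
			struct bonding *bond, struct slave *slave)
	{
		if (recv_probe &&
		    recv_probe(skb, bond, slave) == RX_HANDLER_CONSUMED) {
			/* freed as consumed, so rx_dropped is untouched */
			consume_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		/* not ours: let the stack keep processing the frame */
		return RX_HANDLER_ANOTHER;
	}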

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (26 commits)
  ks8851: Update link status during link change interrupt
  macvtap: restore vlan header on user read
  vhost-net: fix handle_rx buffer size
  bonding: don't increase rx_dropped after processing LACPDUs
  connector/userns: replace netlink uses of cap_raised() with capable()
  sctp: check cached dst before using it
  pktgen: fix crash at module unload
  Revert "net: maintain namespace isolation between vlan and real device"
  ehea: fix losing of NEQ events when one event occurred early
  igb: fix rtnl race in PM resume path
  ipv4: Do not use dead fib_info entries.
  r8169: fix unsigned int wraparound with TSO
  sfc: Fix division by zero when using one RX channel and no SR-IOV
  openvswitch: Validation of IPv6 set port action uses IPv4 header
  net: compare_ether_addr[_64bits]() has no ordering
  cdc_ether: Ignore bogus union descriptor for RNDIS devices
  bnx2x: bug fix when loading after SAN boot
  e1000: Silence sparse warnings by correcting type
  igb, ixgbe: netdev_tx_reset_queue incorrectly called from tx init path
  openvswitch: Release rtnl_lock if ovs_vport_cmd_build_info() failed.
  ...
Commit 4a873f5399 by Linus Torvalds, 2012-05-12 12:57:01 -07:00
37 changed files with 235 additions and 171 deletions


@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
 		return;
 	}
 
-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
+	if (!capable(CAP_SYS_ADMIN)) {
 		retcode = ERR_PERM;
 		goto fail;
 	}


@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
 
-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN))
 		return;
 
 	spin_lock(&receiving_list_lock);


@@ -2173,9 +2173,10 @@ re_arm:
  * received frames (loopback). Since only the payload is given to this
  * function, it check for loopback.
  */
-static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
 {
 	struct port *port;
+	int ret = RX_HANDLER_ANOTHER;
 
 	if (length >= sizeof(struct lacpdu)) {
@@ -2184,11 +2185,12 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
 		if (!port->slave) {
 			pr_warning("%s: Warning: port of slave %s is uninitialized\n",
 				   slave->dev->name, slave->dev->master->name);
-			return;
+			return ret;
 		}
 
 		switch (lacpdu->subtype) {
 		case AD_TYPE_LACPDU:
+			ret = RX_HANDLER_CONSUMED;
 			pr_debug("Received LACPDU on port %d\n",
 				 port->actor_port_number);
 			/* Protect against concurrent state machines */
@@ -2198,6 +2200,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
 			break;
 
 		case AD_TYPE_MARKER:
+			ret = RX_HANDLER_CONSUMED;
 			// No need to convert fields to Little Endian since we don't use the marker's fields.
 
 			switch (((struct bond_marker *)lacpdu)->tlv_type) {
@@ -2219,6 +2222,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
 			}
 		}
 	}
+	return ret;
 }
 
 /**
@@ -2456,18 +2460,20 @@ out:
 	return NETDEV_TX_OK;
 }
 
-void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
 			  struct slave *slave)
 {
+	int ret = RX_HANDLER_ANOTHER;
+
 	if (skb->protocol != PKT_TYPE_LACPDU)
-		return;
+		return ret;
 
 	if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
-		return;
+		return ret;
 
 	read_lock(&bond->lock);
-	bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
+	ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
 	read_unlock(&bond->lock);
+	return ret;
 }
 
 /*


@@ -274,7 +274,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
 void bond_3ad_handle_link_change(struct slave *slave, char link);
 int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
 			  struct slave *slave);
 int bond_3ad_set_carrier(struct bonding *bond);
 void bond_3ad_update_lacp_rate(struct bonding *bond);


@@ -1444,8 +1444,9 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 	struct sk_buff *skb = *pskb;
 	struct slave *slave;
 	struct bonding *bond;
-	void (*recv_probe)(struct sk_buff *, struct bonding *,
+	int (*recv_probe)(struct sk_buff *, struct bonding *,
 			   struct slave *);
+	int ret = RX_HANDLER_ANOTHER;
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
@@ -1464,8 +1465,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 
 		if (likely(nskb)) {
-			recv_probe(nskb, bond, slave);
+			ret = recv_probe(nskb, bond, slave);
 			dev_kfree_skb(nskb);
+			if (ret == RX_HANDLER_CONSUMED) {
+				consume_skb(skb);
+				return ret;
+			}
 		}
 	}
@@ -1487,7 +1492,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 		memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
 	}
 
-	return RX_HANDLER_ANOTHER;
+	return ret;
 }
 
 /* enslave device <slave> to bond device <master> */
@@ -2723,7 +2728,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
 	}
 }
 
-static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
+static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
 			 struct slave *slave)
 {
 	struct arphdr *arp;
@@ -2731,7 +2736,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
 	__be32 sip, tip;
 
 	if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
-		return;
+		return RX_HANDLER_ANOTHER;
 
 	read_lock(&bond->lock);
@@ -2776,6 +2781,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
 
 out_unlock:
 	read_unlock(&bond->lock);
+	return RX_HANDLER_ANOTHER;
 }
 
 /*


@@ -9122,13 +9122,34 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
 	return bnx2x_prev_mcp_done(bp);
 }
 
+/* previous driver DMAE transaction may have occurred when pre-boot stage ended
+ * and boot began, or when kdump kernel was loaded. Either case would invalidate
+ * the addresses of the transaction, resulting in was-error bit set in the pci
+ * causing all hw-to-host pcie transactions to timeout. If this happened we want
+ * to clear the interrupt which detected this from the pglueb and the was done
+ * bit
+ */
+static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+
+	if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+		BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+	}
+}
+
 static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
 {
 	int time_counter = 10;
 	u32 rc, fw, hw_lock_reg, hw_lock_val;
 
 	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
 
-	/* Release previously held locks */
+	/* clear hw from errors which may have resulted from an interrupted
+	 * dmae transaction.
+	 */
+	bnx2x_prev_interrupted_dmae(bp);
+
+	/* Release previously held locks */
 	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
 		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
 		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);


@@ -3335,6 +3335,8 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
 		goto out_shutdown_ports;
 	}
 
+	/* Handle any events that might be pending. */
+	tasklet_hi_schedule(&adapter->neq_tasklet);
 	ret = 0;
 	goto out;


@@ -3380,7 +3380,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
-		struct my_u { u64 a; u64 b; };
+		struct my_u { __le64 a; __le64 b; };
 		struct my_u *u = (struct my_u *)tx_desc;
 		const char *type;
 
@@ -3424,7 +3424,7 @@ rx_ring_summary:
 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
-		struct my_u { u64 a; u64 b; };
+		struct my_u { __le64 a; __le64 b; };
 		struct my_u *u = (struct my_u *)rx_desc;
 		const char *type;


@@ -1111,9 +1111,12 @@ msi_only:
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
 	/* Notify the stack of the (possibly) reduced queue counts. */
+	rtnl_lock();
 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-	return netif_set_real_num_rx_queues(adapter->netdev,
-					    adapter->num_rx_queues);
+	err = netif_set_real_num_rx_queues(adapter->netdev,
+					   adapter->num_rx_queues);
+	rtnl_unlock();
+	return err;
 }
 
 /**
@@ -2771,8 +2774,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
 	wr32(E1000_TXDCTL(reg_idx), txdctl);
-
-	netdev_tx_reset_queue(txring_txq(ring));
 }
 
 /**
@@ -3282,6 +3283,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);
 
@@ -6796,18 +6799,7 @@ static int igb_resume(struct device *dev)
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	if (!rtnl_is_locked()) {
-		/*
-		 * shut up ASSERT_RTNL() warning in
-		 * netif_set_real_num_tx/rx_queues.
-		 */
-		rtnl_lock();
-		err = igb_init_interrupt_scheme(adapter);
-		rtnl_unlock();
-	} else {
-		err = igb_init_interrupt_scheme(adapter);
-	}
-
-	if (err) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}


@@ -574,9 +574,6 @@ extern struct ixgbe_info ixgbe_82599_info;
 extern struct ixgbe_info ixgbe_X540_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-			      struct ixgbe_dcb_config *dst_dcb_cfg,
-			      int tc_max);
 #endif
 
 extern char ixgbe_driver_name[];


@@ -44,18 +44,26 @@
 #define DCB_NO_HW_CHG	1  /* DCB configuration did not change */
 #define DCB_HW_CHG	2  /* DCB configuration changed, no reset */
 
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
-		       struct ixgbe_dcb_config *dcfg, int tc_max)
+static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
 {
+	struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg;
+	struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg;
 	struct tc_configuration *src = NULL;
 	struct tc_configuration *dst = NULL;
 	int i, j;
 	int tx = DCB_TX_CONFIG;
 	int rx = DCB_RX_CONFIG;
 	int changes = 0;
+#ifdef IXGBE_FCOE
+	struct dcb_app app = {
+			      .selector = DCB_APP_IDTYPE_ETHTYPE,
+			      .protocol = ETH_P_FCOE,
+			     };
+	u8 up = dcb_getapp(adapter->netdev, &app);
 
-	if (!scfg || !dcfg)
-		return changes;
+	if (up && !(up & (1 << adapter->fcoe.up)))
+		changes |= BIT_APP_UPCHG;
+#endif
 
 	for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
 		src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
@@ -332,28 +340,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int ret = DCB_NO_HW_CHG;
 	int i;
-#ifdef IXGBE_FCOE
-	struct dcb_app app = {
-			      .selector = DCB_APP_IDTYPE_ETHTYPE,
-			      .protocol = ETH_P_FCOE,
-			     };
-	u8 up;
-
-	/* In IEEE mode, use the IEEE Ethertype selector value */
-	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
-		app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
-		up = dcb_ieee_getapp_mask(netdev, &app);
-	} else {
-		up = dcb_getapp(netdev, &app);
-	}
-#endif
 
 	/* Fail command if not in CEE mode */
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return ret;
 
-	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
-						      &adapter->dcb_cfg,
+	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
 						      MAX_TRAFFIC_CLASS);
 	if (!adapter->dcb_set_bitmap)
 		return ret;
@@ -440,8 +432,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 	 * FCoE is using changes. This happens if the APP info
 	 * changes or the up2tc mapping is updated.
 	 */
-	if ((up && !(up & (1 << adapter->fcoe.up))) ||
-	    (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+		struct dcb_app app = {
+				      .selector = DCB_APP_IDTYPE_ETHTYPE,
+				      .protocol = ETH_P_FCOE,
+				     };
+		u8 up = dcb_getapp(netdev, &app);
+
 		adapter->fcoe.up = ffs(up) - 1;
 		ixgbe_dcbnl_devreset(netdev);
 		ret = DCB_HW_CHG_RST;


@@ -1780,6 +1780,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
 		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	/* re-map buffers to ring, store next to clean values */
 	ixgbe_alloc_rx_buffers(rx_ring, count);
 	rx_ring->next_to_clean = rx_ntc;


@@ -2671,8 +2671,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	/* enable queue */
 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
 
-	netdev_tx_reset_queue(txring_txq(ring));
-
 	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
 	if (hw->mac.type == ixgbe_mac_82598EB &&
 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -4167,6 +4165,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);
 
@@ -4418,8 +4418,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	adapter->dcb_cfg.pfc_mode_enable = false;
 	adapter->dcb_set_bitmap = 0x00;
 	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
-			   MAX_TRAFFIC_CLASS);
+	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+	       sizeof(adapter->temp_dcb_cfg));
 
 #endif
@@ -4866,10 +4866,12 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	netif_device_detach(netdev);
 
 	if (netif_running(netdev)) {
+		rtnl_lock();
 		ixgbe_down(adapter);
 		ixgbe_free_irq(adapter);
 		ixgbe_free_all_tx_resources(adapter);
 		ixgbe_free_all_rx_resources(adapter);
+		rtnl_unlock();
 	}
 
 	ixgbe_clear_interrupt_scheme(adapter);


@@ -618,10 +618,8 @@ static void ks8851_irq_work(struct work_struct *work)
 	netif_dbg(ks, intr, ks->netdev,
 		  "%s: status 0x%04x\n", __func__, status);
 
-	if (status & IRQ_LCI) {
-		/* should do something about checking link status */
+	if (status & IRQ_LCI)
 		handled |= IRQ_LCI;
-	}
 
 	if (status & IRQ_LDI) {
 		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
@@ -684,6 +682,9 @@ static void ks8851_irq_work(struct work_struct *work)
 
 	mutex_unlock(&ks->lock);
 
+	if (status & IRQ_LCI)
+		mii_check_link(&ks->mii);
+
 	if (status & IRQ_TXI)
 		netif_wake_queue(ks->netdev);


@@ -61,8 +61,12 @@
 #define R8169_MSG_DEFAULT \
 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
-#define TX_BUFFS_AVAIL(tp) \
-	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@ -5115,7 +5119,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	u32 opts[2];
 	int frags;
 
-	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
 	}
@@ -5169,7 +5173,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	mmiowb();
 
-	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
@@ -5183,7 +5187,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 		 * can't.
 		 */
 		smp_mb();
-		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
 			netif_wake_queue(dev);
 	}
@@ -5306,7 +5310,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 	 */
 	smp_mb();
 	if (netif_queue_stopped(dev) &&
-	    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+	    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		netif_wake_queue(dev);
 	}
 
 	/*

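The new r8169 macros are plain ring arithmetic: cur_tx counts descriptors
ever queued and dirty_tx counts descriptors already reclaimed, so
NUM_TX_DESC - (cur_tx - dirty_tx) slots are free, and a frame with
nr_frags fragments needs nr_frags + 1 of them (one extra for the linear
part).  Written out as functions (a sketch only; the value 64 assumed
for NUM_TX_DESC matches the driver's ring size):

	#include <linux/types.h>

	#define NUM_TX_DESC	64

	static unsigned int tx_slots_avail(unsigned int dirty_tx,
					   unsigned int cur_tx)
	{
		/* unsigned arithmetic stays correct when the counters wrap */
		return dirty_tx + NUM_TX_DESC - cur_tx;
	}

	static bool tx_frags_ready_for(unsigned int dirty_tx,
				       unsigned int cur_tx,
				       unsigned int nr_frags)
	{
		/* nr_frags fragments plus one descriptor for the linear data */
		return tx_slots_avail(dirty_tx, cur_tx) >= nr_frags + 1;
	}

The old TX_BUFFS_AVAIL "- 1" form could underflow to a huge unsigned
value when the ring was completely full, which is the wraparound the
shortlog entry refers to.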

@@ -1349,7 +1349,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 	}
 
 	/* RSS might be usable on VFs even if it is disabled on the PF */
-	efx->rss_spread = (efx->n_rx_channels > 1 ?
+	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
 			   efx->n_rx_channels : efx_vf_size(efx));
 
 	return 0;


@@ -259,7 +259,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 
 xmit_world:
 	skb->ip_summed = ip_summed;
-	skb_set_dev(skb, vlan->lowerdev);
+	skb->dev = vlan->lowerdev;
 	return dev_queue_xmit(skb);
 }


@@ -1,5 +1,6 @@
 #include <linux/etherdevice.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/nsproxy.h>
 #include <linux/compat.h>
@@ -759,6 +760,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 	struct macvlan_dev *vlan;
 	int ret;
 	int vnet_hdr_len = 0;
+	int vlan_offset = 0;
+	int copied;
 
 	if (q->flags & IFF_VNET_HDR) {
 		struct virtio_net_hdr vnet_hdr;
@@ -773,18 +776,48 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
 			return -EFAULT;
 	}
+	copied = vnet_hdr_len;
+
+	if (!vlan_tx_tag_present(skb))
+		len = min_t(int, skb->len, len);
+	else {
+		int copy;
+		struct {
+			__be16 h_vlan_proto;
+			__be16 h_vlan_TCI;
+		} veth;
+		veth.h_vlan_proto = htons(ETH_P_8021Q);
+		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
-	len = min_t(int, skb->len, len);
+		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+		len = min_t(int, skb->len + VLAN_HLEN, len);
+
+		copy = min_t(int, vlan_offset, len);
+		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+		len -= copy;
+		copied += copy;
+		if (ret || !len)
+			goto done;
+
+		copy = min_t(int, sizeof(veth), len);
+		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
+		len -= copy;
+		copied += copy;
+		if (ret || !len)
+			goto done;
+	}
 
-	ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
+	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+	copied += len;
 
+done:
 	rcu_read_lock_bh();
 	vlan = rcu_dereference_bh(q->vlan);
 	if (vlan)
-		macvlan_count_rx(vlan, len, ret == 0, 0);
+		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
 	rcu_read_unlock_bh();
 
-	return ret ? ret : (len + vnet_hdr_len);
+	return ret ? ret : copied;
 }
 
 static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,

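The macvtap change splices a hardware-stripped VLAN tag back into the
byte stream handed to userspace; the splice point is
offsetof(struct vlan_ethhdr, h_vlan_proto), i.e. byte 12, right after
the two MAC addresses.  A sketch of the resulting layout (the function
is illustrative, not part of the patch):

	#include <linux/bug.h>
	#include <linux/if_ether.h>
	#include <linux/if_vlan.h>

	static void vlan_splice_points(void)
	{
		/* bytes  0..11: dst MAC + src MAC, copied from the skb
		 * bytes 12..15: h_vlan_proto + h_vlan_TCI, rebuilt from
		 *               skb metadata via vlan_tx_tag_get()
		 * bytes 16.. : original EtherType + payload, copied from
		 *               skb offset 12 onward
		 */
		BUILD_BUG_ON(offsetof(struct vlan_ethhdr, h_vlan_proto) !=
			     2 * ETH_ALEN);
		BUILD_BUG_ON(VLAN_HLEN != 4);
	}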

@@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 	struct cdc_state		*info = (void *) &dev->data;
 	int				status;
 	int				rndis;
+	bool				android_rndis_quirk = false;
 	struct usb_driver		*driver = driver_of(intf);
 	struct usb_cdc_mdlm_desc	*desc = NULL;
 	struct usb_cdc_mdlm_detail_desc *detail = NULL;
@@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 					info->control,
 					info->u->bSlaveInterface0,
 					info->data);
+				/* fall back to hard-wiring for RNDIS */
+				if (rndis) {
+					android_rndis_quirk = true;
+					goto next_desc;
+				}
 				goto bad_desc;
 			}
 			if (info->control != intf) {
@@ -271,11 +277,15 @@ next_desc:
 	/* Microsoft ActiveSync based and some regular RNDIS devices lack the
 	 * CDC descriptors, so we'll hard-wire the interfaces and not check
 	 * for descriptors.
+	 *
+	 * Some Android RNDIS devices have a CDC Union descriptor pointing
+	 * to non-existing interfaces. Ignore that and attempt the same
+	 * hard-wired 0 and 1 interfaces.
 	 */
-	if (rndis && !info->u) {
+	if (rndis && (!info->u || android_rndis_quirk)) {
 		info->control = usb_ifnum_to_if(dev->udev, 0);
 		info->data = usb_ifnum_to_if(dev->udev, 1);
-		if (!info->control || !info->data) {
+		if (!info->control || !info->data || info->control != intf) {
 			dev_dbg(&intf->dev,
 				"rndis: master #0/%p slave #1/%p\n",
 				info->control,


@@ -373,7 +373,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
 		else
 			spur_subchannel_sd = 0;
 
-		spur_freq_sd = (freq_offset << 9) / 11;
+		spur_freq_sd = ((freq_offset + 10) << 9) / 11;
 
 	} else {
 		if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
@@ -382,7 +382,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
 		else
 			spur_subchannel_sd = 1;
 
-		spur_freq_sd = (freq_offset << 9) / 11;
+		spur_freq_sd = ((freq_offset - 10) << 9) / 11;
 
 	}


@@ -2637,6 +2637,7 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
 			/* after stopping the bus, exit thread */
 			brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
 			bus->dpc_tsk = NULL;
+			spin_lock_irqsave(&bus->dpc_tl_lock, flags);
 			break;
 		}


@@ -773,8 +773,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 	struct sk_buff *skb;
 	__le16 fc = hdr->frame_control;
 	struct iwl_rxon_context *ctx;
-	struct page *p;
-	int offset;
+	unsigned int hdrlen, fraglen;
 
 	/* We only process data packets if the interface is open */
 	if (unlikely(!priv->is_open)) {
@@ -788,16 +787,24 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 	    iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;
 
-	skb = dev_alloc_skb(128);
+	/* Dont use dev_alloc_skb(), we'll have enough headroom once
+	 * ieee80211_hdr pulled.
+	 */
+	skb = alloc_skb(128, GFP_ATOMIC);
 	if (!skb) {
-		IWL_ERR(priv, "dev_alloc_skb failed\n");
+		IWL_ERR(priv, "alloc_skb failed\n");
 		return;
 	}
 
-	offset = (void *)hdr - rxb_addr(rxb);
-	p = rxb_steal_page(rxb);
-	skb_add_rx_frag(skb, 0, p, offset, len, len);
+	hdrlen = min_t(unsigned int, len, skb_tailroom(skb));
+	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+	fraglen = len - hdrlen;
+
+	if (fraglen) {
+		int offset = (void *)hdr + hdrlen - rxb_addr(rxb);
+
+		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+				fraglen, rxb->truesize);
+	}
 
 	iwl_update_stats(priv, false, fc, len);
 
 	/*


@@ -374,8 +374,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	if (WARN_ON(!rxb))
 		return;
 
+	rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order;
 	dma_unmap_page(trans->dev, rxb->page_dma,
-		       PAGE_SIZE << hw_params(trans).rx_page_order,
+		       rxcb.truesize,
 		       DMA_FROM_DEVICE);
 
 	rxcb._page = rxb->page;


@@ -260,6 +260,7 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
 
 struct iwl_rx_cmd_buffer {
 	struct page *_page;
+	unsigned int truesize;
 };
 
 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)


@@ -24,6 +24,7 @@
 #include <linux/if_arp.h>
 #include <linux/if_tun.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
 
 #include <net/sock.h>
@@ -283,8 +284,12 @@ static int peek_head_len(struct sock *sk)
 	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
 	head = skb_peek(&sk->sk_receive_queue);
-	if (likely(head))
+	if (likely(head)) {
 		len = head->len;
+		if (vlan_tx_tag_present(head))
+			len += VLAN_HLEN;
+	}
+
 	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
 	return len;
 }


@@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
 	struct uvesafb_task *utask;
 	struct uvesafb_ktask *task;
 
-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN))
 		return;
 
 	if (msg->seq >= UVESAFB_TASKS_MAX)


@@ -159,7 +159,8 @@ static inline void eth_hw_addr_random(struct net_device *dev)
  * @addr1: Pointer to a six-byte array containing the Ethernet address
  * @addr2: Pointer other six-byte array containing the Ethernet address
  *
- * Compare two ethernet addresses, returns 0 if equal
+ * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise.
+ * Unlike memcmp(), it doesn't return a value suitable for sorting.
  */
 static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
 {
@@ -184,10 +185,10 @@ static inline unsigned long zap_last_2bytes(unsigned long value)
  * @addr1: Pointer to an array of 8 bytes
 * @addr2: Pointer to an other array of 8 bytes
  *
- * Compare two ethernet addresses, returns 0 if equal.
- * Same result than "memcmp(addr1, addr2, ETH_ALEN)" but without conditional
- * branches, and possibly long word memory accesses on CPU allowing cheap
- * unaligned memory reads.
+ * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise.
+ * Unlike memcmp(), it doesn't return a value suitable for sorting.
+ * The function doesn't need any conditional branches and possibly uses
+ * word memory accesses on CPU allowing cheap unaligned memory reads.
  * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2}
  *
 * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits.

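The rewritten kernel-doc makes the contract explicit: both helpers only
answer "equal or not", so their result can be used as a boolean but
never as a memcmp()-style comparator.  A short usage sketch (the
function name is illustrative):

	#include <linux/etherdevice.h>

	static bool same_station(const u8 *a, const u8 *b)
	{
		return compare_ether_addr(a, b) == 0;	/* equality test: fine */
	}

	/* Wrong: the non-zero result carries no ordering, so it must not
	 * be used to keep an address table sorted the way memcmp() could.
	 */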

@@ -1403,15 +1403,6 @@ static inline bool netdev_uses_dsa_tags(struct net_device *dev)
 	return 0;
 }
 
-#ifndef CONFIG_NET_NS
-static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
-	skb->dev = dev;
-}
-#else /* CONFIG_NET_NS */
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
-#endif
-
 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
 {
 #ifdef CONFIG_NET_DSA_TAG_TRAILER


@@ -704,4 +704,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
 	addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
 }
 
+/* The cookie is always 0 since this is how it's used in the
+ * pmtu code.
+ */
+static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+{
+	if (t->dst && !dst_check(t->dst, 0)) {
+		dst_release(t->dst);
+		t->dst = NULL;
+	}
+
+	return t->dst;
+}
+
 #endif /* __net_sctp_h__ */


@@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 	}
 
-	skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
+	skb->dev = vlan_dev_priv(dev)->real_dev;
 	len = skb->len;
 	if (netpoll_tx_running(dev))
 		return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);


@@ -1617,10 +1617,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 		return NET_RX_DROP;
 	}
 	skb->skb_iif = 0;
-	skb_set_dev(skb, dev);
+	skb->dev = dev;
+	skb_dst_drop(skb);
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, dev);
+	skb->mark = 0;
+	secpath_reset(skb);
+	nf_reset(skb);
 	return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1869,36 +1873,6 @@
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-/**
- * skb_dev_set -- assign a new device to a buffer
- * @skb: buffer for the new device
- * @dev: network device
- *
- * If an skb is owned by a device already, we have to reset
- * all data private to the namespace a device belongs to
- * before assigning it a new device.
- */
-#ifdef CONFIG_NET_NS
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
-	skb_dst_drop(skb);
-	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
-		secpath_reset(skb);
-		nf_reset(skb);
-		skb_init_secmark(skb);
-		skb->mark = 0;
-		skb->priority = 0;
-		skb->nf_trace = 0;
-		skb->ipvs_property = 0;
-#ifdef CONFIG_NET_SCHED
-		skb->tc_index = 0;
-#endif
-	}
-	skb->dev = dev;
-}
-EXPORT_SYMBOL(skb_set_dev);
-#endif /* CONFIG_NET_NS */
-
 static void skb_warn_bad_offload(const struct sk_buff *skb)
 {
 	static const netdev_features_t null_features = 0;


@@ -1931,7 +1931,7 @@ static int pktgen_device_event(struct notifier_block *unused,
 {
 	struct net_device *dev = ptr;
 
-	if (!net_eq(dev_net(dev), &init_net))
+	if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
 		return NOTIFY_DONE;
 
 	/* It is OK that we do not hold the group lock right now,
@@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void)
 {
 	struct pktgen_thread *t;
 	struct list_head *q, *n;
+	LIST_HEAD(list);
 
 	/* Stop all interfaces & threads */
 	pktgen_exiting = true;
 
-	list_for_each_safe(q, n, &pktgen_threads) {
+	mutex_lock(&pktgen_thread_lock);
+	list_splice_init(&pktgen_threads, &list);
+	mutex_unlock(&pktgen_thread_lock);
+
+	list_for_each_safe(q, n, &list) {
 		t = list_entry(q, struct pktgen_thread, th_list);
+		list_del(&t->th_list);
 		kthread_stop(t->tsk);
 		kfree(t);
 	}


@@ -1370,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 
 			if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
 				continue;
+			if (fi->fib_dead)
+				continue;
 			if (fa->fa_info->fib_scope < flp->flowi4_scope)
 				continue;
 			fib_alias_accessed(fa);

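This fib_trie hunk is the fix described in item 1 of the summary:
lookups run under RCU, so a reader can still see an entry that a
concurrent writer has already marked dead, and it must re-check that
flag before returning the entry.  The generic shape of the pattern, as
a sketch (not the kernel's code; in the FIB this role is played by the
fib_dead field of struct fib_info):

	#include <linux/rcupdate.h>

	struct entry {
		bool			dead;
		struct entry __rcu	*next;
	};

	/* Called under rcu_read_lock(). */
	static struct entry *lookup(struct entry __rcu **head)
	{
		struct entry *e;

		for (e = rcu_dereference(*head); e;
		     e = rcu_dereference(e->next)) {
			if (e->dead)
				continue;	/* unlinked by a writer, skip it */
			return e;
		}
		return NULL;
	}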

@@ -421,6 +421,19 @@ static int validate_sample(const struct nlattr *attr,
 	return validate_actions(actions, key, depth + 1);
 }
 
+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+	if (flow_key->eth.type == htons(ETH_P_IP)) {
+		if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+			return 0;
+	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+		if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
 static int validate_set(const struct nlattr *a,
 			const struct sw_flow_key *flow_key)
 {
@@ -462,18 +475,13 @@ static int validate_set(const struct nlattr *a,
 		if (flow_key->ip.proto != IPPROTO_TCP)
 			return -EINVAL;
 
-		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-			return -EINVAL;
-
-		break;
+		return validate_tp_port(flow_key);
 
 	case OVS_KEY_ATTR_UDP:
 		if (flow_key->ip.proto != IPPROTO_UDP)
 			return -EINVAL;
 
-		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-			return -EINVAL;
-		break;
+		return validate_tp_port(flow_key);
 
 	default:
 		return -EINVAL;
@@ -1641,10 +1649,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
 					 OVS_VPORT_CMD_NEW);
 	if (IS_ERR(reply)) {
-		err = PTR_ERR(reply);
 		netlink_set_err(init_net.genl_sock, 0,
-				ovs_dp_vport_multicast_group.id, err);
-		return 0;
+				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+		goto exit_unlock;
 	}
 
 	genl_notify(reply, genl_info_net(info), info->snd_pid,


@@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
 	u8 tcp_flags = 0;
 
 	if (flow->key.eth.type == htons(ETH_P_IP) &&
-	    flow->key.ip.proto == IPPROTO_TCP) {
+	    flow->key.ip.proto == IPPROTO_TCP &&
+	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		u8 *tcp = (u8 *)tcp_hdr(skb);
 		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
 	}


@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 */
 	skb_set_owner_w(nskb, sk);
 
-	/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
-	if (!dst || (dst->obsolete > 1)) {
-		dst_release(dst);
+	if (!sctp_transport_dst_check(tp)) {
 		sctp_transport_route(tp, NULL, sctp_sk(sk));
 		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
 			sctp_assoc_sync_pmtu(asoc);


@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
-/* this is a complete rip-off from __sk_dst_check
- * the cookie is always 0 since this is how it's used in the
- * pmtu code
- */
-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
-{
-	struct dst_entry *dst = t->dst;
-
-	if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
-		dst_release(t->dst);
-		t->dst = NULL;
-		return NULL;
-	}
-
-	return dst;
-}
-
 void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
 	struct dst_entry *dst;