Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking updates from David Miller:

 1) tx_filtered/ps_tx_buf queues need to be accessed with the SKB queue
    lock held, from Arik Nemtsov.

 2) Don't call 802.11 driver's filter configure method until it's
    actually open, from Felix Fietkau.

 3) Use ieee80211_free_txskb(), otherwise we leak control information.
    From Johannes Berg.

 4) Fix memory leak in bluetooth UUID removal, from Johan Hedberg.

 5) The shift mask trick doesn't work properly when 'optname' is out of
    range in do_ip_setsockopt().  Use a straightforward switch statement
    instead; the compiler emits essentially the same code, but the range
    check is no longer missing.  From Xi Wang.

 6) Fix where we call tcp_replace_ts_recent(), otherwise we can
    erroneously accept a too-high tsval.  From Eric Dumazet.

 7) VXLAN bug fixes, mostly to do with VLAN header length handling, from
    Alexander Duyck.

 8) Missing return value initialization for IPV6_MINHOPCOUNT socket
    option handling.  From Hannes Frederic.

 9) Fix regression in tasklet handling in jme/ksz884x/xilinx drivers,
    from Xiaotian Feng.

10) At smsc911x driver init time, we don't know whether the chip is in
    word-swap mode or not.  However, we do need to wait for the control
    register's ready bit to be set before we program any other part of
    the chip.  Adjust the wait loop to account for this.  From Kamlakant
    Patel.

11) Revert erroneous MDIO bus unregister change to mdio-bitbang.c.

12) Fix memory leak in /proc/net/sctp/, from Tommi Rantala.

13) tilegx driver registers IRQ with NULL name, oops, from Simon Marchi.

14) TCP metrics hash table kzalloc() based allocation can fail, back
    down to using vmalloc() if it does.  From Eric Dumazet.

15) Fix packet steering out-of-order delivery regression, from Tom
    Herbert.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (40 commits)
  net-rps: Fix brokeness causing OOO packets
  tcp: handle tcp_net_metrics_init() order-5 memory allocation failures
  batman-adv: process broadcast packets in BLA earlier
  batman-adv: don't add TEMP clients belonging to other backbone nodes
  batman-adv: correctly pass the client flag on tt_response
  batman-adv: fix tt_global_entries flags update
  tilegx: request_irq with a non-null device name
  net: correct check in dev_addr_del()
  tcp: fix retransmission in repair mode
  sctp: fix /proc/net/sctp/ memory leak
  Revert "drivers/net/phy/mdio-bitbang.c: Call mdiobus_unregister before mdiobus_free"
  net/smsc911x: Fix ready check in cases where WORD_SWAP is needed
  drivers/net: fix tasklet misuse issue
  ipv4/ip_vti.c: VTI fix post-decryption forwarding
  brcmfmac: fix typo in CONFIG_BRCMISCAN
  vxlan: Update hard_header_len based on lowerdev when instantiating VXLAN
  vxlan: fix a typo.
  ipv6: setsockopt(IPIPPROTO_IPV6, IPV6_MINHOPCOUNT) forgot to set return value
  doc/net: Fix typo in netdev-features.txt
  vxlan: Fix error that was resulting in VXLAN MTU size being 10 bytes too large
  ...
Linus Torvalds 2012-11-16 14:10:15 -08:00
commit 1d567e19cc
42 changed files with 247 additions and 120 deletions


@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet.
This requests that the NIC receive all possible frames, including errored
frames (such as bad FCS, etc). This can be helpful when sniffing a link with
bad packets on it. Some NICs may receive more packets if also put into normal
PROMISC mdoe.
PROMISC mode.


@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0489, 0xE03D) },
{ USB_DEVICE(0x0489, 0xE027) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },


@ -124,6 +124,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },


@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev)
jme_clear_pm(jme);
JME_NAPI_ENABLE(jme);
tasklet_enable(&jme->linkch_task);
tasklet_enable(&jme->txclean_task);
tasklet_hi_enable(&jme->rxclean_task);
tasklet_hi_enable(&jme->rxempty_task);
tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
(unsigned long) jme);
tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
(unsigned long) jme);
tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
(unsigned long) jme);
tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
(unsigned long) jme);
rc = jme_request_irq(jme);
if (rc)
@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev,
tasklet_init(&jme->pcc_task,
jme_pcc_tasklet,
(unsigned long) jme);
tasklet_init(&jme->linkch_task,
jme_link_change_tasklet,
(unsigned long) jme);
tasklet_init(&jme->txclean_task,
jme_tx_clean_tasklet,
(unsigned long) jme);
tasklet_init(&jme->rxclean_task,
jme_rx_clean_tasklet,
(unsigned long) jme);
tasklet_init(&jme->rxempty_task,
jme_rx_empty_tasklet,
(unsigned long) jme);
tasklet_disable_nosync(&jme->linkch_task);
tasklet_disable_nosync(&jme->txclean_task);
tasklet_disable_nosync(&jme->rxclean_task);
tasklet_disable_nosync(&jme->rxempty_task);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;


@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev)
rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
tasklet_enable(&hw_priv->rx_tasklet);
tasklet_enable(&hw_priv->tx_tasklet);
tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
(unsigned long) hw_priv);
tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
(unsigned long) hw_priv);
hw->promiscuous = 0;
hw->all_multi = 0;
@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
spin_lock_init(&hw_priv->hwlock);
mutex_init(&hw_priv->lock);
/* tasklet is enabled. */
tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
(unsigned long) hw_priv);
tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
(unsigned long) hw_priv);
/* tasklet_enable will decrement the atomic counter. */
tasklet_disable(&hw_priv->rx_tasklet);
tasklet_disable(&hw_priv->tx_tasklet);
for (i = 0; i < TOTAL_PORT_NUM; i++)
init_waitqueue_head(&hw_priv->counter[i].counter);


@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev)
static int __devinit smsc911x_init(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int byte_test;
unsigned int byte_test, mask;
unsigned int to = 100;
SMSC_TRACE(pdata, probe, "Driver Parameters:");
@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev)
/*
* poll the READY bit in PMT_CTRL. Any other access to the device is
* forbidden while this bit isn't set. Try for 100ms
*
* Note that this test is done before the WORD_SWAP register is
* programmed. So in some configurations the READY bit is at 16 before
* WORD_SWAP is written to. This issue is worked around by waiting
* until either bit 0 or bit 16 gets set in PMT_CTRL.
*
* SMSC has confirmed that checking bit 16 (marked as reserved in
* the datasheet) is fine since these bits "will either never be set
* or can only go high after READY does (so also indicate the device
* is ready)".
*/
while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
udelay(1000);
if (to == 0) {
pr_err("Device not READY in 100ms aborting\n");
return -ENODEV;
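
A note on the mask in the hunk above: swahw32() swaps the two 16-bit halfwords of a 32-bit word, so applying it to the READY bit produces the bit-16 position mentioned in the comment.  A small sketch of what the mask evaluates to, assuming PMT_CTRL_READY_ is bit 0 as defined in smsc911x.h:

#include <linux/types.h>
#include <linux/swab.h>         /* swahw32() */

#define PMT_CTRL_READY_         0x00000001      /* assumption: bit 0, per smsc911x.h */

/* swahw32(0x00000001) == 0x00010000, so the wait loop accepts READY at
 * either bit 0 (once WORD_SWAP has been programmed) or bit 16 (before).
 */
static inline u32 smsc911x_ready_mask(void)
{
        return PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);      /* 0x00010001 */
}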


@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
ingress_irq = rc;
tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
0, NULL, NULL);
0, "tile_net", NULL);
if (rc != 0) {
netdev_err(dev, "request_irq failed: %d\n", rc);
destroy_irq(ingress_irq);


@ -942,6 +942,10 @@ static int axienet_open(struct net_device *ndev)
phy_start(lp->phy_dev);
}
/* Enable tasklets for Axi DMA error handling */
tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
(unsigned long) lp);
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
if (ret)
@ -950,8 +954,7 @@ static int axienet_open(struct net_device *ndev)
ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
if (ret)
goto err_rx_irq;
/* Enable tasklets for Axi DMA error handling */
tasklet_enable(&lp->dma_err_tasklet);
return 0;
err_rx_irq:
@ -960,6 +963,7 @@ static int axienet_open(struct net_device *ndev)
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@ -1613,10 +1617,6 @@ static int __devinit axienet_of_probe(struct platform_device *op)
goto err_iounmap_2;
}
tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
(unsigned long) lp);
tasklet_disable(&lp->dma_err_tasklet);
return 0;
err_iounmap_2:


@ -234,7 +234,6 @@ void free_mdio_bitbang(struct mii_bus *bus)
struct mdiobb_ctrl *ctrl = bus->priv;
module_put(ctrl->ops->owner);
mdiobus_unregister(bus);
mdiobus_free(bus);
}
EXPORT_SYMBOL(free_mdio_bitbang);


@ -540,10 +540,12 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
(ctx->ether_desc == NULL) || (ctx->control != intf))
goto error;
/* claim interfaces, if any */
temp = usb_driver_claim_interface(driver, ctx->data, dev);
if (temp)
goto error;
/* claim data interface, if different from control */
if (ctx->data != ctx->control) {
temp = usb_driver_claim_interface(driver, ctx->data, dev);
if (temp)
goto error;
}
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
tasklet_kill(&ctx->bh);
/* handle devices with combined control and data interface */
if (ctx->control == ctx->data)
ctx->data = NULL;
/* disconnect master --> disconnect slave */
if (intf == ctx->control && ctx->data) {
usb_set_intfdata(ctx->data, NULL);
@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
/* Huawei NCM devices disguised as vendor specific */
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
.driver_info = (unsigned long)&wwan_info,
},
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),


@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
/* set the address, index & direction (read from PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
addr = (phy_id << 11) | (idx << 6) | MII_READ_;
addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");
@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
/* set the address, index & direction (write to PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");


@ -1,5 +1,5 @@
/*
* VXLAN: Virtual eXtensiable Local Area Network
* VXLAN: Virtual eXtensible Local Area Network
*
* Copyright (c) 2012 Vyatta Inc.
*
@ -50,8 +50,8 @@
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
/* VLAN + IP header + UDP + VXLAN */
#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
/* update header length based on lower device */
dev->hard_header_len = lowerdev->hard_header_len +
VXLAN_HEADROOM;
}
if (data[IFLA_VXLAN_TOS])
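
For concreteness (these numbers follow from the hunk above, they are not new code): the new VXLAN_HEADROOM is 20 + 8 + 8 + 14 = 50 bytes, so over a standard 1500-byte Ethernet lowerdev a VXLAN device now defaults to an MTU of 1500 - 50 = 1450, and its hard_header_len becomes 14 + 50 = 64.  The old definition reserved only 4 + 20 + 8 + 8 = 40 bytes, which is where the "MTU size being 10 bytes too large" in the shortlog above comes from.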


@ -4401,7 +4401,7 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
#ifndef CONFIG_BRCMFISCAN
#ifndef CONFIG_BRCMISCAN
/* scheduled scan settings */
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;


@ -521,7 +521,7 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
if (iwlagn_tx_skb(priv, control->sta, skb))
dev_kfree_skb_any(skb);
ieee80211_free_txskb(hw, skb);
}
static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,


@ -2114,7 +2114,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
dev_kfree_skb_any(skb);
ieee80211_free_txskb(priv->hw, skb);
}
static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)


@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
dma_map_page(trans->dev, page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
spin_lock_irqsave(&rxq->lock, flags);
list_add(&rxb->list, &rxq->rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
dma_map_page(trans->dev, rxb->page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
/*
* free the page(s) as well to not break
* the invariant that the items on the used
* list have no page(s)
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
list_add_tail(&rxb->list, &rxq->rx_used);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
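
Both hunks above add the check the streaming DMA API requires: dma_map_page() can fail (for example when an IOMMU or bounce-buffer pool is exhausted), and the returned handle must be validated with dma_mapping_error() before it is handed to hardware.  A minimal sketch of that pattern under assumed names, not the iwlwifi code itself:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate and map one page for device-to-CPU DMA,
 * checking the mapping the way the hunks above now do.  Returns NULL and
 * frees the page if the mapping could not be set up, so the bogus DMA
 * handle is never given to the hardware.
 */
static struct page *rx_map_page(struct device *dev, unsigned int order,
                                dma_addr_t *dma)
{
        struct page *page = alloc_pages(GFP_KERNEL, order);

        if (!page)
                return NULL;

        *dma = dma_map_page(dev, page, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                __free_pages(page, order);
                return NULL;
        }
        return page;
}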


@ -2942,13 +2942,33 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
QETH_DBF_TEXT(SETUP, 2, "qipasscb");
cmd = (struct qeth_ipa_cmd *) data;
switch (cmd->hdr.return_code) {
case IPA_RC_NOTSUPP:
case IPA_RC_L2_UNSUPPORTED_CMD:
QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
return -0;
default:
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
"rc=%d\n",
dev_name(&card->gdev->dev),
cmd->hdr.return_code);
return 0;
}
}
if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
} else {
} else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
}
} else
QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
"\n", dev_name(&card->gdev->dev));
QETH_DBF_TEXT(SETUP, 2, "suppenbl");
QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);


@ -626,10 +626,13 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "doL2init");
QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
rc = qeth_query_setadapterparms(card);
if (rc) {
QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
"device %s: x%x\n", CARD_BUS_ID(card), rc);
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc) {
QETH_DBF_MESSAGE(2, "could not query adapter "
"parameters on device %s: x%x\n",
CARD_BUS_ID(card), rc);
}
}
if (card->info.type == QETH_CARD_TYPE_IQD ||
@ -676,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
if (!rc)
if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
rc = qeth_l2_send_setmac(card, addr->sa_data);
return rc ? -EINVAL : 0;
}


@ -325,6 +325,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
soft_iface->last_rx = jiffies;
/* Let the bridge loop avoidance check the packet. If will
* not handle it, we can safely push it up.
*/
if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
goto out;
if (orig_node)
batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
ethhdr->h_source);
@ -332,12 +338,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
goto dropped;
/* Let the bridge loop avoidance check the packet. If will
* not handle it, we can safely push it up.
*/
if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
goto out;
netif_rx(skb);
goto out;


@ -769,6 +769,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
*/
tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
/* the change can carry possible "attribute" flags like the
* TT_CLIENT_WIFI, therefore they have to be copied in the
* client entry
*/
tt_global_entry->common.flags |= flags;
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
* one originator left in the list and we previously received a
* delete + roaming change for this originator.
@ -1496,7 +1502,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
memcpy(tt_change->addr, tt_common_entry->addr,
ETH_ALEN);
tt_change->flags = BATADV_NO_FLAGS;
tt_change->flags = tt_common_entry->flags;
tt_count++;
tt_change++;
@ -2450,6 +2456,13 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
{
bool ret = false;
/* if the originator is a backbone node (meaning it belongs to the same
* LAN of this node) the temporary client must not be added because to
* reach such destination the node must use the LAN instead of the mesh
*/
if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
goto out;
if (!batadv_tt_global_add(bat_priv, orig_node, addr,
BATADV_TT_CLIENT_TEMP,
atomic_read(&orig_node->last_ttvn)))


@ -1754,11 +1754,11 @@ int hci_register_dev(struct hci_dev *hdev)
if (hdev->dev_type != HCI_AMP)
set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
schedule_work(&hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
schedule_work(&hdev->power_on);
return id;
err_wqueue:


@ -326,7 +326,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
struct hci_dev *d;
size_t rp_len;
u16 count;
int i, err;
int err;
BT_DBG("sock %p", sk);
@ -347,9 +347,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
return -ENOMEM;
}
rp->num_controllers = cpu_to_le16(count);
i = 0;
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
@ -357,10 +355,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
if (!mgmt_valid_hdev(d))
continue;
rp->index[i++] = cpu_to_le16(d->id);
rp->index[count++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
rp->num_controllers = cpu_to_le16(count);
rp_len = sizeof(*rp) + (2 * count);
read_unlock(&hci_dev_list_lock);
err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
@ -1366,6 +1367,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
list_del(&match->list);
kfree(match);
found++;
}


@ -267,7 +267,7 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
hcon->dst_type, reason);
hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
cancel_delayed_work_sync(&conn->security_timer);


@ -2818,8 +2818,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (unlikely(tcpu != next_cpu) &&
(tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
rflow->last_qtail)) >= 0))
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
}
if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
*rflowp = rflow;


@ -319,7 +319,8 @@ int dev_addr_del(struct net_device *dev, const unsigned char *addr,
*/
ha = list_first_entry(&dev->dev_addrs.list,
struct netdev_hw_addr, list);
if (ha->addr == dev->dev_addr && ha->refcount == 1)
if (!memcmp(ha->addr, addr, dev->addr_len) &&
ha->type == addr_type && ha->refcount == 1)
return -ENOENT;
err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,


@ -457,19 +457,28 @@ static int do_ip_setsockopt(struct sock *sk, int level,
struct inet_sock *inet = inet_sk(sk);
int val = 0, err;
if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
(1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
(1<<IP_RETOPTS) | (1<<IP_TOS) |
(1<<IP_TTL) | (1<<IP_HDRINCL) |
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
(1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
(1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
optname == IP_UNICAST_IF ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
optname == IP_RECVORIGDSTADDR) {
switch (optname) {
case IP_PKTINFO:
case IP_RECVTTL:
case IP_RECVOPTS:
case IP_RECVTOS:
case IP_RETOPTS:
case IP_TOS:
case IP_TTL:
case IP_HDRINCL:
case IP_MTU_DISCOVER:
case IP_RECVERR:
case IP_ROUTER_ALERT:
case IP_FREEBIND:
case IP_PASSSEC:
case IP_TRANSPARENT:
case IP_MINTTL:
case IP_NODEFRAG:
case IP_UNICAST_IF:
case IP_MULTICAST_TTL:
case IP_MULTICAST_ALL:
case IP_MULTICAST_LOOP:
case IP_RECVORIGDSTADDR:
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
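
A side note on item 5 of the summary, since the hunk above only shows the result: the test it replaces built a bitmask with "1 << optname", and in C shifting an int by 32 or more bits is undefined behaviour; on x86 the hardware silently masks the shift count, so an out-of-range optname could alias a valid bit and slip past the check.  The sketch below is a simplified illustration with only three options, not the kernel code itself:

#include <linux/types.h>
#include <linux/in.h>           /* IP_PKTINFO, IP_TOS, IP_TTL */

/* Old style (flawed): "1 << optname" is undefined once optname >= 32; on
 * x86 the shift count is masked to its low 5 bits, so e.g. optname 33 would
 * test the same bit as optname 1 and pass without any range check.
 */
static bool takes_int_bitmask(int optname)
{
        return (1 << optname) &
               ((1 << IP_PKTINFO) | (1 << IP_TOS) | (1 << IP_TTL));
}

/* The switch form accepts exactly the listed options and nothing else; the
 * compiler can still emit a jump table, but with a real bounds check.
 */
static bool takes_int_switch(int optname)
{
        switch (optname) {
        case IP_PKTINFO:
        case IP_TOS:
        case IP_TTL:
                return true;
        default:
                return false;
        }
}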


@ -338,12 +338,17 @@ static int vti_rcv(struct sk_buff *skb)
if (tunnel != NULL) {
struct pcpu_tstats *tstats;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
return -1;
tstats = this_cpu_ptr(tunnel->dev->tstats);
u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
skb->mark = 0;
secpath_reset(skb);
skb->dev = tunnel->dev;
return 1;
}


@ -1212,7 +1212,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied && likely(!tp->repair))
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
@ -1223,7 +1223,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
out:
if (copied && likely(!tp->repair))
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle);
release_sock(sk);
return copied + copied_syn;


@ -5313,11 +5313,6 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
goto discard;
}
/* ts_recent update must be made after we are sure that the packet
* is in window.
*/
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
/* step 3: check security and precedence [ignored] */
/* step 4: Check for a SYN
@ -5552,6 +5547,11 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
goto discard;
/* ts_recent update must be made after we are sure that the packet
* is in window.
*/
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
tcp_rcv_rtt_measure_ts(sk, skb);
/* Process urgent data. */
@ -6130,6 +6130,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
} else
goto discard;
/* ts_recent update must be made after we are sure that the packet
* is in window.
*/
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
/* step 6: check the URG bit */
tcp_urg(sk, skb, th);


@ -1,7 +1,6 @@
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
@ -9,6 +8,7 @@
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
@ -1034,7 +1034,10 @@ static int __net_init tcp_net_metrics_init(struct net *net)
net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!net->ipv4.tcp_metrics_hash)
net->ipv4.tcp_metrics_hash = vzalloc(size);
if (!net->ipv4.tcp_metrics_hash)
return -ENOMEM;
@ -1055,7 +1058,10 @@ static void __net_exit tcp_net_metrics_exit(struct net *net)
tm = next;
}
}
kfree(net->ipv4.tcp_metrics_hash);
if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
vfree(net->ipv4.tcp_metrics_hash);
else
kfree(net->ipv4.tcp_metrics_hash);
}
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
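
The hunk above is the whole of item 14: an order-5 kzalloc() can legitimately fail on a fragmented machine, so the allocation is retried with vzalloc() and the free site has to check which allocator the memory came from.  A minimal sketch of the pattern in isolation, with hypothetical helper names (current kernels wrap the same idea in kvzalloc()/kvfree(), which did not exist yet at the time of this merge):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>           /* is_vmalloc_addr() */

/* Try physically contiguous memory first, quietly (__GFP_NOWARN), and fall
 * back to vmalloc space if the page allocator cannot satisfy the request.
 */
static void *table_zalloc(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/* The matching free must check which allocator the pointer came from. */
static void table_free(void *p)
{
        if (is_vmalloc_addr(p))
                vfree(p);
        else
                kfree(p);
}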


@ -1986,6 +1986,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
goto repair; /* Skip network transmission */
cwnd_quota = tcp_cwnd_test(tp, skb);
if (!cwnd_quota)
break;
@ -2026,6 +2029,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
repair:
/* Advance the send_head. This one is sent out.
* This call will increment packets_out.
*/


@ -827,6 +827,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (val < 0 || val > 255)
goto e_inval;
np->min_hopcount = val;
retv = 0;
break;
case IPV6_DONTFRAG:
np->dontfrag = valbool;


@ -2594,6 +2594,9 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
else
local->probe_req_reg--;
if (!local->open_count)
break;
ieee80211_queue_work(&local->hw, &local->reconfig_filter);
break;
default:


@ -1314,6 +1314,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev);
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
struct sk_buff_head *skbs);
/* HT */
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,


@ -871,8 +871,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.wiphy->cipher_suites,
sizeof(u32) * local->hw.wiphy->n_cipher_suites,
GFP_KERNEL);
if (!suites)
return -ENOMEM;
if (!suites) {
result = -ENOMEM;
goto fail_wiphy_register;
}
for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
u32 suite = local->hw.wiphy->cipher_suites[r];
if (suite == WLAN_CIPHER_SUITE_WEP40 ||


@ -917,7 +917,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
struct cfg80211_sched_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sched_scan_ies sched_scan_ies;
struct ieee80211_sched_scan_ies sched_scan_ies = {};
int ret, i;
mutex_lock(&local->mtx);


@ -117,8 +117,8 @@ static void free_sta_work(struct work_struct *wk)
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
__skb_queue_purge(&sta->ps_tx_buf[ac]);
__skb_queue_purge(&sta->tx_filtered[ac]);
ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
}
#ifdef CONFIG_MAC80211_MESH
@ -141,7 +141,7 @@ static void free_sta_work(struct work_struct *wk)
tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
if (!tid_tx)
continue;
__skb_queue_purge(&tid_tx->pending);
ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
kfree(tid_tx);
}
@ -961,6 +961,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
struct sk_buff_head pending;
int filtered = 0, buffered = 0, ac;
unsigned long flags;
clear_sta_flag(sta, WLAN_STA_SP);
@ -976,12 +977,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
tmp = skb_queue_len(&pending);
filtered += tmp - count;
count = tmp;
spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
tmp = skb_queue_len(&pending);
buffered += tmp - count;
}
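
This is the core of item 1: frames can still be queued to tx_filtered/ps_tx_buf from other contexts, so the splice has to hold the queue's own spinlock rather than use the lock-free __skb_queue_* helpers.  A minimal sketch of just that locking pattern, with a hypothetical helper name:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Move everything queued on @src onto the tail of @dst.  struct sk_buff_head
 * carries its own lock; holding it makes the splice safe against concurrent
 * skb_queue_tail() callers, which the bare __skb_* helpers are not.
 */
static void splice_ps_queue(struct sk_buff_head *src, struct sk_buff_head *dst)
{
        unsigned long flags;

        spin_lock_irqsave(&src->lock, flags);
        skb_queue_splice_tail_init(src, dst);
        spin_unlock_irqrestore(&src->lock, flags);
}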


@ -668,3 +668,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ieee80211_free_txskb);
void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
struct sk_buff_head *skbs)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(skbs)))
ieee80211_free_txskb(hw, skb);
}


@ -1358,7 +1358,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
else
__skb_queue_purge(&tx->skbs);
ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
return -1;
} else if (unlikely(res == TX_QUEUED)) {
I802_DEBUG_INC(tx->local->tx_handlers_queued);
@ -2120,10 +2120,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
*/
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
{
struct sk_buff *skb;
int i;
for (i = 0; i < local->hw.queues; i++)
skb_queue_purge(&local->pending[i]);
for (i = 0; i < local->hw.queues; i++) {
while ((skb = skb_dequeue(&local->pending[i])) != NULL)
ieee80211_free_txskb(&local->hw, skb);
}
}
/*


@ -1491,6 +1491,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
if (!sdata->u.mgd.associated)
continue;
ieee80211_send_nullfunc(local, sdata, 0);
}


@ -102,7 +102,7 @@ static const struct file_operations sctp_snmp_seq_fops = {
.open = sctp_snmp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.release = single_release_net,
};
/* Set up the proc fs entry for 'snmp' object. */
@ -251,7 +251,7 @@ static const struct file_operations sctp_eps_seq_fops = {
.open = sctp_eps_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
.release = seq_release_net,
};
/* Set up the proc fs entry for 'eps' object. */
@ -372,7 +372,7 @@ static const struct file_operations sctp_assocs_seq_fops = {
.open = sctp_assocs_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
.release = seq_release_net,
};
/* Set up the proc fs entry for 'assocs' object. */
@ -517,7 +517,7 @@ static const struct file_operations sctp_remaddr_seq_fops = {
.open = sctp_remaddr_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
.release = seq_release_net,
};
int __net_init sctp_remaddr_proc_init(struct net *net)


@ -141,9 +141,8 @@ static const struct ieee80211_regdomain world_regdom = {
.reg_rules = {
/* IEEE 802.11b/g, channels 1..11 */
REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
/* IEEE 802.11b/g, channels 12..13. No HT40
* channel fits here. */
REG_RULE(2467-10, 2472+10, 20, 6, 20,
/* IEEE 802.11b/g, channels 12..13. */
REG_RULE(2467-10, 2472+10, 40, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
/* IEEE 802.11 channel 14 - Only JP enables