Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Infinite loop in _decode_session6(), from Eric Dumazet.

 2) Pass correct argument to nla_strlcpy() in netfilter, also from Eric
    Dumazet.

 3) Out of bounds memory access in ipv6 srh code, from Mathieu Xhonneux.

 4) NULL deref in XDP_REDIRECT handling of tun driver, from Toshiaki
    Makita.

 5) Incorrect idr release in cls_flower, from Paul Blakey.

 6) Probe error handling fix in davinci_emac, from Dan Carpenter.

 7) Memory leak in XPS configuration, from Alexander Duyck.

 8) Use after free with cloned sockets in kcm, from Kirill Tkhai.

 9) MTU handling fixes for ip_tunnel and ip6_tunnel, from Nicolas
    Dichtel.

10) Fix UAPI hole in bpf data structure for 32-bit compat applications,
    from Daniel Borkmann.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (33 commits)
  bpf: fix uapi hole for 32 bit compat applications
  net: usb: cdc_mbim: add flag FLAG_SEND_ZLP
  ip6_tunnel: remove magic mtu value 0xFFF8
  ip_tunnel: restore binding to ifaces with a large mtu
  net: dsa: b53: Add BCM5389 support
  kcm: Fix use-after-free caused by clonned sockets
  net-sysfs: Fix memory leak in XPS configuration
  ixgbe: fix parsing of TC actions for HW offload
  net: ethernet: davinci_emac: fix error handling in probe()
  net/ncsi: Fix array size in dumpit handler
  cls_flower: Fix incorrect idr release when failing to modify rule
  net/sonic: Use dma_mapping_error()
  xfrm Fix potential error pointer dereference in xfrm_bundle_create.
  vhost_net: flush batched heads before trying to busy polling
  tun: Fix NULL pointer dereference in XDP redirect
  be2net: Fix error detection logic for BE3
  net: qmi_wwan: Add Netgear Aircard 779S
  mlxsw: spectrum: Forbid creation of VLAN 1 over port/LAG
  atm: zatm: fix memcmp casting
  iwlwifi: pcie: compare with number of IRQs requested for, not number of CPUs
  ...
This commit is contained in:
Linus Torvalds 2018-06-02 17:35:53 -07:00
commit 918fe1b315
38 changed files with 192 additions and 115 deletions

View File

@ -10,6 +10,7 @@ Required properties:
"brcm,bcm53128" "brcm,bcm53128"
"brcm,bcm5365" "brcm,bcm5365"
"brcm,bcm5395" "brcm,bcm5395"
"brcm,bcm5389"
"brcm,bcm5397" "brcm,bcm5397"
"brcm,bcm5398" "brcm,bcm5398"

View File

@ -1151,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
} }
static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd, static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
int offset, int swap) int swap)
{ {
unsigned char buf[ZEPROM_SIZE]; unsigned char buf[ZEPROM_SIZE];
struct zatm_dev *zatm_dev; struct zatm_dev *zatm_dev;

View File

@ -1711,6 +1711,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
.cpu_port = B53_CPU_PORT_25, .cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE, .duplex_reg = B53_DUPLEX_STAT_FE,
}, },
{
.chip_id = BCM5389_DEVICE_ID,
.dev_name = "BCM5389",
.vlans = 4096,
.enabled_ports = 0x1f,
.arl_entries = 4,
.cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{ {
.chip_id = BCM5395_DEVICE_ID, .chip_id = BCM5395_DEVICE_ID,
.dev_name = "BCM5395", .dev_name = "BCM5395",
@ -2034,6 +2046,7 @@ int b53_switch_detect(struct b53_device *dev)
else else
dev->chip_id = BCM5365_DEVICE_ID; dev->chip_id = BCM5365_DEVICE_ID;
break; break;
case BCM5389_DEVICE_ID:
case BCM5395_DEVICE_ID: case BCM5395_DEVICE_ID:
case BCM5397_DEVICE_ID: case BCM5397_DEVICE_ID:
case BCM5398_DEVICE_ID: case BCM5398_DEVICE_ID:

View File

@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = {
#define B53_BRCM_OUI_1 0x0143bc00 #define B53_BRCM_OUI_1 0x0143bc00
#define B53_BRCM_OUI_2 0x03625c00 #define B53_BRCM_OUI_2 0x03625c00
#define B53_BRCM_OUI_3 0x00406000 #define B53_BRCM_OUI_3 0x00406000
#define B53_BRCM_OUI_4 0x01410c00
static int b53_mdio_probe(struct mdio_device *mdiodev) static int b53_mdio_probe(struct mdio_device *mdiodev)
{ {
@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
*/ */
if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 && if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_2 && (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_3) { (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id); dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
return -ENODEV; return -ENODEV;
} }
@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm53125" }, { .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" }, { .compatible = "brcm,bcm53128" },
{ .compatible = "brcm,bcm5365" }, { .compatible = "brcm,bcm5365" },
{ .compatible = "brcm,bcm5389" },
{ .compatible = "brcm,bcm5395" }, { .compatible = "brcm,bcm5395" },
{ .compatible = "brcm,bcm5397" }, { .compatible = "brcm,bcm5397" },
{ .compatible = "brcm,bcm5398" }, { .compatible = "brcm,bcm5398" },

View File

@ -48,6 +48,7 @@ struct b53_io_ops {
enum { enum {
BCM5325_DEVICE_ID = 0x25, BCM5325_DEVICE_ID = 0x25,
BCM5365_DEVICE_ID = 0x65, BCM5365_DEVICE_ID = 0x65,
BCM5389_DEVICE_ID = 0x89,
BCM5395_DEVICE_ID = 0x95, BCM5395_DEVICE_ID = 0x95,
BCM5397_DEVICE_ID = 0x97, BCM5397_DEVICE_ID = 0x97,
BCM5398_DEVICE_ID = 0x98, BCM5398_DEVICE_ID = 0x98,

View File

@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter)
if ((val & POST_STAGE_FAT_LOG_START) if ((val & POST_STAGE_FAT_LOG_START)
!= POST_STAGE_FAT_LOG_START && != POST_STAGE_FAT_LOG_START &&
(val & POST_STAGE_ARMFW_UE) (val & POST_STAGE_ARMFW_UE)
!= POST_STAGE_ARMFW_UE) != POST_STAGE_ARMFW_UE &&
(val & POST_STAGE_RECOVERABLE_ERR)
!= POST_STAGE_RECOVERABLE_ERR)
return; return;
} }

View File

@ -9054,7 +9054,6 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
{ {
const struct tc_action *a; const struct tc_action *a;
LIST_HEAD(actions); LIST_HEAD(actions);
int err;
if (!tcf_exts_has_actions(exts)) if (!tcf_exts_has_actions(exts))
return -EINVAL; return -EINVAL;
@ -9075,11 +9074,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
if (!dev) if (!dev)
return -EINVAL; return -EINVAL;
err = handle_redirect_action(adapter, dev->ifindex, queue, return handle_redirect_action(adapter, dev->ifindex,
action); queue, action);
if (err == 0)
return err;
} }
return -EINVAL;
} }
return -EINVAL; return -EINVAL;

View File

@ -4433,6 +4433,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL; return -EINVAL;
} }
if (is_vlan_dev(upper_dev) &&
vlan_dev_vlan_id(upper_dev) == 1) {
NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
return -EINVAL;
}
break; break;
case NETDEV_CHANGEUPPER: case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev; upper_dev = info->upper_dev;

View File

@ -84,7 +84,7 @@ static int sonic_open(struct net_device *dev)
for (i = 0; i < SONIC_NUM_RRS; i++) { for (i = 0; i < SONIC_NUM_RRS; i++) {
dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE); SONIC_RBSIZE, DMA_FROM_DEVICE);
if (!laddr) { if (dma_mapping_error(lp->device, laddr)) {
while(i > 0) { /* free any that were mapped successfully */ while(i > 0) { /* free any that were mapped successfully */
i--; i--;
dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);

View File

@ -1674,8 +1674,8 @@ static int netsec_probe(struct platform_device *pdev)
if (ret) if (ret)
goto unreg_napi; goto unreg_napi;
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n"); dev_warn(&pdev->dev, "Failed to set DMA mask\n");
ret = register_netdev(ndev); ret = register_netdev(ndev);
if (ret) { if (ret) {

View File

@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (IS_ERR(priv->txchan)) { if (IS_ERR(priv->txchan)) {
dev_err(&pdev->dev, "error initializing tx dma channel\n"); dev_err(&pdev->dev, "error initializing tx dma channel\n");
rc = PTR_ERR(priv->txchan); rc = PTR_ERR(priv->txchan);
goto no_cpdma_chan; goto err_free_dma;
} }
priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (IS_ERR(priv->rxchan)) { if (IS_ERR(priv->rxchan)) {
dev_err(&pdev->dev, "error initializing rx dma channel\n"); dev_err(&pdev->dev, "error initializing rx dma channel\n");
rc = PTR_ERR(priv->rxchan); rc = PTR_ERR(priv->rxchan);
goto no_cpdma_chan; goto err_free_txchan;
} }
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) { if (!res) {
dev_err(&pdev->dev, "error getting irq res\n"); dev_err(&pdev->dev, "error getting irq res\n");
rc = -ENOENT; rc = -ENOENT;
goto no_cpdma_chan; goto err_free_rxchan;
} }
ndev->irq = res->start; ndev->irq = res->start;
@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev); pm_runtime_put_noidle(&pdev->dev);
dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
__func__, rc); __func__, rc);
goto no_cpdma_chan; goto err_napi_del;
} }
/* register the network device */ /* register the network device */
@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "error in register_netdev\n"); dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV; rc = -ENODEV;
pm_runtime_put(&pdev->dev); pm_runtime_put(&pdev->dev);
goto no_cpdma_chan; goto err_napi_del;
} }
@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev)
return 0; return 0;
no_cpdma_chan: err_napi_del:
if (priv->txchan) netif_napi_del(&priv->napi);
cpdma_chan_destroy(priv->txchan); err_free_rxchan:
if (priv->rxchan) cpdma_chan_destroy(priv->rxchan);
cpdma_chan_destroy(priv->rxchan); err_free_txchan:
cpdma_chan_destroy(priv->txchan);
err_free_dma:
cpdma_ctlr_destroy(priv->dma); cpdma_ctlr_destroy(priv->dma);
no_pdata: no_pdata:
if (of_phy_is_fixed_link(np)) if (of_phy_is_fixed_link(np))

View File

@ -1650,7 +1650,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
else else
*skb_xdp = 0; *skb_xdp = 0;
preempt_disable(); local_bh_disable();
rcu_read_lock(); rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog); xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog && !*skb_xdp) { if (xdp_prog && !*skb_xdp) {
@ -1675,7 +1675,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (err) if (err)
goto err_redirect; goto err_redirect;
rcu_read_unlock(); rcu_read_unlock();
preempt_enable(); local_bh_enable();
return NULL; return NULL;
case XDP_TX: case XDP_TX:
get_page(alloc_frag->page); get_page(alloc_frag->page);
@ -1684,7 +1684,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
goto err_redirect; goto err_redirect;
tun_xdp_flush(tun->dev); tun_xdp_flush(tun->dev);
rcu_read_unlock(); rcu_read_unlock();
preempt_enable(); local_bh_enable();
return NULL; return NULL;
case XDP_PASS: case XDP_PASS:
delta = orig_data - xdp.data; delta = orig_data - xdp.data;
@ -1703,7 +1703,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
skb = build_skb(buf, buflen); skb = build_skb(buf, buflen);
if (!skb) { if (!skb) {
rcu_read_unlock(); rcu_read_unlock();
preempt_enable(); local_bh_enable();
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
@ -1713,7 +1713,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
alloc_frag->offset += buflen; alloc_frag->offset += buflen;
rcu_read_unlock(); rcu_read_unlock();
preempt_enable(); local_bh_enable();
return skb; return skb;
@ -1721,7 +1721,7 @@ err_redirect:
put_page(alloc_frag->page); put_page(alloc_frag->page);
err_xdp: err_xdp:
rcu_read_unlock(); rcu_read_unlock();
preempt_enable(); local_bh_enable();
this_cpu_inc(tun->pcpu_stats->rx_dropped); this_cpu_inc(tun->pcpu_stats->rx_dropped);
return NULL; return NULL;
} }
@ -1917,16 +1917,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
int ret; int ret;
local_bh_disable();
rcu_read_lock(); rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog); xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) { if (xdp_prog) {
ret = do_xdp_generic(xdp_prog, skb); ret = do_xdp_generic(xdp_prog, skb);
if (ret != XDP_PASS) { if (ret != XDP_PASS) {
rcu_read_unlock(); rcu_read_unlock();
local_bh_enable();
return total_len; return total_len;
} }
} }
rcu_read_unlock(); rcu_read_unlock();
local_bh_enable();
} }
rcu_read_lock(); rcu_read_lock();

View File

@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
*/ */
static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = { static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
.description = "CDC MBIM", .description = "CDC MBIM",
.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
.bind = cdc_mbim_bind, .bind = cdc_mbim_bind,
.unbind = cdc_mbim_unbind, .unbind = cdc_mbim_unbind,
.manage_power = cdc_mbim_manage_power, .manage_power = cdc_mbim_manage_power,

View File

@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */

View File

@ -1590,14 +1590,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans) struct iwl_trans *trans)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int max_irqs, num_irqs, i, ret, nr_online_cpus; int max_irqs, num_irqs, i, ret;
u16 pci_cmd; u16 pci_cmd;
if (!trans->cfg->mq_rx_supported) if (!trans->cfg->mq_rx_supported)
goto enable_msi; goto enable_msi;
nr_online_cpus = num_online_cpus(); max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
for (i = 0; i < max_irqs; i++) for (i = 0; i < max_irqs; i++)
trans_pcie->msix_entries[i].entry = i; trans_pcie->msix_entries[i].entry = i;
@ -1623,16 +1622,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
* Two interrupts less: non rx causes shared with FBQ and RSS. * Two interrupts less: non rx causes shared with FBQ and RSS.
* More than two interrupts: we will use fewer RSS queues. * More than two interrupts: we will use fewer RSS queues.
*/ */
if (num_irqs <= nr_online_cpus) { if (num_irqs <= max_irqs - 2) {
trans_pcie->trans->num_rx_queues = num_irqs + 1; trans_pcie->trans->num_rx_queues = num_irqs + 1;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
IWL_SHARED_IRQ_FIRST_RSS; IWL_SHARED_IRQ_FIRST_RSS;
} else if (num_irqs == nr_online_cpus + 1) { } else if (num_irqs == max_irqs - 1) {
trans_pcie->trans->num_rx_queues = num_irqs; trans_pcie->trans->num_rx_queues = num_irqs;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
} else { } else {
trans_pcie->trans->num_rx_queues = num_irqs - 1; trans_pcie->trans->num_rx_queues = num_irqs - 1;
} }
WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
trans_pcie->alloc_vecs = num_irqs; trans_pcie->alloc_vecs = num_irqs;
trans_pcie->msix_enabled = true; trans_pcie->msix_enabled = true;

View File

@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
/* /*
* Determine IFS values * Determine IFS values
* - Use TXOP_BACKOFF for probe and management frames except beacons * - Use TXOP_BACKOFF for management frames except beacons
* - Use TXOP_SIFS for fragment bursts * - Use TXOP_SIFS for fragment bursts
* - Use TXOP_HTTXOP for everything else * - Use TXOP_HTTXOP for everything else
* *
* Note: rt2800 devices won't use CTS protection (if used) * Note: rt2800 devices won't use CTS protection (if used)
* for frames not transmitted with TXOP_HTTXOP * for frames not transmitted with TXOP_HTTXOP
*/ */
if ((ieee80211_is_mgmt(hdr->frame_control) && if (ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_is_beacon(hdr->frame_control)) || !ieee80211_is_beacon(hdr->frame_control))
(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
txdesc->u.ht.txop = TXOP_BACKOFF; txdesc->u.ht.txop = TXOP_BACKOFF;
else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
txdesc->u.ht.txop = TXOP_SIFS; txdesc->u.ht.txop = TXOP_SIFS;

View File

@ -105,7 +105,9 @@ struct vhost_net_virtqueue {
/* vhost zerocopy support fields below: */ /* vhost zerocopy support fields below: */
/* last used idx for outstanding DMA zerocopy buffers */ /* last used idx for outstanding DMA zerocopy buffers */
int upend_idx; int upend_idx;
/* first used idx for DMA done zerocopy buffers */ /* For TX, first used idx for DMA done zerocopy buffers
* For RX, number of batched heads
*/
int done_idx; int done_idx;
/* an array of userspace buffers info */ /* an array of userspace buffers info */
struct ubuf_info *ubuf_info; struct ubuf_info *ubuf_info;
@ -626,6 +628,18 @@ static int sk_has_rx_data(struct sock *sk)
return skb_queue_empty(&sk->sk_receive_queue); return skb_queue_empty(&sk->sk_receive_queue);
} }
static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
{
struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_dev *dev = vq->dev;
if (!nvq->done_idx)
return;
vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
nvq->done_idx = 0;
}
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{ {
struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
@ -635,6 +649,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
int len = peek_head_len(rvq, sk); int len = peek_head_len(rvq, sk);
if (!len && vq->busyloop_timeout) { if (!len && vq->busyloop_timeout) {
/* Flush batched heads first */
vhost_rx_signal_used(rvq);
/* Both tx vq and rx socket were polled here */ /* Both tx vq and rx socket were polled here */
mutex_lock_nested(&vq->mutex, 1); mutex_lock_nested(&vq->mutex, 1);
vhost_disable_notify(&net->dev, vq); vhost_disable_notify(&net->dev, vq);
@ -762,7 +778,7 @@ static void handle_rx(struct vhost_net *net)
}; };
size_t total_len = 0; size_t total_len = 0;
int err, mergeable; int err, mergeable;
s16 headcount, nheads = 0; s16 headcount;
size_t vhost_hlen, sock_hlen; size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len; size_t vhost_len, sock_len;
struct socket *sock; struct socket *sock;
@ -790,8 +806,8 @@ static void handle_rx(struct vhost_net *net)
while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen; sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen; vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len, headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
&in, vq_log, &log, vhost_len, &in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1); likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */ /* On error, stop handling until the next kick. */
if (unlikely(headcount < 0)) if (unlikely(headcount < 0))
@ -862,12 +878,9 @@ static void handle_rx(struct vhost_net *net)
vhost_discard_vq_desc(vq, headcount); vhost_discard_vq_desc(vq, headcount);
goto out; goto out;
} }
nheads += headcount; nvq->done_idx += headcount;
if (nheads > VHOST_RX_BATCH) { if (nvq->done_idx > VHOST_RX_BATCH)
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, vhost_rx_signal_used(nvq);
nheads);
nheads = 0;
}
if (unlikely(vq_log)) if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len); vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len; total_len += vhost_len;
@ -878,9 +891,7 @@ static void handle_rx(struct vhost_net *net)
} }
vhost_net_enable_vq(net, vq); vhost_net_enable_vq(net, vq);
out: out:
if (nheads) vhost_rx_signal_used(nvq);
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
nheads);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }

View File

@ -1017,6 +1017,7 @@ struct bpf_prog_info {
__aligned_u64 map_ids; __aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN]; char name[BPF_OBJ_NAME_LEN];
__u32 ifindex; __u32 ifindex;
__u32 :32;
__u64 netns_dev; __u64 netns_dev;
__u64 netns_ino; __u64 netns_ino;
} __attribute__((aligned(8))); } __attribute__((aligned(8)));
@ -1030,6 +1031,7 @@ struct bpf_map_info {
__u32 map_flags; __u32 map_flags;
char name[BPF_OBJ_NAME_LEN]; char name[BPF_OBJ_NAME_LEN];
__u32 ifindex; __u32 ifindex;
__u32 :32;
__u64 netns_dev; __u64 netns_dev;
__u64 netns_ino; __u64 netns_ino;
} __attribute__((aligned(8))); } __attribute__((aligned(8)));

View File

@ -1954,7 +1954,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
int off, pad = 0; int off, pad = 0;
unsigned int size_kern, match_size = mwt->match_size; unsigned int size_kern, match_size = mwt->match_size;
strlcpy(name, mwt->u.name, sizeof(name)); if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
return -EINVAL;
if (state->buf_kern_start) if (state->buf_kern_start)
dst = state->buf_kern_start + state->buf_kern_offset; dst = state->buf_kern_start + state->buf_kern_offset;

View File

@ -1214,9 +1214,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
cpumask_var_t mask; cpumask_var_t mask;
unsigned long index; unsigned long index;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
index = get_netdev_queue_index(queue); index = get_netdev_queue_index(queue);
if (dev->num_tc) { if (dev->num_tc) {
@ -1226,6 +1223,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
return -EINVAL; return -EINVAL;
} }
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
rcu_read_lock(); rcu_read_lock();
dev_maps = rcu_dereference(dev->xps_maps); dev_maps = rcu_dereference(dev->xps_maps);
if (dev_maps) { if (dev_maps) {

View File

@ -328,7 +328,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
if (tdev) { if (tdev) {
hlen = tdev->hard_header_len + tdev->needed_headroom; hlen = tdev->hard_header_len + tdev->needed_headroom;
mtu = tdev->mtu; mtu = min(tdev->mtu, IP_MAX_MTU);
} }
dev->needed_headroom = t_hlen + hlen; dev->needed_headroom = t_hlen + hlen;
@ -362,7 +362,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
nt = netdev_priv(dev); nt = netdev_priv(dev);
t_hlen = nt->hlen + sizeof(struct iphdr); t_hlen = nt->hlen + sizeof(struct iphdr);
dev->min_mtu = ETH_MIN_MTU; dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
ip_tunnel_add(itn, nt); ip_tunnel_add(itn, nt);
return nt; return nt;
@ -930,7 +930,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{ {
struct ip_tunnel *tunnel = netdev_priv(dev); struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr); int t_hlen = tunnel->hlen + sizeof(struct iphdr);
int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
if (new_mtu < ETH_MIN_MTU) if (new_mtu < ETH_MIN_MTU)
return -EINVAL; return -EINVAL;
@ -1107,7 +1107,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
mtu = ip_tunnel_bind_dev(dev); mtu = ip_tunnel_bind_dev(dev);
if (tb[IFLA_MTU]) { if (tb[IFLA_MTU]) {
unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
(unsigned int)(max - sizeof(struct iphdr))); (unsigned int)(max - sizeof(struct iphdr)));

View File

@ -1692,8 +1692,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < ETH_MIN_MTU) if (new_mtu < ETH_MIN_MTU)
return -EINVAL; return -EINVAL;
} }
if (new_mtu > 0xFFF8 - dev->hard_header_len) if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
return -EINVAL; if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
return -EINVAL;
} else {
if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
return -EINVAL;
}
dev->mtu = new_mtu; dev->mtu = new_mtu;
return 0; return 0;
} }
@ -1841,7 +1846,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8; dev->mtu -= 8;
dev->min_mtu = ETH_MIN_MTU; dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = 0xFFF8 - dev->hard_header_len; dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
return 0; return 0;

View File

@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
hdrlen = (osrh->hdrlen + 1) << 3; hdrlen = (osrh->hdrlen + 1) << 3;
tot_len = hdrlen + sizeof(*hdr); tot_len = hdrlen + sizeof(*hdr);
err = skb_cow_head(skb, tot_len); err = skb_cow_head(skb, tot_len + skb->mac_len);
if (unlikely(err)) if (unlikely(err))
return err; return err;
@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
hdrlen = (osrh->hdrlen + 1) << 3; hdrlen = (osrh->hdrlen + 1) << 3;
err = skb_cow_head(skb, hdrlen); err = skb_cow_head(skb, hdrlen + skb->mac_len);
if (unlikely(err)) if (unlikely(err))
return err; return err;

View File

@ -1371,7 +1371,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->hard_header_len = LL_MAX_HEADER + t_hlen; dev->hard_header_len = LL_MAX_HEADER + t_hlen;
dev->mtu = ETH_DATA_LEN - t_hlen; dev->mtu = ETH_DATA_LEN - t_hlen;
dev->min_mtu = IPV6_MIN_MTU; dev->min_mtu = IPV6_MIN_MTU;
dev->max_mtu = 0xFFF8 - t_hlen; dev->max_mtu = IP6_MAX_MTU - t_hlen;
dev->flags = IFF_NOARP; dev->flags = IFF_NOARP;
netif_keep_dst(dev); netif_keep_dst(dev);
dev->addr_len = 4; dev->addr_len = 4;
@ -1583,7 +1583,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_MTU]) { if (tb[IFLA_MTU]) {
u32 mtu = nla_get_u32(tb[IFLA_MTU]); u32 mtu = nla_get_u32(tb[IFLA_MTU]);
if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) if (mtu >= IPV6_MIN_MTU &&
mtu <= IP6_MAX_MTU - dev->hard_header_len)
dev->mtu = mtu; dev->mtu = mtu;
} }

View File

@ -126,7 +126,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
struct flowi6 *fl6 = &fl->u.ip6; struct flowi6 *fl6 = &fl->u.ip6;
int onlyproto = 0; int onlyproto = 0;
const struct ipv6hdr *hdr = ipv6_hdr(skb); const struct ipv6hdr *hdr = ipv6_hdr(skb);
u16 offset = sizeof(*hdr); u32 offset = sizeof(*hdr);
struct ipv6_opt_hdr *exthdr; struct ipv6_opt_hdr *exthdr;
const unsigned char *nh = skb_network_header(skb); const unsigned char *nh = skb_network_header(skb);
u16 nhoff = IP6CB(skb)->nhoff; u16 nhoff = IP6CB(skb)->nhoff;

View File

@ -1671,7 +1671,7 @@ static struct file *kcm_clone(struct socket *osock)
__module_get(newsock->ops->owner); __module_get(newsock->ops->owner);
newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
&kcm_proto, true); &kcm_proto, false);
if (!newsk) { if (!newsk) {
sock_release(newsock); sock_release(newsock);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);

View File

@ -215,7 +215,7 @@ err:
static int ncsi_pkg_info_all_nl(struct sk_buff *skb, static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
struct netlink_callback *cb) struct netlink_callback *cb)
{ {
struct nlattr *attrs[NCSI_ATTR_MAX]; struct nlattr *attrs[NCSI_ATTR_MAX + 1];
struct ncsi_package *np, *package; struct ncsi_package *np, *package;
struct ncsi_dev_priv *ndp; struct ncsi_dev_priv *ndp;
unsigned int package_id; unsigned int package_id;

View File

@ -2381,8 +2381,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
struct ipvs_sync_daemon_cfg cfg; struct ipvs_sync_daemon_cfg cfg;
memset(&cfg, 0, sizeof(cfg)); memset(&cfg, 0, sizeof(cfg));
strlcpy(cfg.mcast_ifn, dm->mcast_ifn, ret = -EINVAL;
sizeof(cfg.mcast_ifn)); if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
sizeof(cfg.mcast_ifn)) <= 0)
goto out_dec;
cfg.syncid = dm->syncid; cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state); ret = start_sync_thread(ipvs, &cfg, dm->state);
} else { } else {
@ -2420,12 +2422,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
} }
} }
if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
IP_VS_SCHEDNAME_MAXLEN) {
ret = -EINVAL;
goto out_unlock;
}
/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */ /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP && if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
usvc.protocol != IPPROTO_SCTP) { usvc.protocol != IPPROTO_SCTP) {
pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
usvc.protocol, &usvc.addr.ip, usvc.protocol, &usvc.addr.ip,
ntohs(usvc.port), usvc.sched_name); ntohs(usvc.port));
ret = -EFAULT; ret = -EFAULT;
goto out_unlock; goto out_unlock;
} }
@ -2847,7 +2856,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
[IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
.len = IP_VS_IFNAME_MAXLEN }, .len = IP_VS_IFNAME_MAXLEN - 1 },
[IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 }, [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
[IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 }, [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
@ -2865,7 +2874,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
[IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_SCHEDNAME_MAXLEN }, .len = IP_VS_SCHEDNAME_MAXLEN - 1 },
[IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING, [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_PENAME_MAXLEN }, .len = IP_VS_PENAME_MAXLEN },
[IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,

View File

@ -1298,8 +1298,10 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
rcu_assign_pointer(chain->stats, newstats); rcu_assign_pointer(chain->stats, newstats);
synchronize_rcu(); synchronize_rcu();
free_percpu(oldstats); free_percpu(oldstats);
} else } else {
rcu_assign_pointer(chain->stats, newstats); rcu_assign_pointer(chain->stats, newstats);
static_branch_inc(&nft_counters_enabled);
}
} }
static void nf_tables_chain_destroy(struct nft_ctx *ctx) static void nf_tables_chain_destroy(struct nft_ctx *ctx)
@ -4706,7 +4708,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
if (idx > s_idx) if (idx > s_idx)
memset(&cb->args[1], 0, memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0])); sizeof(cb->args) - sizeof(cb->args[0]));
if (filter && filter->table[0] && if (filter && filter->table &&
strcmp(filter->table, table->name)) strcmp(filter->table, table->name))
goto cont; goto cont;
if (filter && if (filter &&
@ -5380,7 +5382,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
if (idx > s_idx) if (idx > s_idx)
memset(&cb->args[1], 0, memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0])); sizeof(cb->args) - sizeof(cb->args[0]));
if (filter && filter->table[0] && if (filter && filter->table &&
strcmp(filter->table, table->name)) strcmp(filter->table, table->name))
goto cont; goto cont;

View File

@ -126,15 +126,15 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
if (!base_chain->stats) if (!base_chain->stats)
return; return;
local_bh_disable();
stats = this_cpu_ptr(rcu_dereference(base_chain->stats)); stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
if (stats) { if (stats) {
local_bh_disable();
u64_stats_update_begin(&stats->syncp); u64_stats_update_begin(&stats->syncp);
stats->pkts++; stats->pkts++;
stats->bytes += pkt->skb->len; stats->bytes += pkt->skb->len;
u64_stats_update_end(&stats->syncp); u64_stats_update_end(&stats->syncp);
local_bh_enable();
} }
local_bh_enable();
} }
struct nft_jumpstack { struct nft_jumpstack {

View File

@ -115,7 +115,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
nfacct->flags = flags; nfacct->flags = flags;
} }
nla_strlcpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); nla_strlcpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
if (tb[NFACCT_BYTES]) { if (tb[NFACCT_BYTES]) {
atomic64_set(&nfacct->bytes, atomic64_set(&nfacct->bytes,

View File

@ -150,7 +150,7 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
return -EINVAL; return -EINVAL;
nla_strlcpy(expect_policy->name, nla_strlcpy(expect_policy->name,
nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN); tb[NFCTH_POLICY_NAME], NF_CT_HELPER_NAME_LEN);
expect_policy->max_expected = expect_policy->max_expected =
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
@ -235,7 +235,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
goto err1; goto err1;
nla_strlcpy(helper->name, nla_strlcpy(helper->name,
nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
if (size > FIELD_SIZEOF(struct nf_conn_help, data)) { if (size > FIELD_SIZEOF(struct nf_conn_help, data)) {
ret = -ENOMEM; ret = -ENOMEM;

View File

@ -880,22 +880,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset) struct nft_object *obj, bool reset)
{ {
const struct nft_ct_helper_obj *priv = nft_obj_data(obj); const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
const struct nf_conntrack_helper *helper = priv->helper4; const struct nf_conntrack_helper *helper;
u16 family; u16 family;
if (priv->helper4 && priv->helper6) {
family = NFPROTO_INET;
helper = priv->helper4;
} else if (priv->helper6) {
family = NFPROTO_IPV6;
helper = priv->helper6;
} else {
family = NFPROTO_IPV4;
helper = priv->helper4;
}
if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
return -1; return -1;
if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
return -1; return -1;
if (priv->helper4 && priv->helper6)
family = NFPROTO_INET;
else if (priv->helper6)
family = NFPROTO_IPV6;
else
family = NFPROTO_IPV4;
if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
return -1; return -1;

View File

@ -51,10 +51,13 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
return !limit->invert; return !limit->invert;
} }
/* Use same default as in iptables. */
#define NFT_LIMIT_PKT_BURST_DEFAULT 5
static int nft_limit_init(struct nft_limit *limit, static int nft_limit_init(struct nft_limit *limit,
const struct nlattr * const tb[]) const struct nlattr * const tb[], bool pkts)
{ {
u64 unit; u64 unit, tokens;
if (tb[NFTA_LIMIT_RATE] == NULL || if (tb[NFTA_LIMIT_RATE] == NULL ||
tb[NFTA_LIMIT_UNIT] == NULL) tb[NFTA_LIMIT_UNIT] == NULL)
@ -68,18 +71,25 @@ static int nft_limit_init(struct nft_limit *limit,
if (tb[NFTA_LIMIT_BURST]) if (tb[NFTA_LIMIT_BURST])
limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
else
limit->burst = 0; if (pkts && limit->burst == 0)
limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
if (limit->rate + limit->burst < limit->rate) if (limit->rate + limit->burst < limit->rate)
return -EOVERFLOW; return -EOVERFLOW;
/* The token bucket size limits the number of tokens can be if (pkts) {
* accumulated. tokens_max specifies the bucket size. tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
* tokens_max = unit * (rate + burst) / rate. } else {
*/ /* The token bucket size limits the number of tokens can be
limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), * accumulated. tokens_max specifies the bucket size.
limit->rate); * tokens_max = unit * (rate + burst) / rate.
*/
tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
limit->rate);
}
limit->tokens = tokens;
limit->tokens_max = limit->tokens; limit->tokens_max = limit->tokens;
if (tb[NFTA_LIMIT_FLAGS]) { if (tb[NFTA_LIMIT_FLAGS]) {
@ -144,7 +154,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
struct nft_limit_pkts *priv = nft_expr_priv(expr); struct nft_limit_pkts *priv = nft_expr_priv(expr);
int err; int err;
err = nft_limit_init(&priv->limit, tb); err = nft_limit_init(&priv->limit, tb, true);
if (err < 0) if (err < 0)
return err; return err;
@ -185,7 +195,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
{ {
struct nft_limit *priv = nft_expr_priv(expr); struct nft_limit *priv = nft_expr_priv(expr);
return nft_limit_init(priv, tb); return nft_limit_init(priv, tb, false);
} }
static int nft_limit_bytes_dump(struct sk_buff *skb, static int nft_limit_bytes_dump(struct sk_buff *skb,
@ -246,7 +256,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
struct nft_limit_pkts *priv = nft_obj_data(obj); struct nft_limit_pkts *priv = nft_obj_data(obj);
int err; int err;
err = nft_limit_init(&priv->limit, tb); err = nft_limit_init(&priv->limit, tb, true);
if (err < 0) if (err < 0)
return err; return err;
@ -289,7 +299,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
{ {
struct nft_limit *priv = nft_obj_data(obj); struct nft_limit *priv = nft_obj_data(obj);
return nft_limit_init(priv, tb); return nft_limit_init(priv, tb, false);
} }
static int nft_limit_obj_bytes_dump(struct sk_buff *skb, static int nft_limit_obj_bytes_dump(struct sk_buff *skb,

View File

@ -234,7 +234,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
struct sk_buff *skb = pkt->skb; struct sk_buff *skb = pkt->skb;
u32 *sreg = &regs->data[meta->sreg]; u32 *sreg = &regs->data[meta->sreg];
u32 value = *sreg; u32 value = *sreg;
u8 pkt_type; u8 value8;
switch (meta->key) { switch (meta->key) {
case NFT_META_MARK: case NFT_META_MARK:
@ -244,15 +244,17 @@ void nft_meta_set_eval(const struct nft_expr *expr,
skb->priority = value; skb->priority = value;
break; break;
case NFT_META_PKTTYPE: case NFT_META_PKTTYPE:
pkt_type = nft_reg_load8(sreg); value8 = nft_reg_load8(sreg);
if (skb->pkt_type != pkt_type && if (skb->pkt_type != value8 &&
skb_pkt_type_ok(pkt_type) && skb_pkt_type_ok(value8) &&
skb_pkt_type_ok(skb->pkt_type)) skb_pkt_type_ok(skb->pkt_type))
skb->pkt_type = pkt_type; skb->pkt_type = value8;
break; break;
case NFT_META_NFTRACE: case NFT_META_NFTRACE:
skb->nf_trace = !!value; value8 = nft_reg_load8(sreg);
skb->nf_trace = !!value8;
break; break;
default: default:
WARN_ON(1); WARN_ON(1);

View File

@ -977,7 +977,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
return 0; return 0;
errout_idr: errout_idr:
if (fnew->handle) if (!fold)
idr_remove(&head->handle_idr, fnew->handle); idr_remove(&head->handle_idr, fnew->handle);
errout: errout:
tcf_exts_destroy(&fnew->exts); tcf_exts_destroy(&fnew->exts);

View File

@ -1658,7 +1658,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
} }
out:
return &xdst0->u.dst; return &xdst0->u.dst;
put_states: put_states:
@ -1667,8 +1666,8 @@ put_states:
free_dst: free_dst:
if (xdst0) if (xdst0)
dst_release_immediate(&xdst0->u.dst); dst_release_immediate(&xdst0->u.dst);
xdst0 = ERR_PTR(err);
goto out; return ERR_PTR(err);
} }
static int xfrm_expand_policies(const struct flowi *fl, u16 family, static int xfrm_expand_policies(const struct flowi *fl, u16 family,

View File

@ -1017,6 +1017,7 @@ struct bpf_prog_info {
__aligned_u64 map_ids; __aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN]; char name[BPF_OBJ_NAME_LEN];
__u32 ifindex; __u32 ifindex;
__u32 :32;
__u64 netns_dev; __u64 netns_dev;
__u64 netns_ino; __u64 netns_ino;
} __attribute__((aligned(8))); } __attribute__((aligned(8)));
@ -1030,6 +1031,7 @@ struct bpf_map_info {
__u32 map_flags; __u32 map_flags;
char name[BPF_OBJ_NAME_LEN]; char name[BPF_OBJ_NAME_LEN];
__u32 ifindex; __u32 ifindex;
__u32 :32;
__u64 netns_dev; __u64 netns_dev;
__u64 netns_ino; __u64 netns_ino;
} __attribute__((aligned(8))); } __attribute__((aligned(8)));