Networking fixes for 5.17-rc4, including fixes from netfilter and can.

Current release - new code bugs:
 
  - sparx5: fix get_stat64 out-of-bound access and crash
 
  - smc: fix netdev ref tracker misuse
 
 Previous releases - regressions:
 
  - eth: ixgbevf: require large buffers for build_skb on 82599VF,
    avoid overflows
 
  - eth: ocelot: fix all IP traffic getting trapped to CPU with PTP
    over IP
 
  - bonding: fix rare link activation misses in 802.3ad mode
 
 Previous releases - always broken:
 
  - tcp: fix tcp sock mem accounting in zero-copy corner cases
 
  - remove the cached dst when uncloning an skb dst and its metadata,
    since we only have one ref, it'd lead to a use-after-free
 
  - netfilter:
    - conntrack: don't refresh sctp entries in closed state
    - conntrack: re-init state for retransmitted syn-ack, avoid
      connection establishment getting stuck with strange stacks
    - ctnetlink: disable helper autoassign, avoid it getting lost
    - nft_payload: don't allow transport header access for fragments
 
  - dsa: fix use of devres for mdio throughout drivers
 
  - eth: amd-xgbe: disable interrupts during pci removal
 
  - eth: dpaa2-eth: unregister netdev before disconnecting the PHY
 
  - eth: ice: fix IPIP and SIT TSO offload
 
 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmIFcW0ACgkQMUZtbf5S
 IrsloQ/+LrzlXgYrWf60DEFT4+AfJ20YxeakB+SpT0cZdvylU6NHF/U4rSHdUJRn
 xZiHfhfKzNWV5miJ2wzOuvufUvoz173dtrdgJJmp6G43qfAFyqiqtxrbVuuTQk0G
 C2i5M66zJ2svSj3EO5dYKV8zeydd14eVeoaJqfN5rjMARTmvpT/ssdzw0LTf0aXp
 87CF/WyeH8NyfQxQwPmbGRxDpnV2RqDJYSNdA4kOtDrnQmoKet32rhE6liHeP2jD
 OFQo70QXEMVyIZEh4wT17lMqA4M1zIEQtgrB0NsmVNU3jQnDI4UQ0aA2gHzwK31S
 glORZWGqTGGrhVy9uQBGKUK29RW8vb0D2ojZpi3zd0htWpIqpfFf6wuMAdUwEZag
 mTlZ1Yi6YgzAQEdSALoXLKps1GiHQzQNYPK6fNxoSOxnmoQTMQL9KxU/HB9d8W+L
 hjuYOGDw9vtGyUpNF7lktCQR/sWjFCmevLk97d2mhofFqfHBJYzFHGyd0GgNVzgl
 o3CMWnyiqTVapLRMTFPnEADEXGWq4DYAjRPkfjDCHeITB9Neg9S85Geth+xwfdi1
 cwSu64xFb7k7hiAbnymB3xg5sBQjXHrIQo03GHVymQE5p6XxIhn+5UD9CYHGJ2TF
 7PGyHeJJOZyWGQ/iSZJbo+BpsVGthIgk+9tTKPeTToej0tic27w=
 =jlhJ
 -----END PGP SIGNATURE-----

Merge tag 'net-5.17-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from netfilter and can.

Current release - new code bugs:

   - sparx5: fix get_stat64 out-of-bound access and crash

   - smc: fix netdev ref tracker misuse

  Previous releases - regressions:

   - eth: ixgbevf: require large buffers for build_skb on 82599VF, avoid
     overflows

   - eth: ocelot: fix all IP traffic getting trapped to CPU with PTP
     over IP

   - bonding: fix rare link activation misses in 802.3ad mode

  Previous releases - always broken:

   - tcp: fix tcp sock mem accounting in zero-copy corner cases

   - remove the cached dst when uncloning an skb dst and its metadata,
     since we only have one ref, it'd lead to a use-after-free

   - netfilter:
      - conntrack: don't refresh sctp entries in closed state
      - conntrack: re-init state for retransmitted syn-ack, avoid
        connection establishment getting stuck with strange stacks
      - ctnetlink: disable helper autoassign, avoid it getting lost
      - nft_payload: don't allow transport header access for fragments

   - dsa: fix use of devres for mdio throughout drivers

   - eth: amd-xgbe: disable interrupts during pci removal

   - eth: dpaa2-eth: unregister netdev before disconnecting the PHY

   - eth: ice: fix IPIP and SIT TSO offload"

* tag 'net-5.17-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (53 commits)
  net: dsa: mv88e6xxx: fix use-after-free in mv88e6xxx_mdios_unregister
  net: mscc: ocelot: fix mutex lock error during ethtool stats read
  ice: Avoid RTNL lock when re-creating auxiliary device
  ice: Fix KASAN error in LAG NETDEV_UNREGISTER handler
  ice: fix IPIP and SIT TSO offload
  ice: fix an error code in ice_cfg_phy_fec()
  net: mpls: Fix GCC 12 warning
  dpaa2-eth: unregister the netdev before disconnecting from the PHY
  skbuff: cleanup double word in comment
  net: macb: Align the dma and coherent dma masks
  mptcp: netlink: process IPv6 addrs in creating listening sockets
  selftests: mptcp: add missing join check
  net: usb: qmi_wwan: Add support for Dell DW5829e
  vlan: move dev_put into vlan_dev_uninit
  vlan: introduce vlan_dev_free_egress_priority
  ax25: fix UAF bugs of net_device caused by rebinding operation
  net: dsa: fix panic when DSA master device unbinds on shutdown
  net: amd-xgbe: disable interrupts during pci removal
  tipc: rate limit warning for received illegal binding update
  net: mdio: aspeed: Add missing MODULE_DEVICE_TABLE
  ...
commit f1baf68e13 by Linus Torvalds, 2022-02-10 16:01:22 -08:00
50 changed files with 320 additions and 157 deletions


@ -13303,8 +13303,8 @@ W: http://www.iptables.org/
W: http://www.nftables.org/
Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
C: irc://irc.libera.chat/netfilter
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next.git
F: include/linux/netfilter*
F: include/linux/netfilter/
F: include/net/netfilter/


@ -1021,8 +1021,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
__enable_port(port);
*update_slave_arr = true;
}
}
break;
@ -1779,6 +1779,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
port = port->next_port_in_aggregator) {
__enable_port(port);
}
*update_slave_arr = true;
}
}


@ -621,7 +621,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
of_node_put(dn);
return -ENOMEM;
@ -681,8 +681,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->slave_mii_bus);
if (err && dn)
if (err && dn) {
mdiobus_free(priv->slave_mii_bus);
of_node_put(dn);
}
return err;
}
@ -690,6 +692,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
mdiobus_free(priv->slave_mii_bus);
of_node_put(priv->master_mii_dn);
}


@ -498,8 +498,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
struct dsa_switch *ds = priv->ds;
int err;
ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus)
return -ENOMEM;
@ -512,7 +513,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
ds->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
if (err)
mdiobus_free(ds->slave_mii_bus);
return err;
}
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
@ -2145,8 +2150,10 @@ disable_switch:
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
mdio_bus:
if (mdio_np)
if (mdio_np) {
mdiobus_unregister(priv->ds->slave_mii_bus);
mdiobus_free(priv->ds->slave_mii_bus);
}
put_mdio_node:
of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
@ -2169,6 +2176,7 @@ static int gswip_remove(struct platform_device *pdev)
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
mdiobus_free(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
}


@ -2074,7 +2074,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
if (priv->irq)
mt7530_setup_mdio_irq(priv);
ret = mdiobus_register(bus);
ret = devm_mdiobus_register(dev, bus);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
if (priv->irq)


@ -3399,7 +3399,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return err;
}
bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
bus = mdiobus_alloc_size(sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
@ -3424,14 +3424,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (!external) {
err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
if (err)
return err;
goto out;
}
err = of_mdiobus_register(bus, np);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
mv88e6xxx_g2_irq_mdio_free(chip, bus);
return err;
goto out;
}
if (external)
@ -3440,21 +3440,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
list_add(&mdio_bus->list, &chip->mdios);
return 0;
out:
mdiobus_free(bus);
return err;
}
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
struct mv88e6xxx_mdio_bus *mdio_bus, *p;
struct mii_bus *bus;
list_for_each_entry(mdio_bus, &chip->mdios, list) {
list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
bus = mdio_bus->bus;
if (!mdio_bus->external)
mv88e6xxx_g2_irq_mdio_free(chip, bus);
mdiobus_unregister(bus);
mdiobus_free(bus);
}
}


@ -1061,7 +1061,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return PTR_ERR(hw);
}
bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@ -1081,6 +1081,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
mdiobus_free(bus);
return rc;
}
@ -1132,6 +1133,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
mdiobus_free(felix->imdio);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,


@ -1029,7 +1029,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
}
/* Needed in order to initialize the bus mutex lock */
rc = of_mdiobus_register(bus, NULL);
rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
@ -1083,7 +1083,8 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
mdio_device_free(mdio_device);
lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
/* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {


@ -378,7 +378,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
if (!mnp)
return -ENODEV;
ret = of_mdiobus_register(mbus, mnp);
ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
@ -1091,7 +1091,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
}
irq_domain_remove(priv->irqdomain);
mdiobus_unregister(priv->mbus);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);


@ -425,6 +425,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdata->pcidev);
/* Disable all interrupts in the hardware */
XP_IOWRITE(pdata, XP_INT_EN, 0x0);
xgbe_free_pdata(pdata);
}


@ -4712,7 +4712,7 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif


@ -4523,12 +4523,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
unregister_netdev(net_dev);
rtnl_lock();
dpaa2_eth_disconnect_mac(priv);
rtnl_unlock();
unregister_netdev(net_dev);
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
dpaa2_eth_dl_free(priv);


@ -609,6 +609,7 @@ static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
*packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
*work_done = work_cnt;
skb_record_rx_queue(skb, rx->q_num);
if (skb_is_nonlinear(skb))
napi_gro_frags(napi);
else


@ -110,6 +110,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@ -1424,7 +1425,7 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
ibmvnic_napi_disable(adapter);
release_resources(adapter);
ibmvnic_disable_irqs(adapter);
return rc;
}
@ -1474,9 +1475,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
release_rx_pools(adapter);
release_tx_pools(adapter);
goto out;
}
}
@ -1493,6 +1491,13 @@ out:
adapter->state = VNIC_OPEN;
rc = 0;
}
if (rc) {
release_resources(adapter);
release_rx_pools(adapter);
release_tx_pools(adapter);
}
return rc;
}


@ -483,6 +483,7 @@ enum ice_pf_flags {
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
ICE_PF_FLAGS_NBITS /* must be last */
};
@ -887,7 +888,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
set_bit(ICE_FLAG_AUX_ENA, pf->flags);
ice_plug_aux_dev(pf);
set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}


@ -3342,7 +3342,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
!ice_fw_supports_report_dflt_cfg(hw)) {
struct ice_link_default_override_tlv tlv;
if (ice_get_link_default_override(&tlv, pi))
status = ice_get_link_default_override(&tlv, pi);
if (status)
goto out;
if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&


@ -204,17 +204,39 @@ ice_lag_unlink(struct ice_lag *lag,
lag->upper_netdev = NULL;
}
if (lag->peer_netdev) {
dev_put(lag->peer_netdev);
lag->peer_netdev = NULL;
}
lag->peer_netdev = NULL;
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
lag->role = ICE_LAG_NONE;
}
/**
* ice_lag_unregister - handle netdev unregister events
* @lag: LAG info struct
* @netdev: netdev reporting the event
*/
static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
{
struct ice_pf *pf = lag->pf;
/* check to see if this event is for this netdev
* check that we are in an aggregate
*/
if (netdev != lag->netdev || !lag->bonded)
return;
if (lag->upper_netdev) {
dev_put(lag->upper_netdev);
lag->upper_netdev = NULL;
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
}
/* perform some cleanup in case we come back */
lag->bonded = false;
lag->role = ICE_LAG_NONE;
}
/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
ice_lag_info_event(lag, ptr);
break;
case NETDEV_UNREGISTER:
ice_lag_unlink(lag, ptr);
ice_lag_unregister(lag, netdev);
break;
default:
break;


@ -568,6 +568,7 @@ struct ice_tx_ctx_desc {
(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
#define ICE_TXD_CTX_QW1_MSS_S 50
#define ICE_TXD_CTX_MIN_MSS 64
#define ICE_TXD_CTX_QW1_VSI_S 50
#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)


@ -2253,6 +2253,9 @@ static void ice_service_task(struct work_struct *work)
return;
}
if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
ice_plug_aux_dev(pf);
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
@ -8525,6 +8528,7 @@ ice_features_check(struct sk_buff *skb,
struct net_device __always_unused *netdev,
netdev_features_t features)
{
bool gso = skb_is_gso(skb);
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@ -8537,24 +8541,32 @@ ice_features_check(struct sk_buff *skb,
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
features &= ~NETIF_F_GSO_MASK;
len = skb_network_header(skb) - skb->data;
len = skb_network_offset(skb);
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_transport_header(skb) - skb_network_header(skb);
len = skb_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
len = skb_inner_network_header(skb) - skb_transport_header(skb);
if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
goto out_rm_features;
/* this must work for VXLAN frames AND IPIP/SIT frames, and in
* the case of IPIP frames, the transport header pointer is
* after the inner header! So check to make sure that this
* is a GRE or UDP_TUNNEL frame before doing that math.
*/
if (gso && (skb_shinfo(skb)->gso_type &
(SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
len = skb_inner_network_header(skb) -
skb_transport_header(skb);
if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
goto out_rm_features;
}
len = skb_inner_transport_header(skb) -
skb_inner_network_header(skb);
len = skb_inner_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}


@ -1984,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
if (PAGE_SIZE < 8192)
if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
set_ring_uses_large_buffer(rx_ring);
/* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
return;
set_ring_build_skb_enabled(rx_ring);
if (PAGE_SIZE < 8192) {
if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
return;
set_ring_uses_large_buffer(rx_ring);
}
}
/**


@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
depends on OF
depends on OF && HAS_IOMEM
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.


@ -1103,7 +1103,7 @@ void sparx5_get_stats64(struct net_device *ndev,
stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++stats)
for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+ idx];
stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
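
The fix above drops the "++stats" from the loop header: .ndo_get_stats64 is
called with a single caller-provided statistics structure, so advancing that
pointer on every iteration writes past the end of it. Below is a minimal
user-space sketch of the corrected accumulation pattern; the structure
layout, sizes and names are made up for illustration and are not the
driver's.

#include <stdio.h>
#include <stdint.h>

#define NUM_PRIOS 8

struct link_stats {
        uint64_t rx_dropped;
        uint64_t tx_dropped;
};

/* Accumulate per-priority drop counters into ONE caller-provided struct.
 * The bug class being fixed: "++stats" in the loop header would advance
 * the destination pointer past the single struct the caller owns.
 */
static void fill_stats(struct link_stats *stats, const uint64_t *portstats)
{
        stats->rx_dropped = portstats[0];
        for (int idx = 0; idx < 2 * NUM_PRIOS; ++idx)
                stats->rx_dropped += portstats[1 + idx];
        stats->tx_dropped = portstats[1 + 2 * NUM_PRIOS];
}

int main(void)
{
        uint64_t portstats[2 + 2 * NUM_PRIOS];
        struct link_stats stats;

        for (int i = 0; i < 2 + 2 * NUM_PRIOS; i++)
                portstats[i] = i;

        fill_stats(&stats, portstats);
        printf("rx_dropped=%llu tx_dropped=%llu\n",
               (unsigned long long)stats.rx_dropped,
               (unsigned long long)stats.tx_dropped);
        return 0;
}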


@ -1432,6 +1432,8 @@ static void
ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_EV_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
@ -1440,6 +1442,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@ -1448,6 +1452,8 @@ static void
ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_GEN_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
@ -1456,6 +1462,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@ -1737,12 +1745,11 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
}
EXPORT_SYMBOL(ocelot_get_strings);
/* Caller must hold &ocelot->stats_lock */
static void ocelot_update_stats(struct ocelot *ocelot)
{
int i, j;
mutex_lock(&ocelot->stats_lock);
for (i = 0; i < ocelot->num_phys_ports; i++) {
/* Configure the port to read the stats from */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
@ -1761,8 +1768,6 @@ static void ocelot_update_stats(struct ocelot *ocelot)
~(u64)U32_MAX) + val;
}
}
mutex_unlock(&ocelot->stats_lock);
}
static void ocelot_check_stats_work(struct work_struct *work)
@ -1771,7 +1776,9 @@ static void ocelot_check_stats_work(struct work_struct *work)
struct ocelot *ocelot = container_of(del_work, struct ocelot,
stats_work);
mutex_lock(&ocelot->stats_lock);
ocelot_update_stats(ocelot);
mutex_unlock(&ocelot->stats_lock);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
@ -1781,12 +1788,16 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i;
mutex_lock(&ocelot->stats_lock);
/* check and update now */
ocelot_update_stats(ocelot);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
*data++ = ocelot->stats[port * ocelot->num_stats + i];
mutex_unlock(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_get_ethtool_stats);
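
The refactor above pulls the stats_lock out of ocelot_update_stats() and
makes every caller take it, so ocelot_get_ethtool_stats() can refresh and
copy the counters inside a single critical section while the periodic worker
takes the same lock around its refresh. A small pthread sketch of the same
"caller must hold the lock" pattern follows; the names and counter handling
are simplified, not the driver's code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long stats[4];

/* Caller must hold stats_lock */
static void update_stats(void)
{
        for (int i = 0; i < 4; i++)
                stats[i] += i + 1;      /* stands in for reading HW counters */
}

static void get_ethtool_stats(unsigned long long *out)
{
        pthread_mutex_lock(&stats_lock);
        update_stats();                 /* refresh ... */
        for (int i = 0; i < 4; i++)     /* ... and copy under the same lock */
                out[i] = stats[i];
        pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
        unsigned long long out[4];

        get_ethtool_stats(out);
        printf("%llu %llu %llu %llu\n", out[0], out[1], out[2], out[3]);
        return 0;
}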


@ -1011,6 +1011,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_repr_priv *repr_priv;
struct nfp_tun_offloaded_mac *entry;
struct nfp_repr *repr;
u16 nfp_mac_idx;
int ida_idx;
entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
@ -1029,8 +1030,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry->bridge_count--;
if (!entry->bridge_count && entry->ref_count) {
u16 nfp_mac_idx;
nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
false)) {
@ -1046,7 +1045,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
u16 nfp_mac_idx;
int port, err;
repr_priv = list_first_entry(&entry->repr_list,
@ -1074,8 +1072,14 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
&entry->ht_node,
offloaded_macs_params));
if (nfp_flower_is_supported_bridge(netdev))
nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
else
nfp_mac_idx = entry->index;
/* If MAC has global ID then extract and free the ida entry. */
if (nfp_tunnel_is_mac_idx_global(entry->index)) {
if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
}


@ -148,6 +148,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = {
{ .compatible = "aspeed,ast2600-mdio", },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);
static struct platform_driver aspeed_mdio_driver = {
.driver = {


@ -553,9 +553,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
else
mscr = 0;
return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
MII_88E1121_PHY_MSCR_REG,
MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE,
MII_88E1121_PHY_MSCR_REG,
MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
}
static int m88e1121_config_aneg(struct phy_device *phydev)
@ -569,11 +569,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
changed = err;
err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
changed = err;
changed |= err;
err = genphy_config_aneg(phydev);
if (err < 0)
@ -1213,16 +1215,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev)
{
int err;
err = genphy_soft_reset(phydev);
if (err < 0)
return err;
err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
err = genphy_config_aneg(phydev);
return 0;
if (err < 0)
return err;
return genphy_soft_reset(phydev);
}
static int m88e1118_config_init(struct phy_device *phydev)


@ -1400,6 +1400,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
{QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/
{QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */


@ -265,9 +265,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
{
/* Write ptr_ring before reading rx_notify_masked */
smp_mb();
if (!rq->rx_notify_masked) {
rq->rx_notify_masked = true;
napi_schedule(&rq->xdp_napi);
if (!READ_ONCE(rq->rx_notify_masked) &&
napi_schedule_prep(&rq->xdp_napi)) {
WRITE_ONCE(rq->rx_notify_masked, true);
__napi_schedule(&rq->xdp_napi);
}
}
@ -912,8 +913,10 @@ static int veth_poll(struct napi_struct *napi, int budget)
/* Write rx_notify_masked before reading ptr_ring */
smp_store_mb(rq->rx_notify_masked, false);
if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
rq->rx_notify_masked = true;
napi_schedule(&rq->xdp_napi);
if (napi_schedule_prep(&rq->xdp_napi)) {
WRITE_ONCE(rq->rx_notify_masked, true);
__napi_schedule(&rq->xdp_napi);
}
}
}
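
Both hunks above move veth to a claim-then-mark pattern: napi_schedule_prep()
atomically claims the right to schedule, and only the winner updates
rx_notify_masked (now via READ_ONCE/WRITE_ONCE) and calls __napi_schedule().
A rough user-space analogue of that claim with C11 atomics follows; it only
models the idea, not the kernel's NAPI primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool napi_owned;          /* stands in for the NAPI sched bit */
static atomic_bool rx_notify_masked;

/* Exactly one caller at a time wins, like napi_schedule_prep(). */
static bool schedule_prep(void)
{
        bool expected = false;

        return atomic_compare_exchange_strong(&napi_owned, &expected, true);
}

static void xdp_flush(void)
{
        /* Claim first, then publish the flag and schedule; losers back off. */
        if (!atomic_load(&rx_notify_masked) && schedule_prep()) {
                atomic_store(&rx_notify_masked, true);
                printf("NAPI scheduled\n");     /* would be __napi_schedule() */
        }
}

int main(void)
{
        xdp_flush();    /* schedules */
        xdp_flush();    /* no-op: already claimed and masked */
        return 0;
}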


@ -123,8 +123,20 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
sizeof(struct ip_tunnel_info) + md_size);
#ifdef CONFIG_DST_CACHE
/* Unclone the dst cache if there is one */
if (new_md->u.tun_info.dst_cache.cache) {
int ret;
ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
if (ret) {
metadata_dst_free(new_md);
return ERR_PTR(ret);
}
}
#endif
skb_dst_drop(skb);
dst_hold(&new_md->dst);
skb_dst_set(skb, &new_md->dst);
return new_md;
}
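
The hunk above gives the uncloned metadata its own dst_cache: the memcpy()
just before it copies the embedded cache pointer, so without the re-init the
original and the clone would share (and eventually free) the same cache,
which is the use-after-free called out in the merge message. The general
shape of the fix, as a user-space sketch with a plain heap allocation
standing in for the cache and hypothetical names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache { int slots[4]; };

struct tun_meta {
        int id;
        struct cache *cache;    /* owned pointer; copies must not share it */
};

/* Shallow-copy a tun_meta, then recreate the embedded cache for the copy,
 * otherwise both structs would use and free the same allocation.
 */
static struct tun_meta *meta_unclone(const struct tun_meta *old)
{
        struct tun_meta *new = malloc(sizeof(*new));

        if (!new)
                return NULL;
        memcpy(new, old, sizeof(*new));
        if (old->cache) {
                new->cache = calloc(1, sizeof(*new->cache));
                if (!new->cache) {
                        free(new);
                        return NULL;
                }
        }
        return new;
}

int main(void)
{
        struct tun_meta orig = { .id = 1, .cache = calloc(1, sizeof(struct cache)) };
        struct tun_meta *copy = meta_unclone(&orig);

        printf("separate caches: %s\n",
               copy && copy->cache != orig.cache ? "yes" : "no");
        if (copy) {
                free(copy->cache);
                free(copy);
        }
        free(orig.cache);
        return 0;
}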


@ -106,7 +106,7 @@ enum ip_conntrack_status {
IPS_NAT_CLASH = IPS_UNTRACKED,
#endif
/* Conntrack got a helper explicitly attached via CT target. */
/* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),


@ -129,6 +129,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
void vlan_dev_free_egress_priority(const struct net_device *dev);
int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result,
size_t size);
@ -139,7 +140,6 @@ int vlan_check_real_dev(struct net_device *real_dev,
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev);


@ -622,7 +622,7 @@ static int vlan_dev_init(struct net_device *dev)
}
/* Note: this function might be called multiple times for the same device. */
void vlan_dev_uninit(struct net_device *dev)
void vlan_dev_free_egress_priority(const struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@ -636,6 +636,16 @@ void vlan_dev_uninit(struct net_device *dev)
}
}
static void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
vlan_dev_free_egress_priority(dev);
/* Get rid of the vlan's reference to real_dev */
dev_put_track(vlan->real_dev, &vlan->dev_tracker);
}
static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
netdev_features_t features)
{
@ -846,9 +856,6 @@ static void vlan_dev_free(struct net_device *dev)
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
/* Get rid of the vlan's reference to real_dev */
dev_put_track(vlan->real_dev, &vlan->dev_tracker);
}
void vlan_setup(struct net_device *dev)


@ -183,10 +183,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
err = vlan_changelink(dev, tb, data, extack);
if (!err)
err = register_vlan_dev(dev, extack);
if (err)
vlan_dev_uninit(dev);
return err;
err = register_vlan_dev(dev, extack);
if (err)
vlan_dev_free_egress_priority(dev);
return err;
}


@ -91,9 +91,10 @@ again:
spin_unlock_bh(&ax25_list_lock);
lock_sock(sk);
s->ax25_dev = NULL;
dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
release_sock(sk);
ax25_disconnect(s, ENETUNREACH);
release_sock(sk);
spin_lock_bh(&ax25_list_lock);
sock_put(sk);
/* The entry could have been deleted from the
@ -1116,8 +1117,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
}
if (ax25_dev != NULL)
if (ax25_dev) {
ax25_fillin_cb(ax25, ax25_dev);
dev_hold_track(ax25_dev->dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
}
done:
ax25_cb_add(ax25);


@ -56,6 +56,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/wait.h>
#include <linux/uio.h>
@ -145,6 +146,7 @@ struct isotp_sock {
struct tpcon rx, tx;
struct list_head notifier;
wait_queue_head_t wait;
spinlock_t rx_lock; /* protect single thread state machine */
};
static LIST_HEAD(isotp_notifier_list);
@ -615,11 +617,17 @@ static void isotp_rcv(struct sk_buff *skb, void *data)
n_pci_type = cf->data[ae] & 0xF0;
/* Make sure the state changes and data structures stay consistent at
* CAN frame reception time. This locking is not needed in real world
* use cases but the inconsistency can be triggered with syzkaller.
*/
spin_lock(&so->rx_lock);
if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) {
/* check rx/tx path half duplex expectations */
if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) ||
(so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC))
return;
goto out_unlock;
}
switch (n_pci_type) {
@ -668,6 +676,9 @@ static void isotp_rcv(struct sk_buff *skb, void *data)
isotp_rcv_cf(sk, cf, ae, skb);
break;
}
out_unlock:
spin_unlock(&so->rx_lock);
}
static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
@ -876,7 +887,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (!size || size > MAX_MSG_LENGTH) {
err = -EINVAL;
goto err_out;
goto err_out_drop;
}
/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
@ -886,24 +897,24 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
err = -EINVAL;
goto err_out;
goto err_out_drop;
}
err = memcpy_from_msg(so->tx.buf, msg, size);
if (err < 0)
goto err_out;
goto err_out_drop;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev) {
err = -ENXIO;
goto err_out;
goto err_out_drop;
}
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
dev_put(dev);
goto err_out;
goto err_out_drop;
}
can_skb_reserve(skb);
@ -965,7 +976,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(err));
goto err_out;
goto err_out_drop;
}
if (wait_tx_done) {
@ -978,6 +989,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return size;
err_out_drop:
/* drop this PDU and unlock a potential wait queue */
old_state = ISOTP_IDLE;
err_out:
so->tx.state = old_state;
if (so->tx.state == ISOTP_IDLE)
@ -1444,6 +1458,7 @@ static int isotp_init(struct sock *sk)
so->txtimer.function = isotp_tx_timer_handler;
init_waitqueue_head(&so->wait);
spin_lock_init(&so->rx_lock);
spin_lock(&isotp_notifier_lock);
list_add_tail(&so->notifier, &isotp_notifier_list);


@ -681,7 +681,7 @@ exit:
* while trying to recycle fragments on __skb_frag_unref() we need
* to make one SKB responsible for triggering the recycle path.
* So disable the recycling bit if an SKB is cloned and we have
* additional references to to the fragmented part of the SKB.
* additional references to the fragmented part of the SKB.
* Eventually the last SKB will have the recycling bit set and it's
* dataref set to 0, which will trigger the recycling
*/


@ -1718,7 +1718,6 @@ EXPORT_SYMBOL_GPL(dsa_unregister_switch);
void dsa_switch_shutdown(struct dsa_switch *ds)
{
struct net_device *master, *slave_dev;
LIST_HEAD(unregister_list);
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
@ -1729,25 +1728,13 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
slave_dev = dp->slave;
netdev_upper_dev_unlink(master, slave_dev);
/* Just unlinking ourselves as uppers of the master is not
* sufficient. When the master net device unregisters, that will
* also call dev_close, which we will catch as NETDEV_GOING_DOWN
* and trigger a dev_close on our own devices (dsa_slave_close).
* In turn, that will call dev_mc_unsync on the master's net
* device. If the master is also a DSA switch port, this will
* trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
* its own master. Lockdep will complain about the fact that
* all cascaded masters have the same dsa_master_addr_list_lock_key,
* which it normally would not do if the cascaded masters would
* be in a proper upper/lower relationship, which we've just
* destroyed.
* To suppress the lockdep warnings, let's actually unregister
* the DSA slave interfaces too, to avoid the nonsensical
* multicast address list synchronization on shutdown.
*/
unregister_netdevice_queue(slave_dev, &unregister_list);
}
unregister_netdevice_many(&unregister_list);
/* Disconnect from further netdevice notifiers on the master,
* since netdev_uses_dsa() will now return false.
*/
dsa_switch_for_each_cpu_port(dp, ds)
dp->master->dsa_ptr = NULL;
rtnl_unlock();
mutex_unlock(&dsa2_mutex);


@ -256,7 +256,9 @@ static int __net_init ipmr_rules_init(struct net *net)
return 0;
err2:
rtnl_lock();
ipmr_free_table(mrt);
rtnl_unlock();
err1:
fib_rules_unregister(ops);
return err;


@ -937,6 +937,22 @@ void tcp_remove_empty_skb(struct sock *sk)
}
}
/* skb changing from pure zc to mixed, must charge zc */
static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
{
if (unlikely(skb_zcopy_pure(skb))) {
u32 extra = skb->truesize -
SKB_TRUESIZE(skb_end_offset(skb));
if (!sk_wmem_schedule(sk, extra))
return -ENOMEM;
sk_mem_charge(sk, extra);
skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
}
return 0;
}
static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
struct page *page, int offset, size_t *size)
{
@ -972,7 +988,7 @@ new_segment:
tcp_mark_push(tp, skb);
goto new_segment;
}
if (!sk_wmem_schedule(sk, copy))
if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
return NULL;
if (can_coalesce) {
@ -1320,19 +1336,8 @@ new_segment:
copy = min_t(int, copy, pfrag->size - pfrag->offset);
/* skb changing from pure zc to mixed, must charge zc */
if (unlikely(skb_zcopy_pure(skb))) {
u32 extra = skb->truesize -
SKB_TRUESIZE(skb_end_offset(skb));
if (!sk_wmem_schedule(sk, extra))
goto wait_for_space;
sk_mem_charge(sk, extra);
skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
}
if (!sk_wmem_schedule(sk, copy))
if (tcp_downgrade_zcopy_pure(sk, skb) ||
!sk_wmem_schedule(sk, copy))
goto wait_for_space;
err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,


@ -243,7 +243,9 @@ static int __net_init ip6mr_rules_init(struct net *net)
return 0;
err2:
rtnl_lock();
ip6mr_free_table(mrt);
rtnl_unlock();
err1:
fib_rules_unregister(ops);
return err;


@ -1607,6 +1607,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mpls_dev *mdev;
unsigned int flags;
int err;
if (event == NETDEV_REGISTER) {
mdev = mpls_add_dev(dev);
@ -1621,7 +1622,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
return NOTIFY_OK;
switch (event) {
int err;
case NETDEV_DOWN:
err = mpls_ifdown(dev, event);
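
The change above hoists the "int err;" declaration out of the switch
statement. A declaration placed between the opening brace of a switch and its
first case label is legal C, but any initialization of it can never execute
because control always jumps straight to a case label; GCC 12 started
flagging this pattern in kernel builds (reportedly in combination with
automatic stack variable initialization). A minimal compilable illustration
of the fixed form, with made-up event values:

#include <stdio.h>

static int handle_event(int event)
{
        int err = 0;    /* hoisted out of the switch, as in the fix */

        switch (event) {
        /* A declaration right here, before the first case label, would be
         * in scope for the whole switch, yet its initializer could never
         * run; newer compilers can warn about that.
         */
        case 1:
                err = -1;
                break;
        case 2:
                err = 2;
                break;
        default:
                break;
        }
        return err;
}

int main(void)
{
        printf("%d\n", handle_event(2));
        return 0;
}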


@ -925,6 +925,7 @@ out:
static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
struct mptcp_pm_addr_entry *entry)
{
int addrlen = sizeof(struct sockaddr_in);
struct sockaddr_storage addr;
struct mptcp_sock *msk;
struct socket *ssock;
@ -949,8 +950,11 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
}
mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
err = kernel_bind(ssock, (struct sockaddr *)&addr,
sizeof(struct sockaddr_in));
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
if (entry->addr.family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
#endif
err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
if (err) {
pr_warn("kernel_bind error, err=%d", err);
goto out;
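
The fix above passes kernel_bind() an address length that matches the bound
family instead of always sizeof(struct sockaddr_in), which is what made
binding an IPv6 address fail when creating listening sockets. The same rule
applies to plain bind() in user space; the sketch below is only an analogue
of the pattern, not the MPTCP code.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Bind a wildcard listener, sizing the sockaddr according to the family. */
static int bind_any(int family, unsigned short port)
{
        struct sockaddr_storage addr;
        socklen_t addrlen = sizeof(struct sockaddr_in);
        int fd;

        memset(&addr, 0, sizeof(addr));
        if (family == AF_INET) {
                struct sockaddr_in *a4 = (struct sockaddr_in *)&addr;

                a4->sin_family = AF_INET;
                a4->sin_port = htons(port);
        } else {
                struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)&addr;

                a6->sin6_family = AF_INET6;
                a6->sin6_port = htons(port);
                addrlen = sizeof(struct sockaddr_in6);  /* the crucial part */
        }

        fd = socket(family, SOCK_STREAM, 0);
        if (fd < 0)
                return -1;
        if (bind(fd, (struct sockaddr *)&addr, addrlen) < 0) {
                perror("bind");
                close(fd);
                return -1;
        }
        return fd;
}

int main(void)
{
        int fd4 = bind_any(AF_INET, 0);
        int fd6 = bind_any(AF_INET6, 0);

        printf("v4 fd=%d v6 fd=%d\n", fd4, fd6);
        return 0;
}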


@ -2311,7 +2311,8 @@ ctnetlink_create_conntrack(struct net *net,
if (helper->from_nlattr)
helper->from_nlattr(helpinfo, ct);
/* not in hash table yet so not strictly necessary */
/* disable helper auto-assignment for this entry */
ct->status |= IPS_HELPER;
RCU_INIT_POINTER(help->helper, helper);
}
} else {


@ -489,6 +489,15 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
pr_debug("Setting vtag %x for dir %d\n",
ih->init_tag, !dir);
ct->proto.sctp.vtag[!dir] = ih->init_tag;
/* don't renew timeout on init retransmit so
* port reuse by client or NAT middlebox cannot
* keep entry alive indefinitely (incl. nat info).
*/
if (new_state == SCTP_CONNTRACK_CLOSED &&
old_state == SCTP_CONNTRACK_CLOSED &&
nf_ct_is_confirmed(ct))
ignore = true;
}
ct->proto.sctp.state = new_state;


@ -446,6 +446,32 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
}
static void tcp_init_sender(struct ip_ct_tcp_state *sender,
struct ip_ct_tcp_state *receiver,
const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
u32 end, u32 win)
{
/* SYN-ACK in reply to a SYN
* or SYN from reply direction in simultaneous open.
*/
sender->td_end =
sender->td_maxend = end;
sender->td_maxwin = (win == 0 ? 1 : win);
tcp_options(skb, dataoff, tcph, sender);
/* RFC 1323:
* Both sides must send the Window Scale option
* to enable window scaling in either direction.
*/
if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
sender->td_scale = 0;
receiver->td_scale = 0;
}
}
static bool tcp_in_window(struct nf_conn *ct,
enum ip_conntrack_dir dir,
unsigned int index,
@ -499,24 +525,9 @@ static bool tcp_in_window(struct nf_conn *ct,
* Initialize sender data.
*/
if (tcph->syn) {
/*
* SYN-ACK in reply to a SYN
* or SYN from reply direction in simultaneous open.
*/
sender->td_end =
sender->td_maxend = end;
sender->td_maxwin = (win == 0 ? 1 : win);
tcp_options(skb, dataoff, tcph, sender);
/*
* RFC 1323:
* Both sides must send the Window Scale option
* to enable window scaling in either direction.
*/
if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
&& receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
sender->td_scale =
receiver->td_scale = 0;
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
end, win);
if (!tcph->ack)
/* Simultaneous open */
return true;
@ -560,6 +571,18 @@ static bool tcp_in_window(struct nf_conn *ct,
sender->td_maxwin = (win == 0 ? 1 : win);
tcp_options(skb, dataoff, tcph, sender);
} else if (tcph->syn && dir == IP_CT_DIR_REPLY &&
state->state == TCP_CONNTRACK_SYN_SENT) {
/* Retransmitted syn-ack, or syn (simultaneous open).
*
* Re-init state for this direction, just like for the first
* syn(-ack) reply, it might differ in seq, ack or tcp options.
*/
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
end, win);
if (!tcph->ack)
return true;
}
if (!(tcph->ack)) {


@ -167,7 +167,7 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
{
struct tcphdr *tcph;
if (pkt->tprot != IPPROTO_TCP)
if (pkt->tprot != IPPROTO_TCP || pkt->fragoff)
return NULL;
tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);


@ -83,7 +83,7 @@ static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
unsigned int thoff = nft_thoff(pkt);
if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
return -1;
switch (pkt->tprot) {
@ -147,7 +147,7 @@ void nft_payload_eval(const struct nft_expr *expr,
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
goto err;
offset = nft_thoff(pkt);
break;
@ -688,7 +688,7 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
goto err;
offset = nft_thoff(pkt);
break;
@ -728,7 +728,8 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
pkt->tprot == IPPROTO_SCTP &&
skb->ip_summed != CHECKSUM_PARTIAL) {
if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
if (pkt->fragoff == 0 &&
nft_payload_csum_sctp(skb, nft_thoff(pkt)))
goto err;
}
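
All of the checks added above refuse transport header access when
pkt->fragoff is non-zero: a non-first IP fragment carries no L4 header, so
reading or checksumming "transport header" fields there would really
interpret packet payload bytes as header fields. A tiny user-space
illustration of the fragment test itself; the mask value mirrors the usual
IP_OFFMASK definition and the sample frag_off values are arbitrary.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IP_OFFMASK 0x1fff       /* fragment-offset bits of the frag_off field */

/* Only a packet with fragment offset zero can carry the transport header. */
static int has_transport_header(uint16_t frag_off_net)
{
        return (ntohs(frag_off_net) & IP_OFFMASK) == 0;
}

int main(void)
{
        /* MF bit set, offset 0: first fragment, L4 header present */
        printf("first fragment:  %d\n", has_transport_header(htons(0x2000)));
        /* offset 0xb9: later fragment, no L4 header */
        printf("later fragment:  %d\n", has_transport_header(htons(0x00b9)));
        return 0;
}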


@ -368,9 +368,6 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
new_pe->type = SMC_PNET_ETH;
memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN);
strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
new_pe->ndev = ndev;
if (ndev)
netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_KERNEL);
rc = -EEXIST;
new_netdev = true;
write_lock(&pnettable->lock);
@ -382,6 +379,11 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
}
}
if (new_netdev) {
if (ndev) {
new_pe->ndev = ndev;
netdev_tracker_alloc(ndev, &new_pe->dev_tracker,
GFP_ATOMIC);
}
list_add_tail(&new_pe->list, &pnettable->pnetlist);
write_unlock(&pnettable->lock);
} else {


@ -313,7 +313,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
ua.sr.type, ua.sr.lower, node);
} else {
pr_warn("Unrecognized name table message received\n");
pr_warn_ratelimited("Unknown name table message received\n");
}
return false;
}


@ -1159,6 +1159,7 @@ signal_address_tests()
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags signal
ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags signal
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "signal addresses race test" 3 3 3
# the server will not signal the address terminating
# the MPC subflow