diff --git a/Documentation/devicetree/bindings/net/marvell,mvusb.yaml b/Documentation/devicetree/bindings/net/marvell,mvusb.yaml
index 9458f6659be1..68573762294b 100644
--- a/Documentation/devicetree/bindings/net/marvell,mvusb.yaml
+++ b/Documentation/devicetree/bindings/net/marvell,mvusb.yaml
@@ -38,28 +38,27 @@ required:
 examples:
   - |
     /* USB host controller */
-    &usb1 {
-        mvusb: mdio@1 {
+    usb {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        mdio@1 {
             compatible = "usb1286,1fa4";
             reg = <1>;
             #address-cells = <1>;
             #size-cells = <0>;
-        };
-    };
 
-    /* MV88E6390X devboard */
-    &mvusb {
-        switch@0 {
-            compatible = "marvell,mv88e6190";
-            status = "ok";
-            reg = <0x0>;
+            switch@0 {
+                compatible = "marvell,mv88e6190";
+                reg = <0x0>;
 
-            ports {
-                /* Port definitions */
-            };
+                ports {
+                    /* Port definitions */
+                };
 
-            mdio {
-                /* PHY definitions */
+                mdio {
+                    /* PHY definitions */
+                };
             };
         };
     };
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
index 73658b71d4a3..cd1769ecdc1c 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -2,6 +2,7 @@
 /* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
 #ifdef CONFIG_CHELSIO_TLS_DEVICE
+#include
 #include "chcr_ktls.h"
 #include "clip_tbl.h"
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 086dfb1b9d0b..91cdc0a2b1a7 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl)
	u32 tmpid;
	char *cmd = sl->rbuff;

-	cf.can_id = 0;
+	memset(&cf, 0, sizeof(cf));

	switch (*cmd) {
	case 'r':
@@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl)
	else
		return;

-	*(u64 *) (&cf.data) = 0; /* clear payload */
-
	/* RTR frames may have a dlc > 0 but they never have any data bytes */
	if (!(cf.can_id & CAN_RTR_FLAG)) {
		for (i = 0; i < cf.can_dlc; i++) {
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index affa5c6e135c..c7ac63f41918 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -480,7 +480,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
	priv->slave_mii_bus->parent = ds->dev->parent;
	priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

-	err = of_mdiobus_register(priv->slave_mii_bus, dn);
+	err = mdiobus_register(priv->slave_mii_bus);
	if (err && dn)
		of_node_put(dn);

@@ -1079,6 +1079,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
	const struct bcm_sf2_of_data *data;
	struct b53_platform_data *pdata;
	struct dsa_switch_ops *ops;
+	struct device_node *ports;
	struct bcm_sf2_priv *priv;
	struct b53_device *dev;
	struct dsa_switch *ds;
@@ -1146,7 +1147,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
	set_bit(0, priv->cfp.used);
	set_bit(0, priv->cfp.unique);

-	bcm_sf2_identify_ports(priv, dn->child);
+	ports = of_find_node_by_name(dn, "ports");
+	if (ports) {
+		bcm_sf2_identify_ports(priv, ports);
+		of_node_put(ports);
+	}

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ef57552db260..2d0d91db0ddb 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1403,6 +1403,9 @@ mt7530_setup(struct dsa_switch *ds)
			continue;

		phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
+		if (!phy_node)
+			continue;
+
		if (phy_node->parent == priv->dev->of_node->parent) {
			ret = of_get_phy_mode(mac_np, &interface);
			if (ret && ret != -ENODEV)
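The slcan change above replaces a single-field clear with a full-struct memset(): clearing only can_id left the frame's payload and padding holding whatever was on the stack. A minimal userspace sketch of the bug class (the struct layout here is a local stand-in, not the kernel's struct can_frame):

	#include <stdint.h>
	#include <string.h>

	/* Local stand-in for a CAN frame; layout is illustrative only. */
	struct frame {
		uint32_t can_id;
		uint8_t  can_dlc;
		uint8_t  data[8];	/* plus implicit padding */
	};

	void parse_buggy(struct frame *f)
	{
		f->can_id = 0;	/* data[] and padding keep stale stack bytes */
	}

	void parse_fixed(struct frame *f)
	{
		memset(f, 0, sizeof(*f));	/* every byte, padding included */
	}

memset() also covers padding bytes that per-field clears can never reach, which matters whenever the frame is later copied out of the kernel.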
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index 97901c114bfa..fbe9d88b13c7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -491,7 +491,7 @@ get_ingress_preclass_record(struct aq_hw_s *hw,
	rec->snap[1] = packed_record[8] & 0xFF;

	rec->llc = (packed_record[8] >> 8) & 0xFF;
-	rec->llc = packed_record[9] << 8;
+	rec->llc |= packed_record[9] << 8;

	rec->mac_sa[0] = packed_record[10];
	rec->mac_sa[0] |= packed_record[11] << 16;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9638d65d8261..517caedc0a87 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6874,7 +6874,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			/* In this option, the first PHY makes sure to pass the
			 * traffic through itself only.
-			 * Its not clear how to reset the link on the second phy
+			 * It's not clear how to reset the link on the second
+			 * phy.
			 */
			active_external_phy = EXT_PHY1;
			break;
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
index a04eccbc78e8..1e0ffe8f4152 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.h
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -24,7 +24,7 @@ struct cavium_ptp {
	struct ptp_clock *ptp_clock;
 };

-#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+#if IS_REACHABLE(CONFIG_CAVIUM_PTP)

 struct cavium_ptp *cavium_ptp_get(void);
 void cavium_ptp_put(struct cavium_ptp *ptp);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 75fde0d4d493..a70018f067aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3132,7 +3132,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	pi->xact_addr_filt = ret;
	return 0;
 }

@@ -6672,6 +6671,10 @@ static void shutdown_one(struct pci_dev *pdev)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

+		rtnl_lock();
+		cxgb4_mqprio_stop_offload(adapter);
+		rtnl_unlock();
+
		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index ec3eb45ee3b4..e6af4906d674 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -301,6 +301,7 @@ static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
+			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		}

		free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
@@ -611,6 +612,28 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
	return ret;
 }

+void cxgb4_mqprio_stop_offload(struct adapter *adap)
+{
+	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+	struct net_device *dev;
+	u8 i;
+
+	if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
+		return;
+
+	for_each_port(adap, i) {
+		dev = adap->port[i];
+		if (!dev)
+			continue;
+
+		tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+		if (!tc_port_mqprio->mqprio.qopt.num_tc)
+			continue;
+
+		cxgb4_mqprio_disable_offload(dev);
+	}
+}
+
 int cxgb4_init_tc_mqprio(struct adapter *adap)
 {
	struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
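The macsec_api change above is a classic read-modify-write slip: the second store assigned instead of OR-ing, so the low byte read one line earlier was discarded. A tiny self-contained demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t packed[2] = { 0xAB, 0xCD };	/* low byte, high byte */
		uint16_t llc;

		llc = packed[0] & 0xFF;
		llc = packed[1] << 8;	/* bug: overwrites, yields 0xCD00 */
		printf("buggy: 0x%04x\n", llc);

		llc = packed[0] & 0xFF;
		llc |= packed[1] << 8;	/* fix: combines, yields 0xCDAB */
		printf("fixed: 0x%04x\n", llc);
		return 0;
	}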
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index c532f1ef8451..ff8794132b22 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -38,6 +38,7 @@ struct cxgb4_tc_mqprio {

 int cxgb4_setup_tc_mqprio(struct net_device *dev,
			   struct tc_mqprio_qopt_offload *mqprio);
+void cxgb4_mqprio_stop_offload(struct adapter *adap);
 int cxgb4_init_tc_mqprio(struct adapter *adap);
 void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
 #endif /* __CXGB4_TC_MQPRIO_H__ */
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 835b7816e372..87236206366f 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1731,7 +1731,7 @@ static int ftgmac100_setup_clk(struct ftgmac100 *priv)
	if (rc)
		goto cleanup_clk;

-	/* RCLK is for RMII, typically used for NCSI. Optional because its not
+	/* RCLK is for RMII, typically used for NCSI. Optional because it's not
	 * necessary if it's the AST2400 MAC, or the MAC is configured for
	 * RGMII, or the controller is not an ASPEED-based controller.
	 */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f76908cae73..51117a5a6bbf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -150,14 +150,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

-			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
-							   act->id, vid,
-							   proto, prio, extack);
+			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+							  act->id, vid,
+							  proto, prio, extack);
+			if (err)
+				return err;
+			break;
			}
		case FLOW_ACTION_PRIORITY:
-			return mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
-							       act->priority,
-							       extack);
+			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
+							      act->priority,
+							      extack);
+			if (err)
+				return err;
+			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 9096ffd89e50..fbf714d027d8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -643,7 +643,7 @@ static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size,
 {
	int bs = fls64(burst) - 1;

-	if (burst != (1 << bs)) {
+	if (burst != (BIT_ULL(bs))) {
		NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two");
		return -EINVAL;
	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 1a5fc2ae351c..29810a1aa210 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -369,8 +369,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
-	int rc = -EINVAL;
	u16 rx_mode = 0;
+	int rc;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
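The spectrum_trap change above matters because `1 << bs` shifts a 32-bit int: for a u64 burst with bit 31 or higher set, the shift is undefined behaviour and the power-of-two test breaks. BIT_ULL() forces a 64-bit shift. A compilable sketch (BIT_ULL and fls64 re-implemented locally so the example stands alone):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(n) (1ULL << (n))	/* local copy of the kernel macro */

	static int fls64_local(uint64_t x)	/* highest set bit, 1-based */
	{
		int i = 0;
		while (x) { x >>= 1; i++; }
		return i;
	}

	int main(void)
	{
		uint64_t burst = 1ULL << 40;
		int bs = fls64_local(burst) - 1;

		/* (1 << bs) would shift a 32-bit int by 40 places: UB.
		 * The 64-bit shift keeps the comparison meaningful. */
		printf("power of two: %s\n", burst == BIT_ULL(bs) ? "yes" : "no");
		return 0;
	}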
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 1305522f72d6..40efe60eff8d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -282,7 +282,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
 {
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
-	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u16 mux_id;

@@ -297,19 +296,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],

	if (data[IFLA_RMNET_MUX_ID]) {
		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
-		if (rmnet_get_endpoint(port, mux_id)) {
-			NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
-			return -EINVAL;
+
+		if (mux_id != priv->mux_id) {
+			struct rmnet_endpoint *ep;
+
+			ep = rmnet_get_endpoint(port, priv->mux_id);
+			if (!ep)
+				return -ENODEV;
+
+			if (rmnet_get_endpoint(port, mux_id)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "MUX ID already exists");
+				return -EINVAL;
+			}
+
+			hlist_del_init_rcu(&ep->hlnode);
+			hlist_add_head_rcu(&ep->hlnode,
+					   &port->muxed_ep[mux_id]);
+
+			ep->mux_id = mux_id;
+			priv->mux_id = mux_id;
		}
-
-		ep = rmnet_get_endpoint(port, priv->mux_id);
-		if (!ep)
-			return -ENODEV;
-
-		hlist_del_init_rcu(&ep->hlnode);
-		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
-
-		ep->mux_id = mux_id;
-		priv->mux_id = mux_id;
	}

	if (data[IFLA_RMNET_FLAGS]) {
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 55cb5730beb6..bf5bf05970a2 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5441,9 +5441,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

	netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);

-	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
-			   NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
-			   NETIF_F_HW_VLAN_CTAG_RX;
+	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+			   NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
			     NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -5460,26 +5459,26 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
		/* Disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

+	if (rtl_chip_supports_csum_v2(tp))
+		dev->hw_features |= NETIF_F_IPV6_CSUM;
+
+	dev->features |= dev->hw_features;
+
+	/* There has been a number of reports that using SG/TSO results in
+	 * tx timeouts. However for a lot of people SG/TSO works fine.
+	 * Therefore disable both features by default, but allow users to
+	 * enable them. Use at own risk!
+	 */
	if (rtl_chip_supports_csum_v2(tp)) {
-		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
		dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
	} else {
+		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
		dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
	}

-	/* RTL8168e-vl and one RTL8168c variant are known to have a
-	 * HW issue with TSO.
-	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
-	    tp->mac_version == RTL_GIGA_MAC_VER_22) {
-		dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
-		dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
-	}
-
-	dev->features |= dev->hw_features;
-
	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 542784300620..efc6ec1b8027 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -207,7 +207,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
			reg++;
		}

-		while (reg <= perfect_addr_number) {
+		while (reg < perfect_addr_number) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 0e4575f7bedb..ad4df9bddcf3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -577,8 +577,13 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
+		} else {
+			value &= ~XGMAC_VLAN_EDVLP;
+			value &= ~XGMAC_VLAN_ESVL;
+			value &= ~XGMAC_VLAN_DOVLTC;
		}

+		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
@@ -589,13 +594,19 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,

		value = readl(ioaddr + XGMAC_VLAN_TAG);

+		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
+		} else {
+			value &= ~XGMAC_VLAN_EDVLP;
+			value &= ~XGMAC_VLAN_ESVL;
+			value &= ~XGMAC_VLAN_DOVLTC;
		}

+		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2fb671e61ee8..e6898fd5223f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4566,9 +4566,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
		return ret;
	}

-	ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+	if (priv->hw->num_vlan) {
+		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+		if (ret)
+			return ret;
+	}

-	return ret;
+	return 0;
 }

 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -4581,9 +4585,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
		is_double = true;

	clear_bit(vid, priv->active_vlans);
-	ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
-	if (ret)
-		return ret;
+
+	if (priv->hw->num_vlan) {
+		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+		if (ret)
+			return ret;
+	}

	return stmmac_vlan_update(priv, is_double);
 }
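The dwmac1000 change earlier in this group is a textbook off-by-one: with `reg <= perfect_addr_number` the clearing loop touched one slot past the last perfect-filter register. A userspace sketch with a canary showing why `<` is the right bound:

	#include <stdio.h>

	#define PERFECT_ADDRS 8

	int main(void)
	{
		unsigned int table[PERFECT_ADDRS + 1];
		unsigned int reg = 1;

		table[PERFECT_ADDRS] = 0xDEAD;	/* register beyond the table */

		while (reg < PERFECT_ADDRS)	/* '<=' would clear index 8 too */
			table[reg++] = 0;

		printf("canary intact: %s\n",
		       table[PERFECT_ADDRS] == 0xDEAD ? "yes" : "no");
		return 0;
	}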
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index da82d7f16a09..0d580d81d910 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2594,6 +2594,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
		return PTR_ERR(dev);
	macsec = macsec_priv(dev);

+	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
+		return -EINVAL;
+
	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
	if (macsec->offload == offload)
		return 0;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 481cf48c9b9e..31f731e6df72 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -425,8 +425,8 @@ static int at803x_parse_dt(struct phy_device *phydev)
		 */
		if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
		    at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
-			priv->clk_25m_reg &= ~AT8035_CLK_OUT_MASK;
-			priv->clk_25m_mask &= ~AT8035_CLK_OUT_MASK;
+			priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+			priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
		}
	}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2ec19e5540bf..05d20343b816 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include

 /* Operation Mode Strap Override */
 #define MII_KSZPHY_OMSO				0x16
@@ -952,6 +953,12 @@ static int kszphy_resume(struct phy_device *phydev)

	genphy_resume(phydev);

+	/* After switching from power-down to normal mode, an internal global
+	 * reset is automatically generated. Wait a minimum of 1 ms before
+	 * read/write access to the PHY registers.
+	 */
+	usleep_range(1000, 2000);
+
	ret = kszphy_config_reset(phydev);
	if (ret)
		return ret;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 228fe449dc6d..07476c6510f2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1678,8 +1678,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
-		if (err < 0)
-			goto err_xdp;
+		if (err < 0) {
+			if (act == XDP_REDIRECT || act == XDP_TX)
+				put_page(alloc_frag->page);
+			goto out;
+		}
+
		if (err == XDP_REDIRECT)
			xdp_do_flush();
		if (err != XDP_PASS)
@@ -1693,8 +1697,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,

	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

-err_xdp:
-	put_page(alloc_frag->page);
 out:
	rcu_read_unlock();
	local_bh_enable();
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 8783e2ab3ec0..0ef7e1f443e3 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -54,6 +54,7 @@ static const char driver_name[] = "pegasus";
 #undef	PEGASUS_WRITE_EEPROM
 #define	BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
			BMSR_100FULL | BMSR_ANEGCAPABLE)
+#define CARRIER_CHECK_DELAY (2 * HZ)

 static bool loopback;
 static bool mii_mode;
@@ -1089,17 +1090,12 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
		set_register(pegasus, Reg81, 2);
 }

-
-static int pegasus_count;
-static struct workqueue_struct *pegasus_workqueue;
-#define CARRIER_CHECK_DELAY (2 * HZ)
-
 static void check_carrier(struct work_struct *work)
 {
	pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
	set_carrier(pegasus->net);
	if (!(pegasus->flags & PEGASUS_UNPLUG)) {
-		queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+		queue_delayed_work(system_long_wq, &pegasus->carrier_check,
				   CARRIER_CHECK_DELAY);
	}
 }
@@ -1120,18 +1116,6 @@ static int pegasus_blacklisted(struct usb_device *udev)
	return 0;
 }

-/* we rely on probe() and remove() being serialized so we
- * don't need extra locking on pegasus_count.
- */
-static void pegasus_dec_workqueue(void)
-{
-	pegasus_count--;
-	if (pegasus_count == 0) {
-		destroy_workqueue(pegasus_workqueue);
-		pegasus_workqueue = NULL;
-	}
-}
-
 static int pegasus_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
 {
@@ -1144,14 +1128,6 @@ static int pegasus_probe(struct usb_interface *intf,
	if (pegasus_blacklisted(dev))
		return -ENODEV;

-	if (pegasus_count == 0) {
-		pegasus_workqueue = alloc_workqueue("pegasus", WQ_MEM_RECLAIM,
-						    0);
-		if (!pegasus_workqueue)
-			return -ENOMEM;
-	}
-	pegasus_count++;
-
	net = alloc_etherdev(sizeof(struct pegasus));
	if (!net)
		goto out;
@@ -1209,7 +1185,7 @@ static int pegasus_probe(struct usb_interface *intf,
	res = register_netdev(net);
	if (res)
		goto out3;
-	queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+	queue_delayed_work(system_long_wq, &pegasus->carrier_check,
			   CARRIER_CHECK_DELAY);
	dev_info(&intf->dev, "%s, %s, %pM\n", net->name,
		 usb_dev_id[dev_index].name, net->dev_addr);
@@ -1222,7 +1198,6 @@ static int pegasus_probe(struct usb_interface *intf,
 out1:
	free_netdev(net);
 out:
-	pegasus_dec_workqueue();
	return res;
 }

@@ -1237,7 +1212,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
	}

	pegasus->flags |= PEGASUS_UNPLUG;
-	cancel_delayed_work(&pegasus->carrier_check);
+	cancel_delayed_work_sync(&pegasus->carrier_check);
	unregister_netdev(pegasus->net);
	unlink_all_urbs(pegasus);
	free_all_urbs(pegasus);
@@ -1246,7 +1221,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
		pegasus->rx_skb = NULL;
	}
	free_netdev(pegasus->net);
-	pegasus_dec_workqueue();
 }

 static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
@@ -1254,7 +1228,7 @@ static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
	struct pegasus *pegasus = usb_get_intfdata(intf);

	netif_device_detach(pegasus->net);
-	cancel_delayed_work(&pegasus->carrier_check);
+	cancel_delayed_work_sync(&pegasus->carrier_check);
	if (netif_running(pegasus->net)) {
		usb_kill_urb(pegasus->rx_urb);
		usb_kill_urb(pegasus->intr_urb);
@@ -1276,7 +1250,7 @@ static int pegasus_resume(struct usb_interface *intf)
		pegasus->intr_urb->actual_length = 0;
		intr_callback(pegasus->intr_urb);
	}
-	queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+	queue_delayed_work(system_long_wq, &pegasus->carrier_check,
			   CARRIER_CHECK_DELAY);
	return 0;
 }
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index f66c0f8f6f4a..ecb3fccca603 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -740,9 +740,6 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
 static
 int i2400m_bm_buf_alloc(struct i2400m *i2400m)
 {
-	int result;
-
-	result = -ENOMEM;
	i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
	if (i2400m->bm_cmd_buf == NULL)
		goto error_bm_cmd_kzalloc;
@@ -754,7 +751,7 @@ int i2400m_bm_buf_alloc(struct i2400m *i2400m)
 error_bm_ack_buf_kzalloc:
	kfree(i2400m->bm_cmd_buf);
 error_bm_cmd_kzalloc:
-	return result;
+	return -ENOMEM;
 }


@@ -843,7 +840,7 @@ EXPORT_SYMBOL_GPL(i2400m_reset);
 */
 int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 {
-	int result = -ENODEV;
+	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
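The pegasus conversion above also switches cancel_delayed_work() to cancel_delayed_work_sync() on disconnect/suspend: the async variant only stops future reschedules, so a callback already running can still touch state that teardown is about to free. A rough userspace analogue of the race using pthreads (the kernel primitives themselves are not reproduced here):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_bool stop;
	static int *state;	/* shared with the "work" callback */

	static void *worker(void *arg)
	{
		while (!atomic_load(&stop)) {
			(*state)++;	/* touches shared state */
			usleep(1000);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		int value = 0;

		state = &value;
		pthread_create(&t, NULL, worker, NULL);
		usleep(10000);

		atomic_store(&stop, 1);
		pthread_join(t, NULL);	/* the "_sync" step: wait it out */
		state = NULL;		/* only now is teardown safe */
		printf("worker ran %d times\n", value);
		return 0;
	}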
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 28b1a2b4459e..3a2ac7072dbb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -47,8 +47,8 @@
 * A. IP checksum related features
 *
 * Drivers advertise checksum offload capabilities in the features of a device.
- * From the stack's point of view these are capabilities offered by the driver,
- * a driver typically only advertises features that it is capable of offloading
+ * From the stack's point of view these are capabilities offered by the driver.
+ * A driver typically only advertises features that it is capable of offloading
 * to its device.
 *
 * The checksum related features are:
@@ -63,7 +63,7 @@
 *			  TCP or UDP packets over IPv4. These are specifically
 *			  unencapsulated packets of the form IPv4|TCP or
 *			  IPv4|UDP where the Protocol field in the IPv4 header
- *			  is TCP or UDP. The IPv4 header may contain IP options
+ *			  is TCP or UDP. The IPv4 header may contain IP options.
 *			  This feature cannot be set in features for a device
 *			  with NETIF_F_HW_CSUM also set. This feature is being
 *			  DEPRECATED (see below).
@@ -79,13 +79,13 @@
 *			  DEPRECATED (see below).
 *
 *	NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
- *			 This flag is used only used to disable the RX checksum
+ *			 This flag is only used to disable the RX checksum
 *			 feature for a device. The stack will accept receive
 *			 checksum indication in packets received on a device
 *			 regardless of whether NETIF_F_RXCSUM is set.
 *
 * B. Checksumming of received packets by device. Indication of checksum
- *    verification is in set skb->ip_summed. Possible values are:
+ *    verification is set in skb->ip_summed. Possible values are:
 *
 * CHECKSUM_NONE:
 *
@@ -115,16 +115,16 @@
 *   the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
 *   For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
 *   and a device is able to verify the checksums for UDP (possibly zero),
- *   GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
+ *   GRE (checksum flag is set) and TCP, skb->csum_level would be set to
 *   two. If the device were only able to verify the UDP checksum and not
- *   GRE, either because it doesn't support GRE checksum of because GRE
+ *   GRE, either because it doesn't support GRE checksum or because GRE
 *   checksum is bad, skb->csum_level would be set to zero (TCP checksum is
 *   not considered in this case).
 *
 * CHECKSUM_COMPLETE:
 *
 *   This is the most generic way. The device supplied checksum of the _whole_
- *   packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
+ *   packet as seen by netif_rx() and fills in skb->csum. This means the
 *   hardware doesn't need to parse L3/L4 headers to implement this.
 *
 *   Notes:
@@ -153,8 +153,8 @@
 *   from skb->csum_start up to the end, and to record/write the checksum at
 *   offset skb->csum_start + skb->csum_offset. A driver may verify that the
 *   csum_start and csum_offset values are valid values given the length and
- *   offset of the packet, however they should not attempt to validate that the
- *   checksum refers to a legitimate transport layer checksum-- it is the
+ *   offset of the packet, but it should not attempt to validate that the
+ *   checksum refers to a legitimate transport layer checksum -- it is the
 *   purview of the stack to validate that csum_start and csum_offset are set
 *   correctly.
 *
@@ -178,18 +178,18 @@
 *
 * CHECKSUM_UNNECESSARY:
 *
- *   This has the same meaning on as CHECKSUM_NONE for checksum offload on
+ *   This has the same meaning as CHECKSUM_NONE for checksum offload on
 *   output.
 *
 * CHECKSUM_COMPLETE:
 *   Not used in checksum output. If a driver observes a packet with this value
- *   set in skbuff, if should treat as CHECKSUM_NONE being set.
+ *   set in skbuff, it should treat the packet as if CHECKSUM_NONE were set.
 *
 * D. Non-IP checksum (CRC) offloads
 *
 *   NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
 *     offloading the SCTP CRC in a packet. To perform this offload the stack
- *     will set set csum_start and csum_offset accordingly, set ip_summed to
+ *     will set csum_start and csum_offset accordingly, set ip_summed to
 *     CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
 *     the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
 *     A driver that supports both IP checksum offload and SCTP CRC32c offload
@@ -200,10 +200,10 @@
 *   NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
 *     offloading the FCOE CRC in a packet. To perform this offload the stack
 *     will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- *     accordingly. Note the there is no indication in the skbuff that the
- *     CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
+ *     accordingly. Note that there is no indication in the skbuff that the
+ *     CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
 *     both IP checksum offload and FCOE CRC offload must verify which offload
- *     is configured for a packet presumably by inspecting packet headers.
+ *     is configured for a packet, presumably by inspecting packet headers.
 *
 * E. Checksumming on output with GSO.
 *
@@ -211,9 +211,9 @@
 *   is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
 *   gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
 *   part of the GSO operation is implied. If a checksum is being offloaded
- *   with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
- *   are set to refer to the outermost checksum being offload (two offloaded
- *   checksums are possible with UDP encapsulation).
+ *   with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and
+ *   csum_offset are set to refer to the outermost checksum being offloaded
+ *   (two offloaded checksums are possible with UDP encapsulation).
 */

 /* Don't change this without changing skb_csum_unnecessary! */
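Since the comment block above spells out the CHECKSUM_PARTIAL contract -- checksum from csum_start to the end of the packet, store the result at csum_start + csum_offset -- a small userspace sketch of that arithmetic may help (RFC 1071 ones'-complement sum; the offsets are example values, not taken from the kernel):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t csum(const uint8_t *p, size_t n)
	{
		uint32_t sum = 0;

		while (n > 1) { sum += (p[0] << 8) | p[1]; p += 2; n -= 2; }
		if (n)
			sum += p[0] << 8;
		while (sum >> 16)
			sum = (sum & 0xFFFF) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint8_t pkt[64] = { 0 };	/* pretend headers + payload */
		size_t csum_start = 14 + 20;	/* e.g. after Ethernet + IPv4 */
		size_t csum_offset = 16;	/* e.g. the TCP checksum field */
		uint16_t c;

		/* checksum everything from csum_start to the end... */
		c = csum(pkt + csum_start, sizeof(pkt) - csum_start);
		/* ...and record it at csum_start + csum_offset */
		pkt[csum_start + csum_offset] = c >> 8;
		pkt[csum_start + csum_offset + 1] = c & 0xFF;
		printf("stored checksum: 0x%04x\n", c);
		return 0;
	}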
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5bf8d22a47ec..39d37d0ef575 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1065,11 +1065,12 @@ static void neigh_timer_handler(struct timer_list *t)
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
-			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
+			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
-		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
+		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
@@ -1125,7 +1126,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
-					 HZ/2);
+					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
@@ -1427,7 +1428,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
-			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
+			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+				      HZ/100));
 }
 EXPORT_SYMBOL(__neigh_set_probe_once);
diff --git a/net/core/sock.c b/net/core/sock.c
index da32d9b6d09f..ce1d8dce9b7a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -574,7 +574,7 @@ static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)

	/* Sorry... */
	ret = -EPERM;
-	if (!ns_capable(net->user_ns, CAP_NET_RAW))
+	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 5390ff541658..e94eb1aac602 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1338,7 +1338,7 @@ static void dsa_hw_port_list_free(struct list_head *hw_port_list)
 }

 /* Make the hardware datapath to/from @dev limited to a common MTU */
-void dsa_bridge_mtu_normalization(struct dsa_port *dp)
+static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
 {
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a11fd4d67832..24e319dfb510 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1357,7 +1357,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp,

	regen_advance = idev->cnf.regen_max_retry *
			idev->cnf.dad_transmits *
-			NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+			max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;

	/* recalculate max_desync_factor each time and update
	 * idev->desync_factor if it's larger
@@ -3298,6 +3298,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
	if (netif_is_l3_master(idev->dev))
		return;

+	/* no link local addresses on devices flagged as slaves */
+	if (idev->dev->flags & IFF_SLAVE)
+		return;
+
	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);

	switch (idev->cnf.addr_gen_mode) {
@@ -4117,7 +4121,8 @@ static void addrconf_dad_work(struct work_struct *w)
		ifp->dad_probes--;
		addrconf_mod_dad_work(ifp,
-				      NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
+				      max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
+					  HZ/100));
		spin_unlock(&ifp->lock);
		write_unlock_bh(&idev->lock);
@@ -4523,7 +4528,7 @@ static void addrconf_verify_rtnl(void)
				   !(ifp->flags&IFA_F_TENTATIVE)) {
				unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
					ifp->idev->cnf.dad_transmits *
-					NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
+					max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;

				if (age >= ifp->prefered_lft - regen_advance) {
					struct inet6_ifaddr *ifpub = ifp->ifpub;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 6ffa153e5166..1ecd4e9b0bdf 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1359,8 +1359,8 @@ static void ndisc_router_discovery(struct sk_buff *skb)

		if (rtime && rtime/1000 < MAX_SCHEDULE_TIMEOUT/HZ) {
			rtime = (rtime*HZ)/1000;
-			if (rtime < HZ/10)
-				rtime = HZ/10;
+			if (rtime < HZ/100)
+				rtime = HZ/100;
			NEIGH_VAR_SET(in6_dev->nd_parms, RETRANS_TIME, rtime);
			in6_dev->tstamp = jiffies;
			send_ifinfo_notify = true;
diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c
index dc4f20e23bf7..d38b476fc7f2 100644
--- a/net/ipv6/rpl.c
+++ b/net/ipv6/rpl.c
@@ -48,7 +48,7 @@ void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
	outhdr->cmpri = 0;
	outhdr->cmpre = 0;

-	for (i = 0; i <= n; i++)
+	for (i = 0; i < n; i++)
		ipv6_rpl_addr_decompress(&outhdr->rpl_segaddr[i], daddr,
					 ipv6_rpl_segdata_pos(inhdr, i),
					 inhdr->cmpri);
@@ -66,7 +66,7 @@ static unsigned char ipv6_rpl_srh_calc_cmpri(const struct ipv6_rpl_sr_hdr *inhdr,
	int i;

	for (plen = 0; plen < sizeof(*daddr); plen++) {
-		for (i = 0; i <= n; i++) {
+		for (i = 0; i < n; i++) {
			if (daddr->s6_addr[plen] !=
			    inhdr->rpl_segaddr[i].s6_addr[plen])
				return plen;
@@ -114,7 +114,7 @@ void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,
	outhdr->cmpri = cmpri;
	outhdr->cmpre = cmpre;

-	for (i = 0; i <= n; i++)
+	for (i = 0; i < n; i++)
		ipv6_rpl_addr_compress(ipv6_rpl_segdata_pos(outhdr, i),
				       &inhdr->rpl_segaddr[i], cmpri);
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index a49ddc6cd020..c3ececd7cfc1 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -210,7 +210,7 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
	struct dst_entry *orig_dst = skb_dst(skb);
	struct dst_entry *dst = NULL;
	struct rpl_lwt *rlwt;
-	int err = -EINVAL;
+	int err;

	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index bd220ee4aac9..faf57585b892 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -4,6 +4,8 @@
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

+#define pr_fmt(fmt) "MPTCP: " fmt
+
 #include
 #include
 #include
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 064639f72487..977d9c8b1453 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -3,6 +3,8 @@
 *
 * Copyright (c) 2019, Intel Corporation.
 */
+#define pr_fmt(fmt) "MPTCP: " fmt
+
 #include
 #include
 #include
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index a0ce7f324499..86d61ab34c7c 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -4,6 +4,8 @@
 * Copyright (c) 2020, Red Hat, Inc.
 */

+#define pr_fmt(fmt) "MPTCP: " fmt
+
 #include
 #include
 #include
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 1833bc1f4a43..939a5045181a 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -57,10 +57,43 @@ static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
	return msk->first && !sk_is_mptcp(msk->first);
 }

+static struct socket *mptcp_is_tcpsk(struct sock *sk)
+{
+	struct socket *sock = sk->sk_socket;
+
+	if (sock->sk != sk)
+		return NULL;
+
+	if (unlikely(sk->sk_prot == &tcp_prot)) {
+		/* we are being invoked after mptcp_accept() has
+		 * accepted a non-mp-capable flow: sk is a tcp_sk,
+		 * not an mptcp one.
+		 *
+		 * Hand the socket over to tcp so all further socket ops
+		 * bypass mptcp.
+		 */
+		sock->ops = &inet_stream_ops;
+		return sock;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
+		sock->ops = &inet6_stream_ops;
+		return sock;
+#endif
+	}
+
+	return NULL;
+}
+
 static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 {
+	struct socket *sock;
+
	sock_owned_by_me((const struct sock *)msk);

+	sock = mptcp_is_tcpsk((struct sock *)msk);
+	if (unlikely(sock))
+		return sock;
+
	if (likely(!__mptcp_needs_tcp_fallback(msk)))
		return NULL;

@@ -84,6 +117,10 @@ static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
	struct socket *ssock;
	int err;

+	ssock = __mptcp_tcp_fallback(msk);
+	if (unlikely(ssock))
+		return ssock;
+
	ssock = __mptcp_nmpc_socket(msk);
	if (ssock)
		goto set_state;
@@ -121,6 +158,27 @@ static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
	MPTCP_SKB_CB(skb)->offset = offset;
 }

+/* both sockets must be locked */
+static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
+				    struct sock *ssk)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);
+
+	/* revalidate data sequence number.
+	 *
+	 * mptcp_subflow_data_available() is usually called
+	 * without msk lock. It's unlikely (but possible)
+	 * that msk->ack_seq has been advanced since the last
+	 * call found in-sequence data.
+	 */
+	if (likely(dsn == msk->ack_seq))
+		return true;
+
+	subflow->data_avail = 0;
+	return mptcp_subflow_data_available(ssk);
+}
+
 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
@@ -132,6 +190,11 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
	struct tcp_sock *tp;
	bool done = false;

+	if (!mptcp_subflow_dsn_valid(msk, ssk)) {
+		*bytes = 0;
+		return false;
+	}
+
	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);

@@ -290,6 +353,15 @@ void mptcp_data_acked(struct sock *sk)
		sock_hold(sk);
 }

+void mptcp_subflow_eof(struct sock *sk)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
+
+	if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
+	    schedule_work(&msk->work))
+		sock_hold(sk);
+}
+
 static void mptcp_stop_timer(struct sock *sk)
 {
	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -994,6 +1066,27 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
	return 0;
 }

+static void mptcp_check_for_eof(struct mptcp_sock *msk)
+{
+	struct mptcp_subflow_context *subflow;
+	struct sock *sk = (struct sock *)msk;
+	int receivers = 0;
+
+	mptcp_for_each_subflow(msk, subflow)
+		receivers += !subflow->rx_eof;
+
+	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
+		/* hopefully temporary hack: propagate shutdown status
+		 * to msk, when all subflows agree on it
+		 */
+		sk->sk_shutdown |= RCV_SHUTDOWN;
+
+		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+		set_bit(MPTCP_DATA_READY, &msk->flags);
+		sk->sk_data_ready(sk);
+	}
+}
+
 static void mptcp_worker(struct work_struct *work)
 {
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -1010,6 +1103,9 @@ static void mptcp_worker(struct work_struct *work)
	__mptcp_flush_join_list(msk);
	__mptcp_move_skbs(msk);

+	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
+		mptcp_check_for_eof(msk);
+
	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		goto unlock;

@@ -1752,7 +1848,9 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
	msk = mptcp_sk(sk);
	lock_sock(sk);
-	ssock = __mptcp_nmpc_socket(msk);
+	ssock = __mptcp_tcp_fallback(msk);
+	if (!ssock)
+		ssock = __mptcp_nmpc_socket(msk);
	if (ssock) {
		mask = ssock->ops->poll(file, ssock, wait);
		release_sock(sk);
@@ -1762,9 +1860,6 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
	release_sock(sk);
	sock_poll_wait(file, sock, wait);
	lock_sock(sk);
-	ssock = __mptcp_tcp_fallback(msk);
-	if (unlikely(ssock))
-		return ssock->ops->poll(file, ssock, NULL);

	if (test_bit(MPTCP_DATA_READY, &msk->flags))
		mask = EPOLLIN | EPOLLRDNORM;
@@ -1783,11 +1878,17 @@ static int mptcp_shutdown(struct socket *sock, int how)
 {
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
+	struct socket *ssock;
	int ret = 0;

	pr_debug("sk=%p, how=%d", msk, how);

	lock_sock(sock->sk);
+	ssock = __mptcp_tcp_fallback(msk);
+	if (ssock) {
+		release_sock(sock->sk);
+		return inet_shutdown(ssock, how);
+	}

	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index f733c5425552..67448002a2d7 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -89,6 +89,7 @@
 #define MPTCP_DATA_READY	0
 #define MPTCP_SEND_SPACE	1
 #define MPTCP_WORK_RTX		2
+#define MPTCP_WORK_EOF		3

 static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
 {
@@ -339,6 +340,7 @@ void mptcp_finish_connect(struct sock *sk);
 void mptcp_data_ready(struct sock *sk, struct sock *ssk);
 bool mptcp_finish_join(struct sock *sk);
 void mptcp_data_acked(struct sock *sk);
+void mptcp_subflow_eof(struct sock *sk);

 int mptcp_token_new_request(struct request_sock *req);
 void mptcp_token_destroy_request(u32 token);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index b5180c81588e..50a8bea987c6 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -994,8 +994,7 @@ static void subflow_state_change(struct sock *sk)
	if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
-		parent->sk_shutdown |= RCV_SHUTDOWN;
-		__subflow_state_change(parent);
+		mptcp_subflow_eof(parent);
	}
 }
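With the subflow.c change above, a subflow hitting EOF no longer pokes the parent socket directly from the data path; it sets MPTCP_WORK_EOF and lets the worker propagate the shutdown under the msk lock. The test_and_set_bit()/schedule_work() pairing also coalesces duplicate events. A userspace analogue of that coalescing (C11 atomics standing in for the kernel bitops):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag work_eof = ATOMIC_FLAG_INIT;
	static int scheduled;

	static void subflow_eof(void)
	{
		/* only the first caller schedules the worker */
		if (!atomic_flag_test_and_set(&work_eof))
			scheduled++;	/* stand-in for schedule_work() */
	}

	int main(void)
	{
		subflow_eof();
		subflow_eof();	/* coalesced into the pending run */
		subflow_eof();
		printf("worker scheduled %d time(s)\n", scheduled);
		return 0;
	}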
diff --git a/net/mptcp/token.c b/net/mptcp/token.c
index 129a5ad1bc35..33352dd99d4d 100644
--- a/net/mptcp/token.c
+++ b/net/mptcp/token.c
@@ -40,7 +40,7 @@ static int token_used __read_mostly;

 /**
 * mptcp_token_new_request - create new key/idsn/token for subflow_request
- * @req - the request socket
+ * @req: the request socket
 *
 * This function is called when a new mptcp connection is coming in.
 *
@@ -80,7 +80,7 @@ int mptcp_token_new_request(struct request_sock *req)

 /**
 * mptcp_token_new_connect - create new key/idsn/token for subflow
- * @sk - the socket that will initiate a connection
+ * @sk: the socket that will initiate a connection
 *
 * This function is called when a new outgoing mptcp connection is
 * initiated.
@@ -125,6 +125,7 @@ int mptcp_token_new_connect(struct sock *sk)
 /**
 * mptcp_token_new_accept - insert token for later processing
 * @token: the token to insert to the tree
+ * @conn: the just cloned socket linked to the new connection
 *
 * Called when a SYN packet creates a new logical connection, i.e.
 * is not a join request.
@@ -169,7 +170,7 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token)

 /**
 * mptcp_token_destroy_request - remove mptcp connection/token
- * @token - token of mptcp connection to remove
+ * @token: token of mptcp connection to remove
 *
 * Remove not-yet-fully-established incoming connection identified
 * by @token.
@@ -183,7 +184,7 @@ void mptcp_token_destroy_request(u32 token)

 /**
 * mptcp_token_destroy - remove mptcp connection/token
- * @token - token of mptcp connection to remove
+ * @token: token of mptcp connection to remove
 *
 * Remove the connection identified by @token.
 */
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index fd8a01ca7a2d..2398d7238300 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -462,12 +462,14 @@ static void flow_table_copy_flows(struct table_instance *old,
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
-			hlist_for_each_entry(flow, head,
-					     ufid_table.node[old_ver])
+			hlist_for_each_entry_rcu(flow, head,
+						 ufid_table.node[old_ver],
+						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
-			hlist_for_each_entry(flow, head,
-					     flow_table.node[old_ver])
+			hlist_for_each_entry_rcu(flow, head,
+						 flow_table.node[old_ver],
+						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 9904299424a1..61e95029c18f 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -26,9 +27,12 @@

 #define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

+struct tcindex_data;
+
 struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
+	struct tcindex_data	*p;
	struct rcu_work		rwork;
 };

@@ -49,6 +53,7 @@ struct tcindex_data {
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
+	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
 };

@@ -57,6 +62,20 @@ static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
 }

+static void tcindex_data_get(struct tcindex_data *p)
+{
+	refcount_inc(&p->refcnt);
+}
+
+static void tcindex_data_put(struct tcindex_data *p)
+{
+	if (refcount_dec_and_test(&p->refcnt)) {
+		kfree(p->perfect);
+		kfree(p->h);
+		kfree(p);
+	}
+}
+
 static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
 {
@@ -132,6 +151,7 @@ static int tcindex_init(struct tcf_proto *tp)
	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
+	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
@@ -141,6 +161,7 @@ static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
 {
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
+	tcindex_data_put(r->p);
 }

 static void tcindex_destroy_rexts_work(struct work_struct *work)
@@ -212,6 +233,8 @@ static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
		else
			__tcindex_destroy_fexts(f);
	} else {
+		tcindex_data_get(p);
+
		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
@@ -228,9 +251,7 @@ static void tcindex_destroy_work(struct work_struct *work)
					      struct tcindex_data,
					      rwork);

-	kfree(p->perfect);
-	kfree(p->h);
-	kfree(p);
+	tcindex_data_put(p);
 }

 static inline int
@@ -248,9 +269,11 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
 };

 static int tcindex_filter_result_init(struct tcindex_filter_result *r,
+				      struct tcindex_data *p,
				      struct net *net)
 {
	memset(r, 0, sizeof(*r));
+	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
 }
@@ -290,6 +313,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
+		cp->perfect[i].p = cp;
	}

	return 0;
@@ -334,6 +358,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;
+	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -366,7 +391,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
	}
	cp->h = p->h;

-	err = tcindex_filter_result_init(&new_filter_result, net);
+	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
@@ -434,7 +459,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		goto errout_alloc;
	f->key = handle;
	f->next = NULL;
-	err = tcindex_filter_result_init(&f->result, net);
+	err = tcindex_filter_result_init(&f->result, cp, net);
	if (err < 0) {
		kfree(f);
		goto errout_alloc;
@@ -447,7 +472,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
	}

	if (old_r && old_r != r) {
-		err = tcindex_filter_result_init(old_r, net);
+		err = tcindex_filter_result_init(old_r, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
@@ -571,6 +596,14 @@ static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

+			/* tcf_queue_work() does not guarantee the ordering we
+			 * want, so we have to take this refcnt temporarily to
+			 * ensure 'p' is freed after all tcindex_filter_result
+			 * here. Imperfect hash does not need this, because it
+			 * uses linked lists rather than an array.
+			 */
+			tcindex_data_get(p);
+
			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
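The tcindex series above hinges on one idea: every asynchronously queued destroy work takes a reference on the shared tcindex_data, so the final kfree() happens only after the last worker's put, regardless of the order in which tcf_queue_work() runs them. A single-threaded userspace sketch of the get/put discipline (a plain int standing in for refcount_t):

	#include <stdio.h>

	struct data {
		int refcnt;
	};

	static void data_get(struct data *p) { p->refcnt++; }

	static void data_put(struct data *p)
	{
		if (--p->refcnt == 0)
			printf("last put: freeing shared data\n");
	}

	int main(void)
	{
		struct data d = { .refcnt = 1 };	/* owner's reference */

		data_get(&d);	/* taken before queueing destroy work */
		data_put(&d);	/* owner drops its reference first... */
		data_put(&d);	/* ...the worker's put frees it last */
		return 0;
	}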