Merge tag 'net-5.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from bpf.

  Current release - regressions:

   - vhost_net: fix OoB on sendmsg() failure

   - mlx5: bridge, fix uninitialized variable usage

   - bnxt_en: fix error recovery regression

  Current release - new code bugs:

   - bpf, mm: fix lockdep warning triggered by stack_map_get_build_id_offset()

  Previous releases - regressions:

   - r6040: restore MDIO clock frequency after MAC reset

   - tcp: fix tp->undo_retrans accounting in tcp_sacktag_one()

   - dsa: flush switchdev workqueue before tearing down CPU/DSA ports

  Previous releases - always broken:

   - ptp: dp83640: don't define PAGE0, avoid compiler warning

   - igc: fix tunnel segmentation offloads

   - phylink: update SFP selected interface on advertising changes

   - stmmac: fix system hang caused by eee_ctrl_timer during suspend/resume

   - mlx5e: fix mutual exclusion between CQE compression and HW TS

  Misc:

   - bpf, cgroups: fix cgroup v2 fallback on v1/v2 mixed mode

   - sfc: fallback for lack of xdp tx queues

   - hns3: add option to turn off page pool feature"

* tag 'net-5.15-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (67 commits)
  mlxbf_gige: clear valid_polarity upon open
  igc: fix tunnel offloading
  net/{mlx5|nfp|bnxt}: Remove unnecessary RTNL lock assert
  net: wan: wanxl: define CROSS_COMPILE_M68K
  selftests: nci: replace unsigned int with int
  net: dsa: flush switchdev workqueue before tearing down CPU/DSA ports
  Revert "net: phy: Uniform PHY driver access"
  net: dsa: destroy the phylink instance on any error in dsa_slave_phy_setup
  ptp: dp83640: don't define PAGE0
  bnx2x: Fix enabling network interfaces without VFs
  Revert "Revert "ipv4: fix memory leaks in ip_cmsg_send() callers""
  tcp: fix tp->undo_retrans accounting in tcp_sacktag_one()
  net-caif: avoid user-triggerable WARN_ON(1)
  bpf, selftests: Add test case for mixed cgroup v1/v2
  bpf, selftests: Add cgroup v1 net_cls classid helpers
  bpf, cgroups: Fix cgroup v2 fallback on v1/v2 mixed mode
  bpf: Add oversize check before call kvcalloc()
  net: hns3: fix the timing issue of VF clearing interrupt sources
  net: hns3: fix the exception when query imp info
  net: hns3: disable mac in flr process
  ...
Linus Torvalds 2021-09-16 13:05:42 -07:00
commit fc0c0548c1
80 changed files with 772 additions and 386 deletions
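
Before the per-file hunks below, one of the fixes called out above may be easier to follow with a short illustration: the tcp_sacktag_one() change (shown later in this diff) stops decrementing tp->undo_retrans by one per SACKed range and instead subtracts the number of packets the range covers, clamped at zero. The stand-alone user-space sketch below models only that accounting; it is not the kernel code, and the helper name and scenario are invented for illustration.

/*
 * Toy model of the undo_retrans accounting fixed in tcp_sacktag_one():
 * a DSACK covering several retransmitted packets must reduce the
 * counter by that packet count (pcount), never below zero.
 */
#include <stdio.h>

static int undo_retrans;	/* retransmissions still eligible for undo */

/* hypothetical helper mirroring the corrected accounting */
static void account_dsack(int pcount)
{
	undo_retrans -= pcount;
	if (undo_retrans < 0)
		undo_retrans = 0;	/* clamp, as max_t(int, 0, ...) does */
}

int main(void)
{
	undo_retrans = 3;	/* three segments were retransmitted */
	account_dsack(2);	/* one DSACK covers two coalesced packets */
	account_dsack(2);	/* an overlapping DSACK must not go negative */
	printf("undo_retrans = %d\n", undo_retrans);	/* prints 0 */
	return 0;
}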

View File

@ -19,7 +19,9 @@ properties:
- const: allwinner,sun8i-v3s-emac
- const: allwinner,sun50i-a64-emac
- items:
- const: allwinner,sun50i-h6-emac
- enum:
- allwinner,sun20i-d1-emac
- allwinner,sun50i-h6-emac
- const: allwinner,sun50i-a64-emac
reg:

View File

@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
reset_control_assert(gphy_fw->reset);
/* The vendor BSP uses a 200ms delay after asserting the reset line.
* Without this some users are observing that the PHY is not coming up
* on the MDIO bus.
*/
msleep(200);
ret = request_firmware(&fw, gphy_fw->fw_name, dev);
if (ret) {
dev_err(dev, "failed to load firmware: %s, error: %i\n",

View File

@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
}
static int
qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = salve_bus->priv;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@ -682,10 +680,8 @@ exit:
}
static int
qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
{
struct qca8k_priv *priv = salve_bus->priv;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@ -726,6 +722,24 @@ exit:
return ret;
}
static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = slave_bus->priv;
struct mii_bus *bus = priv->bus;
return qca8k_mdio_write(bus, phy, regnum, data);
}
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
struct mii_bus *bus = priv->bus;
return qca8k_mdio_read(bus, phy, regnum);
}
static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
bus->priv = (void *)priv;
bus->name = "qca8k slave mii";
bus->read = qca8k_mdio_read;
bus->write = qca8k_mdio_write;
bus->read = qca8k_internal_mdio_read;
bus->write = qca8k_internal_mdio_write;
snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
ds->index);

View File

@ -922,13 +922,16 @@ static void __init ne_add_devices(void)
}
}
#ifdef MODULE
static int __init ne_init(void)
{
int retval;
ne_add_devices();
if (IS_MODULE(CONFIG_NE2000))
ne_add_devices();
retval = platform_driver_probe(&ne_driver, ne_drv_probe);
if (retval) {
if (IS_MODULE(CONFIG_NE2000) && retval) {
if (io[0] == 0)
pr_notice("ne.c: You must supply \"io=0xNNN\""
" value(s) for ISA cards.\n");
@ -941,18 +944,8 @@ static int __init ne_init(void)
return retval;
}
module_init(ne_init);
#else /* MODULE */
static int __init ne_init(void)
{
int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
/* Unregister unused platform_devices. */
ne_loop_rm_unreg(0);
return retval;
}
module_init(ne_init);
#ifdef CONFIG_NETDEV_LEGACY_INIT
#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT)
struct net_device * __init ne_probe(int unit)
{
int this_dev;
@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit)
return ERR_PTR(-ENODEV);
}
#endif
#endif /* MODULE */
static void __exit ne_exit(void)
{

View File

@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
#ifdef XMT_VIA_SKB
skb_save[i] = p->tmd_skb[i];
#endif
buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
blen[i] = tmdp->blen;
tmdp->u.s.status = 0x0;
}

View File

@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* SR-IOV capability was enabled but there are no VFs*/
if (iov->total == 0) {
err = -EINVAL;
err = 0;
goto failed;
}

View File

@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
fw_health->tmr_counter = fw_health->tmr_multiplier;
if (!fw_health->enabled) {
if (!fw_health->enabled)
fw_health->last_fw_heartbeat =
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
}
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
fw_health->master, fw_health->last_fw_reset_cnt,
@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
int j;
if (!txr->tx_buf_ring)
continue;
for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb;
@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
}
skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
goto skip_rx_buf_free;
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
dma_addr_t mapping = rx_buf->mapping;
@ -2836,6 +2841,11 @@ skip_rx_tpa_free:
kfree(data);
}
}
skip_rx_buf_free:
if (!rxr->rx_agg_ring)
goto skip_rx_agg_free;
for (i = 0; i < max_agg_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page;
@ -2852,6 +2862,8 @@ skip_rx_tpa_free:
__free_page(page);
}
skip_rx_agg_free:
if (rxr->rx_page) {
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
struct pci_dev *pdev = bp->pdev;
int i;
if (!rmem->pg_arr)
goto skip_pages;
for (i = 0; i < rmem->nr_pages; i++) {
if (!rmem->pg_arr[i])
continue;
@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL;
}
skip_pages:
if (rmem->pg_tbl) {
size_t pg_tbl_size = rmem->nr_pages * 8;
@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
kfree(cpr->cp_desc_ring);
cpr->cp_desc_ring = NULL;
ring->ring_mem.pg_arr = NULL;
kfree(cpr->cp_desc_mapping);
cpr->cp_desc_mapping = NULL;
ring->ring_mem.dma_arr = NULL;
}
static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
bp->fw_health->enabled) {
bp->fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
}
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();

View File

@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;
/* All callback list access should be protected by RTNL. */
ASSERT_RTNL();
list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
if (cb_priv->tunnel_netdev == netdev)
return cb_priv;

View File

@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
struct platform_device *plat_dev = pci_get_drvdata(pdev);
struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
platform_device_unregister(plat_dev);
clk_unregister(plat_data->pclk);
clk_unregister(plat_data->hclk);
platform_device_unregister(plat_dev);
}
static const struct pci_device_id dev_id_table[] = {

View File

@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
static bool page_pool_enabled = true;
module_param(page_pool_enabled, bool, 0400);
#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
#define HNS3_OUTER_VLAN_TAG 2
#define HNS3_MIN_TX_LEN 33U
#define HNS3_MIN_TUN_PKT_LEN 65U
/* hns3_pci_tbl - PCI Device ID Table
*
@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
l4.tcp->doff);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
return skb_checksum_help(skb);
if (hns3_tunnel_csum_bug(skb)) {
int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
return ret ? ret : skb_checksum_help(skb);
}
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@ -4753,7 +4760,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
goto out_with_desc_cb;
if (!HNAE3_IS_TX_RING(ring)) {
hns3_alloc_page_pool(ring);
if (page_pool_enabled)
hns3_alloc_page_pool(ring);
ret = hns3_alloc_ring_buffers(ring);
if (ret)

View File

@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
}
bd_num = le32_to_cpu(req->bd_num);
if (!bd_num) {
dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
return -EINVAL;
}
desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc_src)

View File

@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
unsigned int i;
int ret;
int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret)
@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
/* Set the init affinity based on pci func number */
i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
&hdev->affinity_mask);
/* Set the affinity based on numa node */
node = dev_to_node(&hdev->pdev->dev);
if (node != NUMA_NO_NODE)
cpumask = cpumask_of_node(node);
cpumask_copy(&hdev->affinity_mask, cpumask);
return ret;
}
@ -8125,11 +8127,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_clear_arfs_rules(hdev);
spin_unlock_bh(&hdev->fd_rule_lock);
/* If it is not PF reset, the firmware will disable the MAC,
/* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
hdev->reset_type != HNAE3_FUNC_RESET) {
hdev->reset_type != HNAE3_FUNC_RESET &&
hdev->reset_type != HNAE3_FLR_RESET) {
hclge_mac_stop_phy(hdev);
hclge_update_link_status(hdev);
return;

View File

@ -2465,6 +2465,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
hclgevf_enable_vector(&hdev->misc_vector, false);
event_cause = hclgevf_check_evt_cause(hdev, &clearval);
if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
hclgevf_clear_event_cause(hdev, clearval);
switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
@ -2477,10 +2479,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
hclgevf_clear_event_cause(hdev, clearval);
if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
hclgevf_enable_vector(&hdev->misc_vector, true);
}
return IRQ_HANDLED;
}

View File

@ -4700,6 +4700,22 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
if (adapter->failover_pending) {
adapter->init_done_rc = -EAGAIN;
netdev_dbg(netdev, "Failover pending, ignoring login response\n");
complete(&adapter->init_done);
/* login response buffer will be released on reset */
return 0;
}
if (adapter->failover_pending) {
adapter->init_done_rc = -EAGAIN;
netdev_dbg(netdev, "Failover pending, ignoring login response\n");
complete(&adapter->init_done);
/* login response buffer will be released on reset */
return 0;
}
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");

View File

@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
set_bit(ICE_FLAG_AUX_ENA, pf->flags);
ice_plug_aux_dev(pf);
}
}
@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
ice_unplug_aux_dev(pf);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
#endif /* _ICE_H_ */

View File

@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
struct auxiliary_device *adev;
int ret;
/* if this PF doesn't support a technology that requires auxiliary
* devices, then gracefully exit
*/
if (!ice_is_aux_ena(pf))
return 0;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;

View File

@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= netdev->vlan_features;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;

View File

@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param =
static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
int err;
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return 0;
err = devlink_param_register(devlink, &enable_rdma_param);
@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
devlink_param_unpublish(devlink, &enable_rdma_param);

View File

@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
return err;
goto err_cancel_work;
}
err = mlx5_fw_tracer_create_mkey(tracer);
@ -1031,6 +1031,7 @@ err_notifier_unregister:
mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
return err;
}

View File

@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);

View File

@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
u16 vport_num, esw_owner_vhca_id;
struct netlink_ext_ack *extack;
int ifindex = upper->ifindex;
int err;
int err = 0;
if (!netif_is_bridge_master(upper))
return 0;
@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
const struct switchdev_attr *attr = port_attr_info->attr;
u16 vport_num, esw_owner_vhca_id;
int err;
int err = 0;
if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
&esw_owner_vhca_id))

View File

@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
{
struct mlx5e_rep_indr_block_priv *cb_priv;
/* All callback list access should be protected by RTNL. */
ASSERT_RTNL();
list_for_each_entry(cb_priv,
&rpriv->uplink_priv.tc_indr_block_priv_list,
list)

View File

@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
rqn = res->drop_rqn;
err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);

View File

@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return set_pflag_cqe_based_moder(netdev, enable, true);
}
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
struct mlx5e_params new_params;
@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
if (new_val && !priv->profile->rx_ptp_support &&
priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
if (rx_filter)
new_params.ptp_rx = new_val;
if (new_params.ptp_rx == priv->channels.params.ptp_rx)
@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool rx_filter;
int err;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
if (err)
return err;

View File

@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
if (!rx_filter)
/* Reset CQE compression to Admin default */
return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
return 0;
/* Disable CQE compression */
netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
if (err)
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);

View File

@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
rcu_read_unlock();
free_match_list(match_head, ft_locked);
err = -ENOMEM;
goto out;
return -ENOMEM;
}
curr_match->g = g;
list_add_tail(&curr_match->list, &match_head->list);
}
out:
rcu_read_unlock();
return err;
}

View File

@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
struct mlx5_core_dev *dev1;
struct mlx5_lag *ldev;
ldev = mlx5_lag_dev(dev);
if (!ldev)
return;
mlx5_dev_list_lock();
ldev = mlx5_lag_dev(dev);
dev0 = ldev->pf[MLX5_LAG_P1].dev;
dev1 = ldev->pf[MLX5_LAG_P2].dev;
@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
mlx5_dev_list_lock();
ldev = mlx5_lag_dev(dev);
if (!ldev)
return;
mlx5_dev_list_lock();
ldev->mode_changes_in_progress--;
mlx5_dev_list_unlock();
mlx5_queue_bond_work(ldev, 0);

View File

@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
err = mlxbf_gige_clean_port(priv);
if (err)
goto free_irqs;
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
* receive polarity used by hardware.
*/
priv->valid_polarity = 0;
err = mlxbf_gige_rx_init(priv);
if (err)
goto free_irqs;

View File

@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
int err;
u16 i;
dma_buf = kzalloc(sizeof(*dma_buf) +
q_depth * sizeof(struct hwc_work_request),
GFP_KERNEL);
dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
if (!dma_buf)
return -ENOMEM;

View File

@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
/* All callback list access should be protected by RTNL. */
ASSERT_RTNL();
list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
if (cb_priv->netdev == netdev)
return cb_priv;

View File

@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att)
{
enum nvm_image_type type;
int rc;
u32 i;
/* Translate image_id into MFW definitions */
@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
qed_mcp_nvm_info_populate(p_hwfn);
rc = qed_mcp_nvm_info_populate(p_hwfn);
if (rc)
return rc;
for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
if (type == p_hwfn->nvm_info.image_att[i].image_type)
break;

View File

@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
const struct firmware *fw = fw_info->fw;
u32 dest, *p_cache, *temp;
int i, ret = -EIO;
__le32 *temp_le;
u8 data[16];
size_t size;
int i, ret;
u64 addr;
temp = vzalloc(fw->size);

View File

@ -119,6 +119,8 @@
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
#define MAC_SM_RST 0x0002 /* MAC status machine reset */
#define MD_CSC 0xb6 /* MDC speed control register */
#define MD_CSC_DEFAULT 0x0030
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
{
void __iomem *ioaddr = lp->base;
int limit = MAC_DEF_TIMEOUT;
u16 cmd;
u16 cmd, md_csc;
md_csc = ioread16(ioaddr + MD_CSC);
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
/* Restore MDIO clock frequency */
if (md_csc != MD_CSC_DEFAULT)
iowrite16(md_csc, ioaddr + MD_CSC);
}
static void r6040_init_mac_regs(struct net_device *dev)

View File

@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* We need a channel per event queue, plus a VI per tx queue.
* This may be more pessimistic than it needs to be.
*/
if (n_channels + n_xdp_ev > max_channels) {
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
n_xdp_ev, n_channels, max_channels);
netif_err(efx, drv, efx->net_dev,
"XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
if (n_channels >= max_channels) {
efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
netif_warn(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
n_xdp_ev, n_channels, max_channels);
netif_warn(efx, drv, efx->net_dev,
"XDP_TX and XDP_REDIRECT might decrease device's performance\n");
} else if (n_channels + n_xdp_tx > efx->max_vis) {
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
n_xdp_tx, n_channels, efx->max_vis);
netif_err(efx, drv, efx->net_dev,
"XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
netif_warn(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
n_xdp_tx, n_channels, efx->max_vis);
netif_warn(efx, drv, efx->net_dev,
"XDP_TX and XDP_REDIRECT might decrease device's performance\n");
} else if (n_channels + n_xdp_ev > max_channels) {
efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
netif_warn(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
n_xdp_ev, n_channels, max_channels);
n_xdp_ev = max_channels - n_channels;
netif_warn(efx, drv, efx->net_dev,
"XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
} else {
efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
}
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
efx->n_xdp_channels = n_xdp_ev;
efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
"Allocating %d TX and %d event queues for XDP\n",
n_xdp_tx, n_xdp_ev);
n_xdp_ev * tx_per_ev, n_xdp_ev);
} else {
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = n_xdp_tx;
}
if (vec_count < n_channels) {
@ -858,6 +872,20 @@ rollback:
goto out;
}
static inline int
efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
struct efx_tx_queue *tx_queue)
{
if (xdp_queue_number >= efx->xdp_tx_queue_count)
return -EINVAL;
netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
tx_queue->channel->channel, tx_queue->label,
xdp_queue_number, tx_queue->queue);
efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
return 0;
}
int efx_set_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
/* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
* not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
* We still allocate and probe those
* TXQs, but never use them.
*/
if (xdp_queue_number < efx->xdp_tx_queue_count) {
netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
channel->channel, tx_queue->label,
xdp_queue_number, tx_queue->queue);
efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
if (rc == 0)
xdp_queue_number++;
}
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx)
channel->channel, tx_queue->label,
tx_queue->queue);
}
/* If XDP is borrowing queues from net stack, it must use the queue
* with no csum offload, which is the first one of the channel
* (note: channel->tx_queue_by_type is not initialized yet)
*/
if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
tx_queue = &channel->tx_queue[0];
rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
if (rc == 0)
xdp_queue_number++;
}
}
}
}
WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
xdp_queue_number != efx->xdp_tx_queue_count);
WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
xdp_queue_number > efx->xdp_tx_queue_count);
/* If we have more CPUs than assigned XDP TX queues, assign the already
* existing queues to the exceeding CPUs
*/
next_queue = 0;
while (xdp_queue_number < efx->xdp_tx_queue_count) {
tx_queue = efx->xdp_tx_queues[next_queue++];
rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
if (rc == 0)
xdp_queue_number++;
}
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)

View File

@ -782,6 +782,12 @@ struct efx_async_filter_insertion {
#define EFX_RPS_MAX_IN_FLIGHT 8
#endif /* CONFIG_RFS_ACCEL */
enum efx_xdp_tx_queues_mode {
EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */
EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */
EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */
};
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@ -820,6 +826,7 @@ struct efx_async_filter_insertion {
* should be allocated for this NIC
* @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
* @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
* @xdp_txq_queues_mode: XDP TX queues sharing strategy.
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
@ -979,6 +986,7 @@ struct efx_nic {
unsigned int xdp_tx_queue_count;
struct efx_tx_queue **xdp_tx_queues;
enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
unsigned rxq_entries;
unsigned txq_entries;

View File

@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
unsigned int len;
int space;
int cpu;
int i;
int i = 0;
if (unlikely(n && !xdpfs))
return -EINVAL;
if (unlikely(!n))
return 0;
cpu = raw_smp_processor_id();
if (!efx->xdp_tx_queue_count ||
unlikely(cpu >= efx->xdp_tx_queue_count))
if (unlikely(cpu >= efx->xdp_tx_queue_count))
return -EINVAL;
tx_queue = efx->xdp_tx_queues[cpu];
if (unlikely(!tx_queue))
return -EINVAL;
if (unlikely(n && !xdpfs))
return -EINVAL;
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
if (!n)
return 0;
/* If we're borrowing net stack queues we have to handle stop-restart
* or we might block the queue and it will be considered as frozen
*/
if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
if (netif_tx_queue_stopped(tx_queue->core_txq))
goto unlock;
efx_tx_maybe_stop_queue(tx_queue);
}
/* Check for available space. We should never need multiple
* descriptors per frame.
@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
if (flush && i > 0)
efx_nic_push_buffers(tx_queue);
unlock:
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
return i == 0 ? -EIO : i;
}

View File

@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_100_150M;
else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
priv->clk_csr = STMMAC_CSR_150_250M;
else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
}
@ -7118,7 +7118,6 @@ int stmmac_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
u32 chan;
int ret;
if (!ndev || !netif_running(ndev))
return 0;
@ -7150,13 +7149,6 @@ int stmmac_suspend(struct device *dev)
} else {
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
clk_disable_unprepare(priv->plat->clk_ptp_ref);
ret = pm_runtime_force_suspend(dev);
if (ret) {
mutex_unlock(&priv->lock);
return ret;
}
}
mutex_unlock(&priv->lock);
@ -7242,12 +7234,6 @@ int stmmac_resume(struct device *dev)
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
if (priv->plat->clk_ptp_ref)
clk_prepare_enable(priv->plat->clk_ptp_ref);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);

View File

@ -9,6 +9,7 @@
*******************************************************************************/
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
return stmmac_bus_clks_config(priv, true);
}
static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
int ret;
if (!netif_running(ndev))
return 0;
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
/* Disable clock in case of PWM is off */
clk_disable_unprepare(priv->plat->clk_ptp_ref);
ret = pm_runtime_force_suspend(dev);
if (ret)
return ret;
}
return 0;
}
static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
int ret;
if (!netif_running(ndev))
return 0;
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
/* enable the clk previously disabled */
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
clk_prepare_enable(priv->plat->clk_ptp_ref);
}
return 0;
}
const struct dev_pm_ops stmmac_pltfr_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
};
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

View File

@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
* table region determines the number of entries it has.
*/
if (filter) {
count = hweight32(ipa->filter_map);
/* Include one extra "slot" to hold the filter map itself */
count = 1 + hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
count = mem->size / sizeof(__le64);

View File

@ -5,7 +5,7 @@
#ifndef HAVE_DP83640_REGISTERS
#define HAVE_DP83640_REGISTERS
#define PAGE0 0x0000
/* #define PAGE0 0x0000 */
#define PHYCR2 0x001c /* PHY Control Register 2 */
#define PAGE4 0x0004

View File

@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock);
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->mdio.dev.driver;
struct phy_driver *phydrv = to_phy_driver(drv);
struct net_device *netdev = phydev->attached_dev;
if (!phydev->drv->suspend)
if (!drv || !phydrv->suspend)
return false;
/* PHY not attached? May suspend if the PHY has not already been

View File

@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
return -EINVAL;
/* If this link is with an SFP, ensure that changes to advertised modes
* also cause the associated interface to be selected such that the
* link can be configured correctly.
*/
if (pl->sfp_port && pl->sfp_bus) {
config.interface = sfp_select_interface(pl->sfp_bus,
config.advertising);
if (config.interface == PHY_INTERFACE_MODE_NA) {
phylink_err(pl,
"selection of interface failed, advertisement %*pb\n",
__ETHTOOL_LINK_MODE_MASK_NBITS,
config.advertising);
return -EINVAL;
}
/* Revalidate with the selected interface */
linkmode_copy(support, pl->supported);
if (phylink_validate(pl, support, &config)) {
phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return -EINVAL;
}
}
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
changed = !linkmode_equal(pl->supported, support);
changed = !linkmode_equal(pl->supported, support) ||
!linkmode_equal(pl->link_config.advertising,
config.advertising);
if (changed) {
linkmode_copy(pl->supported, support);
linkmode_copy(pl->link_config.advertising, config.advertising);

View File

@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
clean-files := wanxlfw.inc
$(obj)/wanxl.o: $(obj)/wanxlfw.inc
CROSS_COMPILE_M68K = m68k-linux-gnu-
ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
ifeq ($(ARCH),m68k)
M68KCC = $(CC)

View File

@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net,
.num = nvq->batched_xdp,
.ptr = nvq->xdp,
};
int err;
int i, err;
if (nvq->batched_xdp == 0)
goto signal_used;
@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net,
err = sock->ops->sendmsg(sock, msghdr, 0);
if (unlikely(err < 0)) {
vq_err(&nvq->vq, "Fail to batch sending packets\n");
/* free pages owned by XDP; since this is an unlikely error path,
* keep it simple and avoid more complex bulk update for the
* used pages
*/
for (i = 0; i < nvq->batched_xdp; ++i)
put_page(virt_to_head_page(nvq->xdp[i].data));
nvq->batched_xdp = 0;
nvq->done_idx = 0;
return;
}

View File

@ -752,107 +752,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
* On legacy hierarchies, net_prio and net_cls controllers directly set
* attributes on each sock which can then be tested by the network layer.
* On the default hierarchy, each sock is associated with the cgroup it was
* created in and the networking layer can match the cgroup directly.
*
* To avoid carrying all three cgroup related fields separately in sock,
* sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
* On boot, sock_cgroup_data records the cgroup that the sock was created
* in so that cgroup2 matches can be made; however, once either net_prio or
* net_cls starts being used, the area is overridden to carry prioidx and/or
* classid. The two modes are distinguished by whether the lowest bit is
* set. Clear bit indicates cgroup pointer while set bit prioidx and
* classid.
*
* While userland may start using net_prio or net_cls at any time, once
* either is used, cgroup2 matching no longer works. There is no reason to
* mix the two and this is in line with how legacy and v2 compatibility is
* handled. On mode switch, cgroup references which are already being
* pointed to by socks may be leaked. While this can be remedied by adding
* synchronization around sock_cgroup_data, given that the number of leaked
* cgroups is bound and highly unlikely to be high, this seems to be the
* better trade-off.
* On legacy hierarchies, net_prio and net_cls controllers directly
* set attributes on each sock which can then be tested by the network
* layer. On the default hierarchy, each sock is associated with the
* cgroup it was created in and the networking layer can match the
* cgroup directly.
*/
struct sock_cgroup_data {
union {
#ifdef __LITTLE_ENDIAN
struct {
u8 is_data : 1;
u8 no_refcnt : 1;
u8 unused : 6;
u8 padding;
u16 prioidx;
u32 classid;
} __packed;
#else
struct {
u32 classid;
u16 prioidx;
u8 padding;
u8 unused : 6;
u8 no_refcnt : 1;
u8 is_data : 1;
} __packed;
struct cgroup *cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
u32 classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
u16 prioidx; /* v1 */
#endif
u64 val;
};
};
/*
* There's a theoretical window where the following accessors race with
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
#ifdef CONFIG_CGROUP_NET_PRIO
return READ_ONCE(skcd->prioidx);
#else
return 1;
#endif
}
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
#ifdef CONFIG_CGROUP_NET_CLASSID
return READ_ONCE(skcd->classid);
#else
return 0;
#endif
}
/*
* If invoked concurrently, the updaters may clobber each other. The
* caller is responsible for synchronization.
*/
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
return;
if (!(skcd_buf.is_data & 1)) {
skcd_buf.val = 0;
skcd_buf.is_data = 1;
}
skcd_buf.prioidx = prioidx;
WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
#ifdef CONFIG_CGROUP_NET_PRIO
WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
if (sock_cgroup_classid(&skcd_buf) == classid)
return;
if (!(skcd_buf.is_data & 1)) {
skcd_buf.val = 0;
skcd_buf.is_data = 1;
}
skcd_buf.classid = classid;
WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
#ifdef CONFIG_CGROUP_NET_CLASSID
WRITE_ONCE(skcd->classid, classid);
#endif
}
#else /* CONFIG_SOCK_CGROUP_DATA */

View File

@ -829,33 +829,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif
void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
unsigned long v;
/*
* @skcd->val is 64bit but the following is safe on 32bit too as we
* just need the lower ulong to be written and read atomically.
*/
v = READ_ONCE(skcd->val);
if (v & 3)
return &cgrp_dfl_root.cgrp;
return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
return (struct cgroup *)(unsigned long)skcd->val;
#endif
return skcd->cgroup;
}
#else /* CONFIG_CGROUP_DATA */

View File

@ -144,15 +144,6 @@ static inline void mmap_read_unlock(struct mm_struct *mm)
up_read(&mm->mmap_lock);
}
static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
{
if (mmap_read_trylock(mm)) {
rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
return true;
}
return false;
}
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, false);

View File

@ -1940,7 +1940,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);
WRITE_ONCE(prev->next, newsk);
list->qlen++;
WRITE_ONCE(list->qlen, list->qlen + 1);
}
static inline void __skb_queue_splice(const struct sk_buff_head *list,

View File

@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
return dp->type == DSA_PORT_TYPE_USER;
}
static inline bool dsa_port_is_unused(struct dsa_port *dp)
{
return dp->type == DSA_PORT_TYPE_UNUSED;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
*/

View File

@ -179,7 +179,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
* with build_id.
*/
if (!user || !current || !current->mm || irq_work_busy ||
!mmap_read_trylock_non_owner(current->mm)) {
!mmap_read_trylock(current->mm)) {
/* cannot access current->mm, fall back to ips */
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
@ -204,9 +204,15 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
if (!work) {
mmap_read_unlock_non_owner(current->mm);
mmap_read_unlock(current->mm);
} else {
work->mm = current->mm;
/* The lock will be released once we're out of interrupt
* context. Tell lockdep that we've released it now so
* it doesn't complain that we forgot to release it.
*/
rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
irq_work_queue(&work->irq_work);
}
}

View File

@ -9912,6 +9912,8 @@ static int check_btf_line(struct bpf_verifier_env *env,
nr_linfo = attr->line_info_cnt;
if (!nr_linfo)
return 0;
if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
return -EINVAL;
rec_size = attr->line_info_rec_size;
if (rec_size < MIN_BPF_LINEINFO_SIZE ||

View File

@ -6572,74 +6572,44 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
DEFINE_SPINLOCK(cgroup_sk_update_lock);
static bool cgroup_sk_alloc_disabled __read_mostly;
void cgroup_sk_alloc_disable(void)
{
if (cgroup_sk_alloc_disabled)
return;
pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
cgroup_sk_alloc_disabled = true;
}
#else
#define cgroup_sk_alloc_disabled false
#endif
void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
if (cgroup_sk_alloc_disabled) {
skcd->no_refcnt = 1;
return;
}
/* Don't associate the sock with unrelated interrupted task's cgroup. */
if (in_interrupt())
return;
rcu_read_lock();
while (true) {
struct css_set *cset;
cset = task_css_set(current);
if (likely(cgroup_tryget(cset->dfl_cgrp))) {
skcd->val = (unsigned long)cset->dfl_cgrp;
skcd->cgroup = cset->dfl_cgrp;
cgroup_bpf_get(cset->dfl_cgrp);
break;
}
cpu_relax();
}
rcu_read_unlock();
}
void cgroup_sk_clone(struct sock_cgroup_data *skcd)
{
if (skcd->val) {
if (skcd->no_refcnt)
return;
/*
* We might be cloning a socket which is left in an empty
* cgroup and the cgroup might have already been rmdir'd.
* Don't use cgroup_get_live().
*/
cgroup_get(sock_cgroup_ptr(skcd));
cgroup_bpf_get(sock_cgroup_ptr(skcd));
}
struct cgroup *cgrp = sock_cgroup_ptr(skcd);
/*
* We might be cloning a socket which is left in an empty
* cgroup and the cgroup might have already been rmdir'd.
* Don't use cgroup_get_live().
*/
cgroup_get(cgrp);
cgroup_bpf_get(cgrp);
}
void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
struct cgroup *cgrp = sock_cgroup_ptr(skcd);
if (skcd->no_refcnt)
return;
cgroup_bpf_put(cgrp);
cgroup_put(cgrp);
}

View File

@ -53,20 +53,6 @@ struct chnl_net {
enum caif_states state;
};
static void robust_list_del(struct list_head *delete_node)
{
struct list_head *list_node;
struct list_head *n;
ASSERT_RTNL();
list_for_each_safe(list_node, n, &chnl_net_list) {
if (list_node == delete_node) {
list_del(list_node);
return;
}
}
WARN_ON(1);
}
static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
struct sk_buff *skb;
@ -364,6 +350,7 @@ static int chnl_net_init(struct net_device *dev)
ASSERT_RTNL();
priv = netdev_priv(dev);
strncpy(priv->name, dev->name, sizeof(priv->name));
INIT_LIST_HEAD(&priv->list_field);
return 0;
}
@ -372,7 +359,7 @@ static void chnl_net_uninit(struct net_device *dev)
struct chnl_net *priv;
ASSERT_RTNL();
priv = netdev_priv(dev);
robust_list_del(&priv->list_field);
list_del_init(&priv->list_field);
}
static const struct net_device_ops netdev_ops = {
@ -537,7 +524,7 @@ static void __exit chnl_exit_module(void)
rtnl_lock();
list_for_each_safe(list_node, _tmp, &chnl_net_list) {
dev = list_entry(list_node, struct chnl_net, list_field);
list_del(list_node);
list_del_init(list_node);
delete_device(dev);
}
rtnl_unlock();

View File

@ -71,11 +71,8 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
struct update_classid_context *ctx = (void *)v;
struct socket *sock = sock_from_file(file);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
if (sock)
sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
spin_unlock(&cgroup_sk_update_lock);
}
if (--ctx->batch == 0) {
ctx->batch = UPDATE_CLASSID_BATCH;
return n + 1;
@ -121,8 +118,6 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
struct css_task_iter it;
struct task_struct *p;
cgroup_sk_alloc_disable();
cs->classid = (u32)value;
css_task_iter_start(css, 0, &it);

View File

@ -207,8 +207,6 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
if (!dev)
return -ENODEV;
cgroup_sk_alloc_disable();
rtnl_lock();
ret = netprio_set_prio(of_css(of), dev, prio);
@ -221,12 +219,10 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
static int update_netprio(const void *v, struct file *file, unsigned n)
{
struct socket *sock = sock_from_file(file);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
if (sock)
sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
(unsigned long)v);
spin_unlock(&cgroup_sk_update_lock);
}
return 0;
}
@ -235,8 +231,6 @@ static void net_prio_attach(struct cgroup_taskset *tset)
struct task_struct *p;
struct cgroup_subsys_state *css;
cgroup_sk_alloc_disable();
cgroup_taskset_for_each(p, css, tset) {
void *v = (void *)(unsigned long)css->id;

View File

@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
newdp->dccps_role = DCCP_ROLE_SERVER;
newdp->dccps_hc_rx_ackvec = NULL;
newdp->dccps_service_list = NULL;
newdp->dccps_hc_rx_ccid = NULL;
newdp->dccps_hc_tx_ccid = NULL;
newdp->dccps_service = dreq->dreq_service;
newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;

View File

@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
return queue_work(dsa_owq, work);
}
void dsa_flush_workqueue(void)
{
flush_workqueue(dsa_owq);
}
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{

View File

@ -897,6 +897,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
ds->setup = false;
}
/* First tear down the non-shared, then the shared ports. This ensures that
* all work items scheduled by our switchdev handlers for user ports have
* completed before we destroy the refcounting kept on the shared ports.
*/
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
dsa_port_teardown(dp);
dsa_flush_workqueue();
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
dsa_port_teardown(dp);
}
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
dsa_switch_teardown(dp->ds);
}
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
@ -923,26 +950,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
return 0;
teardown:
list_for_each_entry(dp, &dst->ports, list)
dsa_port_teardown(dp);
dsa_tree_teardown_ports(dst);
list_for_each_entry(dp, &dst->ports, list)
dsa_switch_teardown(dp->ds);
dsa_tree_teardown_switches(dst);
return err;
}
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
dsa_port_teardown(dp);
list_for_each_entry(dp, &dst->ports, list)
dsa_switch_teardown(dp->ds);
}
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
@ -1052,6 +1066,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_master(dst);
dsa_tree_teardown_ports(dst);
dsa_tree_teardown_switches(dst);
dsa_tree_teardown_cpu_ports(dst);

View File

@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work);
void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)

View File

@ -1854,13 +1854,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
* use the switch internal MDIO bus instead
*/
ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
if (ret) {
netdev_err(slave_dev,
"failed to connect to port %d: %d\n",
dp->index, ret);
phylink_destroy(dp->pl);
return ret;
}
}
if (ret) {
netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
ERR_PTR(ret));
phylink_destroy(dp->pl);
}
return ret;

View File

@ -1346,7 +1346,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
if (dup_sack && (sacked & TCPCB_RETRANS)) {
if (tp->undo_marker && tp->undo_retrans > 0 &&
after(end_seq, tp->undo_marker))
tp->undo_retrans--;
tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
if ((sacked & TCPCB_SACKED_ACKED) &&
before(start_seq, state->reord))
state->reord = start_seq;

View File

@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void)
 {
 	int err;
 
-	udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
+	udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
 	if (!udp_tunnel_nic_workqueue)
 		return -ENOMEM;
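
Note: alloc_ordered_workqueue() returns a workqueue that executes at most one
work item at a time, in queueing order, unlike the plain alloc_workqueue()
call it replaces here. A minimal sketch of the pattern (example_wq, first_work
and second_work are illustrative names, not part of this change):

	struct workqueue_struct *example_wq;

	example_wq = alloc_ordered_workqueue("example_wq", 0);
	if (!example_wq)
		return -ENOMEM;

	/* Items run strictly one after another, in the order queued, so the
	 * second item can rely on the first having completed.
	 */
	queue_work(example_wq, &first_work);
	queue_work(example_wq, &second_work);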


@@ -1378,7 +1378,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
 	int err = -ENOMEM;
 	int allow_create = 1;
 	int replace_required = 0;
-	int sernum = fib6_new_sernum(info->nl_net);
 
 	if (info->nlh) {
 		if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
@@ -1478,7 +1477,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
 	if (!err) {
 		if (rt->nh)
 			list_add(&rt->nh_list, &rt->nh->f6i_list);
-		__fib6_update_sernum_upto_root(rt, sernum);
+		__fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
 		fib6_start_gc(info->nl_net, rt);
 	}


@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
 	}
 
 	if (tunnel->version == L2TP_HDR_VER_3 &&
-	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
+		l2tp_session_dec_refcount(session);
 		goto invalid;
+	}
 
 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
 	l2tp_session_dec_refcount(session);


@@ -1083,8 +1083,10 @@ static void __net_exit mctp_routes_net_exit(struct net *net)
 {
 	struct mctp_route *rt;
 
+	rcu_read_lock();
 	list_for_each_entry_rcu(rt, &net->mctp.routes, list)
 		mctp_route_release(rt);
+	rcu_read_unlock();
 }
 
 static struct pernet_operations mctp_net_ops = {


@@ -46,6 +46,8 @@
* Copyright (C) 2011, <lokec@ccs.neu.edu>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/mm.h>


@@ -2423,7 +2423,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 			    u32 dport, struct sk_buff_head *xmitq)
 {
-	unsigned long time_limit = jiffies + 2;
+	unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
 	struct sk_buff *skb;
 	unsigned int lim;
 	atomic_t *dcnt;


@@ -3073,7 +3073,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 	other = unix_peer(sk);
 	if (other && unix_peer(other) != sk &&
-	    unix_recvq_full(other) &&
+	    unix_recvq_full_lockless(other) &&
 	    unix_dgram_peer_wake_me(sk, other))
 		writable = 0;


@@ -12,27 +12,36 @@
 #include <unistd.h>
 #include <ftw.h>
 
 #include "cgroup_helpers.h"
 
 /*
  * To avoid relying on the system setup, when setup_cgroup_env is called
- * we create a new mount namespace, and cgroup namespace. The cgroup2
- * root is mounted at CGROUP_MOUNT_PATH
+ * we create a new mount namespace, and cgroup namespace. The cgroupv2
+ * root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't
+ * have cgroupv2 enabled at this point in time. It's easier to create our
+ * own mount namespace and manage it ourselves. We assume /mnt exists.
  *
- * Unfortunately, most people don't have cgroupv2 enabled at this point in time.
- * It's easier to create our own mount namespace and manage it ourselves.
- *
- * We assume /mnt exists.
+ * Related cgroupv1 helpers are named *classid*(), since we only use the
+ * net_cls controller for tagging net_cls.classid. We assume the default
+ * mount under /sys/fs/cgroup/net_cls, which should be the case for the
+ * vast majority of users.
  */
 
 #define WALK_FD_LIMIT 16
+
 #define CGROUP_MOUNT_PATH "/mnt"
+#define CGROUP_MOUNT_DFLT "/sys/fs/cgroup"
+#define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls"
 #define CGROUP_WORK_DIR "/cgroup-test-work-dir"
+
 #define format_cgroup_path(buf, path) \
 	snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \
 		 CGROUP_WORK_DIR, path)
 
+#define format_classid_path(buf) \
+	snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \
+		 CGROUP_WORK_DIR)
+
 /**
  * enable_all_controllers() - Enable all available cgroup v2 controllers
 *
@@ -139,8 +148,7 @@ static int nftwfunc(const char *filename, const struct stat *statptr,
 	return 0;
 }
 
-
-static int join_cgroup_from_top(char *cgroup_path)
+static int join_cgroup_from_top(const char *cgroup_path)
 {
 	char cgroup_procs_path[PATH_MAX + 1];
 	pid_t pid = getpid();
@@ -313,3 +321,114 @@ int cgroup_setup_and_join(const char *path) {
 	}
 	return cg_fd;
 }
+
+/**
+ * setup_classid_environment() - Setup the cgroupv1 net_cls environment
+ *
+ * After calling this function, cleanup_classid_environment should be called
+ * once testing is complete.
+ *
+ * This function will print an error to stderr and return 1 if it is unable
+ * to setup the cgroup environment. If setup is successful, 0 is returned.
+ */
+int setup_classid_environment(void)
+{
+	char cgroup_workdir[PATH_MAX + 1];
+
+	format_classid_path(cgroup_workdir);
+
+	if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) &&
+	    errno != EBUSY) {
+		log_err("mount cgroup base");
+		return 1;
+	}
+
+	if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) {
+		log_err("mkdir cgroup net_cls");
+		return 1;
+	}
+
+	if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
+	    errno != EBUSY) {
+		log_err("mount cgroup net_cls");
+		return 1;
+	}
+
+	cleanup_classid_environment();
+
+	if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
+		log_err("mkdir cgroup work dir");
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * set_classid() - Set a cgroupv1 net_cls classid
+ * @id: the numeric classid
+ *
+ * Writes the passed classid into the cgroup work dir's net_cls.classid
+ * file in order to later on trigger socket tagging.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1. If there
+ * is a failure, it prints the error to stderr.
+ */
+int set_classid(unsigned int id)
+{
+	char cgroup_workdir[PATH_MAX - 42];
+	char cgroup_classid_path[PATH_MAX + 1];
+	int fd, rc = 0;
+
+	format_classid_path(cgroup_workdir);
+	snprintf(cgroup_classid_path, sizeof(cgroup_classid_path),
+		 "%s/net_cls.classid", cgroup_workdir);
+
+	fd = open(cgroup_classid_path, O_WRONLY);
+	if (fd < 0) {
+		log_err("Opening cgroup classid: %s", cgroup_classid_path);
+		return 1;
+	}
+
+	if (dprintf(fd, "%u\n", id) < 0) {
+		log_err("Setting cgroup classid");
+		rc = 1;
+	}
+
+	close(fd);
+	return rc;
+}
+
+/**
+ * join_classid() - Join a cgroupv1 net_cls classid
+ *
+ * This function expects the cgroup work dir to be already created, as we
+ * join it here. This causes the process sockets to be tagged with the given
+ * net_cls classid.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1.
+ */
+int join_classid(void)
+{
+	char cgroup_workdir[PATH_MAX + 1];
+
+	format_classid_path(cgroup_workdir);
+	return join_cgroup_from_top(cgroup_workdir);
+}
+
+/**
+ * cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment
+ *
+ * At call time, it moves the calling process to the root cgroup, and then
+ * runs the deletion process.
+ *
+ * On failure, it will print an error to stderr, and try to continue.
+ */
+void cleanup_classid_environment(void)
+{
+	char cgroup_workdir[PATH_MAX + 1];
+
+	format_classid_path(cgroup_workdir);
+	join_cgroup_from_top(NETCLS_MOUNT_PATH);
+	nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}


@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __CGROUP_HELPERS_H
 #define __CGROUP_HELPERS_H
+
 #include <errno.h>
 #include <string.h>
 
@@ -8,12 +9,21 @@
 #define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
 	__FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
 
+/* cgroupv2 related */
 int cgroup_setup_and_join(const char *path);
 int create_and_get_cgroup(const char *path);
-int join_cgroup(const char *path);
-int setup_cgroup_environment(void);
-void cleanup_cgroup_environment(void);
 unsigned long long get_cgroup_id(const char *path);
 
-#endif
+int join_cgroup(const char *path);
+
+int setup_cgroup_environment(void);
+void cleanup_cgroup_environment(void);
+
+/* cgroupv1 related */
+int set_classid(unsigned int id);
+int join_classid(void);
+
+int setup_classid_environment(void);
+void cleanup_classid_environment(void);
+
+#endif /* __CGROUP_HELPERS_H */


@@ -208,11 +208,26 @@ error_close:
 
 static int connect_fd_to_addr(int fd,
 			      const struct sockaddr_storage *addr,
-			      socklen_t addrlen)
+			      socklen_t addrlen, const bool must_fail)
 {
-	if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
-		log_err("Failed to connect to server");
-		return -1;
+	int ret;
+
+	errno = 0;
+	ret = connect(fd, (const struct sockaddr *)addr, addrlen);
+	if (must_fail) {
+		if (!ret) {
+			log_err("Unexpected success to connect to server");
+			return -1;
+		}
+		if (errno != EPERM) {
+			log_err("Unexpected error from connect to server");
+			return -1;
+		}
+	} else {
+		if (ret) {
+			log_err("Failed to connect to server");
+			return -1;
+		}
 	}
 
 	return 0;
@@ -257,7 +272,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
 			    strlen(opts->cc) + 1))
 			goto error_close;
 
-	if (connect_fd_to_addr(fd, &addr, addrlen))
+	if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail))
 		goto error_close;
 
 	return fd;
@@ -289,7 +304,7 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
 		return -1;
 	}
 
-	if (connect_fd_to_addr(client_fd, &addr, len))
+	if (connect_fd_to_addr(client_fd, &addr, len, false))
 		return -1;
 
 	return 0;


@@ -20,6 +20,7 @@ typedef __u16 __sum16;
 struct network_helper_opts {
 	const char *cc;
 	int timeout_ms;
+	bool must_fail;
 };
 
 /* ipv4 test vector */
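
Note: the new must_fail flag inverts the expectation in connect_fd_to_addr():
the connect() now has to be rejected with EPERM for the helper to succeed, so
connect_to_fd_opts() still returns a usable fd when the connection was
properly blocked. A rough sketch of a caller (server_fd comes from an earlier
start_server() call; handle_test_failure() is a made-up placeholder):

	struct network_helper_opts opts = {
		.must_fail = true,	/* expect connect() to fail with EPERM */
	};
	int fd;

	fd = connect_to_fd_opts(server_fd, &opts);
	if (fd < 0)
		/* connect() unexpectedly succeeded, or failed with wrong errno */
		handle_test_failure();
	else
		close(fd);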


@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "connect4_dropper.skel.h"
+
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+static int run_test(int cgroup_fd, int server_fd, bool classid)
+{
+	struct network_helper_opts opts = {
+		.must_fail = true,
+	};
+	struct connect4_dropper *skel;
+	int fd, err = 0;
+
+	skel = connect4_dropper__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return -1;
+
+	skel->links.connect_v4_dropper =
+		bpf_program__attach_cgroup(skel->progs.connect_v4_dropper,
+					   cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) {
+		err = -1;
+		goto out;
+	}
+
+	if (classid && !ASSERT_OK(join_classid(), "join_classid")) {
+		err = -1;
+		goto out;
+	}
+
+	fd = connect_to_fd_opts(server_fd, &opts);
+	if (fd < 0)
+		err = -1;
+	else
+		close(fd);
+out:
+	connect4_dropper__destroy(skel);
+	return err;
+}
+
+void test_cgroup_v1v2(void)
+{
+	struct network_helper_opts opts = {};
+	int server_fd, client_fd, cgroup_fd;
+	static const int port = 60123;
+
+	/* Step 1: Check base connectivity works without any BPF. */
+	server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+	if (!ASSERT_GE(server_fd, 0, "server_fd"))
+		return;
+	client_fd = connect_to_fd_opts(server_fd, &opts);
+	if (!ASSERT_GE(client_fd, 0, "client_fd")) {
+		close(server_fd);
+		return;
+	}
+	close(client_fd);
+	close(server_fd);
+
+	/* Step 2: Check BPF policy prog attached to cgroups drops connectivity. */
+	cgroup_fd = test__join_cgroup("/connect_dropper");
+	if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+		return;
+	server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+	if (!ASSERT_GE(server_fd, 0, "server_fd")) {
+		close(cgroup_fd);
+		return;
+	}
+	ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
+	setup_classid_environment();
+	set_classid(42);
+	ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
+	cleanup_classid_environment();
+	close(server_fd);
+	close(cgroup_fd);
+}


@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
 #include <test_progs.h>
-#include <linux/ptrace.h>
 #include "test_task_pt_regs.skel.h"
 
 void test_task_pt_regs(void)


@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define VERDICT_REJECT 0
+#define VERDICT_PROCEED 1
+
+SEC("cgroup/connect4")
+int connect_v4_dropper(struct bpf_sock_addr *ctx)
+{
+	if (ctx->type != SOCK_STREAM)
+		return VERDICT_PROCEED;
+	if (ctx->user_port == bpf_htons(60123))
+		return VERDICT_REJECT;
+	return VERDICT_PROCEED;
+}
+
+char _license[] SEC("license") = "GPL";


@@ -1,12 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-struct pt_regs current_regs = {};
-struct pt_regs ctx_regs = {};
+#define PT_REGS_SIZE sizeof(struct pt_regs)
+
+/*
+ * The kernel struct pt_regs isn't exported in its entirety to userspace.
+ * Pass it as an array to task_pt_regs.c
+ */
+char current_regs[PT_REGS_SIZE] = {};
+char ctx_regs[PT_REGS_SIZE] = {};
 int uprobe_res = 0;
 
 SEC("uprobe/trigger_func")
@@ -17,8 +22,10 @@ int handle_uprobe(struct pt_regs *ctx)
 
 	current = bpf_get_current_task_btf();
 	regs = (struct pt_regs *) bpf_task_pt_regs(current);
-	__builtin_memcpy(&current_regs, regs, sizeof(*regs));
-	__builtin_memcpy(&ctx_regs, ctx, sizeof(*ctx));
+	if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs))
+		return 0;
+	if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx))
+		return 0;
 
 	/* Prove that uprobe was run */
 	uprobe_res = 1;


@@ -746,7 +746,7 @@ int read_write_nci_cmd(int nfc_sock, int virtual_fd, const __u8 *cmd, __u32 cmd_
 		      const __u8 *rsp, __u32 rsp_len)
 {
 	char buf[256];
-	unsigned int len;
+	int len;
 
 	send(nfc_sock, &cmd[3], cmd_len - 3, 0);
 	len = read(virtual_fd, buf, cmd_len);


@@ -45,7 +45,7 @@ altnames_test()
 	check_err $? "Got unexpected long alternative name from link show JSON"
 
 	ip link property del $DUMMY_DEV altname $SHORT_NAME
-	check_err $? "Failed to add short alternative name"
+	check_err $? "Failed to delete short alternative name"
 
 	ip -j -p link show $SHORT_NAME &>/dev/null
 	check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"