mlx5-fixes-2017-06-28

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJZU3mtAAoJEEg/ir3gV/o+4k0IAKj5XCn3cviZlXMJRMHBvamt
 yWrMI90XgjoPhGPx3K9mf+bMhHOGiZR0Q2DFDJZa5U64DDBVPNvag7fy74GYgj1D
 Cet1zohkQ2xdb/R3jfML8tG2IVfvETWo3cgJGFtGUBlOULvpwinSK4A+8oUUGszc
 K1vAY0j3+Ncfjk+CZJ8hWqaIk1dyYtjtyn0ACOUOftqBa6+UZY7LbLTTOI7hOZoX
 3M35W7ntgGoBScONlxpDUXNUewia4ADTiQPWwHdT9+xNlwz1fzmCHlYi5pY+z9TC
 PKbbe1O4l1nsMftwqJVQNHrFnq+x/X69J5vlgobWkk0dQCRQWE9qanG8BfXPykY=
 =DUG8
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2017-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2017-06-28

This series contains some fixes for the mlx5 core and netdev driver.

Please pull and let me know if there's any problem.

For -stable:
("net/mlx5e: Fix TX carrier errors report in get stats ndo") Kernels >= v4.7

("net/mlx5: Cancel delayed recovery work when unloading the driver") Kernels >= v4.10
* When applied to net-next this will introduce a contextual conflict; it
should be easy to resolve (a spin_lock was changed to spin_lock_irqsave in
net-next; a resolution sketch follows after the sign-off below). If you need
any help with this, please let me know.

("net/mlx5: Fix driver load error flow when firmware is stuck") Kernels >= v4.4*
* This patch fixes commit 6c780a0267 ("net/mlx5: Wait for FW readiness before initializing command interface"),
which was submitted two weeks ago and is queued up for v4.4.

Sorry about the mess, but other than the above, this series doesn't introduce
any conflict with the current mlx5 IPSec offload series.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
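
For reference, a minimal sketch of how the new mlx5_drain_health_recovery()
helper (added in health.c below) might look once rebased onto net-next, where
wq_lock is taken with spin_lock_irqsave(). This is only an illustration of the
expected conflict resolution, not the actual merged code:

void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
{
        struct mlx5_core_health *health = &dev->priv.health;
        unsigned long flags;

        /* Forbid scheduling of new recovery work ... */
        spin_lock_irqsave(&health->wq_lock, flags);
        set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
        spin_unlock_irqrestore(&health->wq_lock, flags);

        /* ... then flush any recovery work that is already queued. */
        cancel_delayed_work_sync(&health->recover_work);
}
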
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3053,8 +3053,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
                 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
         stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
         stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
-        stats->tx_carrier_errors =
-                PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
         stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
                            stats->rx_frame_errors;
         stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -67,6 +67,7 @@ enum {
 
 enum {
         MLX5_DROP_NEW_HEALTH_WORK,
+        MLX5_DROP_NEW_RECOVERY_WORK,
 };
 
 static u8 get_nic_state(struct mlx5_core_dev *dev)
@@ -193,7 +194,7 @@ static void health_care(struct work_struct *work)
         mlx5_handle_bad_state(dev);
 
         spin_lock(&health->wq_lock);
-        if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+        if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
                 schedule_delayed_work(&health->recover_work, recover_delay);
         else
                 dev_err(&dev->pdev->dev,
@@ -313,6 +314,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
         init_timer(&health->timer);
         health->sick = 0;
         clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+        clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
         health->health = &dev->iseg->health;
         health->health_counter = &dev->iseg->health_counter;
 
@@ -335,11 +337,22 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 
         spin_lock(&health->wq_lock);
         set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+        set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
         spin_unlock(&health->wq_lock);
         cancel_delayed_work_sync(&health->recover_work);
         cancel_work_sync(&health->work);
 }
 
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+{
+        struct mlx5_core_health *health = &dev->priv.health;
+
+        spin_lock(&health->wq_lock);
+        set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+        spin_unlock(&health->wq_lock);
+        cancel_delayed_work_sync(&dev->priv.health.recover_work);
+}
+
 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
         struct mlx5_core_health *health = &dev->priv.health;
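
The health.c changes above follow a common "drop flag, then cancel" pattern for
stopping work that may be re-queued concurrently: mark a drop bit under wq_lock
so the handler stops scheduling new recovery work, then use
cancel_delayed_work_sync() to flush whatever is already queued. A stripped-down,
generic illustration of that pattern (every name below is made up; this is not
driver code):

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define EXAMPLE_DROP_NEW_RECOVERY       0

struct example_health {
        spinlock_t              wq_lock;
        unsigned long           flags;          /* bit EXAMPLE_DROP_NEW_RECOVERY */
        struct delayed_work     recover_work;
};

/* Called by the health handler when it decides recovery is needed. */
static void example_queue_recovery(struct example_health *h, unsigned long delay)
{
        spin_lock(&h->wq_lock);
        if (!test_bit(EXAMPLE_DROP_NEW_RECOVERY, &h->flags))
                schedule_delayed_work(&h->recover_work, delay);
        spin_unlock(&h->wq_lock);
}

/* Called on teardown: forbid new recovery work, then flush what is queued. */
static void example_drain_recovery(struct example_health *h)
{
        spin_lock(&h->wq_lock);
        set_bit(EXAMPLE_DROP_NEW_RECOVERY, &h->flags);
        spin_unlock(&h->wq_lock);
        cancel_delayed_work_sync(&h->recover_work);
}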

--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1020,7 +1020,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
         if (err) {
                 dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
                         FW_PRE_INIT_TIMEOUT_MILI);
-                goto out;
+                goto out_err;
         }
 
         err = mlx5_cmd_init(dev);
@@ -1228,7 +1228,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
         int err = 0;
 
         if (cleanup)
-                mlx5_drain_health_wq(dev);
+                mlx5_drain_health_recovery(dev);
 
         mutex_lock(&dev->intf_state_mutex);
         if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {

--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -925,6 +925,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                         struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
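
For callers, the difference between the two drain helpers declared above comes
down to scope: mlx5_drain_health_wq() drops and flushes both the health work and
the delayed recovery work, while mlx5_drain_health_recovery() only stops the
recovery work and leaves the health poll machinery running. A rough caller-side
sketch (the wrapper below and its "full" parameter are made up for illustration;
only the two mlx5_drain_* calls are real):

/* Illustration only: this wrapper does not exist in the driver. */
static void example_stop_health_machinery(struct mlx5_core_dev *dev, bool full)
{
        if (full)
                mlx5_drain_health_wq(dev);        /* drop + flush health and recovery work */
        else
                mlx5_drain_health_recovery(dev);  /* drop + flush only the delayed recovery work */
}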