// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
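/* A note on the mechanism (an assumption about ice_trace.h, which is not
 * shown here): trace headers conventionally end by pulling in
 * <trace/define_trace.h>, which only emits the tracepoint bodies in the one
 * translation unit that defines CREATE_TRACE_POINTS, as done above.
 */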
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}
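
/* Hypothetical usage sketch (not taken from this file): code that only holds
 * a struct ice_hw, such as low-level admin-queue helpers, can log against
 * the underlying PCI device with:
 *
 *	dev_dbg(ice_hw_to_dev(hw), "AQ command failed\n");
 */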
static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}
/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
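
/* Worked example with hypothetical values: for ring->count = 512,
 * head (next_to_clean) = 500 and tail (next_to_use) = 10, the producer has
 * wrapped around the ring, so the pending count is 10 + 512 - 500 = 22
 * descriptors.
 */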
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;
		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}
/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
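
/* This callback and ice_add_mac_to_unsync_list() below are only invoked
 * through the kernel address-list sync helpers; see ice_vsi_sync_fltr()
 * further down, which passes them as:
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 */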
/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}
/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;
	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);

	return 0;
}
/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}
	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);

	return status;
}
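
/* Both helpers above are driven from ice_vsi_sync_fltr() below, e.g.
 * ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS) when IFF_ALLMULTI or
 * IFF_PROMISC is turned on for the netdev, with the matching
 * ice_clear_promisc() call when the flag is dropped.
 */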
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);
				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}
			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}
/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}
/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents that and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}
/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
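
/* For orientation, the reset sequence as implemented by ice_do_reset() and
 * ice_reset_subtask() below is roughly:
 *
 *	ice_prepare_for_reset(pf, type);
 *	ice_reset(hw, type);	// or poll ice_check_reset() for resets
 *				// triggered by firmware or another PF
 *	ice_rebuild(pf, type);
 *
 * with the ICE_*_REQ/ICE_*_RECV and ICE_PREPARED_FOR_RESET state bits
 * sequencing the steps.
 */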
/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}
/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}
/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}
2018-03-20 14:58:13 +00:00
/**
* ice_print_link_msg - print link up or down message
* @ vsi : the VSI whose link status is being queried
* @isup: true if the link is now up, false if it is down
*/
2018-03-20 14:58:16 +00:00
void ice_print_link_msg ( struct ice_vsi * vsi , bool isup )
2018-03-20 14:58:13 +00:00
{
2019-04-16 17:34:52 +00:00
struct ice_aqc_get_phy_caps_data * caps ;
2020-07-09 16:16:10 +00:00
const char * an_advertised ;
2019-04-16 17:34:52 +00:00
const char * fec_req ;
2018-03-20 14:58:13 +00:00
const char * speed ;
2019-04-16 17:34:52 +00:00
const char * fec ;
2018-03-20 14:58:13 +00:00
const char * fc ;
2019-08-02 08:25:31 +00:00
const char * an ;
2021-10-07 22:59:03 +00:00
int status ;
2018-03-20 14:58:13 +00:00
2019-02-28 23:26:01 +00:00
if ( ! vsi )
return ;
2018-03-20 14:58:13 +00:00
if ( vsi - > current_isup = = isup )
return ;
vsi - > current_isup = isup ;
if ( ! isup ) {
netdev_info ( vsi - > netdev , " NIC Link is Down \n " ) ;
return ;
}
switch ( vsi - > port_info - > phy . link_info . link_speed ) {
2019-04-16 17:35:02 +00:00
case ICE_AQ_LINK_SPEED_100GB :
speed = " 100 G " ;
break ;
case ICE_AQ_LINK_SPEED_50GB :
speed = " 50 G " ;
break ;
2018-03-20 14:58:13 +00:00
case ICE_AQ_LINK_SPEED_40GB :
speed = " 40 G " ;
break ;
case ICE_AQ_LINK_SPEED_25GB :
speed = " 25 G " ;
break ;
case ICE_AQ_LINK_SPEED_20GB :
speed = " 20 G " ;
break ;
case ICE_AQ_LINK_SPEED_10GB :
speed = " 10 G " ;
break ;
case ICE_AQ_LINK_SPEED_5GB :
speed = " 5 G " ;
break ;
case ICE_AQ_LINK_SPEED_2500MB :
speed = " 2.5 G " ;
break ;
case ICE_AQ_LINK_SPEED_1000MB :
speed = " 1 G " ;
break ;
case ICE_AQ_LINK_SPEED_100MB :
speed = " 100 M " ;
break ;
default :
2020-09-17 20:13:47 +00:00
speed = " Unknown " ;
2018-03-20 14:58:13 +00:00
break ;
}
switch ( vsi - > port_info - > fc . current_mode ) {
case ICE_FC_FULL :
2019-04-16 17:35:03 +00:00
fc = " Rx/Tx " ;
2018-03-20 14:58:13 +00:00
break ;
case ICE_FC_TX_PAUSE :
2019-04-16 17:35:03 +00:00
fc = " Tx " ;
2018-03-20 14:58:13 +00:00
break ;
case ICE_FC_RX_PAUSE :
2019-04-16 17:35:03 +00:00
fc = " Rx " ;
2018-03-20 14:58:13 +00:00
break ;
2019-02-19 23:04:06 +00:00
case ICE_FC_NONE :
fc = " None " ;
break ;
2018-03-20 14:58:13 +00:00
default :
fc = " Unknown " ;
break ;
}
2019-04-16 17:34:52 +00:00
/* Get FEC mode based on negotiated link info */
switch ( vsi - > port_info - > phy . link_info . fec_info ) {
case ICE_AQ_LINK_25G_RS_528_FEC_EN :
case ICE_AQ_LINK_25G_RS_544_FEC_EN :
fec = " RS-FEC " ;
break ;
case ICE_AQ_LINK_25G_KR_FEC_EN :
fec = " FC-FEC/BASE-R " ;
break ;
default :
fec = " NONE " ;
break ;
}
2019-08-02 08:25:31 +00:00
/* check if autoneg completed; this may be false if autoneg is not supported */
if ( vsi - > port_info - > phy . link_info . an_info & ICE_AQ_AN_COMPLETED )
an = " True " ;
else
an = " False " ;
2019-04-16 17:34:52 +00:00
/* Get the requested FEC mode from the last SW-configured PHY caps */
2019-11-08 14:23:25 +00:00
caps = kzalloc ( sizeof ( * caps ) , GFP_KERNEL ) ;
2019-04-16 17:34:52 +00:00
if ( ! caps ) {
fec_req = " Unknown " ;
2020-07-09 16:16:10 +00:00
an_advertised = " Unknown " ;
2019-04-16 17:34:52 +00:00
goto done ;
}
status = ice_aq_get_phy_caps ( vsi - > port_info , false ,
2021-03-25 22:35:06 +00:00
ICE_AQC_REPORT_ACTIVE_CFG , caps , NULL ) ;
2019-04-16 17:34:52 +00:00
if ( status )
netdev_info ( vsi - > netdev , " Get phy capability failed. \n " ) ;
2020-07-09 16:16:10 +00:00
an_advertised = ice_is_phy_caps_an_enabled ( caps ) ? " On " : " Off " ;
2019-04-16 17:34:52 +00:00
if ( caps - > link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ | |
caps - > link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ )
fec_req = " RS-FEC " ;
else if ( caps - > link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | |
caps - > link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ )
fec_req = " FC-FEC/BASE-R " ;
else
fec_req = " NONE " ;
2019-11-08 14:23:25 +00:00
kfree ( caps ) ;
2019-04-16 17:34:52 +00:00
done :
2020-07-09 16:16:10 +00:00
netdev_info ( vsi - > netdev , " NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s \n " ,
speed , fec_req , fec , an_advertised , an , fc ) ;
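/* An example of the resulting message, with purely illustrative values:
 *
 *   NIC Link is up 25 Gbps Full Duplex, Requested FEC: RS-FEC,
 *   Negotiated FEC: RS-FEC, Autoneg Advertised: On,
 *   Autoneg Negotiated: True, Flow Control: Rx/Tx
 */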
2019-08-02 08:25:32 +00:00
ice_print_topo_conflict ( vsi ) ;
2018-03-20 14:58:13 +00:00
}
2018-03-20 14:58:18 +00:00
/**
2019-02-19 23:04:13 +00:00
* ice_vsi_link_event - update the VSI ' s netdev
* @ vsi : the VSI on which the link event occurred
* @link_up: whether the netdev carrier should be brought up or down
2018-03-20 14:58:18 +00:00
*/
static void ice_vsi_link_event ( struct ice_vsi * vsi , bool link_up )
{
2019-02-28 23:26:01 +00:00
if ( ! vsi )
return ;
2021-03-02 18:15:37 +00:00
if ( test_bit ( ICE_VSI_DOWN , vsi - > state ) | | ! vsi - > netdev )
2018-03-20 14:58:18 +00:00
return ;
if ( vsi - > type = = ICE_VSI_PF ) {
2019-02-28 23:26:01 +00:00
if ( link_up = = netif_carrier_ok ( vsi - > netdev ) )
2018-03-20 14:58:18 +00:00
return ;
2019-02-28 23:26:01 +00:00
2018-03-20 14:58:18 +00:00
if ( link_up ) {
netif_carrier_on ( vsi - > netdev ) ;
netif_tx_wake_all_queues ( vsi - > netdev ) ;
} else {
netif_carrier_off ( vsi - > netdev ) ;
netif_tx_stop_all_queues ( vsi - > netdev ) ;
}
}
}
2020-07-13 20:53:04 +00:00
/**
* ice_set_dflt_mib - send a default config MIB to the FW
* @ pf : private PF struct
*
* This function sends a default configuration MIB to the FW .
*
* If this function errors out at any point , the driver is still able to
* function . The main impact is that LFC may not operate as expected .
* Therefore, errors in this function are only reported via a debug message,
* and the driver rebuild / re-enable continues regardless.
*/
static void ice_set_dflt_mib ( struct ice_pf * pf )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
u8 mib_type , * buf , * lldpmib = NULL ;
u16 len , typelen , offset = 0 ;
struct ice_lldp_org_tlv * tlv ;
2020-10-12 22:53:26 +00:00
struct ice_hw * hw = & pf - > hw ;
2020-07-13 20:53:04 +00:00
u32 ouisubtype ;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB ;
lldpmib = kzalloc ( ICE_LLDPDU_SIZE , GFP_KERNEL ) ;
if ( ! lldpmib ) {
dev_dbg ( dev , " %s Failed to allocate MIB memory \n " ,
__func__ ) ;
return ;
}
/* Add ETS CFG TLV */
tlv = ( struct ice_lldp_org_tlv * ) lldpmib ;
typelen = ( ( ICE_TLV_TYPE_ORG < < ICE_LLDP_TLV_TYPE_S ) |
ICE_IEEE_ETS_TLV_LEN ) ;
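/* an LLDP TLV header packs the 7-bit type and 9-bit payload length
 * into a single 16-bit typelen field
 */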
tlv - > typelen = htons ( typelen ) ;
ouisubtype = ( ( ICE_IEEE_8021QAZ_OUI < < ICE_LLDP_TLV_OUI_S ) |
ICE_IEEE_SUBTYPE_ETS_CFG ) ;
tlv - > ouisubtype = htonl ( ouisubtype ) ;
buf = tlv - > tlvinfo ;
buf [ 0 ] = 0 ;
/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
* Octets 5 - 12 are BW values , set octet 5 to 100 % BW .
* Octets 13 - 20 are TSA values - leave as zeros
*/
buf [ 5 ] = 0x64 ;
2023-12-06 01:01:12 +00:00
len = FIELD_GET ( ICE_LLDP_TLV_LEN_M , typelen ) ;
2020-07-13 20:53:04 +00:00
offset + = len + 2 ;
tlv = ( struct ice_lldp_org_tlv * )
( ( char * ) tlv + sizeof ( tlv - > typelen ) + len ) ;
/* Add ETS REC TLV */
buf = tlv - > tlvinfo ;
tlv - > typelen = htons ( typelen ) ;
ouisubtype = ( ( ICE_IEEE_8021QAZ_OUI < < ICE_LLDP_TLV_OUI_S ) |
ICE_IEEE_SUBTYPE_ETS_REC ) ;
tlv - > ouisubtype = htonl ( ouisubtype ) ;
/* First octet of buf is reserved
* Octets 1 - 4 map UP to TC - all UPs map to zero
* Octets 5 - 12 are BW values - set TC 0 to 100 % .
* Octets 13 - 20 are TSA value - leave as zeros
*/
buf [ 5 ] = 0x64 ;
offset + = len + 2 ;
tlv = ( struct ice_lldp_org_tlv * )
( ( char * ) tlv + sizeof ( tlv - > typelen ) + len ) ;
/* Add PFC CFG TLV */
typelen = ( ( ICE_TLV_TYPE_ORG < < ICE_LLDP_TLV_TYPE_S ) |
ICE_IEEE_PFC_TLV_LEN ) ;
tlv - > typelen = htons ( typelen ) ;
ouisubtype = ( ( ICE_IEEE_8021QAZ_OUI < < ICE_LLDP_TLV_OUI_S ) |
ICE_IEEE_SUBTYPE_PFC_CFG ) ;
tlv - > ouisubtype = htonl ( ouisubtype ) ;
/* Octet 1 left as all zeros - PFC disabled */
buf [ 0 ] = 0x08 ;
2023-12-06 01:01:12 +00:00
len = FIELD_GET ( ICE_LLDP_TLV_LEN_M , typelen ) ;
2020-07-13 20:53:04 +00:00
offset + = len + 2 ;
if ( ice_aq_set_lldp_mib ( hw , mib_type , ( void * ) lldpmib , offset , NULL ) )
dev_dbg ( dev , " %s Failed to set default LLDP MIB \n " , __func__ ) ;
kfree ( lldpmib ) ;
}
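/* The LLDPDU assembled above contains three organizationally specific TLVs
 * laid out back to back:
 *
 *   [ETS CFG TLV][ETS REC TLV][PFC CFG TLV]
 *
 * offset tracks the running length, so the final ice_aq_set_lldp_mib() call
 * sends exactly the bytes that were written.
 */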
2021-10-13 16:02:19 +00:00
/**
* ice_check_phy_fw_load - check if PHY FW load failed
* @ pf : pointer to PF struct
* @ link_cfg_err : bitmap from the link info structure
*
* check if external PHY FW load failed and print an error message if it did
*/
static void ice_check_phy_fw_load ( struct ice_pf * pf , u8 link_cfg_err )
{
if ( ! ( link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE ) ) {
clear_bit ( ICE_FLAG_PHY_FW_LOAD_FAILED , pf - > flags ) ;
return ;
}
if ( test_bit ( ICE_FLAG_PHY_FW_LOAD_FAILED , pf - > flags ) )
return ;
if ( link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE ) {
dev_err ( ice_pf_to_dev ( pf ) , " Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again \n " ) ;
set_bit ( ICE_FLAG_PHY_FW_LOAD_FAILED , pf - > flags ) ;
}
}
2021-05-06 15:40:01 +00:00
/**
* ice_check_module_power - check module power level
* @ pf : pointer to PF struct
* @ link_cfg_err : bitmap from the link info structure
*
* check module power level returned by a previous call to aq_get_link_info
* and print error messages if module power level is not supported
*/
static void ice_check_module_power ( struct ice_pf * pf , u8 link_cfg_err )
{
/* if module power level is supported, clear the flag */
if ( ! ( link_cfg_err & ( ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED ) ) ) {
clear_bit ( ICE_FLAG_MOD_POWER_UNSUPPORTED , pf - > flags ) ;
return ;
}
/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
* above block didn ' t clear this bit , there ' s nothing to do
*/
if ( test_bit ( ICE_FLAG_MOD_POWER_UNSUPPORTED , pf - > flags ) )
return ;
if ( link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT ) {
dev_err ( ice_pf_to_dev ( pf ) , " The installed module is incompatible with the device's NVM image. Cannot start link \n " ) ;
set_bit ( ICE_FLAG_MOD_POWER_UNSUPPORTED , pf - > flags ) ;
} else if ( link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED ) {
dev_err ( ice_pf_to_dev ( pf ) , " The module's power requirements exceed the device's power supply. Cannot start link \n " ) ;
set_bit ( ICE_FLAG_MOD_POWER_UNSUPPORTED , pf - > flags ) ;
}
}
2021-10-13 16:02:19 +00:00
/**
* ice_check_link_cfg_err - check if link configuration failed
* @ pf : pointer to the PF struct
* @ link_cfg_err : bitmap from the link info structure
*
* Check the link_cfg_err bitmap from the link info structure and print
* error messages for any failures it reports.
*/
static void ice_check_link_cfg_err ( struct ice_pf * pf , u8 link_cfg_err )
{
ice_check_module_power ( pf , link_cfg_err ) ;
ice_check_phy_fw_load ( pf , link_cfg_err ) ;
}
2018-03-20 14:58:18 +00:00
/**
* ice_link_event - process the link event
2019-04-16 17:35:03 +00:00
* @ pf : PF that the link event is associated with
2018-03-20 14:58:18 +00:00
* @ pi : port_info for the port that the link event is associated with
2019-02-28 23:26:01 +00:00
* @ link_up : true if the physical link is up and false if it is down
* @ link_speed : current link speed received from the link event
2018-03-20 14:58:18 +00:00
*
2019-02-28 23:26:01 +00:00
* Returns 0 on success and negative on failure
2018-03-20 14:58:18 +00:00
*/
static int
2019-02-28 23:26:01 +00:00
ice_link_event ( struct ice_pf * pf , struct ice_port_info * pi , bool link_up ,
u16 link_speed )
2018-03-20 14:58:18 +00:00
{
2019-11-08 14:23:26 +00:00
struct device * dev = ice_pf_to_dev ( pf ) ;
2018-03-20 14:58:18 +00:00
struct ice_phy_info * phy_info ;
2019-02-28 23:26:01 +00:00
struct ice_vsi * vsi ;
u16 old_link_speed ;
bool old_link ;
2021-10-07 22:59:03 +00:00
int status ;
2018-03-20 14:58:18 +00:00
phy_info = & pi - > phy ;
phy_info - > link_info_old = phy_info - > link_info ;
2019-02-28 23:26:01 +00:00
old_link = ! ! ( phy_info - > link_info_old . link_info & ICE_AQ_LINK_UP ) ;
2018-03-20 14:58:18 +00:00
old_link_speed = phy_info - > link_info_old . link_speed ;
2019-02-28 23:26:01 +00:00
/* update the link info structures and re-enable link events,
* don't bail on failure since other bookkeeping is still needed
*/
2021-03-25 22:35:07 +00:00
status = ice_update_link_info ( pi ) ;
if ( status )
2021-10-07 22:56:02 +00:00
dev_dbg ( dev , " Failed to update link status on port %d, err %d aq_err %s \n " ,
pi - > lport , status ,
2021-03-25 22:35:07 +00:00
ice_aq_str ( pi - > hw - > adminq . sq_last_status ) ) ;
2018-03-20 14:58:18 +00:00
2021-10-13 16:02:19 +00:00
ice_check_link_cfg_err ( pf , pi - > phy . link_info . link_cfg_err ) ;
2021-05-06 15:40:01 +00:00
2020-07-13 20:53:06 +00:00
/* Check if the link state is up after updating link info, and treat
* this event as an UP event since the link is actually UP now .
*/
if ( phy_info - > link_info . link_info & ICE_AQ_LINK_UP )
link_up = true ;
2019-08-08 14:39:33 +00:00
vsi = ice_get_main_vsi ( pf ) ;
2019-02-28 23:26:01 +00:00
if ( ! vsi | | ! vsi - > port_info )
return - EINVAL ;
2018-03-20 14:58:18 +00:00
2019-06-26 09:20:17 +00:00
/* turn off PHY if media was removed */
if ( ! test_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) & &
! ( pi - > phy . link_info . link_info & ICE_AQ_MEDIA_AVAILABLE ) ) {
set_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) ;
2021-03-25 22:35:07 +00:00
ice_set_link ( vsi , false ) ;
2019-06-26 09:20:17 +00:00
}
2020-07-09 16:16:06 +00:00
/* if neither the link state nor the speed changed, there is nothing to do */
if ( link_up = = old_link & & link_speed = = old_link_speed )
2021-03-25 22:35:07 +00:00
return 0 ;
2020-07-09 16:16:06 +00:00
2022-12-05 19:52:43 +00:00
ice_ptp_link_change ( pf , pf - > hw . pf_id , link_up ) ;
2021-10-13 15:54:51 +00:00
2020-07-13 20:53:04 +00:00
if ( ice_is_dcb_active ( pf ) ) {
if ( test_bit ( ICE_FLAG_DCB_ENA , pf - > flags ) )
ice_dcb_rebuild ( pf ) ;
} else {
if ( link_up )
ice_set_dflt_mib ( pf ) ;
}
2019-02-28 23:26:01 +00:00
ice_vsi_link_event ( vsi , link_up ) ;
ice_print_link_msg ( vsi , link_up ) ;
2018-03-20 14:58:18 +00:00
2019-12-12 11:13:01 +00:00
ice_vc_notify_link_state ( pf ) ;
2018-09-20 00:43:00 +00:00
2021-03-25 22:35:07 +00:00
return 0 ;
2018-03-20 14:58:18 +00:00
}
/**
2018-10-18 15:37:09 +00:00
* ice_watchdog_subtask - periodic tasks not using event driven scheduling
* @ pf : board private structure
2018-03-20 14:58:18 +00:00
*/
2018-10-18 15:37:09 +00:00
static void ice_watchdog_subtask ( struct ice_pf * pf )
2018-03-20 14:58:18 +00:00
{
2018-10-18 15:37:09 +00:00
int i ;
2018-03-20 14:58:18 +00:00
2018-10-18 15:37:09 +00:00
/* if interface is down do nothing */
2021-03-02 18:15:38 +00:00
if ( test_bit ( ICE_DOWN , pf - > state ) | |
test_bit ( ICE_CFG_BUSY , pf - > state ) )
2018-10-18 15:37:09 +00:00
return ;
2018-03-20 14:58:18 +00:00
2018-10-18 15:37:09 +00:00
/* make sure we don't do these things too often */
if ( time_before ( jiffies ,
pf - > serv_tmr_prev + pf - > serv_tmr_period ) )
return ;
2018-03-20 14:58:18 +00:00
2018-10-18 15:37:09 +00:00
pf - > serv_tmr_prev = jiffies ;
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats ( pf ) ;
2019-02-08 20:50:54 +00:00
ice_for_each_vsi ( pf , i )
2018-10-18 15:37:09 +00:00
if ( pf - > vsi [ i ] & & pf - > vsi [ i ] - > netdev )
ice_update_vsi_stats ( pf - > vsi [ i ] ) ;
2018-03-20 14:58:18 +00:00
}
2019-02-27 00:35:23 +00:00
/**
* ice_init_link_events - enable / initialize link events
* @ pi : pointer to the port_info instance
*
* Returns - EIO on failure , 0 on success
*/
static int ice_init_link_events ( struct ice_port_info * pi )
{
u16 mask ;
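/* the mask is the inverse of the events this driver handles, so only
 * link up/down, media, module qualification, and PHY FW load events
 * remain enabled
 */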
mask = ~ ( ( u16 ) ( ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
2021-10-13 16:02:19 +00:00
ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL ) ) ;
2019-02-27 00:35:23 +00:00
if ( ice_aq_set_event_mask ( pi - > hw , pi - > lport , mask , NULL ) ) {
2020-02-06 09:20:10 +00:00
dev_dbg ( ice_hw_to_dev ( pi - > hw ) , " Failed to set link event mask for port %d \n " ,
2019-02-27 00:35:23 +00:00
pi - > lport ) ;
return - EIO ;
}
if ( ice_aq_get_link_info ( pi , true , NULL , NULL ) ) {
2020-02-06 09:20:10 +00:00
dev_dbg ( ice_hw_to_dev ( pi - > hw ) , " Failed to enable link events for port %d \n " ,
2019-02-27 00:35:23 +00:00
pi - > lport ) ;
return - EIO ;
}
return 0 ;
}
/**
* ice_handle_link_event - handle link event via ARQ
2019-04-16 17:35:03 +00:00
* @ pf : PF that the link event is associated with
2019-02-28 23:26:01 +00:00
* @ event : event structure containing link status info
2019-02-27 00:35:23 +00:00
*/
2019-02-28 23:26:01 +00:00
static int
ice_handle_link_event ( struct ice_pf * pf , struct ice_rq_event_info * event )
2019-02-27 00:35:23 +00:00
{
2019-02-28 23:26:01 +00:00
struct ice_aqc_get_link_status_data * link_data ;
2019-02-27 00:35:23 +00:00
struct ice_port_info * port_info ;
int status ;
2019-02-28 23:26:01 +00:00
link_data = ( struct ice_aqc_get_link_status_data * ) event - > msg_buf ;
2019-02-27 00:35:23 +00:00
port_info = pf - > hw . port_info ;
if ( ! port_info )
return - EINVAL ;
2019-02-28 23:26:01 +00:00
status = ice_link_event ( pf , port_info ,
! ! ( link_data - > link_info & ICE_AQ_LINK_UP ) ,
le16_to_cpu ( link_data - > link_speed ) ) ;
2019-02-27 00:35:23 +00:00
if ( status )
2020-02-06 09:20:10 +00:00
dev_dbg ( ice_pf_to_dev ( pf ) , " Could not process link event, error %d \n " ,
status ) ;
2019-02-27 00:35:23 +00:00
return status ;
}
2023-12-13 05:07:14 +00:00
/**
* ice_get_fwlog_data - copy the FW log data from ARQ event
* @ pf : PF that the FW log event is associated with
* @ event : event structure containing FW log data
*/
static void
ice_get_fwlog_data ( struct ice_pf * pf , struct ice_rq_event_info * event )
{
struct ice_fwlog_data * fwlog ;
struct ice_hw * hw = & pf - > hw ;
fwlog = & hw - > fwlog_ring . rings [ hw - > fwlog_ring . tail ] ;
memset ( fwlog - > data , 0 , PAGE_SIZE ) ;
fwlog - > data_size = le16_to_cpu ( event - > desc . datalen ) ;
memcpy ( fwlog - > data , event - > msg_buf , fwlog - > data_size ) ;
ice_fwlog_ring_increment ( & hw - > fwlog_ring . tail , hw - > fwlog_ring . size ) ;
if ( ice_fwlog_ring_full ( & hw - > fwlog_ring ) ) {
/* the rings are full so bump the head to create room */
ice_fwlog_ring_increment ( & hw - > fwlog_ring . head ,
hw - > fwlog_ring . size ) ;
}
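/* net effect: the FW log ring is a drop-oldest circular buffer; the
 * tail advances for every event, and a full ring advances the head
 * too, overwriting the oldest entry
 */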
}
ice: implement device flash update via devlink
Use the newly added pldmfw library to implement device flash update for
the Intel ice networking device driver. This support uses the devlink
flash update interface.
The main parts of the flash include the Option ROM, the netlist module,
and the main NVM data. The PLDM firmware file contains modules for each
of these components.
Using the pldmfw library, the provided firmware file will be scanned for
the three major components, "fw.undi" for the Option ROM, "fw.mgmt" for
the main NVM module containing the primary device firmware, and
"fw.netlist" containing the netlist module.
The flash is separated into two banks, the active bank containing the
running firmware, and the inactive bank which we use for update. Each
module is updated in a staged process. First, the inactive bank is
erased, preparing the device for update. Second, the contents of the
component are copied to the inactive portion of the flash. After all
components are updated, the driver signals the device to switch the
active bank during the next EMP reset (which would usually occur during
the next reboot).
Although the firmware AdminQ interface does report an immediate status
for each command, the NVM erase and NVM write commands receive status
asynchronously. The driver must not continue writing until previous
erase and write commands have finished. The real status of the NVM
commands is returned over the receive AdminQ. Implement a simple
interface that uses a wait queue so that the main update thread can
sleep until the completion status is reported by firmware. For erasing
the inactive banks, this can take quite a while in practice.
To help visualize the process to the devlink application and other
applications based on the devlink netlink interface, status is reported
via the devlink_flash_update_status_notify. While we do report status
after each 4k block when writing, there is no real status we can report
during erasing. We simply must wait for the complete module erasure to
finish.
With this implementation, basic flash update for the ice hardware is
supported.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-07-24 00:22:03 +00:00
/**
2023-08-08 21:54:17 +00:00
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
2020-07-24 00:22:03 +00:00
* @ pf : pointer to the PF private structure
2023-08-08 21:54:17 +00:00
* @ task : intermediate helper storage and identifier for waiting
2020-07-24 00:22:03 +00:00
* @ opcode : the opcode to wait for
*
2023-08-08 21:54:17 +00:00
* Prepares to wait for a specific AdminQ completion event on the ARQ for
* a given PF. The actual wait is performed by a subsequent call to
* ice_aq_wait_for_event().
2020-07-24 00:22:03 +00:00
*
2023-08-08 21:54:17 +00:00
* The calls are separated so that the caller can register for the event
* before sending the command, which avoids a race between registration and
* the firmware response.
2020-07-24 00:22:03 +00:00
*
2023-08-08 21:54:17 +00:00
* To obtain only the descriptor contents, pass a task->event with a NULL
* msg_buf. If the complete data buffer is desired, allocate
* task->event.msg_buf with enough space ahead of time.
2020-07-24 00:22:03 +00:00
*/
2023-08-08 21:54:17 +00:00
void ice_aq_prep_for_event ( struct ice_pf * pf , struct ice_aq_task * task ,
u16 opcode )
2020-07-24 00:22:03 +00:00
{
INIT_HLIST_NODE ( & task - > entry ) ;
task - > opcode = opcode ;
task - > state = ICE_AQ_TASK_WAITING ;
spin_lock_bh ( & pf - > aq_wait_lock ) ;
hlist_add_head ( & task - > entry , & pf - > aq_wait_list ) ;
spin_unlock_bh ( & pf - > aq_wait_lock ) ;
2023-08-08 21:54:17 +00:00
}
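/* Typical calling pattern (an illustrative sketch only; send_cmd() stands
 * in for whichever AdminQ command the caller actually issues, and error
 * handling is abbreviated):
 *
 *	struct ice_aq_task task;
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	err = send_cmd(pf);
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);
 */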
2020-07-24 00:22:03 +00:00
/**
2021-03-02 18:15:45 +00:00
* ice_aq_wait_for_event - Wait for an AdminQ event from firmware
2020-07-24 00:22:03 +00:00
* @ pf : pointer to the PF private structure
2023-08-08 21:54:17 +00:00
* @task: pointer to the wait task prepared by ice_aq_prep_for_event()
2020-07-24 00:22:03 +00:00
* @ timeout : how long to wait , in jiffies
*
* Waits for a specific AdminQ completion event on the ARQ for a given PF . The
* current thread will be put to sleep until the specified event occurs or
* until the given timeout is reached .
*
* Returns : zero on success , or a negative error code on failure .
*/
2023-08-08 21:54:17 +00:00
int ice_aq_wait_for_event ( struct ice_pf * pf , struct ice_aq_task * task ,
unsigned long timeout )
2020-07-24 00:22:03 +00:00
{
2023-08-08 21:54:17 +00:00
enum ice_aq_task_state * state = & task - > state ;
2020-10-07 17:54:45 +00:00
struct device * dev = ice_pf_to_dev ( pf ) ;
2023-08-08 21:54:17 +00:00
unsigned long start = jiffies ;
2020-07-24 00:22:03 +00:00
long ret ;
int err ;
2023-08-08 21:54:17 +00:00
ret = wait_event_interruptible_timeout ( pf - > aq_wait_queue ,
* state ! = ICE_AQ_TASK_WAITING ,
2020-07-24 00:22:03 +00:00
timeout ) ;
2023-08-08 21:54:17 +00:00
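/* ret < 0 means we were interrupted by a signal, ret == 0 means the
 * timeout elapsed, and ret > 0 means the condition became true in time;
 * combine this with the task state to derive the final error code
 */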
switch ( * state ) {
case ICE_AQ_TASK_NOT_PREPARED :
WARN ( 1 , " call to %s without ice_aq_prep_for_event() " , __func__ ) ;
err = - EINVAL ;
break ;
2020-07-24 00:22:03 +00:00
case ICE_AQ_TASK_WAITING :
err = ret < 0 ? ret : - ETIMEDOUT ;
break ;
case ICE_AQ_TASK_CANCELED :
err = ret < 0 ? ret : - ECANCELED ;
break ;
case ICE_AQ_TASK_COMPLETE :
err = ret < 0 ? ret : 0 ;
break ;
default :
2023-08-08 21:54:17 +00:00
WARN ( 1 , " Unexpected AdminQ wait task state %u " , * state ) ;
2020-07-24 00:22:03 +00:00
err = - EINVAL ;
break ;
}
2020-10-07 17:54:45 +00:00
dev_dbg ( dev , " Waited %u msecs (max %u msecs) for firmware response to op 0x%04x \n " ,
jiffies_to_msecs ( jiffies - start ) ,
jiffies_to_msecs ( timeout ) ,
2023-08-08 21:54:17 +00:00
task - > opcode ) ;
2020-10-07 17:54:45 +00:00
2020-07-24 00:22:03 +00:00
spin_lock_bh ( & pf - > aq_wait_lock ) ;
hlist_del ( & task - > entry ) ;
spin_unlock_bh ( & pf - > aq_wait_lock ) ;
return err ;
}
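The lines above are the tail of ice_aq_wait_for_event(). For context, a
minimal sketch of the sleeping half it implements might look like this
(illustrative only, reusing the list, lock, and wait-queue fields visible
above; the real driver registers the task in a separate step):

/* Sketch: sleep until ice_aq_check_events() marks the task complete */
static int example_aq_wait(struct ice_pf *pf, struct ice_aq_task *task,
			   unsigned long timeout)
{
	long ret;

	task->state = ICE_AQ_TASK_WAITING;

	/* Publish the task so ice_aq_check_events() can find it */
	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	/* Sleep until the completion path moves the task out of WAITING */
	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       task->state != ICE_AQ_TASK_WAITING,
					       timeout);
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	if (!ret)
		return -ETIMEDOUT;	/* firmware never answered */

	/* Unlinking from pf->aq_wait_list happens as shown above */
	return task->state == ICE_AQ_TASK_CANCELED ? -ECANCELED : 0;
}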
/**
* ice_aq_check_events - Check if any thread is waiting for an AdminQ event
* @ pf : pointer to the PF private structure
* @ opcode : the opcode of the event
* @ event : the event to check
*
* Loops over the current list of pending threads waiting for an AdminQ event .
* For each matching task , copy the contents of the event into the task
* structure and wake up the thread .
*
* If multiple threads wait for the same opcode , they will all be woken up .
*
* Note that event - > msg_buf will only be duplicated if the event has a buffer
* with enough space already allocated . Otherwise , only the descriptor and
* message length will be copied .
*/
static void ice_aq_check_events ( struct ice_pf * pf , u16 opcode ,
struct ice_rq_event_info * event )
{
2023-08-08 21:54:15 +00:00
struct ice_rq_event_info * task_ev ;
2020-07-24 00:22:03 +00:00
struct ice_aq_task * task ;
bool found = false ;
spin_lock_bh ( & pf - > aq_wait_lock ) ;
hlist_for_each_entry ( task , & pf - > aq_wait_list , entry ) {
2023-08-08 21:54:17 +00:00
if ( task - > state ! = ICE_AQ_TASK_WAITING )
continue ;
if ( task - > opcode ! = opcode )
2020-07-24 00:22:03 +00:00
continue ;
2023-08-08 21:54:16 +00:00
task_ev = & task - > event ;
2023-08-08 21:54:15 +00:00
memcpy ( & task_ev - > desc , & event - > desc , sizeof ( event - > desc ) ) ;
task_ev - > msg_len = event - > msg_len ;
2020-07-24 00:22:03 +00:00
/* Only copy the data buffer if a destination was set and is large enough */
2023-08-08 21:54:15 +00:00
if ( task_ev - > msg_buf & & task_ev - > buf_len > = event - > buf_len ) {
memcpy ( task_ev - > msg_buf , event - > msg_buf ,
2020-07-24 00:22:03 +00:00
event - > buf_len ) ;
2023-08-08 21:54:15 +00:00
task_ev - > buf_len = event - > buf_len ;
2020-07-24 00:22:03 +00:00
}
task - > state = ICE_AQ_TASK_COMPLETE ;
found = true ;
}
spin_unlock_bh ( & pf - > aq_wait_lock ) ;
if ( found )
wake_up ( & pf - > aq_wait_queue ) ;
}
/**
* ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
* @ pf : the PF private structure
*
* Set all waiting tasks to ICE_AQ_TASK_CANCELED , and wake up their threads .
* This will then cause ice_aq_wait_for_event to exit with - ECANCELED .
*/
static void ice_aq_cancel_waiting_tasks ( struct ice_pf * pf )
{
struct ice_aq_task * task ;
spin_lock_bh ( & pf - > aq_wait_lock ) ;
hlist_for_each_entry ( task , & pf - > aq_wait_list , entry )
task - > state = ICE_AQ_TASK_CANCELED ;
spin_unlock_bh ( & pf - > aq_wait_lock ) ;
wake_up ( & pf - > aq_wait_queue ) ;
}
2023-02-22 17:09:17 +00:00
# define ICE_MBX_OVERFLOW_WATERMARK 64
2018-03-20 14:58:10 +00:00
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @ pf : ptr to struct ice_pf
* @ q_type : specific Control queue type
*/
static int __ice_clean_ctrlq ( struct ice_pf * pf , enum ice_ctl_q q_type )
{
2019-11-08 14:23:26 +00:00
struct device * dev = ice_pf_to_dev ( pf ) ;
2018-03-20 14:58:10 +00:00
struct ice_rq_event_info event ;
struct ice_hw * hw = & pf - > hw ;
struct ice_ctl_q_info * cq ;
u16 pending , i = 0 ;
const char * qtype ;
u32 oldval , val ;
2018-03-20 14:58:18 +00:00
/* Do not clean control queue if/when PF reset fails */
2021-03-02 18:15:38 +00:00
if ( test_bit ( ICE_RESET_FAILED , pf - > state ) )
2018-03-20 14:58:18 +00:00
return 0 ;
2018-03-20 14:58:10 +00:00
switch ( q_type ) {
case ICE_CTL_Q_ADMIN :
cq = & hw - > adminq ;
qtype = " Admin " ;
break ;
2021-06-09 16:39:46 +00:00
case ICE_CTL_Q_SB :
cq = & hw - > sbq ;
qtype = " Sideband " ;
break ;
2018-09-20 00:42:54 +00:00
case ICE_CTL_Q_MAILBOX :
cq = & hw - > mailboxq ;
qtype = " Mailbox " ;
2021-03-02 18:12:00 +00:00
/* we are going to try to detect a malicious VF, so set the
* state to begin detection
*/
hw - > mbx_snapshot . mbx_buf . state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT ;
2018-09-20 00:42:54 +00:00
break ;
2018-03-20 14:58:10 +00:00
default :
2019-11-08 14:23:26 +00:00
dev_warn ( dev , " Unknown control queue type 0x%x \n " , q_type ) ;
2018-03-20 14:58:10 +00:00
return 0 ;
}
/* check for error indications - PF_xx_AxQLEN register layout for
* FW / MBX / SB are identical so just use defines for PF_FW_AxQLEN .
*/
val = rd32 ( hw , cq - > rq . len ) ;
if ( val & ( PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M ) ) {
oldval = val ;
if ( val & PF_FW_ARQLEN_ARQVFE_M )
2019-11-08 14:23:26 +00:00
dev_dbg ( dev , " %s Receive Queue VF Error detected \n " ,
qtype ) ;
2018-03-20 14:58:10 +00:00
if ( val & PF_FW_ARQLEN_ARQOVFL_M ) {
2020-02-06 09:20:10 +00:00
dev_dbg ( dev , " %s Receive Queue Overflow Error detected \n " ,
2018-03-20 14:58:10 +00:00
qtype ) ;
}
if ( val & PF_FW_ARQLEN_ARQCRIT_M )
2020-02-06 09:20:10 +00:00
dev_dbg ( dev , " %s Receive Queue Critical Error detected \n " ,
2018-03-20 14:58:10 +00:00
qtype ) ;
val & = ~ ( PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M ) ;
if ( oldval ! = val )
wr32 ( hw , cq - > rq . len , val ) ;
}
val = rd32 ( hw , cq - > sq . len ) ;
if ( val & ( PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
PF_FW_ATQLEN_ATQCRIT_M ) ) {
oldval = val ;
if ( val & PF_FW_ATQLEN_ATQVFE_M )
2020-02-06 09:20:10 +00:00
dev_dbg ( dev , " %s Send Queue VF Error detected \n " ,
qtype ) ;
2018-03-20 14:58:10 +00:00
if ( val & PF_FW_ATQLEN_ATQOVFL_M ) {
2019-11-08 14:23:26 +00:00
dev_dbg ( dev , " %s Send Queue Overflow Error detected \n " ,
2018-03-20 14:58:10 +00:00
qtype ) ;
}
if ( val & PF_FW_ATQLEN_ATQCRIT_M )
2019-11-08 14:23:26 +00:00
dev_dbg ( dev , " %s Send Queue Critical Error detected \n " ,
2018-03-20 14:58:10 +00:00
qtype ) ;
val & = ~ ( PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
PF_FW_ATQLEN_ATQCRIT_M ) ;
if ( oldval ! = val )
wr32 ( hw , cq - > sq . len , val ) ;
}
event . buf_len = cq - > rq_buf_size ;
2019-11-08 14:23:25 +00:00
event . msg_buf = kzalloc ( event . buf_len , GFP_KERNEL ) ;
2018-03-20 14:58:10 +00:00
if ( ! event . msg_buf )
return 0 ;
do {
2023-02-22 17:09:17 +00:00
struct ice_mbx_data data = { } ;
2018-03-20 14:58:18 +00:00
u16 opcode ;
2021-10-07 22:59:03 +00:00
int ret ;
2018-03-20 14:58:10 +00:00
ret = ice_clean_rq_elem ( hw , cq , & event , & pending ) ;
2021-10-07 22:58:01 +00:00
if ( ret = = - EALREADY )
2018-03-20 14:58:10 +00:00
break ;
if ( ret ) {
2021-10-07 22:56:02 +00:00
dev_err ( dev , " %s Receive Queue event error %d \n " , qtype ,
ret ) ;
2018-03-20 14:58:10 +00:00
break ;
}
2018-03-20 14:58:18 +00:00
opcode = le16_to_cpu ( event . desc . opcode ) ;
2020-07-24 00:22:03 +00:00
/* Notify any thread that might be waiting for this event */
ice_aq_check_events ( pf , opcode , & event ) ;
2018-03-20 14:58:18 +00:00
switch ( opcode ) {
2019-02-27 00:35:23 +00:00
case ice_aqc_opc_get_link_status :
2019-02-28 23:26:01 +00:00
if ( ice_handle_link_event ( pf , & event ) )
2019-11-08 14:23:26 +00:00
dev_err ( dev , " Could not handle link event \n " ) ;
2019-02-27 00:35:23 +00:00
break ;
2020-01-22 15:21:31 +00:00
case ice_aqc_opc_event_lan_overflow :
ice_vf_lan_overflow_event ( pf , & event ) ;
break ;
2018-09-20 00:42:59 +00:00
case ice_mbx_opc_send_msg_to_pf :
2023-02-22 17:09:17 +00:00
data . num_msg_proc = i ;
data . num_pending_arq = pending ;
data . max_num_msgs_mbx = hw - > mailboxq . num_rq_entries ;
data . async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK ;
2023-02-22 17:09:20 +00:00
ice_vc_process_vf_msg ( pf , & event , & data ) ;
2018-09-20 00:42:59 +00:00
break ;
2023-12-13 05:07:14 +00:00
case ice_aqc_opc_fw_logs_event :
ice_get_fwlog_data ( pf , & event ) ;
break ;
2019-02-28 23:24:26 +00:00
case ice_aqc_opc_lldp_set_mib_change :
ice_dcb_process_lldp_set_mib_change ( pf , & event ) ;
break ;
2018-03-20 14:58:18 +00:00
default :
2020-02-06 09:20:10 +00:00
dev_dbg ( dev , " %s Receive Queue unknown event 0x%04x ignored \n " ,
2018-03-20 14:58:18 +00:00
qtype , opcode ) ;
break ;
}
2018-03-20 14:58:10 +00:00
} while ( pending & & ( i + + < ICE_DFLT_IRQ_WORK ) ) ;
2019-11-08 14:23:25 +00:00
kfree ( event . msg_buf ) ;
2018-03-20 14:58:10 +00:00
return pending & & ( i = = ICE_DFLT_IRQ_WORK ) ;
}
2018-08-09 13:28:56 +00:00
/**
* ice_ctrlq_pending - check if there is a difference between ntc and ntu
* @ hw : pointer to hardware info
* @ cq : control queue information
*
* returns true if there are pending messages in a queue , false if there aren ' t
*/
static bool ice_ctrlq_pending ( struct ice_hw * hw , struct ice_ctl_q_info * cq )
{
u16 ntu ;
ntu = ( u16 ) ( rd32 ( hw , cq - > rq . head ) & cq - > rq . head_mask ) ;
return cq - > rq . next_to_clean ! = ntu ;
}
2018-03-20 14:58:10 +00:00
/**
* ice_clean_adminq_subtask - clean the AdminQ rings
* @ pf : board private structure
*/
static void ice_clean_adminq_subtask ( struct ice_pf * pf )
{
struct ice_hw * hw = & pf - > hw ;
2021-03-02 18:15:38 +00:00
if ( ! test_bit ( ICE_ADMINQ_EVENT_PENDING , pf - > state ) )
2018-03-20 14:58:10 +00:00
return ;
if ( __ice_clean_ctrlq ( pf , ICE_CTL_Q_ADMIN ) )
return ;
2021-03-02 18:15:38 +00:00
clear_bit ( ICE_ADMINQ_EVENT_PENDING , pf - > state ) ;
2018-03-20 14:58:10 +00:00
2018-08-09 13:28:56 +00:00
/* There might be a situation where new messages arrive to a control
* queue between processing the last message and clearing the
* EVENT_PENDING bit . So before exiting , check queue head again ( using
* ice_ctrlq_pending ) and process new messages if any .
*/
if ( ice_ctrlq_pending ( hw , & hw - > adminq ) )
__ice_clean_ctrlq ( pf , ICE_CTL_Q_ADMIN ) ;
2018-03-20 14:58:10 +00:00
ice_flush ( hw ) ;
}
2018-09-20 00:42:54 +00:00
/**
* ice_clean_mailboxq_subtask - clean the MailboxQ rings
* @ pf : board private structure
*/
static void ice_clean_mailboxq_subtask ( struct ice_pf * pf )
{
struct ice_hw * hw = & pf - > hw ;
2021-03-02 18:15:38 +00:00
if ( ! test_bit ( ICE_MAILBOXQ_EVENT_PENDING , pf - > state ) )
2018-09-20 00:42:54 +00:00
return ;
if ( __ice_clean_ctrlq ( pf , ICE_CTL_Q_MAILBOX ) )
return ;
2021-03-02 18:15:38 +00:00
clear_bit ( ICE_MAILBOXQ_EVENT_PENDING , pf - > state ) ;
2018-09-20 00:42:54 +00:00
if ( ice_ctrlq_pending ( hw , & hw - > mailboxq ) )
__ice_clean_ctrlq ( pf , ICE_CTL_Q_MAILBOX ) ;
ice_flush ( hw ) ;
}
2021-06-09 16:39:46 +00:00
/**
* ice_clean_sbq_subtask - clean the Sideband Queue rings
* @ pf : board private structure
*/
static void ice_clean_sbq_subtask ( struct ice_pf * pf )
{
struct ice_hw * hw = & pf - > hw ;
2023-12-06 19:29:18 +00:00
/* if mac_type is not generic, sideband is not supported
* and there ' s nothing to do here
*/
if ( ! ice_is_generic_mac ( hw ) ) {
2021-06-09 16:39:46 +00:00
clear_bit ( ICE_SIDEBANDQ_EVENT_PENDING , pf - > state ) ;
return ;
}
if ( ! test_bit ( ICE_SIDEBANDQ_EVENT_PENDING , pf - > state ) )
return ;
if ( __ice_clean_ctrlq ( pf , ICE_CTL_Q_SB ) )
return ;
clear_bit ( ICE_SIDEBANDQ_EVENT_PENDING , pf - > state ) ;
if ( ice_ctrlq_pending ( hw , & hw - > sbq ) )
__ice_clean_ctrlq ( pf , ICE_CTL_Q_SB ) ;
ice_flush ( hw ) ;
}
2018-03-20 14:58:10 +00:00
/**
* ice_service_task_schedule - schedule the service task to wake up
* @ pf : board private structure
*
* If not already scheduled , this puts the task into the work queue .
*/
2020-05-12 01:01:46 +00:00
void ice_service_task_schedule ( struct ice_pf * pf )
2018-03-20 14:58:10 +00:00
{
2021-03-02 18:15:38 +00:00
if ( ! test_bit ( ICE_SERVICE_DIS , pf - > state ) & &
! test_and_set_bit ( ICE_SERVICE_SCHED , pf - > state ) & &
! test_bit ( ICE_NEEDS_RESTART , pf - > state ) )
2018-03-20 14:58:10 +00:00
queue_work ( ice_wq , & pf - > serv_task ) ;
}
/**
* ice_service_task_complete - finish up the service task
* @ pf : board private structure
*/
static void ice_service_task_complete ( struct ice_pf * pf )
{
2021-03-02 18:15:38 +00:00
WARN_ON ( ! test_bit ( ICE_SERVICE_SCHED , pf - > state ) ) ;
2018-03-20 14:58:10 +00:00
/* force memory (pf->state) to sync before next service task */
smp_mb__before_atomic ( ) ;
2021-03-02 18:15:38 +00:00
clear_bit ( ICE_SERVICE_SCHED , pf - > state ) ;
2018-03-20 14:58:10 +00:00
}
2018-08-09 13:29:57 +00:00
/**
* ice_service_task_stop - stop service task and cancel works
* @ pf : board private structure
2020-07-09 16:16:03 +00:00
*
2021-03-02 18:15:38 +00:00
* Return 0 if the ICE_SERVICE_DIS bit was not already set ,
2020-07-09 16:16:03 +00:00
* 1 otherwise .
2018-08-09 13:29:57 +00:00
*/
2020-07-09 16:16:03 +00:00
static int ice_service_task_stop ( struct ice_pf * pf )
2018-08-09 13:29:57 +00:00
{
2020-07-09 16:16:03 +00:00
int ret ;
2021-03-02 18:15:38 +00:00
ret = test_and_set_bit ( ICE_SERVICE_DIS , pf - > state ) ;
2018-08-09 13:29:57 +00:00
if ( pf - > serv_tmr . function )
del_timer_sync ( & pf - > serv_tmr ) ;
if ( pf - > serv_task . func )
cancel_work_sync ( & pf - > serv_task ) ;
2021-03-02 18:15:38 +00:00
clear_bit ( ICE_SERVICE_SCHED , pf - > state ) ;
2020-07-09 16:16:03 +00:00
return ret ;
2018-08-09 13:29:57 +00:00
}
2019-02-13 18:51:15 +00:00
/**
* ice_service_task_restart - restart service task and schedule works
* @ pf : board private structure
*
* This function is needed for suspend and resume flows (e.g. the WoL scenario)
*/
static void ice_service_task_restart ( struct ice_pf * pf )
{
2021-03-02 18:15:38 +00:00
clear_bit ( ICE_SERVICE_DIS , pf - > state ) ;
2019-02-13 18:51:15 +00:00
ice_service_task_schedule ( pf ) ;
}
2018-03-20 14:58:10 +00:00
/**
* ice_service_timer - timer callback to schedule service task
* @ t : pointer to timer_list
*/
static void ice_service_timer ( struct timer_list * t )
{
struct ice_pf * pf = from_timer ( pf , t , serv_tmr ) ;
mod_timer ( & pf - > serv_tmr , round_jiffies ( pf - > serv_tmr_period + jiffies ) ) ;
ice_service_task_schedule ( pf ) ;
}
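For reference, a self-rearming timer like this is typically armed once at
probe time; a minimal sketch under that assumption (the probe path itself
is outside this excerpt, and the one-second period is assumed):

	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;	/* assumed default period */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));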
2018-08-09 13:29:53 +00:00
/**
* ice_handle_mdd_event - handle malicious driver detect event
* @ pf : pointer to the PF structure
*
2020-02-13 21:31:16 +00:00
* Called from service task . OICR interrupt handler indicates MDD event .
* VF MDD logging is guarded by net_ratelimit . Additional PF and VF log
* messages are wrapped by netif_msg_ [ rx | tx ] _err . Since VF Rx MDD events
* disable the queue , the PF can be configured to reset the VF using ethtool
* private flag mdd - auto - reset - vf .
2018-08-09 13:29:53 +00:00
*/
static void ice_handle_mdd_event ( struct ice_pf * pf )
{
2019-11-08 14:23:26 +00:00
struct device * dev = ice_pf_to_dev ( pf ) ;
2018-08-09 13:29:53 +00:00
struct ice_hw * hw = & pf - > hw ;
2022-02-16 21:37:35 +00:00
struct ice_vf * vf ;
unsigned int bkt ;
2018-08-09 13:29:53 +00:00
u32 reg ;
2021-03-02 18:15:38 +00:00
if ( ! test_and_clear_bit ( ICE_MDD_EVENT_PENDING , pf - > state ) ) {
2020-02-13 21:31:16 +00:00
/* Since the VF MDD event logging is rate limited, check if
* there are pending MDD events .
*/
ice_print_vfs_mdd_events ( pf ) ;
2018-08-09 13:29:53 +00:00
return ;
2020-02-13 21:31:16 +00:00
}
2018-08-09 13:29:53 +00:00
2020-02-13 21:31:16 +00:00
/* find what triggered an MDD event */
2018-08-09 13:29:53 +00:00
reg = rd32 ( hw , GL_MDET_TX_PQM ) ;
if ( reg & GL_MDET_TX_PQM_VALID_M ) {
2023-12-06 01:01:12 +00:00
u8 pf_num = FIELD_GET ( GL_MDET_TX_PQM_PF_NUM_M , reg ) ;
u16 vf_num = FIELD_GET ( GL_MDET_TX_PQM_VF_NUM_M , reg ) ;
u8 event = FIELD_GET ( GL_MDET_TX_PQM_MAL_TYPE_M , reg ) ;
u16 queue = FIELD_GET ( GL_MDET_TX_PQM_QNUM_M , reg ) ;
2018-08-09 13:29:53 +00:00
if ( netif_msg_tx_err ( pf ) )
2019-11-08 14:23:26 +00:00
dev_info ( dev , " Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d \n " ,
2018-08-09 13:29:53 +00:00
event , queue , pf_num , vf_num ) ;
wr32 ( hw , GL_MDET_TX_PQM , 0xffffffff ) ;
}
2023-10-25 21:41:52 +00:00
reg = rd32 ( hw , GL_MDET_TX_TCLAN_BY_MAC ( hw ) ) ;
2018-08-09 13:29:53 +00:00
if ( reg & GL_MDET_TX_TCLAN_VALID_M ) {
2023-12-06 01:01:12 +00:00
u8 pf_num = FIELD_GET ( GL_MDET_TX_TCLAN_PF_NUM_M , reg ) ;
u16 vf_num = FIELD_GET ( GL_MDET_TX_TCLAN_VF_NUM_M , reg ) ;
u8 event = FIELD_GET ( GL_MDET_TX_TCLAN_MAL_TYPE_M , reg ) ;
u16 queue = FIELD_GET ( GL_MDET_TX_TCLAN_QNUM_M , reg ) ;
2018-08-09 13:29:53 +00:00
2020-02-06 09:20:12 +00:00
if ( netif_msg_tx_err ( pf ) )
2019-11-08 14:23:26 +00:00
dev_info ( dev , " Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d \n " ,
2018-08-09 13:29:53 +00:00
event , queue , pf_num , vf_num ) ;
2023-10-25 21:41:52 +00:00
wr32 ( hw , GL_MDET_TX_TCLAN_BY_MAC ( hw ) , U32_MAX ) ;
2018-08-09 13:29:53 +00:00
}
reg = rd32 ( hw , GL_MDET_RX ) ;
if ( reg & GL_MDET_RX_VALID_M ) {
2023-12-06 01:01:12 +00:00
u8 pf_num = FIELD_GET ( GL_MDET_RX_PF_NUM_M , reg ) ;
u16 vf_num = FIELD_GET ( GL_MDET_RX_VF_NUM_M , reg ) ;
u8 event = FIELD_GET ( GL_MDET_RX_MAL_TYPE_M , reg ) ;
u16 queue = FIELD_GET ( GL_MDET_RX_QNUM_M , reg ) ;
2018-08-09 13:29:53 +00:00
if ( netif_msg_rx_err ( pf ) )
2019-11-08 14:23:26 +00:00
dev_info ( dev , " Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d \n " ,
2018-08-09 13:29:53 +00:00
event , queue , pf_num , vf_num ) ;
wr32 ( hw , GL_MDET_RX , 0xffffffff ) ;
}
2020-02-13 21:31:16 +00:00
/* check to see if this PF caused an MDD event */
reg = rd32 ( hw , PF_MDET_TX_PQM ) ;
if ( reg & PF_MDET_TX_PQM_VALID_M ) {
wr32 ( hw , PF_MDET_TX_PQM , 0xFFFF ) ;
if ( netif_msg_tx_err ( pf ) )
dev_info ( dev , " Malicious Driver Detection event TX_PQM detected on PF \n " ) ;
}
2018-08-09 13:29:53 +00:00
2023-10-25 21:41:52 +00:00
reg = rd32 ( hw , PF_MDET_TX_TCLAN_BY_MAC ( hw ) ) ;
2020-02-13 21:31:16 +00:00
if ( reg & PF_MDET_TX_TCLAN_VALID_M ) {
2023-10-25 21:41:52 +00:00
wr32 ( hw , PF_MDET_TX_TCLAN_BY_MAC ( hw ) , 0xffff ) ;
2020-02-13 21:31:16 +00:00
if ( netif_msg_tx_err ( pf ) )
dev_info ( dev , " Malicious Driver Detection event TX_TCLAN detected on PF \n " ) ;
}
2018-08-09 13:29:53 +00:00
2020-02-13 21:31:16 +00:00
reg = rd32 ( hw , PF_MDET_RX ) ;
if ( reg & PF_MDET_RX_VALID_M ) {
wr32 ( hw , PF_MDET_RX , 0xFFFF ) ;
if ( netif_msg_rx_err ( pf ) )
dev_info ( dev , " Malicious Driver Detection event RX detected on PF \n " ) ;
2018-08-09 13:29:53 +00:00
}
2020-02-13 21:31:16 +00:00
/* Check to see if one of the VFs caused an MDD event, and then
* increment counters and set print pending
*/
ice: convert VF storage to hash table with krefs and RCU
The ice driver stores VF structures in a simple array which is allocated
once at the time of VF creation. The VF structures are then accessed
from the array by their VF ID. The ID must be between 0 and the number
of allocated VFs.
Multiple threads can access this table:
* .ndo operations such as .ndo_get_vf_cfg or .ndo_set_vf_trust
* interrupts, such as due to messages from the VF using the virtchnl
communication
* processing such as device reset
* commands to add or remove VFs
The current implementation does not keep track of when all threads are
done operating on a VF and can potentially result in use-after-free
issues caused by one thread accessing a VF structure after it has been
released when removing VFs. Some of these are prevented with various
state flags and checks.
In addition, this structure is quite static and does not support a
planned future where virtualization can be more dynamic. As we begin to
look at supporting Scalable IOV with the ice driver (as opposed to just
supporting Single Root IOV), this structure is not sufficient.
In the future, VFs will be able to be added and removed individually and
dynamically.
To allow for this, and to better protect against a whole class of
use-after-free bugs, replace the VF storage with a combination of a hash
table and krefs to reference track all of the accesses to VFs through
the hash table.
A hash table still allows efficient look up of the VF given its ID, but
also allows adding and removing VFs. It does not require contiguous VF
IDs.
The use of krefs allows the cleanup of the VF memory to be delayed until
after all threads have released their reference (by calling ice_put_vf).
To prevent corruption of the hash table, a combination of RCU and the
mutex table_lock are used. Addition and removal from the hash table use
the RCU-aware hash macros. This allows simple read-only look ups that
iterate to locate a single VF can be fast using RCU. Accesses which
modify the hash table, or which can't take RCU because they sleep, will
hold the mutex lock.
By using this design, we have a stronger guarantee that the VF structure
can't be released until after all threads are finished operating on it.
We also pave the way for the more dynamic Scalable IOV implementation in
the future.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
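To make the kref contract concrete, here is a sketch of the look-up
pattern this design enables (illustrative; it assumes the
ice_get_vf_by_id()/ice_put_vf() helpers that accompany this change):

	struct ice_vf *vf;

	vf = ice_get_vf_by_id(pf, vf_id);	/* look up and take a kref */
	if (!vf)
		return -ENOENT;			/* no such VF */
	/* ... the VF cannot be freed while this reference is held ... */
	ice_put_vf(vf);				/* drop the kref; may free the VF */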
2022-02-16 21:37:38 +00:00
mutex_lock ( & pf - > vfs . table_lock ) ;
2022-02-16 21:37:35 +00:00
ice_for_each_vf ( pf , bkt , vf ) {
reg = rd32 ( hw , VP_MDET_TX_PQM ( vf - > vf_id ) ) ;
2018-09-20 00:43:01 +00:00
if ( reg & VP_MDET_TX_PQM_VALID_M ) {
2022-02-16 21:37:35 +00:00
wr32 ( hw , VP_MDET_TX_PQM ( vf - > vf_id ) , 0xFFFF ) ;
2020-02-13 21:31:16 +00:00
vf - > mdd_tx_events . count + + ;
2021-03-02 18:15:38 +00:00
set_bit ( ICE_MDD_VF_PRINT_PENDING , pf - > state ) ;
2020-02-13 21:31:16 +00:00
if ( netif_msg_tx_err ( pf ) )
dev_info ( dev , " Malicious Driver Detection event TX_PQM detected on VF %d \n " ,
2022-02-16 21:37:35 +00:00
vf - > vf_id ) ;
2018-09-20 00:43:01 +00:00
}
2022-02-16 21:37:35 +00:00
reg = rd32 ( hw , VP_MDET_TX_TCLAN ( vf - > vf_id ) ) ;
2018-09-20 00:43:01 +00:00
if ( reg & VP_MDET_TX_TCLAN_VALID_M ) {
2022-02-16 21:37:35 +00:00
wr32 ( hw , VP_MDET_TX_TCLAN ( vf - > vf_id ) , 0xFFFF ) ;
2020-02-13 21:31:16 +00:00
vf - > mdd_tx_events . count + + ;
2021-03-02 18:15:38 +00:00
set_bit ( ICE_MDD_VF_PRINT_PENDING , pf - > state ) ;
2020-02-13 21:31:16 +00:00
if ( netif_msg_tx_err ( pf ) )
dev_info ( dev , " Malicious Driver Detection event TX_TCLAN detected on VF %d \n " ,
2022-02-16 21:37:35 +00:00
vf - > vf_id ) ;
2018-09-20 00:43:01 +00:00
}
2022-02-16 21:37:35 +00:00
reg = rd32 ( hw , VP_MDET_TX_TDPU ( vf - > vf_id ) ) ;
2018-09-20 00:43:01 +00:00
if ( reg & VP_MDET_TX_TDPU_VALID_M ) {
2022-02-16 21:37:35 +00:00
wr32 ( hw , VP_MDET_TX_TDPU ( vf - > vf_id ) , 0xFFFF ) ;
2020-02-13 21:31:16 +00:00
vf - > mdd_tx_events . count + + ;
2021-03-02 18:15:38 +00:00
set_bit ( ICE_MDD_VF_PRINT_PENDING , pf - > state ) ;
2020-02-13 21:31:16 +00:00
if ( netif_msg_tx_err ( pf ) )
dev_info ( dev , " Malicious Driver Detection event TX_TDPU detected on VF %d \n " ,
2022-02-16 21:37:35 +00:00
vf - > vf_id ) ;
2018-09-20 00:43:01 +00:00
}
2022-02-16 21:37:35 +00:00
reg = rd32 ( hw , VP_MDET_RX ( vf - > vf_id ) ) ;
2018-09-20 00:43:01 +00:00
if ( reg & VP_MDET_RX_VALID_M ) {
2022-02-16 21:37:35 +00:00
wr32 ( hw , VP_MDET_RX ( vf - > vf_id ) , 0xFFFF ) ;
2020-02-13 21:31:16 +00:00
vf - > mdd_rx_events . count + + ;
2021-03-02 18:15:38 +00:00
set_bit ( ICE_MDD_VF_PRINT_PENDING , pf - > state ) ;
2020-02-13 21:31:16 +00:00
if ( netif_msg_rx_err ( pf ) )
dev_info ( dev , " Malicious Driver Detection event RX detected on VF %d \n " ,
2022-02-16 21:37:35 +00:00
vf - > vf_id ) ;
2020-02-13 21:31:16 +00:00
/* Since the queue is disabled on VF Rx MDD events, the
* PF can be configured to reset the VF through ethtool
* private flag mdd - auto - reset - vf .
*/
2020-05-16 00:36:31 +00:00
if ( test_bit ( ICE_FLAG_MDD_AUTO_RESET_VF , pf - > flags ) ) {
/* VF MDD event counters will be cleared by
* reset , so print the event prior to reset .
*/
ice_print_vf_rx_mdd_event ( vf ) ;
2022-02-23 00:27:09 +00:00
ice_reset_vf ( vf , ICE_VF_RESET_LOCK ) ;
2020-05-16 00:36:31 +00:00
}
2018-09-20 00:43:01 +00:00
}
}
2022-02-16 21:37:38 +00:00
mutex_unlock ( & pf - > vfs . table_lock ) ;
2020-02-13 21:31:16 +00:00
ice_print_vfs_mdd_events ( pf ) ;
2018-08-09 13:29:53 +00:00
}
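As a usage note (an assumption about the standard tooling, not from the
driver source): the auto-reset behavior referenced above is toggled from
userspace via the ethtool private flag, e.g.
"ethtool --set-priv-flags <ifname> mdd-auto-reset-vf on",
where <ifname> is the PF netdev.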
2019-06-26 09:20:17 +00:00
/**
* ice_force_phys_link_state - Force the physical link state
* @ vsi : VSI to force the physical link state to up / down
* @ link_up : true / false indicates to set the physical link to up / down
*
* Force the physical link state by getting the current PHY capabilities from
* hardware and setting the PHY config based on the determined capabilities. If
* the link changes, a link event will be triggered because both the Enable
* Automatic Link Update and LESM Enable bits are set when setting the PHY
* capabilities.
*
* Returns 0 on success , negative on failure
*/
static int ice_force_phys_link_state ( struct ice_vsi * vsi , bool link_up )
{
struct ice_aqc_get_phy_caps_data * pcaps ;
struct ice_aqc_set_phy_cfg_data * cfg ;
struct ice_port_info * pi ;
struct device * dev ;
int retcode ;
if ( ! vsi | | ! vsi - > port_info | | ! vsi - > back )
return - EINVAL ;
if ( vsi - > type ! = ICE_VSI_PF )
return 0 ;
2020-02-06 09:20:09 +00:00
dev = ice_pf_to_dev ( vsi - > back ) ;
2019-06-26 09:20:17 +00:00
pi = vsi - > port_info ;
2019-11-08 14:23:25 +00:00
pcaps = kzalloc ( sizeof ( * pcaps ) , GFP_KERNEL ) ;
2019-06-26 09:20:17 +00:00
if ( ! pcaps )
return - ENOMEM ;
2021-03-25 22:35:06 +00:00
retcode = ice_aq_get_phy_caps ( pi , false , ICE_AQC_REPORT_ACTIVE_CFG , pcaps ,
2019-06-26 09:20:17 +00:00
NULL ) ;
if ( retcode ) {
2020-02-06 09:20:10 +00:00
dev_err ( dev , " Failed to get phy capabilities, VSI %d error %d \n " ,
2019-06-26 09:20:17 +00:00
vsi - > vsi_num , retcode ) ;
retcode = - EIO ;
goto out ;
}
/* No change in link */
if ( link_up = = ! ! ( pcaps - > caps & ICE_AQC_PHY_EN_LINK ) & &
link_up = = ! ! ( pi - > phy . link_info . link_info & ICE_AQ_LINK_UP ) )
goto out ;
2020-07-09 16:16:06 +00:00
/* Use the current user PHY configuration. The current user PHY
* configuration is initialized during probe from PHY capabilities
* software mode , and updated on set PHY configuration .
*/
cfg = kmemdup ( & pi - > phy . curr_user_phy_cfg , sizeof ( * cfg ) , GFP_KERNEL ) ;
2019-06-26 09:20:17 +00:00
if ( ! cfg ) {
retcode = - ENOMEM ;
goto out ;
}
2020-07-09 16:16:06 +00:00
cfg - > caps | = ICE_AQ_PHY_ENA_AUTO_LINK_UPDT ;
2019-06-26 09:20:17 +00:00
if ( link_up )
cfg - > caps | = ICE_AQ_PHY_ENA_LINK ;
else
cfg - > caps & = ~ ICE_AQ_PHY_ENA_LINK ;
2020-07-09 16:16:06 +00:00
retcode = ice_aq_set_phy_cfg ( & vsi - > back - > hw , pi , cfg , NULL ) ;
2019-06-26 09:20:17 +00:00
if ( retcode ) {
dev_err ( dev , " Failed to set phy config, VSI %d error %d \n " ,
vsi - > vsi_num , retcode ) ;
retcode = - EIO ;
}
2019-11-08 14:23:25 +00:00
kfree ( cfg ) ;
2019-06-26 09:20:17 +00:00
out :
2019-11-08 14:23:25 +00:00
kfree ( pcaps ) ;
2019-06-26 09:20:17 +00:00
return retcode ;
}
/**
2020-07-09 16:16:06 +00:00
* ice_init_nvm_phy_type - Initialize the NVM PHY type
* @ pi : port info structure
*
2020-07-09 16:16:07 +00:00
* Initialize nvm_phy_type_ [ low | high ] for link lenient mode support
2020-07-09 16:16:06 +00:00
*/
static int ice_init_nvm_phy_type ( struct ice_port_info * pi )
{
struct ice_aqc_get_phy_caps_data * pcaps ;
struct ice_pf * pf = pi - > hw - > back ;
2021-10-07 23:00:23 +00:00
int err ;
2020-07-09 16:16:06 +00:00
pcaps = kzalloc ( sizeof ( * pcaps ) , GFP_KERNEL ) ;
if ( ! pcaps )
return - ENOMEM ;
2021-10-07 23:00:23 +00:00
err = ice_aq_get_phy_caps ( pi , false , ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA ,
pcaps , NULL ) ;
2020-07-09 16:16:06 +00:00
2021-10-07 23:00:23 +00:00
if ( err ) {
2020-07-09 16:16:06 +00:00
dev_err ( ice_pf_to_dev ( pf ) , " Get PHY capability failed. \n " ) ;
goto out ;
}
pf - > nvm_phy_type_hi = pcaps - > phy_type_high ;
pf - > nvm_phy_type_lo = pcaps - > phy_type_low ;
out :
kfree ( pcaps ) ;
return err ;
}
2020-07-09 16:16:07 +00:00
/**
* ice_init_link_dflt_override - Initialize link default override
* @ pi : port info structure
2020-07-09 16:16:08 +00:00
*
* Initialize link default override and PHY total port shutdown during probe
2020-07-09 16:16:07 +00:00
*/
static void ice_init_link_dflt_override ( struct ice_port_info * pi )
{
struct ice_link_default_override_tlv * ldo ;
struct ice_pf * pf = pi - > hw - > back ;
ldo = & pf - > link_dflt_override ;
2020-07-09 16:16:08 +00:00
if ( ice_get_link_default_override ( ldo , pi ) )
return ;
if ( ! ( ldo - > options & ICE_LINK_OVERRIDE_PORT_DIS ) )
return ;
/* Enable Total Port Shutdown (override/replace link-down-on-close
* ethtool private flag ) for ports with Port Disable bit set .
*/
set_bit ( ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA , pf - > flags ) ;
set_bit ( ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA , pf - > flags ) ;
2020-07-09 16:16:07 +00:00
}
/**
* ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
* @ pi : port info structure
*
2021-03-25 22:35:12 +00:00
* If default override is enabled , initialize the user PHY cfg speed and FEC
2020-07-09 16:16:07 +00:00
* settings using the default override mask from the NVM .
*
* The PHY should only be configured with the default override settings the
2021-03-02 18:15:38 +00:00
* first time media is available . The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2020-07-09 16:16:07 +00:00
* is used to indicate that the user PHY cfg default override is initialized
* and the PHY has not been configured with the default override settings . The
* state is set here , and cleared in ice_configure_phy the first time the PHY is
* configured .
2021-03-25 22:35:12 +00:00
*
* This function should be called only if the FW doesn ' t support default
* configuration mode , as reported by ice_fw_supports_report_dflt_cfg .
2020-07-09 16:16:07 +00:00
*/
static void ice_init_phy_cfg_dflt_override ( struct ice_port_info * pi )
{
struct ice_link_default_override_tlv * ldo ;
struct ice_aqc_set_phy_cfg_data * cfg ;
struct ice_phy_info * phy = & pi - > phy ;
struct ice_pf * pf = pi - > hw - > back ;
ldo = & pf - > link_dflt_override ;
/* If link default override is enabled, use it to mask NVM PHY capabilities
* for speed and FEC default configuration.
*/
cfg = & phy - > curr_user_phy_cfg ;
if ( ldo - > phy_type_low | | ldo - > phy_type_high ) {
cfg - > phy_type_low = pf - > nvm_phy_type_lo &
cpu_to_le64 ( ldo - > phy_type_low ) ;
cfg - > phy_type_high = pf - > nvm_phy_type_hi &
cpu_to_le64 ( ldo - > phy_type_high ) ;
}
cfg - > link_fec_opt = ldo - > fec_options ;
phy - > curr_user_fec_req = ICE_FEC_AUTO ;
2021-03-02 18:15:38 +00:00
set_bit ( ICE_LINK_DEFAULT_OVERRIDE_PENDING , pf - > state ) ;
2020-07-09 16:16:07 +00:00
}
2020-07-09 16:16:06 +00:00
/**
* ice_init_phy_user_cfg - Initialize the PHY user configuration
* @ pi : port info structure
*
* Initialize the current user PHY configuration , speed , FEC , and FC requested
* mode to default . The PHY defaults are from get PHY capabilities topology
* with media so call when media is first available . An error is returned if
* called when media is not available . The PHY initialization completed state is
* set here .
*
* These configurations are used when setting PHY
* configuration . The user PHY configuration is updated on set PHY
* configuration . Returns 0 on success , negative on failure
*/
static int ice_init_phy_user_cfg ( struct ice_port_info * pi )
{
struct ice_aqc_get_phy_caps_data * pcaps ;
struct ice_phy_info * phy = & pi - > phy ;
struct ice_pf * pf = pi - > hw - > back ;
2021-10-07 23:00:23 +00:00
int err ;
2020-07-09 16:16:06 +00:00
if ( ! ( phy - > link_info . link_info & ICE_AQ_MEDIA_AVAILABLE ) )
return - EIO ;
pcaps = kzalloc ( sizeof ( * pcaps ) , GFP_KERNEL ) ;
if ( ! pcaps )
return - ENOMEM ;
2021-03-25 22:35:12 +00:00
if ( ice_fw_supports_report_dflt_cfg ( pi - > hw ) )
2021-10-07 23:00:23 +00:00
err = ice_aq_get_phy_caps ( pi , false , ICE_AQC_REPORT_DFLT_CFG ,
pcaps , NULL ) ;
2021-03-25 22:35:12 +00:00
else
2021-10-07 23:00:23 +00:00
err = ice_aq_get_phy_caps ( pi , false , ICE_AQC_REPORT_TOPO_CAP_MEDIA ,
pcaps , NULL ) ;
if ( err ) {
2020-07-09 16:16:06 +00:00
dev_err ( ice_pf_to_dev ( pf ) , " Get PHY capability failed. \n " ) ;
goto err_out ;
}
2020-07-09 16:16:07 +00:00
ice_copy_phy_caps_to_cfg ( pi , pcaps , & pi - > phy . curr_user_phy_cfg ) ;
/* check if lenient mode is supported and enabled */
2021-03-25 22:35:14 +00:00
if ( ice_fw_supports_link_override ( pi - > hw ) & &
2020-07-09 16:16:07 +00:00
! ( pcaps - > module_compliance_enforcement &
ICE_AQC_MOD_ENFORCE_STRICT_MODE ) ) {
set_bit ( ICE_FLAG_LINK_LENIENT_MODE_ENA , pf - > flags ) ;
2021-03-25 22:35:12 +00:00
/* if the FW supports default PHY configuration mode, then the driver
* does not have to apply link override settings . If not ,
* initialize user PHY configuration with link override values
2020-07-09 16:16:07 +00:00
*/
2021-03-25 22:35:12 +00:00
if ( ! ice_fw_supports_report_dflt_cfg ( pi - > hw ) & &
( pf - > link_dflt_override . options & ICE_LINK_OVERRIDE_EN ) ) {
2020-07-09 16:16:07 +00:00
ice_init_phy_cfg_dflt_override ( pi ) ;
goto out ;
}
}
2021-03-25 22:35:12 +00:00
/* if link default override is not enabled, set user flow control and
* FEC settings based on what get_phy_caps returned
2020-07-09 16:16:07 +00:00
*/
2020-07-09 16:16:06 +00:00
phy - > curr_user_fec_req = ice_caps_to_fec_mode ( pcaps - > caps ,
pcaps - > link_fec_options ) ;
phy - > curr_user_fc_req = ice_caps_to_fc_mode ( pcaps - > caps ) ;
2020-07-09 16:16:07 +00:00
out :
2020-07-09 16:16:06 +00:00
phy - > curr_user_speed_req = ICE_AQ_LINK_SPEED_M ;
2021-03-02 18:15:38 +00:00
set_bit ( ICE_PHY_INIT_COMPLETE , pf - > state ) ;
2020-07-09 16:16:06 +00:00
err_out :
kfree ( pcaps ) ;
return err ;
}
/**
* ice_configure_phy - configure PHY
* @ vsi : VSI of PHY
*
* Set the PHY configuration. If the current PHY configuration is the same as
* the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
* configure the PHY based on the get PHY capabilities for topology with media.
*/
static int ice_configure_phy ( struct ice_vsi * vsi )
{
struct device * dev = ice_pf_to_dev ( vsi - > back ) ;
2021-03-25 22:35:15 +00:00
struct ice_port_info * pi = vsi - > port_info ;
2020-07-09 16:16:06 +00:00
struct ice_aqc_get_phy_caps_data * pcaps ;
struct ice_aqc_set_phy_cfg_data * cfg ;
2021-03-25 22:35:15 +00:00
struct ice_phy_info * phy = & pi - > phy ;
struct ice_pf * pf = vsi - > back ;
2021-10-07 23:00:23 +00:00
int err ;
2020-07-09 16:16:06 +00:00
/* Ensure we have media as we cannot configure a medialess port */
2021-03-25 22:35:15 +00:00
if ( ! ( phy - > link_info . link_info & ICE_AQ_MEDIA_AVAILABLE ) )
2023-12-15 11:01:56 +00:00
return - ENOMEDIUM ;
2020-07-09 16:16:06 +00:00
ice_print_topo_conflict ( vsi ) ;
2021-07-16 22:16:39 +00:00
if ( ! test_bit ( ICE_FLAG_LINK_LENIENT_MODE_ENA , pf - > flags ) & &
phy - > link_info . topo_media_conflict = = ICE_AQ_LINK_TOPO_UNSUPP_MEDIA )
2020-07-09 16:16:06 +00:00
return - EPERM ;
2021-03-25 22:35:15 +00:00
if ( test_bit ( ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA , pf - > flags ) )
2020-07-09 16:16:06 +00:00
return ice_force_phys_link_state ( vsi , true ) ;
pcaps = kzalloc ( sizeof ( * pcaps ) , GFP_KERNEL ) ;
if ( ! pcaps )
return - ENOMEM ;
/* Get current PHY config */
2021-10-07 23:00:23 +00:00
err = ice_aq_get_phy_caps ( pi , false , ICE_AQC_REPORT_ACTIVE_CFG , pcaps ,
NULL ) ;
if ( err ) {
2021-10-07 22:56:02 +00:00
dev_err ( dev , " Failed to get PHY configuration, VSI %d error %d \n " ,
2021-10-07 23:00:23 +00:00
vsi - > vsi_num , err ) ;
2020-07-09 16:16:06 +00:00
goto done ;
}
/* If PHY enable link is configured and configuration has not changed,
* there ' s nothing to do
*/
if ( pcaps - > caps & ICE_AQC_PHY_EN_LINK & &
2021-03-25 22:35:15 +00:00
ice_phy_caps_equals_cfg ( pcaps , & phy - > curr_user_phy_cfg ) )
2020-07-09 16:16:06 +00:00
goto done ;
/* Use PHY topology as baseline for configuration */
memset ( pcaps , 0 , sizeof ( * pcaps ) ) ;
2021-03-25 22:35:12 +00:00
if ( ice_fw_supports_report_dflt_cfg ( pi - > hw ) )
2021-10-07 23:00:23 +00:00
err = ice_aq_get_phy_caps ( pi , false , ICE_AQC_REPORT_DFLT_CFG ,
pcaps , NULL ) ;
2021-03-25 22:35:12 +00:00
else
2021-10-07 23:00:23 +00:00
err = ice_aq_get_phy_caps ( pi , false , ICE_AQC_REPORT_TOPO_CAP_MEDIA ,
pcaps , NULL ) ;
if ( err ) {
2021-10-07 22:56:02 +00:00
dev_err ( dev , " Failed to get PHY caps, VSI %d error %d \n " ,
2021-10-07 23:00:23 +00:00
vsi - > vsi_num , err ) ;
2020-07-09 16:16:06 +00:00
goto done ;
}
cfg = kzalloc ( sizeof ( * cfg ) , GFP_KERNEL ) ;
if ( ! cfg ) {
err = - ENOMEM ;
goto done ;
}
2020-07-09 16:16:07 +00:00
ice_copy_phy_caps_to_cfg ( pi , pcaps , cfg ) ;
2020-07-09 16:16:06 +00:00
/* Speed - If default override pending, use curr_user_phy_cfg set in
* ice_init_phy_cfg_dflt_override().
*/
2021-03-02 18:15:38 +00:00
if ( test_and_clear_bit ( ICE_LINK_DEFAULT_OVERRIDE_PENDING ,
2020-07-09 16:16:07 +00:00
vsi - > back - > state ) ) {
2021-03-25 22:35:15 +00:00
cfg - > phy_type_low = phy - > curr_user_phy_cfg . phy_type_low ;
cfg - > phy_type_high = phy - > curr_user_phy_cfg . phy_type_high ;
2020-07-09 16:16:07 +00:00
} else {
u64 phy_low = 0 , phy_high = 0 ;
ice_update_phy_type ( & phy_low , & phy_high ,
pi - > phy . curr_user_speed_req ) ;
cfg - > phy_type_low = pcaps - > phy_type_low & cpu_to_le64 ( phy_low ) ;
cfg - > phy_type_high = pcaps - > phy_type_high &
cpu_to_le64 ( phy_high ) ;
}
2020-07-09 16:16:06 +00:00
/* Can't provide what was requested; use PHY capabilities */
if ( ! cfg - > phy_type_low & & ! cfg - > phy_type_high ) {
cfg - > phy_type_low = pcaps - > phy_type_low ;
cfg - > phy_type_high = pcaps - > phy_type_high ;
}
/* FEC */
2021-03-25 22:35:15 +00:00
ice_cfg_phy_fec ( pi , cfg , phy - > curr_user_fec_req ) ;
2020-07-09 16:16:06 +00:00
/* Can't provide what was requested; use PHY capabilities */
if ( cfg - > link_fec_opt ! =
( cfg - > link_fec_opt & pcaps - > link_fec_options ) ) {
cfg - > caps | = pcaps - > caps & ICE_AQC_PHY_EN_AUTO_FEC ;
cfg - > link_fec_opt = pcaps - > link_fec_options ;
}
/* Flow Control - always supported; no need to check against
* capabilities
*/
2021-03-25 22:35:15 +00:00
ice_cfg_phy_fc ( pi , cfg , phy - > curr_user_fc_req ) ;
2020-07-09 16:16:06 +00:00
/* Enable link and link update */
cfg - > caps | = ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK ;
2021-10-07 23:00:23 +00:00
err = ice_aq_set_phy_cfg ( & pf - > hw , pi , cfg , NULL ) ;
2021-10-07 23:01:58 +00:00
if ( err )
2021-10-07 22:56:02 +00:00
dev_err ( dev , " Failed to set phy config, VSI %d error %d \n " ,
2021-10-07 23:00:23 +00:00
vsi - > vsi_num , err ) ;
2020-07-09 16:16:06 +00:00
kfree ( cfg ) ;
done :
kfree ( pcaps ) ;
return err ;
}
/**
* ice_check_media_subtask - Check for media
2019-06-26 09:20:17 +00:00
* @ pf : pointer to PF struct
2020-07-09 16:16:06 +00:00
*
* If media is available, then initialize the PHY user configuration if it
* has not been initialized yet, and configure the PHY if the interface is up.
2019-06-26 09:20:17 +00:00
*/
static void ice_check_media_subtask ( struct ice_pf * pf )
{
struct ice_port_info * pi ;
struct ice_vsi * vsi ;
int err ;
2020-07-09 16:16:06 +00:00
/* No need to check for media if it's already present */
if ( ! test_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) )
2019-06-26 09:20:17 +00:00
return ;
2020-07-09 16:16:06 +00:00
vsi = ice_get_main_vsi ( pf ) ;
if ( ! vsi )
2019-06-26 09:20:17 +00:00
return ;
/* Refresh link info and check if media is present */
pi = vsi - > port_info ;
err = ice_update_link_info ( pi ) ;
if ( err )
return ;
2021-10-13 16:02:19 +00:00
ice_check_link_cfg_err ( pf , pi - > phy . link_info . link_cfg_err ) ;
2021-05-06 15:40:01 +00:00
2019-06-26 09:20:17 +00:00
if ( pi - > phy . link_info . link_info & ICE_AQ_MEDIA_AVAILABLE ) {
2021-03-02 18:15:38 +00:00
if ( ! test_bit ( ICE_PHY_INIT_COMPLETE , pf - > state ) )
2020-07-09 16:16:06 +00:00
ice_init_phy_user_cfg ( pi ) ;
/* PHY settings are reset on media insertion, reconfigure
* PHY to preserve settings .
*/
2021-03-02 18:15:37 +00:00
if ( test_bit ( ICE_VSI_DOWN , vsi - > state ) & &
2020-07-09 16:16:06 +00:00
test_bit ( ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA , vsi - > back - > flags ) )
2019-06-26 09:20:17 +00:00
return ;
2020-07-09 16:16:06 +00:00
err = ice_configure_phy ( vsi ) ;
if ( ! err )
clear_bit ( ICE_FLAG_NO_MEDIA , pf - > flags ) ;
2019-06-26 09:20:17 +00:00
/* A Link Status Event will be generated; the event handler
* will complete bringing the interface up
*/
}
}
/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(ICE_SUSPENDED, pf->state) ||
	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}
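
	/* Hand any latched OICR critical-error bits to the auxiliary (RDMA)
	 * driver. swap() both publishes pf->oicr_err_reg into the event and
	 * zeroes it (the event was kzalloc'd), so the next error is latched
	 * into a clean register snapshot.
	 */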
	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
		struct iidc_event *event;

		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (event) {
			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
			/* report the entire OICR value to AUX driver */
			swap(event->reg, pf->oicr_err_reg);
			ice_send_event_to_aux(pf, event);
			kfree(event);
		}
	}

	/* unplug aux dev per request, if an unplug request came in
	 * while processing a plug request, this will handle it
	 */
	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
		ice_unplug_aux_dev(pf);

	/* Plug aux device per request */
	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
		ice_plug_aux_dev(pf);

	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
		struct iidc_event *event;

		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (event) {
			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
			ice_send_event_to_aux(pf, event);
			kfree(event);
		}
	}
	ice_clean_adminq_subtask(pf);
	ice_check_media_subtask(pf);
	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_watchdog_subtask(pf);

	if (ice_is_safe_mode(pf)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_process_vflr_event(pf);
	ice_clean_mailboxq_subtask(pf);
	ice_clean_sbq_subtask(pf);
	ice_sync_arfs_fltrs(pf);
	ice_flush_fdir_ctx(pf);

	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}
/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the HW instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
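	/* The mailbox RQ is sized by the ARQLEN field mask itself, which is
	 * presumably the deepest receive queue the register can express, so
	 * bursts of VF mailbox messages are less likely to be dropped.
	 */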
	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}
/**
 * ice_schedule_reset - schedule a reset
 * @pf: board private structure
 * @reset: reset being requested
 */
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
{
	struct device *dev = ice_pf_to_dev(pf);

	/* bail out if earlier reset has failed */
	if (test_bit(ICE_RESET_FAILED, pf->state)) {
		dev_dbg(dev, "earlier reset has failed\n");
		return -EIO;
	}
	/* bail if reset/recovery already in progress */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_dbg(dev, "Reset already in progress\n");
		return -EBUSY;
	}

	switch (reset) {
	case ICE_RESET_PFR:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case ICE_RESET_CORER:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case ICE_RESET_GLOBR:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		return -EINVAL;
	}

	ice_service_task_schedule(pf);
	return 0;
}
/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void
ice_irq_affinity_notify(struct irq_affinity_notify *notify,
			const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;
	int i;

	ice_for_each_q_vector(vsi, i)
		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);

	ice_flush(hw);
	return 0;
}
/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	dev = ice_pf_to_dev(pf);
	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = q_vector->irq.virq;

		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.rx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.tx_ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
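
		/* A VF's control VSI (used for flow director) may share its
		 * vector with other control VSIs, hence IRQF_SHARED for that
		 * case; every other vector is owned exclusively by its queue
		 * vector (flags of 0).
		 */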
		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
					       IRQF_SHARED, q_vector->name,
					       q_vector);
		else
			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
					       0, q_vector->name, q_vector);
		if (err) {
			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
				   err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
			struct irq_affinity_notify *affinity_notify;

			affinity_notify = &q_vector->affinity_notify;
			affinity_notify->notify = ice_irq_affinity_notify;
			affinity_notify->release = ice_irq_affinity_release;
			irq_set_affinity_notifier(irq_num, affinity_notify);
		}

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}
	err = ice_set_cpu_rx_rmap(vsi);
	if (err) {
		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
			   vsi->vsi_num, ERR_PTR(err));
		goto free_q_irqs;
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector--) {
		irq_num = vsi->q_vectors[vector]->irq.virq;
		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
			irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
	}
	return err;
}
/**
 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
 * @vsi: VSI to setup Tx rings used by XDP
 *
 * Return 0 on success and negative value on error
 */
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_tx_desc *tx_desc;
	int i, j;

	ice_for_each_xdp_txq(vsi, i) {
		u16 xdp_q_idx = vsi->alloc_txq + i;
		struct ice_ring_stats *ring_stats;
		struct ice_tx_ring *xdp_ring;

		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
		if (!xdp_ring)
			goto free_xdp_rings;
		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
		if (!ring_stats) {
			ice_free_tx_ring(xdp_ring);
			goto free_xdp_rings;
		}

		xdp_ring->ring_stats = ring_stats;
		xdp_ring->q_index = xdp_q_idx;
		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
		xdp_ring->vsi = vsi;
		xdp_ring->netdev = NULL;
		xdp_ring->dev = dev;
		xdp_ring->count = vsi->num_tx_desc;
		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
		if (ice_setup_tx_ring(xdp_ring))
			goto free_xdp_rings;
		ice_set_ring_xdp(xdp_ring);
		spin_lock_init(&xdp_ring->tx_lock);
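
		/* Zero every descriptor up front so the XDP_TX cleaning logic
		 * never mistakes stale memory for a hardware-written DD
		 * (descriptor done) bit.
		 */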
		for (j = 0; j < xdp_ring->count; j++) {
			tx_desc = ICE_TX_DESC(xdp_ring, j);
			tx_desc->cmd_type_offset_bsz = 0;
		}
	}

	return 0;

free_xdp_rings:
	for (; i >= 0; i--) {
		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
			vsi->xdp_rings[i]->ring_stats = NULL;
			ice_free_tx_ring(vsi->xdp_rings[i]);
		}
	}
	return -ENOMEM;
}
/**
 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
 * @vsi: VSI to set the bpf prog on
 * @prog: the bpf prog pointer
 */
static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;
	int i;

	old_prog = xchg(&vsi->xdp_prog, prog);
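
	/* Publish the new prog to each Rx ring with WRITE_ONCE so lockless
	 * readers in the NAPI poll path see either the old or the new
	 * pointer, never a torn value; the old prog's reference is dropped
	 * only after every ring has been updated.
	 */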
	ice_for_each_rxq(vsi, i)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

	if (old_prog)
		bpf_prog_put(old_prog);
}
/**
 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
 * @vsi: VSI to bring up Tx rings used by XDP
 * @prog: bpf program that will be assigned to VSI
 *
 * Return 0 on success and negative value on error
 */
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	int xdp_rings_rem = vsi->num_xdp_txq;
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg xdp_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->num_xdp_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = vsi->alloc_txq,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct device *dev;
	int i, v_idx;
	int status;

	dev = ice_pf_to_dev(pf);
	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
	if (!vsi->xdp_rings)
		return -ENOMEM;

	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
	if (__ice_vsi_get_qs(&xdp_qs_cfg))
		goto err_map_xdp;

	if (static_key_enabled(&ice_xdp_locking_key))
		netdev_warn(vsi->netdev,
			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");

	if (ice_xdp_alloc_setup_rings(vsi))
		goto clear_xdp_rings;
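
	/* The vector mapping below spreads XDP rings as evenly as integer
	 * division allows, e.g. 10 rings over 4 vectors maps as 3/3/2/2
	 * because DIV_ROUND_UP() is re-evaluated against the remaining
	 * rings and vectors on every iteration.
	 */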
	/* follow the logic from ice_vsi_map_rings_to_vectors */
	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		int xdp_rings_per_v, q_id, q_base;

		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
					       vsi->num_q_vectors - v_idx);
		q_base = vsi->num_xdp_txq - xdp_rings_rem;

		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];

			xdp_ring->q_vector = q_vector;
			xdp_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = xdp_ring;
		}
		xdp_rings_rem -= xdp_rings_per_v;
	}

	ice_for_each_rxq(vsi, i) {
		if (static_key_enabled(&ice_xdp_locking_key)) {
			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
		} else {
			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
			struct ice_tx_ring *ring;

			ice_for_each_tx_ring(ring, q_vector->tx) {
				if (ice_ring_is_xdp(ring)) {
					vsi->rx_rings[i]->xdp_ring = ring;
					break;
				}
			}
		}
		ice_tx_xsk_pool(vsi, i);
	}
	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
	 * ice_cfg_vsi_lan is being called
	 */
	if (ice_is_reset_in_progress(pf->state))
		return 0;

	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
			status);
		goto clear_xdp_rings;
	}

	/* assign the prog only when it's not already present on VSI;
	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
	 * VSI rebuild that happens under ethtool -L can expose us to
	 * the bpf_prog refcount issues as we would be swapping same
	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
	 * this is not harmful as dev_xdp_install bumps the refcount
	 * before calling the op exposed by the driver;
	 */
	if (!ice_is_xdp_ena_vsi(vsi))
		ice_vsi_assign_bpf_prog(vsi, prog);

	return 0;
clear_xdp_rings:
	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

err_map_xdp:
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	devm_kfree(dev, vsi->xdp_rings);
	return -ENOMEM;
}
/**
 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
 * @vsi: VSI to remove XDP rings
 *
 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
 * resources
 */
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	int i, v_idx;

	/* q_vectors are freed in reset path so there's no point in detaching
	 * rings; in case of rebuild being triggered not from reset bits
	 * in pf->state won't be set, so additionally check first q_vector
	 * against NULL
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		goto free_qmap;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		struct ice_tx_ring *ring;

		ice_for_each_tx_ring(ring, q_vector->tx)
			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
				break;

		/* restore the value of last node prior to XDP setup */
		q_vector->tx.tx_ring = ring;
	}

free_qmap:
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			if (vsi->xdp_rings[i]->desc) {
				synchronize_rcu();
				ice_free_tx_ring(vsi->xdp_rings[i]);
			}
			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
			vsi->xdp_rings[i]->ring_stats = NULL;
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
	vsi->xdp_rings = NULL;

	if (static_key_enabled(&ice_xdp_locking_key))
		static_branch_dec(&ice_xdp_locking_key);

	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		return 0;

	ice_vsi_assign_bpf_prog(vsi, NULL);

	/* notify Tx scheduler that we destroyed XDP queues and bring
	 * back the old number of child nodes
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq;

	/* change number of XDP Tx queues to 0 */
	vsi->num_xdp_txq = 0;

	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			       max_txqs);
}
/**
 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
 * @vsi: VSI to schedule napi on
 */
static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];

		if (rx_ring->xsk_pool)
			napi_schedule(&rx_ring->q_vector->napi);
	}
}
/**
 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
 * @vsi: VSI to determine the count of XDP Tx qs
 *
 * Returns 0 when the number of available Tx queues is at least half the
 * number of possible CPUs, -ENOMEM otherwise
 */
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
{
	u16 avail = ice_get_avail_txq_count(vsi->back);
	u16 cpus = num_possible_cpus();
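
	/* Worked example: with 16 possible CPUs, fewer than 8 free Tx queues
	 * fails outright; 12 free queues gives num_xdp_txq = 12, and since
	 * 12 < 16 the locking static key is enabled so CPUs can share XDP
	 * rings safely under xdp_ring->tx_lock.
	 */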
	if (avail < cpus / 2)
		return -ENOMEM;

	vsi->num_xdp_txq = min_t(u16, avail, cpus);
	if (vsi->num_xdp_txq < cpus)
		static_branch_inc(&ice_xdp_locking_key);

	return 0;
}
/**
 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: Pointer to VSI structure
 */
static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
{
	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		return ICE_RXBUF_1664;
	else
		return ICE_RXBUF_3072;
}
/**
 * ice_xdp_setup_prog - Add or remove XDP eBPF program
 * @vsi: VSI to setup XDP for
 * @prog: XDP program
 * @extack: netlink extended ack
 */
static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack)
{
	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
	bool if_running = netif_running(vsi->netdev);
	int ret = 0, xdp_ring_err = 0;
	if (prog && !prog->aux->xdp_has_frags) {
		if (frame_size > ice_max_xdp_frame_size(vsi)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "MTU is too large for linear frames and XDP prog does not support frags");
			return -EOPNOTSUPP;
		}
	}
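
	/* Attach-over-attach and detach-over-detach both satisfy the check
	 * below: only the prog pointer changes, so it is swapped in place
	 * and the costly ring teardown/rebuild and link toggle are skipped.
	 */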
	/* hot swap progs and avoid toggling link */
	if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
		ice_vsi_assign_bpf_prog(vsi, prog);
		return 0;
	}

	/* need to stop netdev while setting up the program for Rx rings */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
		ret = ice_down(vsi);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
			return ret;
		}
	}
	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
		if (xdp_ring_err) {
			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
		} else {
			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
			if (xdp_ring_err)
				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
		}
		xdp_features_set_redirect_target(vsi->netdev, true);

		/* reallocate Rx queues that are used for zero-copy */
		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
		xdp_features_clear_redirect_target(vsi->netdev);
		xdp_ring_err = ice_destroy_xdp_rings(vsi);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");

		/* reallocate Rx queues that were used for zero-copy */
		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
	}

	if (if_running)
		ret = ice_up(vsi);
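
	/* Kick NAPI on zero-copy (AF_XDP) Rx queues: right after a prog swap
	 * they may have no interrupt pending, and without this nudge they
	 * could sit idle until the next packet raises one.
	 */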
	if (!ret && prog)
		ice_vsi_rx_napi_schedule(vsi);

	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}
/**
 * ice_xdp_safe_mode - XDP handler for safe mode
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
			     struct netdev_bpf *xdp)
{
	NL_SET_ERR_MSG_MOD(xdp->extack,
			   "Please provide working DDP firmware package in order to use XDP\n"
			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
	return -EOPNOTSUPP;
}

/**
 * ice_xdp - implements XDP handler
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;

	if (vsi->type != ICE_VSI_PF) {
		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
		return -EINVAL;
	}

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
	case XDP_SETUP_XSK_POOL:
		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
					  xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}
/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 pf_intr_start_offset;
	u32 val;

	/* Disable anti-spoof detection interrupt to prevent spurious event
	 * interrupts during a function reset. Anti-spoof functionality is
	 * still supported.
	 */
	val = rd32(hw, GL_MDCK_TX_TDPU);
	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
	wr32(hw, GL_MDCK_TX_TDPU, val);

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_PUSH_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);

	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
		return;

	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
/**
 * ice_ll_ts_intr - ll_ts interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = data;
	u32 pf_intr_start_offset;
	struct ice_ptp_tx *tx;
	unsigned long flags;
	struct ice_hw *hw;
	u32 val;
	u8 idx;

	hw = &pf->hw;
	tx = &pf->ptp.port.tx;
	spin_lock_irqsave(&tx->lock, flags);
	ice_ptp_complete_tx_single_tstamp(tx);
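
	/* A low-latency read completes exactly one timestamp slot, so scan
	 * (with wrap) for the next in-use slot past the one just read and
	 * request its timestamp right away; a return of tx->len means no
	 * slot is pending.
	 */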
	idx = find_next_bit_wrap(tx->in_use, tx->len,
				 tx->last_ll_ts_idx_read + 1);
	if (idx != tx->len)
		ice_ptp_req_tx_single_tstamp(tx, idx);
	spin_unlock_irqrestore(&tx->lock, flags);

	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
	     val);

	return IRQ_HANDLED;
}
/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	irqreturn_t ret = IRQ_HANDLED;
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	u32 oicr, ena_mask;

	dev = ice_pf_to_dev(pf);
	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}
	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_VFLR_M) {
		/* disable any further VFLR event notifications */
		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
			u32 reg = rd32(hw, PFINT_OICR_ENA);

			reg &= ~PFINT_OICR_VFLR_M;
			wr32(hw, PFINT_OICR_ENA, reg);
		} else {
			ena_mask &= ~PFINT_OICR_VFLR_M;
			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
		}
	}
	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
				  rd32(hw, GLGEN_RSTAT));

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(dev, "Invalid reset type %d\n", reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 */
		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(ICE_GLOBR_RECV, pf->state);
			else
				set_bit(ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}
	if (oicr & PFINT_OICR_TSYN_TX_M) {
		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
		if (ice_pf_state_is_nominal(pf) &&
		    pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
			unsigned long flags;
			u8 idx;

			spin_lock_irqsave(&tx->lock, flags);
			idx = find_next_bit_wrap(tx->in_use, tx->len,
						 tx->last_ll_ts_idx_read + 1);
			if (idx != tx->len)
				ice_ptp_req_tx_single_tstamp(tx, idx);
			spin_unlock_irqrestore(&tx->lock, flags);
		} else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
			ret = IRQ_WAKE_THREAD;
		}
	}
2021-06-16 16:35:22 +00:00
	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));

		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
2023-06-01 21:15:03 +00:00
2023-09-08 21:37:14 +00:00
		if (ice_pf_src_tmr_owned(pf)) {
2023-06-01 21:15:03 +00:00
			/* Save EVENTs from GLTSYN register */
			pf->ptp.ext_ts_irq |= gltsyn_stat &
					      (GLTSYN_STAT_EVENT0_M |
					       GLTSYN_STAT_EVENT1_M |
					       GLTSYN_STAT_EVENT2_M);
2023-11-29 12:40:22 +00:00
			ice_ptp_extts_event(pf);
2023-06-01 21:15:03 +00:00
		}
2021-06-16 16:35:22 +00:00
	}
2021-05-20 14:37:50 +00:00
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
	if (oicr & ICE_AUX_CRIT_ERR) {
2022-03-23 12:43:52 +00:00
		pf->oicr_err_reg |= oicr;
		set_bit(ICE_AUX_ERR_PENDING, pf->state);
2021-05-20 14:37:50 +00:00
		ena_mask &= ~ICE_AUX_CRIT_ERR;
2018-03-20 14:58:10 +00:00
	}
2019-02-28 23:25:58 +00:00
	/* Report any remaining unexpected interrupts */
2018-03-20 14:58:10 +00:00
	oicr &= ena_mask;
	if (oicr) {
2019-11-08 14:23:26 +00:00
		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2018-03-20 14:58:10 +00:00
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
2021-05-20 14:37:50 +00:00
		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2018-03-20 14:58:18 +00:00
			    PFINT_OICR_ECC_ERR_M)) {
2021-03-02 18:15:38 +00:00
			set_bit(ICE_PFR_REQ, pf->state);
2018-03-20 14:58:18 +00:00
		}
2018-03-20 14:58:10 +00:00
	}
2023-11-29 12:40:22 +00:00
	ice_service_task_schedule(pf);
	if (ret == IRQ_HANDLED)
		ice_irq_dynamic_ena(hw, NULL, NULL);
2018-03-20 14:58:10 +00:00
2023-11-29 12:40:22 +00:00
	return ret;
2018-03-20 14:58:10 +00:00
}
2022-09-16 20:17:28 +00:00
/**
 * ice_misc_intr_thread_fn - misc interrupt thread function
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */
static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
{
	struct ice_pf *pf = data;
2023-06-01 21:15:07 +00:00
	struct ice_hw *hw;

	hw = &pf->hw;
2022-09-16 20:17:28 +00:00
2022-11-18 22:27:29 +00:00
	if (ice_is_reset_in_progress(pf->state))
2023-11-29 12:40:22 +00:00
		goto skip_irq;
2023-06-01 21:15:03 +00:00
	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
2023-06-01 21:15:06 +00:00
		/* Process outstanding Tx timestamps. If there is more work,
		 * re-arm the interrupt to trigger again.
		 */
		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
			ice_flush(hw);
		}
2023-06-01 21:15:03 +00:00
	}
2022-11-18 22:27:29 +00:00
2023-11-29 12:40:22 +00:00
skip_irq:
2023-06-01 21:15:07 +00:00
	ice_irq_dynamic_ena(hw, NULL, NULL);
2022-11-18 22:27:29 +00:00
	return IRQ_HANDLED;
2022-09-16 20:17:28 +00:00
}
2019-02-08 20:50:34 +00:00
/**
 * ice_dis_ctrlq_interrupts - disable control queue interrupts
 * @hw: pointer to HW structure
 */
static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
{
	/* disable Admin queue Interrupt causes */
	wr32(hw, PFINT_FW_CTL,
	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);

	/* disable Mailbox queue Interrupt causes */
	wr32(hw, PFINT_MBX_CTL,
	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2021-06-09 16:39:46 +00:00
	wr32(hw, PFINT_SB_CTL,
	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
2019-02-08 20:50:34 +00:00
	/* disable Control queue Interrupt causes */
	wr32(hw, PFINT_OICR_CTL,
	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);

	ice_flush(hw);
}
2023-11-29 12:40:23 +00:00
/**
 * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
 * @pf: board private structure
 */
static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
{
	int irq_num = pf->ll_ts_irq.virq;

	synchronize_irq(irq_num);
	devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
	ice_free_irq(pf, pf->ll_ts_irq);
}
2018-03-20 14:58:10 +00:00
/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
2023-05-15 19:03:17 +00:00
	int misc_irq_num = pf->oicr_irq.virq;
2019-02-08 20:50:34 +00:00
	struct ice_hw *hw = &pf->hw;

	ice_dis_ctrlq_interrupts(hw);
2018-03-20 14:58:10 +00:00
	/* disable OICR interrupt */
2019-02-08 20:50:34 +00:00
	wr32(hw, PFINT_OICR_ENA, 0);
	ice_flush(hw);
2018-03-20 14:58:10 +00:00
2023-05-15 19:03:14 +00:00
	synchronize_irq(misc_irq_num);
	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
2018-03-20 14:58:10 +00:00
2023-05-15 19:03:17 +00:00
	ice_free_irq(pf, pf->oicr_irq);
2023-11-29 12:40:23 +00:00
	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
		ice_free_irq_msix_ll_ts(pf);
2018-03-20 14:58:10 +00:00
}
2019-02-08 20:50:34 +00:00
/**
 * ice_ena_ctrlq_interrupts - enable control queue interrupts
 * @hw: pointer to HW structure
2019-02-28 23:25:59 +00:00
 * @reg_idx: HW vector index to associate the control queue interrupts with
2019-02-08 20:50:34 +00:00
 */
2019-02-28 23:25:59 +00:00
static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2019-02-08 20:50:34 +00:00
{
	u32 val;
2019-02-28 23:25:59 +00:00
	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2019-02-08 20:50:34 +00:00
	       PFINT_OICR_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_OICR_CTL, val);

	/* enable Admin queue Interrupt causes */
2019-02-28 23:25:59 +00:00
	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2019-02-08 20:50:34 +00:00
	       PFINT_FW_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_FW_CTL, val);

	/* enable Mailbox queue Interrupt causes */
2019-02-28 23:25:59 +00:00
	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2019-02-08 20:50:34 +00:00
	       PFINT_MBX_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_MBX_CTL, val);
2023-11-29 12:40:23 +00:00
	if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
		/* enable Sideband queue Interrupt causes */
		val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
		       PFINT_SB_CTL_CAUSE_ENA_M);
		wr32(hw, PFINT_SB_CTL, val);
	}
2021-06-09 16:39:46 +00:00
2019-02-08 20:50:34 +00:00
	ice_flush(hw);
}
2018-03-20 14:58:10 +00:00
/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
2018-10-26 18:44:46 +00:00
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
2018-03-20 14:58:10 +00:00
 * when in MSI or Legacy interrupt mode.
 */
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
2019-11-08 14:23:26 +00:00
	struct device *dev = ice_pf_to_dev(pf);
2018-03-20 14:58:10 +00:00
	struct ice_hw *hw = &pf->hw;
2023-11-29 12:40:23 +00:00
	u32 pf_intr_start_offset;
	struct msi_map irq;
2023-05-15 19:03:17 +00:00
	int err = 0;
2018-03-20 14:58:10 +00:00
	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2019-11-08 14:23:26 +00:00
			 dev_driver_string(dev), dev_name(dev));
2018-03-20 14:58:10 +00:00
2023-11-29 12:40:23 +00:00
	if (!pf->int_name_ll_ts[0])
		snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
			 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
2018-03-20 14:58:18 +00:00
	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
2018-09-20 00:23:11 +00:00
	if (ice_is_reset_in_progress(pf->state))
2018-03-20 14:58:18 +00:00
		goto skip_req_irq;
ice: Refactor interrupt tracking
Currently we have two MSI-x (IRQ) trackers, one for OS requested MSI-x
entries (sw_irq_tracker) and one for hardware MSI-x vectors
(hw_irq_tracker). Generally the sw_irq_tracker has less entries than the
hw_irq_tracker because the hw_irq_tracker has entries equal to the max
allowed MSI-x per PF and the sw_irq_tracker is mainly the minimum (non
SR-IOV portion of the vectors, kernel granted IRQs). All of the non
SR-IOV portions of the driver (i.e. LAN queues, RDMA queues, OICR, etc.)
take at least one of each type of tracker resource. SR-IOV only grabs
entries from the hw_irq_tracker. There are a few issues with this approach
that can be seen when doing any kind of device reconfiguration (i.e.
ethtool -L, SR-IOV, etc.). One of them being, any time the driver creates
an ice_q_vector and associates it to a LAN queue pair it will grab and
use one entry from the hw_irq_tracker and one from the sw_irq_tracker.
If the indices on these do not match, it will cause a Tx timeout, which
will cause a reset and then the indices will match up again and traffic
will resume. The mismatched indices come from the trackers not being the
same size and/or the search_hint in the two trackers not being equal.
Another reason for the refactor is the co-existence of features with
SR-IOV. If SR-IOV is enabled and the interrupts are taken from the end
of the sw_irq_tracker then other features can no longer use this space
because the hardware has now given the remaining interrupts to SR-IOV.
This patch reworks how we track MSI-x vectors by removing the
hw_irq_tracker completely and instead MSI-x resources needed for SR-IOV
are determined all at once instead of per VF. This can be done because
when creating VFs we know how many are wanted and how many MSI-x vectors
each VF needs. This also allows us to start using MSI-x resources from
the end of the PF's allowed MSI-x vectors so we are less likely to use
entries needed for other features (i.e. RDMA, L2 Offload, etc.).
This patch also reworks the ice_res_tracker structure by removing the
search_hint and adding a new member - "end". Instead of having a
search_hint we will always search from 0. The new member, "end", will be
used to manipulate the end of the ice_res_tracker (specifically
sw_irq_tracker) during runtime based on MSI-x vectors needed by SR-IOV.
In the normal case, the end of ice_res_tracker will be equal to the
ice_res_tracker's num_entries.
The sriov_base_vector member was added to the PF structure. It is used
to represent the starting MSI-x index of all the needed MSI-x vectors
for all SR-IOV VFs. Depending on how many MSI-x are needed, SR-IOV may
have to take resources from the sw_irq_tracker. This is done by setting
the sw_irq_tracker->end equal to the pf->sriov_base_vector. When all
SR-IOV VFs are removed then the sw_irq_tracker->end is reset back to
sw_irq_tracker->num_entries. The sriov_base_vector, along with the VF's
number of MSI-x (pf->num_vf_msix), vf_id, and the base MSI-x index on
the PF (pf->hw.func_caps.common_cap.msix_vector_first_id), is used to
calculate the first HW absolute MSI-x index for each VF, which is used
to write to the VPINT_ALLOC[_PCI] and GLINT_VECT2FUNC registers to
program the VFs MSI-x PCI configuration bits. Also, the sriov_base_vector
is used along with VF's num_vf_msix, vf_id, and q_vector->v_idx to
determine the MSI-x register index (used for writing to GLINT_DYN_CTL)
within the PF's space.
Interrupt changes removed any references to hw_base_vector, hw_oicr_idx,
and hw_irq_tracker. Only sw_base_vector, sw_oicr_idx, and sw_irq_tracker
variables remain. Change all of these by removing the "sw_" prefix to
help avoid confusion with these variables and their use.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
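As a sketch of the arithmetic described above (illustrative only; the helper added by this change may differ in name and detail), a VF's first absolute MSI-x index combines the PF's first HW vector, the SR-IOV base vector, and the per-VF vector count:

/* Illustrative only: first absolute MSI-x index for a given VF. */
static int example_vf_first_vector_idx(struct ice_pf *pf, u16 vf_id)
{
	return pf->hw.func_caps.common_cap.msix_vector_first_id +
	       pf->sriov_base_vector + vf_id * pf->num_vf_msix;
}

The VPINT_ALLOC[_PCI] and GLINT_VECT2FUNC writes that program a VF's MSI-x PCI configuration are offsets from this value.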
2019-04-16 17:30:44 +00:00
	/* reserve one vector in irq_tracker for misc interrupts */
2023-11-29 12:40:23 +00:00
	irq = ice_alloc_irq(pf, false);
	if (irq.index < 0)
		return irq.index;
2023-05-15 19:03:17 +00:00
2023-11-29 12:40:23 +00:00
	pf->oicr_irq = irq;
2023-05-15 19:03:17 +00:00
	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
					ice_misc_intr_thread_fn, 0,
					pf->int_name, pf);
2018-03-20 14:58:10 +00:00
	if (err) {
2022-09-16 20:17:28 +00:00
		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
2018-03-20 14:58:10 +00:00
			pf->int_name, err);
2023-05-15 19:03:17 +00:00
		ice_free_irq(pf, pf->oicr_irq);
2018-03-20 14:58:10 +00:00
		return err;
	}
2023-11-29 12:40:23 +00:00
	/* reserve one vector in irq_tracker for ll_ts interrupt */
	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
		goto skip_req_irq;

	irq = ice_alloc_irq(pf, false);
	if (irq.index < 0)
		return irq.index;

	pf->ll_ts_irq = irq;
	err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
			       pf->int_name_ll_ts, pf);
	if (err) {
		dev_err(dev, "devm_request_irq for %s failed: %d\n",
			pf->int_name_ll_ts, err);
		ice_free_irq(pf, pf->ll_ts_irq);
		return err;
	}
2018-03-20 14:58:18 +00:00
skip_req_irq:
2018-03-20 14:58:10 +00:00
	ice_ena_misc_vector(pf);
2023-05-15 19:03:17 +00:00
	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
2023-11-29 12:40:23 +00:00
	/* This enables LL TS interrupt */
	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
		wr32(hw, PFINT_SB_CTL,
		     ((pf->ll_ts_irq.index + pf_intr_start_offset) &
		      PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
2023-05-15 19:03:17 +00:00
	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
2018-12-19 18:03:29 +00:00
	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2018-03-20 14:58:10 +00:00
	ice_flush(hw);
2018-03-20 14:58:13 +00:00
	ice_irq_dynamic_ena(hw, NULL, NULL);
2018-03-20 14:58:10 +00:00
	return 0;
}
2018-03-20 14:58:11 +00:00
/**
2018-09-20 00:23:09 +00:00
 * ice_napi_add - register NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be registered
2018-03-20 14:58:11 +00:00
 *
2018-09-20 00:23:09 +00:00
 * This function is only called in the driver's load path. Registering the NAPI
 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
 * reset/rebuild, etc.)
2018-03-20 14:58:11 +00:00
 */
2018-09-20 00:23:09 +00:00
static void ice_napi_add(struct ice_vsi *vsi)
2018-03-20 14:58:11 +00:00
{
2018-09-20 00:23:09 +00:00
	int v_idx;
2018-03-20 14:58:11 +00:00
2018-09-20 00:23:09 +00:00
	if (!vsi->netdev)
2018-03-20 14:58:11 +00:00
		return;
2023-12-01 23:28:40 +00:00
	ice_for_each_q_vector(vsi, v_idx) {
2018-09-20 00:23:09 +00:00
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
2022-09-27 13:27:53 +00:00
			       ice_napi_poll);
2024-02-13 19:48:50 +00:00
		__ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
2023-12-01 23:28:40 +00:00
	}
2018-03-20 14:58:11 +00:00
}
/**
2019-09-09 13:47:46 +00:00
 * ice_set_ops - set netdev and ethtool ops for the given netdev
2023-02-14 14:39:27 +00:00
 * @vsi: the VSI associated with the new netdev
2018-03-20 14:58:11 +00:00
 */
2023-02-14 14:39:27 +00:00
static void ice_set_ops(struct ice_vsi *vsi)
2018-03-20 14:58:11 +00:00
{
2023-02-14 14:39:27 +00:00
	struct net_device *netdev = vsi->netdev;
2019-09-09 13:47:46 +00:00
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_is_safe_mode(pf)) {
		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
		ice_set_ethtool_safe_mode_ops(netdev);
		return;
	}

	netdev->netdev_ops = &ice_netdev_ops;
2020-09-26 00:56:46 +00:00
	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
2023-12-05 21:08:34 +00:00
	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
2019-09-09 13:47:46 +00:00
	ice_set_ethtool_ops(netdev);
2023-02-14 14:39:27 +00:00
	if (vsi->type != ICE_VSI_PF)
		return;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
			       NETDEV_XDP_ACT_RX_SG;
2023-07-19 13:24:12 +00:00
	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
2019-09-09 13:47:46 +00:00
}
/**
 * ice_set_netdev_features - set features for the given netdev
 * @netdev: netdev instance
 */
static void ice_set_netdev_features(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
ice: Advertise 802.1ad VLAN filtering and offloads for PF netdev
In order for the driver to support 802.1ad VLAN filtering and offloads,
it needs to advertise those VLAN features and also support modifying
those VLAN features, so make the necessary changes to
ice_set_netdev_features(). By default, enable CTAG insertion/stripping
and CTAG filtering for both Single and Double VLAN Modes (SVM/DVM).
Also, in DVM, enable STAG filtering by default. This is done by
setting the feature bits in netdev->features. Also, in DVM, support
toggling of STAG insertion/stripping, but don't enable them by
default. This is done by setting the feature bits in
netdev->hw_features.
Since 802.1ad VLAN filtering and offloads are only supported in DVM, make
sure they are not enabled by default and that they cannot be enabled
during runtime, when the device is in SVM.
Add an implementation for the ndo_fix_features() callback. This is
needed since the hardware cannot support multiple VLAN ethertypes for
VLAN insertion/stripping simultaneously and all supported VLAN filtering
must either be enabled or disabled together.
Disable inner VLAN stripping by default when DVM is enabled. If a VSI
supports stripping the inner VLAN in DVM, then it will have to configure
that during runtime. For example if a VF is configured in a port VLAN
while DVM is enabled it will be allowed to offload inner VLANs.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
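A minimal sketch of that enforcement (illustrative only; the real logic is the driver's ice_fix_features() callback and also reconciles the filtering bits) might look like this:

/* Illustrative only: keep CTAG and STAG offloads mutually exclusive. */
static netdev_features_t
example_fix_vlan_features(struct net_device *netdev, netdev_features_t req)
{
	netdev_features_t stag = NETIF_F_HW_VLAN_STAG_RX |
				 NETIF_F_HW_VLAN_STAG_TX;
	netdev_features_t ctag = NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_TX;

	/* The hardware cannot insert/strip both TPIDs at once, so favor
	 * whichever offload type the user is enabling right now.
	 */
	if ((req & stag) && !(netdev->features & stag))
		req &= ~ctag;
	else if (req & ctag)
		req &= ~stag;

	return req;
}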
2021-12-02 16:38:50 +00:00
	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
2018-03-20 14:58:15 +00:00
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;
2018-03-20 14:58:11 +00:00
2019-09-09 13:47:46 +00:00
	if (ice_is_safe_mode(pf)) {
		/* safe mode */
		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
		netdev->hw_features = netdev->features;
		return;
	}
2018-03-20 14:58:11 +00:00
2018-03-20 14:58:15 +00:00
	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA |
2020-05-12 01:01:40 +00:00
			NETIF_F_NTUPLE |
2018-03-20 14:58:15 +00:00
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM |
			 NETIF_F_IP_CSUM |
2018-12-19 18:03:32 +00:00
			 NETIF_F_SCTP_CRC |
2018-03-20 14:58:15 +00:00
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
2021-12-02 16:38:50 +00:00
	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
	if (is_dvm_ena)
		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
2020-05-06 16:32:30 +00:00
	tso_features = NETIF_F_TSO |
		       NETIF_F_TSO_ECN |
		       NETIF_F_TSO6 |
		       NETIF_F_GSO_GRE |
		       NETIF_F_GSO_UDP_TUNNEL |
		       NETIF_F_GSO_GRE_CSUM |
		       NETIF_F_GSO_UDP_TUNNEL_CSUM |
		       NETIF_F_GSO_PARTIAL |
		       NETIF_F_GSO_IPXIP4 |
		       NETIF_F_GSO_IPXIP6 |
2019-12-12 11:12:53 +00:00
		       NETIF_F_GSO_UDP_L4;
2018-03-20 14:58:15 +00:00
2020-05-06 16:32:30 +00:00
	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_GRE_CSUM;
2018-03-20 14:58:11 +00:00
	/* set features that user can change */
2018-03-20 14:58:15 +00:00
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;
2018-03-20 14:58:11 +00:00
2020-05-06 16:32:30 +00:00
	/* add support for HW_CSUM on packets with MPLS header */
2022-03-18 04:12:12 +00:00
	netdev->mpls_features = NETIF_F_HW_CSUM |
				NETIF_F_TSO |
				NETIF_F_TSO6;
2020-05-06 16:32:30 +00:00
2018-03-20 14:58:11 +00:00
	/* enable features */
	netdev->features |= netdev->hw_features;
2021-08-06 08:49:05 +00:00
	netdev->hw_features |= NETIF_F_HW_TC;
2022-07-07 10:16:51 +00:00
	netdev->hw_features |= NETIF_F_LOOPBACK;
2021-08-06 08:49:05 +00:00
2018-03-20 14:58:15 +00:00
	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;
2021-12-02 16:38:50 +00:00
	/* advertise support but don't enable by default since only one type of
	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
	 * type turns on the other has to be turned off. This is enforced by the
	 * ice_fix_features() ndo callback.
	 */
	if (is_dvm_ena)
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
				       NETIF_F_HW_VLAN_STAG_TX;
2022-07-27 07:24:05 +00:00
	/* Leave CRC/FCS stripping enabled by default, but allow the value to
	 * be changed at runtime
	 */
	netdev->hw_features |= NETIF_F_RXFCS;
2023-02-07 16:23:03 +00:00
	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
2019-09-09 13:47:46 +00:00
}
2018-03-20 14:58:15 +00:00
/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
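For illustration (not driver code), the round-robin fill spreads an 8-entry table across 3 queues as follows:

	u8 lut[8];

	ice_fill_rss_lut(lut, 8, 3);
	/* lut == { 0, 1, 2, 0, 1, 2, 0, 1 }: hash buckets land evenly
	 * on the active queues
	 */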
2018-08-09 13:29:50 +00:00
/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
2019-04-16 17:30:43 +00:00
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
2018-08-09 13:29:50 +00:00
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
ice: refactor VSI setup to use parameter structure
The ice_vsi_setup function, ice_vsi_alloc, and ice_vsi_cfg functions have
grown a large number of parameters. These parameters are used to initialize
a new VSI, as well as to reconfigure an existing VSI.
Any time we want to add a new parameter to this function chain, even if it
will usually be unset, we have to change many call sites due to changing
the function signature.
A future change is going to refactor ice_vsi_alloc and ice_vsi_cfg to move
the VSI configuration and initialization all into ice_vsi_cfg.
Before this, refactor the VSI setup flow to use a new ice_vsi_cfg_params
structure. This will contain the configuration (mainly pointers) used to
initialize a VSI.
Pass this from ice_vsi_setup into the related functions such as
ice_vsi_alloc, ice_vsi_cfg, and ice_vsi_cfg_def.
Introduce a helper, ice_vsi_to_params to convert an existing VSI to the
parameters used to initialize it. This will aid in the flows where we
rebuild an existing VSI.
Since we also pass the ICE_VSI_FLAG_INIT to more functions which do not
need (or cannot yet have) the VSI parameters, let's make this clear by
renaming the function parameter to vsi_flags and using a u32 instead of a
signed integer. The name vsi_flags also makes it clear that we may extend
the flags in the future.
This change will make it easier to refactor the setup flow in the future,
and will reduce the complexity required to add a new parameter for
configuration in the future.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
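A sketch of that helper (illustrative only; the real ice_vsi_to_params may carry additional fields such as the owning VF) simply reads the configuration back out of an existing VSI:

/* Illustrative only: recover the params a VSI was originally built with. */
static struct ice_vsi_cfg_params example_vsi_to_params(struct ice_vsi *vsi)
{
	struct ice_vsi_cfg_params params = {};

	params.type = vsi->type;
	params.pi = vsi->port_info;
	params.ch = vsi->ch;

	return params;
}

Rebuild paths can then feed the result straight back into ice_vsi_cfg() without every call site growing a new argument.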
2023-01-19 01:16:43 +00:00
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_PF;
	params.pi = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
2018-08-09 13:29:50 +00:00
}
2021-10-15 23:35:16 +00:00
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
		   struct ice_channel *ch)
{
2023-01-19 01:16:43 +00:00
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_CHNL;
	params.pi = pi;
	params.ch = ch;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
2021-10-15 23:35:16 +00:00
}
2020-05-12 01:01:40 +00:00
/**
 * ice_ctrl_vsi_setup - Set up a control VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
2023-01-19 01:16:43 +00:00
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_CTRL;
	params.pi = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
2020-05-12 01:01:40 +00:00
}
2019-04-16 17:30:43 +00:00
/**
 * ice_lb_vsi_setup - Set up a loopback VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
2023-01-19 01:16:43 +00:00
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_LB;
	params.pi = pi;
	params.flags = ICE_VSI_FLAG_INIT;

	return ice_vsi_setup(pf, &params);
2019-04-16 17:30:43 +00:00
}
2018-03-20 14:58:15 +00:00
/**
2019-02-19 23:04:13 +00:00
 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
2018-03-20 14:58:15 +00:00
 * @netdev: network interface to be adjusted
2021-12-02 16:38:44 +00:00
 * @proto: VLAN TPID
2019-02-19 23:04:13 +00:00
 * @vid: VLAN ID to be added
2018-03-20 14:58:15 +00:00
 *
2019-02-19 23:04:13 +00:00
 * net_device_ops implementation for adding VLAN IDs
2018-03-20 14:58:15 +00:00
 */
2019-02-27 00:35:11 +00:00
static int
2021-12-02 16:38:44 +00:00
ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2018-03-20 14:58:15 +00:00
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
2021-12-02 16:38:46 +00:00
	struct ice_vsi_vlan_ops *vlan_ops;
2018-03-20 14:58:15 +00:00
	struct ice_vsi *vsi = np->vsi;
2021-12-02 16:38:42 +00:00
	struct ice_vlan vlan;
2019-02-27 00:35:14 +00:00
	int ret;
2018-03-20 14:58:15 +00:00
2020-01-22 15:21:24 +00:00
	/* VLAN 0 is added by default during load/reset */
	if (!vid)
		return 0;
2022-03-31 16:20:08 +00:00
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	/* Add multicast promisc rule for the VLAN ID to be added if
	 * all-multicast is currently enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					       ICE_MCAST_VLAN_PROMISC_BITS,
					       vid);
		if (ret)
			goto finish;
	}
2021-12-02 16:38:46 +00:00
	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
2018-08-09 13:29:56 +00:00
2020-01-22 15:21:24 +00:00
	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
	 * packets aren't pruned by the device's internal switch on Rx
2018-03-20 14:58:15 +00:00
	 */
2021-12-02 16:38:44 +00:00
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
2021-12-02 16:38:46 +00:00
	ret = vlan_ops->add_vlan(vsi, &vlan);
2022-03-31 16:20:08 +00:00
	if (ret)
		goto finish;

	/* If all-multicast is currently enabled and this VLAN ID is the only
	 * one besides VLAN-0 we have to update look-up type of multicast promisc
	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
	 */
	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_PROMISC_BITS, 0);
		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);
2019-02-27 00:35:14 +00:00
	return ret;
2018-03-20 14:58:15 +00:00
}
/**
2019-02-19 23:04:13 +00:00
 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
2018-03-20 14:58:15 +00:00
 * @netdev: network interface to be adjusted
2021-12-02 16:38:44 +00:00
 * @proto: VLAN TPID
2019-02-19 23:04:13 +00:00
 * @vid: VLAN ID to be removed
2018-03-20 14:58:15 +00:00
 *
2019-02-19 23:04:13 +00:00
 * net_device_ops implementation for removing VLAN IDs
2018-03-20 14:58:15 +00:00
 */
2019-02-27 00:35:11 +00:00
static int
2021-12-02 16:38:44 +00:00
ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2018-03-20 14:58:15 +00:00
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
2021-12-02 16:38:46 +00:00
	struct ice_vsi_vlan_ops *vlan_ops;
2018-03-20 14:58:15 +00:00
	struct ice_vsi *vsi = np->vsi;
2021-12-02 16:38:42 +00:00
	struct ice_vlan vlan;
2019-02-27 00:35:14 +00:00
	int ret;
2018-03-20 14:58:15 +00:00
2020-01-22 15:21:24 +00:00
	/* don't allow removal of VLAN 0 */
	if (!vid)
		return 0;
2022-03-31 16:20:08 +00:00
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);
2022-08-12 13:25:49 +00:00
	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
	if (ret) {
		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
			   vsi->vsi_num);
		vsi->current_netdev_flags |= IFF_ALLMULTI;
	}
2021-12-02 16:38:46 +00:00
	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
2021-12-02 16:38:41 +00:00
	/* Make sure VLAN delete is successful before updating VLAN
2018-08-09 13:29:56 +00:00
	 * information
2018-03-20 14:58:15 +00:00
	 */
2021-12-02 16:38:44 +00:00
	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
2021-12-02 16:38:46 +00:00
	ret = vlan_ops->del_vlan(vsi, &vlan);
2019-02-27 00:35:14 +00:00
	if (ret)
2022-03-31 16:20:08 +00:00
		goto finish;
2018-03-20 14:58:15 +00:00
2022-03-31 16:20:08 +00:00
	/* Remove multicast promisc rule for the removed VLAN ID if
	 * all-multicast is enabled.
	 */
	if (vsi->current_netdev_flags & IFF_ALLMULTI)
		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
					   ICE_MCAST_VLAN_PROMISC_BITS, vid);

	if (!ice_vsi_has_non_zero_vlans(vsi)) {
		/* Update look-up type of multicast promisc rule for VLAN 0
		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
		 */
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						   ICE_MCAST_VLAN_PROMISC_BITS,
						   0);
			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						 ICE_MCAST_PROMISC_BITS, 0);
		}
	}

finish:
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return ret;
2018-03-20 14:58:15 +00:00
}
2021-10-12 18:31:03 +00:00
/**
 * ice_rep_indr_tc_block_unbind
 * @cb_priv: indirection block private data
 */
static void ice_rep_indr_tc_block_unbind(void *cb_priv)
{
	struct ice_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

/**
 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
 * @vsi: VSI struct which has the netdev
 */
static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);

	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
				 ice_rep_indr_tc_block_unbind);
}

/**
 * ice_tc_indir_block_register - Register TC indirect block notifications
 * @vsi: VSI struct which has the netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_tc_indir_block_register(struct ice_vsi *vsi)
{
	struct ice_netdev_priv *np;

	if (!vsi || !vsi->netdev)
		return -EINVAL;

	np = netdev_priv(vsi->netdev);

	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
}
2018-03-20 14:58:10 +00:00
/**
2019-09-03 08:31:06 +00:00
 * ice_get_avail_q_count - Get count of available queues
 * @pf_qmap: bitmap to get queue use count from
 * @lock: pointer to a mutex that protects access to pf_qmap
 * @size: size of the bitmap
2018-03-20 14:58:10 +00:00
 */
2019-09-03 08:31:06 +00:00
static u16
ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
2018-03-20 14:58:10 +00:00
{
2020-05-08 00:41:05 +00:00
	unsigned long bit;
	u16 count = 0;
2018-03-20 14:58:10 +00:00
2019-09-03 08:31:06 +00:00
	mutex_lock(lock);
	for_each_clear_bit(bit, pf_qmap, size)
		count++;
	mutex_unlock(lock);
2018-03-20 14:58:10 +00:00
2019-09-03 08:31:06 +00:00
	return count;
}
2018-03-20 14:58:15 +00:00
2019-09-03 08:31:06 +00:00
/**
 * ice_get_avail_txq_count - Get count of available Tx queues
 * @pf: pointer to an ice_pf instance
 */
u16 ice_get_avail_txq_count(struct ice_pf *pf)
{
	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
				     pf->max_pf_txqs);
}
2018-03-20 14:58:10 +00:00
2019-09-03 08:31:06 +00:00
/**
 * ice_get_avail_rxq_count - Get count of available Rx queues
 * @pf: pointer to an ice_pf instance
 */
u16 ice_get_avail_rxq_count(struct ice_pf *pf)
{
	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
				     pf->max_pf_rxqs);
2018-03-20 14:58:10 +00:00
}
/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to de-initialize
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
2018-08-09 13:29:57 +00:00
	ice_service_task_stop(pf);
2023-06-20 22:18:46 +00:00
	mutex_destroy(&pf->lag_mutex);
2022-04-23 10:20:21 +00:00
	mutex_destroy(&pf->adev_mutex);
2018-03-20 14:58:10 +00:00
	mutex_destroy(&pf->sw_mutex);
2019-11-06 10:05:29 +00:00
	mutex_destroy(&pf->tc_mutex);
2018-03-20 14:58:10 +00:00
	mutex_destroy(&pf->avail_q_mutex);
ice: convert VF storage to hash table with krefs and RCU
The ice driver stores VF structures in a simple array which is allocated
once at the time of VF creation. The VF structures are then accessed
from the array by their VF ID. The ID must be between 0 and the number
of allocated VFs.
Multiple threads can access this table:
* .ndo operations such as .ndo_get_vf_cfg or .ndo_set_vf_trust
* interrupts, such as due to messages from the VF using the virtchnl
communication
* processing such as device reset
* commands to add or remove VFs
The current implementation does not keep track of when all threads are
done operating on a VF and can potentially result in use-after-free
issues caused by one thread accessing a VF structure after it has been
released when removing VFs. Some of these are prevented with various
state flags and checks.
In addition, this structure is quite static and does not support a
planned future where virtualization can be more dynamic. As we begin to
look at supporting Scalable IOV with the ice driver (as opposed to just
supporting Single Root IOV), this structure is not sufficient.
In the future, VFs will be able to be added and removed individually and
dynamically.
To allow for this, and to better protect against a whole class of
use-after-free bugs, replace the VF storage with a combination of a hash
table and krefs to reference track all of the accesses to VFs through
the hash table.
A hash table still allows efficient look up of the VF given its ID, but
also allows adding and removing VFs. It does not require contiguous VF
IDs.
The use of krefs allows the cleanup of the VF memory to be delayed until
after all threads have released their reference (by calling ice_put_vf).
To prevent corruption of the hash table, a combination of RCU and the
mutex table_lock are used. Addition and removal from the hash table use
the RCU-aware hash macros. This allows simple read-only look-ups that
iterate to locate a single VF to be fast using RCU. Accesses which
modify the hash table, or which can't take RCU because they sleep, will
hold the mutex lock.
By using this design, we have a stronger guarantee that the VF structure
can't be released until after all threads are finished operating on it.
We also pave the way for the more dynamic Scalable IOV implementation in
the future.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
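A sketch of the read side (illustrative only; the real look-up helper is ice_get_vf_by_id() and field names may differ) shows how RCU and kref_get_unless_zero() cooperate:

/* Illustrative only: RCU look-up that takes a reference on the VF. */
static struct ice_vf *example_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf;

	rcu_read_lock();
	hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
		if (vf->vf_id != vf_id)
			continue;
		/* refuse to hand out a VF whose last reference is gone */
		if (!kref_get_unless_zero(&vf->refcnt))
			vf = NULL;
		rcu_read_unlock();
		return vf;
	}
	rcu_read_unlock();

	return NULL;
}

The matching put helper drops the kref, and the release callback frees the VF memory only after every such reference has been returned.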
2022-02-16 21:37:38 +00:00
	mutex_destroy(&pf->vfs.table_lock);
2019-08-02 08:25:21 +00:00
	if (pf->avail_txqs) {
		bitmap_free(pf->avail_txqs);
		pf->avail_txqs = NULL;
	}

	if (pf->avail_rxqs) {
		bitmap_free(pf->avail_rxqs);
		pf->avail_rxqs = NULL;
	}
2021-06-09 16:39:50 +00:00
	if (pf->ptp.clock)
		ptp_clock_unregister(pf->ptp.clock);
2018-03-20 14:58:10 +00:00
}
/**
2019-09-09 13:47:46 +00:00
 * ice_set_pf_caps - set PF's capability flags
 * @pf: pointer to the PF instance
2018-03-20 14:58:10 +00:00
 */
2019-09-09 13:47:46 +00:00
static void ice_set_pf_caps(struct ice_pf *pf)
2018-03-20 14:58:10 +00:00
{
2019-09-09 13:47:46 +00:00
	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
2021-05-20 14:37:49 +00:00
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
2022-02-11 18:26:03 +00:00
	if (func_caps->common_cap.rdma)
2021-05-20 14:37:49 +00:00
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
2019-09-09 13:47:46 +00:00
	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
	if (func_caps->common_cap.dcb)
2019-09-03 08:31:02 +00:00
		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
2019-09-09 13:47:46 +00:00
	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
	if (func_caps->common_cap.sr_iov_1_1) {
2018-09-20 00:42:54 +00:00
		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
2022-02-16 21:37:36 +00:00
		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
2022-02-23 00:26:53 +00:00
					      ICE_MAX_SRIOV_VFS);
2018-09-20 00:42:54 +00:00
	}
2019-09-09 13:47:46 +00:00
	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
	if (func_caps->common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
2018-03-20 14:58:10 +00:00
2020-05-12 01:01:40 +00:00
	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
		u16 unused;

		/* ctrl_vsi_idx will be set to a valid value when flow director
		 * is setup by ice_init_fdir
		 */
		pf->ctrl_vsi_idx = ICE_NO_VSI;
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		/* force guaranteed filter pool for PF */
		ice_alloc_fd_guar_item(&pf->hw, &unused,
				       func_caps->fd_fltr_guar);
		/* force shared filter pool for PF */
		ice_alloc_fd_shrd_item(&pf->hw, &unused,
				       func_caps->fd_fltr_best_effort);
	}
2021-06-09 16:39:50 +00:00
	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
2023-10-25 21:41:52 +00:00
	if (func_caps->common_cap.ieee_1588 &&
	    !(pf->hw.mac_type == ICE_MAC_E830))
2021-06-09 16:39:50 +00:00
		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
2019-09-09 13:47:46 +00:00
	pf->max_pf_txqs = func_caps->common_cap.num_txq;
	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
2018-03-20 14:58:10 +00:00
2019-09-09 13:47:46 +00:00
/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static int ice_init_pf(struct ice_pf *pf)
{
	ice_set_pf_caps(pf);

	mutex_init(&pf->sw_mutex);
2019-11-06 10:05:29 +00:00
	mutex_init(&pf->tc_mutex);
2022-04-23 10:20:21 +00:00
	mutex_init(&pf->adev_mutex);
2023-06-20 22:18:46 +00:00
	mutex_init(&pf->lag_mutex);
2018-03-20 14:58:15 +00:00
ice: implement device flash update via devlink
Use the newly added pldmfw library to implement device flash update for
the Intel ice networking device driver. This support uses the devlink
flash update interface.
The main parts of the flash include the Option ROM, the netlist module,
and the main NVM data. The PLDM firmware file contains modules for each
of these components.
Using the pldmfw library, the provided firmware file will be scanned for
the three major components, "fw.undi" for the Option ROM, "fw.mgmt" for
the main NVM module containing the primary device firmware, and
"fw.netlist" containing the netlist module.
The flash is separated into two banks, the active bank containing the
running firmware, and the inactive bank which we use for update. Each
module is updated in a staged process. First, the inactive bank is
erased, preparing the device for update. Second, the contents of the
component are copied to the inactive portion of the flash. After all
components are updated, the driver signals the device to switch the
active bank during the next EMP reset (which would usually occur during
the next reboot).
Although the firmware AdminQ interface does report an immediate status
for each command, the NVM erase and NVM write commands receive status
asynchronously. The driver must not continue writing until previous
erase and write commands have finished. The real status of the NVM
commands is returned over the receive AdminQ. Implement a simple
interface that uses a wait queue so that the main update thread can
sleep until the completion status is reported by firmware. For erasing
the inactive banks, this can take quite a while in practice.
To help visualize the process to the devlink application and other
applications based on the devlink netlink interface, status is reported
via the devlink_flash_update_status_notify. While we do report status
after each 4k block when writing, there is no real status we can report
during erasing. We simply must wait for the complete module erasure to
finish.
With this implementation, basic flash update for the ice hardware is
supported.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
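The wait-queue pattern described above (illustrative only; the driver's real plumbing pairs aq_wait_list bookkeeping with the wake-up) boils down to sleeping until the AdminQ receive path flags the completion:

/* Illustrative only: block until firmware reports the NVM op finished.
 * *done is set by the AdminQ receive handler, which then calls
 * wake_up(&pf->aq_wait_queue).
 */
static int example_wait_aq_event(struct ice_pf *pf, bool *done,
				 unsigned long timeout_ms)
{
	long ret;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, *done,
					       msecs_to_jiffies(timeout_ms));
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	if (!ret)
		return -ETIMEDOUT;	/* firmware never answered */

	return 0;
}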
2020-07-24 00:22:03 +00:00
	INIT_HLIST_HEAD(&pf->aq_wait_list);
	spin_lock_init(&pf->aq_wait_lock);
	init_waitqueue_head(&pf->aq_wait_queue);
2021-05-06 15:39:59 +00:00
	init_waitqueue_head(&pf->reset_wait_queue);
2018-03-20 14:58:10 +00:00
	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
2021-03-02 18:15:38 +00:00
	clear_bit(ICE_SERVICE_SCHED, pf->state);
2019-08-02 08:25:21 +00:00
2019-09-09 13:47:46 +00:00
	mutex_init(&pf->avail_q_mutex);
2019-08-02 08:25:21 +00:00
	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
	if (!pf->avail_txqs)
		return -ENOMEM;

	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
	if (!pf->avail_rxqs) {
2022-08-17 08:53:20 +00:00
		bitmap_free(pf->avail_txqs);
2019-08-02 08:25:21 +00:00
		pf->avail_txqs = NULL;
		return -ENOMEM;
	}
2022-02-16 21:37:38 +00:00
	mutex_init(&pf->vfs.table_lock);
	hash_init(pf->vfs.table);
2023-02-22 17:09:13 +00:00
	ice_mbx_init_snapshot(&pf->hw);
2022-02-16 21:37:38 +00:00
2019-08-02 08:25:21 +00:00
	return 0;
2018-03-20 14:58:10 +00:00
}
2020-07-09 16:16:03 +00:00
/**
2021-02-26 21:19:30 +00:00
 * ice_is_wol_supported - check if WoL is supported
 * @hw: pointer to hardware info
2020-07-09 16:16:03 +00:00
 *
 * Check if WoL is supported based on the HW configuration.
 * Returns true if NVM supports and enables WoL for this port, false otherwise
 */
2021-02-26 21:19:30 +00:00
bool ice_is_wol_supported(struct ice_hw *hw)
2020-07-09 16:16:03 +00:00
{
	u16 wol_ctrl;

	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
	 * word) indicates WoL is not supported on the corresponding PF ID.
	 */
	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
		return false;

2021-02-26 21:19:30 +00:00
	return !(BIT(hw->port_info->lport) & wol_ctrl);
2020-07-09 16:16:03 +00:00
}
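A hedged usage sketch: a caller such as an ethtool get_wol-style handler
might gate what it reports on this check (struct ethtool_wolinfo is the
kernel's; the surrounding handler context is illustrative, not the
driver's exact code).

	if (!ice_is_wol_supported(&pf->hw)) {
		wol->supported = 0;
		wol->wolopts = 0;
	}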
2019-11-08 14:23:29 +00:00
/**
 * ice_vsi_recfg_qs - Change the number of queues on a VSI
 * @vsi: VSI being changed
 * @new_rx: new number of Rx queues
 * @new_tx: new number of Tx queues
2023-01-24 17:19:43 +00:00
 * @locked: is adev device_lock held
2019-11-08 14:23:29 +00:00
 *
 * Only change the number of queues if new_tx, or new_rx is non-0.
 *
 * Returns 0 on success.
 */
2023-01-24 17:19:43 +00:00
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
2019-11-08 14:23:29 +00:00
{
	struct ice_pf *pf = vsi->back;
	int err = 0, timeout = 50;

	if (!new_rx && !new_tx)
		return -EINVAL;

2021-03-02 18:15:38 +00:00
	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
2019-11-08 14:23:29 +00:00
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	if (new_tx)
2020-05-08 00:41:05 +00:00
		vsi->req_txq = (u16)new_tx;
2019-11-08 14:23:29 +00:00
	if (new_rx)
2020-05-08 00:41:05 +00:00
		vsi->req_rxq = (u16)new_rx;

2019-11-08 14:23:29 +00:00
	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
2022-12-21 11:38:16 +00:00
		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
2019-11-08 14:23:29 +00:00
		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
		goto done;
	}

	ice_vsi_close(vsi);
2022-12-21 11:38:16 +00:00
	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
2023-01-24 17:19:43 +00:00
	ice_pf_dcb_recfg(pf, locked);
2019-11-08 14:23:29 +00:00
	ice_vsi_open(vsi);
done:
2021-03-02 18:15:38 +00:00
	clear_bit(ICE_CFG_BUSY, pf->state);
2019-11-08 14:23:29 +00:00
	return err;
}
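A hedged caller sketch: an ethtool set_channels-style path might invoke
this without holding the auxiliary device lock (the queue counts here are
illustrative).

	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	int err = -EINVAL;

	if (vsi)
		err = ice_vsi_recfg_qs(vsi, 16, 16, false);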
2020-07-13 20:53:14 +00:00
/**
 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
 * @pf: PF to configure
 *
 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
 * VSI can still Tx/Rx VLAN tagged packets.
 */
static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);
	struct ice_vsi_ctx *ctxt;
	struct ice_hw *hw;
2021-10-07 22:59:03 +00:00
	int status;

2020-07-13 20:53:14 +00:00
	if (!vsi)
		return;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	hw = &pf->hw;
	ctxt->info = vsi->info;

	ctxt->info.valid_sections =
		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
			    ICE_AQ_VSI_PROP_SECURITY_VALID |
			    ICE_AQ_VSI_PROP_SW_VALID);

	/* disable VLAN anti-spoof */
	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);

	/* disable VLAN pruning and keep all other settings */
	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	/* allow all VLANs on Tx and don't strip on Rx */
2021-12-02 16:38:45 +00:00
	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;

2020-07-13 20:53:14 +00:00
	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
2021-10-07 22:56:02 +00:00
		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
2021-10-07 22:59:03 +00:00
			status, ice_aq_str(hw->adminq.sq_last_status));
2020-07-13 20:53:14 +00:00
	} else {
		vsi->info.sec_flags = ctxt->info.sec_flags;
		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
2021-12-02 16:38:45 +00:00
		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
2020-07-13 20:53:14 +00:00
	}

	kfree(ctxt);
}
2019-09-09 13:47:46 +00:00
/**
 * ice_log_pkg_init - log result of DDP package load
 * @hw: pointer to hardware info
2021-10-07 22:54:37 +00:00
 * @state: state of package load
2019-09-09 13:47:46 +00:00
 */
2021-10-07 22:54:37 +00:00
static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
2019-09-09 13:47:46 +00:00
{
2021-10-07 22:54:37 +00:00
	struct ice_pf *pf = hw->back;
	struct device *dev;

2019-09-09 13:47:46 +00:00
2021-10-07 22:54:37 +00:00
	dev = ice_pf_to_dev(pf);
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft);
		break;
	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
		dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
			hw->active_pkg_name,
			hw->active_pkg_ver.major,
			hw->active_pkg_ver.minor,
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
			 hw->active_pkg_name,
			 hw->active_pkg_ver.major,
			 hw->active_pkg_ver.minor,
			 hw->active_pkg_ver.update,
			 hw->active_pkg_ver.draft,
			 hw->pkg_name,
			 hw->pkg_ver.major,
			 hw->pkg_ver.minor,
			 hw->pkg_ver.update,
			 hw->pkg_ver.draft);
2019-09-09 13:47:46 +00:00
		break;
2021-10-07 22:54:37 +00:00
	case ICE_DDP_PKG_FW_MISMATCH:
2020-05-16 00:36:34 +00:00
		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
		break;
2021-10-07 22:54:37 +00:00
	case ICE_DDP_PKG_INVALID_FILE:
2020-02-06 09:20:10 +00:00
		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
2019-09-09 13:47:46 +00:00
		break;
2021-10-07 22:54:37 +00:00
	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
		dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
2019-09-09 13:47:46 +00:00
		break;
2021-10-07 22:54:37 +00:00
	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
		dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
		break;
	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
		dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
		break;
	case ICE_DDP_PKG_LOAD_ERROR:
		dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
2021-12-21 23:05:38 +00:00
		/* poll for reset to complete */
		if (ice_check_reset(hw))
			dev_err(dev, "Error resetting device. Please reload the driver\n");
2019-09-09 13:47:46 +00:00
		break;
2021-10-07 22:54:37 +00:00
	case ICE_DDP_PKG_ERR:
	default:
		dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
2021-12-21 23:05:38 +00:00
		break;
2019-09-09 13:47:46 +00:00
	}
}
/**
 * ice_load_pkg - load/reload the DDP Package file
 * @firmware: firmware structure when firmware requested or NULL for reload
 * @pf: pointer to the PF instance
 *
 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
 * initialize HW tables.
 */
static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
2021-10-07 22:54:37 +00:00
	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
2019-11-08 14:23:26 +00:00
	struct device *dev = ice_pf_to_dev(pf);
2019-09-09 13:47:46 +00:00
	struct ice_hw *hw = &pf->hw;

	/* Load DDP Package */
	if (firmware && !hw->pkg_copy) {
2021-10-07 22:54:37 +00:00
		state = ice_copy_and_init_pkg(hw, firmware->data,
					      firmware->size);
		ice_log_pkg_init(hw, state);
2019-09-09 13:47:46 +00:00
	} else if (!firmware && hw->pkg_copy) {
		/* Reload package during rebuild after CORER/GLOBR reset */
2021-10-07 22:54:37 +00:00
		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
		ice_log_pkg_init(hw, state);
2019-09-09 13:47:46 +00:00
	} else {
2020-02-06 09:20:10 +00:00
		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
2019-09-09 13:47:46 +00:00
	}

2021-10-07 22:54:37 +00:00
	if (!ice_is_init_pkg_successful(state)) {
2019-09-09 13:47:46 +00:00
		/* Safe Mode */
		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
		return;
	}

	/* Successful download package is the precondition for advanced
	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
	 */
	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}
ice: Fix tx_timeout in PF driver
Prior to this commit the driver was running into tx_timeouts when a
queue was stressed enough. This was happening because the HW tail
and SW tail (NTU) were incorrectly out of sync. Consequently this was
causing the HW head to collide with the HW tail, which to the hardware
means that all descriptors posted for Tx have been processed.
Due to the Tx logic used in the driver SW tail and HW tail are allowed
to be out of sync. This is done as an optimization because it allows the
driver to write HW tail as infrequently as possible, while still
updating the SW tail index to keep track. However, there are situations
where this results in the tail never getting updated, resulting in Tx
timeouts.
Tx HW tail write condition:
if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
	writel(sw_tail, tx_ring->tail);
An issue was found in the Tx logic that was causing the aforementioned
condition for updating HW tail to never happen, causing tx_timeouts.
In ice_xmit_frame_ring we calculate how many descriptors we need for the
Tx transaction based on the skb the kernel hands us. This is then passed
into ice_maybe_stop_tx along with some extra padding to determine if we
have enough descriptors available for this transaction. If we don't then
we return -EBUSY to the stack, otherwise we move on and eventually
prepare the Tx descriptors accordingly in ice_tx_map and set
next_to_watch. In ice_tx_map we make another call to ice_maybe_stop_tx
with a value of MAX_SKB_FRAGS + 4. The key here is that this value is
possibly less than the value we sent in the first call to
ice_maybe_stop_tx in ice_xmit_frame_ring. Now, if the number of unused
descriptors is between MAX_SKB_FRAGS + 4 and the value used in the first
call to ice_maybe_stop_tx in ice_xmit_frame_ring then we do not update
the HW tail because of the "Tx HW tail write condition" above. This is
because ice_maybe_stop_tx returns success instead of calling
__ice_maybe_stop_tx and subsequently calling netif_stop_subqueue, which
sets the __QUEUE_STATE_DEV_XOFF bit. This bit is then checked in the
"Tx HW tail write condition" by calling netif_xmit_stopped and
subsequently updating HW tail if the aforementioned bit is set.
In ice_clean_tx_irq, if next_to_watch is not NULL, we end up cleaning
the descriptors that HW sets the DD bit on and we have the budget. The
HW head will eventually run into the HW tail in response to the
description in the paragraph above.
The next time through ice_xmit_frame_ring we make the initial call to
ice_maybe_stop_tx with another skb from the stack. This time we do not
have enough descriptors available and we return NETDEV_TX_BUSY to the
stack and end up setting next_to_watch to NULL.
This is where we are stuck. In ice_clean_tx_irq we never clean anything
because next_to_watch is always NULL and in ice_xmit_frame_ring we never
update HW tail because we already return NETDEV_TX_BUSY to the stack and
eventually we hit a tx_timeout.
This issue was fixed by making sure that the second call to
ice_maybe_stop_tx in ice_tx_map is passed a value that is >= the value
that was used on the initial call to ice_maybe_stop_tx in
ice_xmit_frame_ring. This was done by adding the following defines to
make the logic more clear and to reduce the chance of mucking this up
again:
ICE_CACHE_LINE_BYTES 64
ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
sizeof(struct ice_tx_desc))
ICE_DESCS_FOR_CTX_DESC 1
ICE_DESCS_FOR_SKB_DATA_PTR 1
The ICE_CACHE_LINE_BYTES being 64 is an assumption being made so we
don't have to figure this out on every pass through the Tx path. Instead
I added a sanity check in ice_probe to verify cache line size and print
a message if it's not 64 Bytes. This will make it easier to file issues
if they are seen when the cache line size is not 64 Bytes when reading
from the GLPCI_CNF2 register.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
2018-10-26 17:40:58 +00:00
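To make that bound concrete, a hedged sketch of the worst-case descriptor
count built from the defines quoted above; DESC_NEEDED is an assumed name
for illustration, and the driver's exact expression may differ:

#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)

Passing a value at least this large to both ice_maybe_stop_tx call sites
guarantees the second check can never succeed where the first would have
failed, which is what prevents the missed tail write described in the
message.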
/**
 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
 * @pf: pointer to the PF structure
 *
 * There is no error returned here because the driver should be able to handle
 * 128 Byte cache lines, so we only print a warning in case issues are seen,
 * specifically with Tx.
 */
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
2020-02-06 09:20:10 +00:00
		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
2018-10-26 17:40:58 +00:00
			 ICE_CACHE_LINE_BYTES);
}
2019-09-09 13:47:42 +00:00
/**
 * ice_send_version - update firmware with driver version
 * @pf: PF struct
 *
2021-10-07 22:58:01 +00:00
 * Returns 0 on success, else error code
2019-09-09 13:47:42 +00:00
 */
2021-10-07 22:56:57 +00:00
static int ice_send_version(struct ice_pf *pf)
2019-09-09 13:47:42 +00:00
{
	struct ice_driver_ver dv;

2020-05-29 07:18:33 +00:00
	dv.major_ver = 0xff;
	dv.minor_ver = 0xff;
	dv.build_ver = 0xff;
2019-09-09 13:47:42 +00:00
	dv.subbuild_ver = 0;
2020-05-29 07:18:33 +00:00
	strscpy((char *)dv.driver_string, UTS_RELEASE,
2019-09-09 13:47:42 +00:00
		sizeof(dv.driver_string));
	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
}
2020-05-12 01:01:40 +00:00
/**
 * ice_init_fdir - Initialize flow director VSI and configuration
 * @pf: pointer to the PF instance
 *
 * returns 0 on success, negative on error
 */
static int ice_init_fdir(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *ctrl_vsi;
	int err;

	/* Side Band Flow Director needs to have a control VSI.
	 * Allocate it and store it in the PF.
	 */
	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
	if (!ctrl_vsi) {
		dev_dbg(dev, "could not create control VSI\n");
		return -ENOMEM;
	}

	err = ice_vsi_open_ctrl(ctrl_vsi);
	if (err) {
		dev_dbg(dev, "could not open control VSI\n");
		goto err_vsi_open;
	}

	mutex_init(&pf->hw.fdir_fltr_lock);

	err = ice_fdir_create_dflt_rules(pf);
	if (err)
		goto err_fdir_rule;

	return 0;

err_fdir_rule:
	ice_fdir_release_flows(&pf->hw);
	ice_vsi_close(ctrl_vsi);
err_vsi_open:
	ice_vsi_release(ctrl_vsi);

	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[pf->ctrl_vsi_idx] = NULL;
		pf->ctrl_vsi_idx = ICE_NO_VSI;
	}
	return err;
}
2022-12-21 11:38:18 +00:00
static void ice_deinit_fdir(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);

	if (!vsi)
		return;

	ice_vsi_manage_fdir(vsi, false);
	ice_vsi_release(vsi);
	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
		pf->vsi[pf->ctrl_vsi_idx] = NULL;
		pf->ctrl_vsi_idx = ICE_NO_VSI;
	}

	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
}
2019-09-09 13:47:46 +00:00
/**
 * ice_get_opt_fw_name - return optional firmware file name or NULL
 * @pf: pointer to the PF instance
 */
static char *ice_get_opt_fw_name(struct ice_pf *pf)
{
	/* Optional firmware name same as default with additional dash
	 * followed by an EUI-64 identifier (PCIe Device Serial Number)
	 */
	struct pci_dev *pdev = pf->pdev;
2020-03-03 02:25:03 +00:00
	char *opt_fw_filename;
	u64 dsn;

2019-09-09 13:47:46 +00:00
	/* Determine the name of the optional file using the DSN (two
	 * dwords following the start of the DSN Capability).
	 */
2020-03-03 02:25:03 +00:00
	dsn = pci_get_dsn(pdev);
	if (!dsn)
		return NULL;

	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
	if (!opt_fw_filename)
		return NULL;

2020-05-16 00:55:03 +00:00
	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
2020-03-03 02:25:03 +00:00
		 ICE_DDP_PKG_PATH, dsn);

2019-09-09 13:47:46 +00:00
	return opt_fw_filename;
}
/**
 * ice_request_fw - Device initialization routine
 * @pf: pointer to the PF instance
 */
static void ice_request_fw(struct ice_pf *pf)
{
	char *opt_fw_filename = ice_get_opt_fw_name(pf);
	const struct firmware *firmware = NULL;
2019-11-08 14:23:26 +00:00
	struct device *dev = ice_pf_to_dev(pf);
2019-09-09 13:47:46 +00:00
	int err = 0;

	/* optional device-specific DDP (if present) overrides the default DDP
	 * package file. kernel logs a debug message if the file doesn't exist,
	 * and warning messages for other errors.
	 */
	if (opt_fw_filename) {
		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
		if (err) {
			kfree(opt_fw_filename);
			goto dflt_pkg_load;
		}

		/* request for firmware was successful. Download to device */
		ice_load_pkg(firmware, pf);
		kfree(opt_fw_filename);
		release_firmware(firmware);
		return;
	}

dflt_pkg_load:
	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
	if (err) {
2020-02-06 09:20:10 +00:00
		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
2019-09-09 13:47:46 +00:00
		return;
	}

	/* request for firmware was successful. Download to device */
	ice_load_pkg(firmware, pf);
	release_firmware(firmware);
}
2020-07-09 16:16:03 +00:00
/**
 * ice_print_wake_reason - show the wake up cause in the log
 * @pf: pointer to the PF struct
 */
static void ice_print_wake_reason(struct ice_pf *pf)
{
	u32 wus = pf->wakeup_reason;
	const char *wake_str;

	/* if no wake event, nothing to print */
	if (!wus)
		return;

	if (wus & PFPM_WUS_LNKC_M)
		wake_str = "Link\n";
	else if (wus & PFPM_WUS_MAG_M)
		wake_str = "Magic Packet\n";
	else if (wus & PFPM_WUS_MNG_M)
		wake_str = "Management\n";
	else if (wus & PFPM_WUS_FW_RST_WK_M)
		wake_str = "Firmware Reset\n";
	else
		wake_str = "Unknown\n";

	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
}
2023-12-13 05:07:12 +00:00
/**
 * ice_pf_fwlog_update_module - update 1 module
 * @pf: pointer to the PF struct
 * @log_level: log_level to use for the @module
 * @module: module to update
 */
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
{
	struct ice_hw *hw = &pf->hw;

	hw->fwlog_cfg.module_entries[module].log_level = log_level;
}
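A hedged usage sketch: raising every firmware-log module to one level in
a single pass. ICE_AQC_FW_LOG_ID_MAX and ICE_FWLOG_LEVEL_VERBOSE are the
driver's fwlog constants, assumed here for illustration.

	int i;

	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
		ice_pf_fwlog_update_module(pf, ICE_FWLOG_LEVEL_VERBOSE, i);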
2021-03-02 18:12:03 +00:00
/**
2023-01-24 00:57:14 +00:00
 * ice_register_netdev - register netdev
2022-12-21 11:38:18 +00:00
 * @vsi: pointer to the VSI struct
2021-03-02 18:12:03 +00:00
 */
2022-12-21 11:38:18 +00:00
static int ice_register_netdev(struct ice_vsi *vsi)
2021-03-02 18:12:03 +00:00
{
2022-12-21 11:38:18 +00:00
	int err;

2021-03-02 18:12:03 +00:00
	if (!vsi || !vsi->netdev)
		return -EIO;

	err = register_netdev(vsi->netdev);
	if (err)
2022-12-21 11:38:18 +00:00
		return err;

2021-03-02 18:12:03 +00:00
2021-03-02 18:15:41 +00:00
	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
2021-03-02 18:12:03 +00:00
	netif_carrier_off(vsi->netdev);
	netif_tx_stop_all_queues(vsi->netdev);

	return 0;
2022-12-21 11:38:18 +00:00
}

static void ice_unregister_netdev(struct ice_vsi *vsi)
{
	if (!vsi || !vsi->netdev)
		return;

	unregister_netdev(vsi->netdev);
	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
2021-03-02 18:12:03 +00:00
}
2018-03-20 14:58:05 +00:00
/**
2022-12-21 11:38:18 +00:00
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
2018-03-20 14:58:05 +00:00
 *
2022-12-21 11:38:18 +00:00
 * Returns 0 on success, negative value on failure
2018-03-20 14:58:05 +00:00
 */
2022-12-21 11:38:18 +00:00
static int ice_cfg_netdev(struct ice_vsi *vsi)
2018-03-20 14:58:05 +00:00
{
2022-12-21 11:38:18 +00:00
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];

2018-03-20 14:58:05 +00:00
2022-12-21 11:38:18 +00:00
	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
				    vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	ice_set_netdev_features(netdev);
2023-02-14 14:39:27 +00:00
	ice_set_ops(vsi);
2022-12-21 11:38:18 +00:00

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
		eth_hw_addr_set(netdev, mac_addr);
2021-07-28 19:39:10 +00:00
	}

2022-12-21 11:38:18 +00:00
	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Setup netdev TC information */
	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);

	netdev->max_mtu = ICE_MAX_MTU;

	return 0;
}

static void ice_decfg_netdev(struct ice_vsi *vsi)
{
	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
	free_netdev(vsi->netdev);
	vsi->netdev = NULL;
}
2023-07-13 13:21:24 +00:00
/**
 * ice_wait_for_fw - wait for full FW readiness
 * @hw: pointer to the hardware structure
 * @timeout: milliseconds that can elapse before timing out
 */
static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
{
	int fw_loading;
	u32 elapsed = 0;

	while (elapsed <= timeout) {
		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;

		/* firmware was not yet loaded, we have to wait more */
		if (fw_loading) {
			elapsed += 100;
			msleep(100);
			continue;
		}
		return 0;
	}

	return -ETIMEDOUT;
}
2024-02-05 13:03:56 +00:00
int ice_init_dev(struct ice_pf *pf)
2022-12-21 11:38:18 +00:00
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int err;

	err = ice_init_hw(hw);
	if (err) {
		dev_err(dev, "ice_init_hw failed: %d\n", err);
		return err;
	}

2023-07-13 13:21:24 +00:00
	/* Some cards require longer initialization times
	 * due to necessity of loading FW from an external source.
	 * This can take even half a minute.
	 */
	if (ice_is_pf_c827(hw)) {
		err = ice_wait_for_fw(hw, 30000);
		if (err) {
			dev_err(dev, "ice_wait_for_fw timed out");
			return err;
		}
	}

2022-12-21 11:38:18 +00:00
	ice_init_feature_support(pf);

	ice_request_fw(pf);
2019-09-09 13:47:46 +00:00
	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
	 * set in pf->flags, which will cause ice_is_safe_mode to return
	 * true
	 */
	if (ice_is_safe_mode(pf)) {
		/* we already got function/device capabilities but these don't
		 * reflect what the driver needs to do in safe mode. Instead of
		 * adding conditional logic everywhere to ignore these
		 * device/function capabilities, override them.
		 */
		ice_set_safe_mode_caps(hw);
	}
2019-08-02 08:25:21 +00:00
	err = ice_init_pf(pf);
	if (err) {
		dev_err(dev, "ice_init_pf failed: %d\n", err);
2022-12-21 11:38:18 +00:00
		goto err_init_pf;
2019-08-02 08:25:21 +00:00
	}

2018-03-20 14:58:10 +00:00
2020-09-26 00:56:46 +00:00
	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
2022-12-21 11:38:18 +00:00
		pf->hw.udp_tunnel_nic.tables[0].n_entries =
2020-09-26 00:56:46 +00:00
			pf->hw.tnl.valid_count[TNL_VXLAN];
2022-12-21 11:38:18 +00:00
		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
2020-09-26 00:56:46 +00:00
			UDP_TUNNEL_TYPE_VXLAN;
	}
	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
2022-12-21 11:38:18 +00:00
		pf->hw.udp_tunnel_nic.tables[1].n_entries =
2020-09-26 00:56:46 +00:00
			pf->hw.tnl.valid_count[TNL_GENEVE];
2022-12-21 11:38:18 +00:00
		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
2020-09-26 00:56:46 +00:00
			UDP_TUNNEL_TYPE_GENEVE;
ice: Accumulate ring statistics over reset
Resets may occur with or without user interaction. For example, a TX hang
or reconfiguration of parameters will result in a reset. During reset, the
VSI is freed, freeing any statistics structures inside as well. This would
create an issue for the user where a reset happens in the background,
statistics are reset to zero, and the user then checks ring statistics
expecting them to be populated.
To ensure this doesn't happen, accumulate ring statistics over reset.
Define a new ring statistics structure, ice_ring_stats. The new structure
lives in the VSI's parent, preserving ring statistics when VSI is freed.
1. Define a new structure vsi_ring_stats in the PF scope
2. Allocate/free stats only during probe, unload, or change in ring size
3. Replace previous ring statistics functionality with new structure
Signed-off-by: Benjamin Mikailenko <benjamin.mikailenko@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
2022-11-18 21:20:02 +00:00
	}

2018-03-20 14:58:10 +00:00
	err = ice_init_interrupt_scheme(pf);
	if (err) {
2019-02-08 20:50:50 +00:00
		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
2018-03-20 14:58:10 +00:00
		err = -EIO;
2022-12-21 11:38:18 +00:00
		goto err_init_interrupt_scheme;
2018-03-20 14:58:10 +00:00
	}

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
2019-06-26 09:20:25 +00:00
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "setup of misc vector failed: %d\n", err);
2022-12-21 11:38:18 +00:00
		goto err_req_irq_msix_misc;
2018-03-20 14:58:10 +00:00
	}

2022-12-21 11:38:18 +00:00
	return 0;

2018-03-20 14:58:10 +00:00
2022-12-21 11:38:18 +00:00
err_req_irq_msix_misc:
	ice_clear_interrupt_scheme(pf);
err_init_interrupt_scheme:
	ice_deinit_pf(pf);
err_init_pf:
	ice_deinit_hw(hw);
	return err;
}
2018-08-09 13:29:54 +00:00
2024-02-05 13:03:56 +00:00
void ice_deinit_dev(struct ice_pf *pf)
2022-12-21 11:38:18 +00:00
{
	ice_free_irq_msix_misc(pf);
	ice_deinit_pf(pf);
	ice_deinit_hw(&pf->hw);
ice: Fix ice module unload
Clearing the interrupt scheme before a PFR reset,
during the removal routine, could cause hardware
errors and possibly lead to a system reboot, as the PF
reset can cause an interrupt to be generated.
Place the call for the PFR reset inside ice_deinit_dev(),
wait until the reset and all pending transactions are done,
then call ice_clear_interrupt_scheme().
This introduces a PFR reset to multiple error paths.
Additionally, remove the call for the reset from
ice_load() - it will be a part of ice_unload() now.
Error example:
[ 75.229328] ice 0000:ca:00.1: Failed to read Tx Scheduler Tree - User Selection data from flash
[ 77.571315] {1}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1
[ 77.571418] {1}[Hardware Error]: event severity: recoverable
[ 77.571459] {1}[Hardware Error]: Error 0, type: recoverable
[ 77.571500] {1}[Hardware Error]: section_type: PCIe error
[ 77.571540] {1}[Hardware Error]: port_type: 4, root port
[ 77.571580] {1}[Hardware Error]: version: 3.0
[ 77.571615] {1}[Hardware Error]: command: 0x0547, status: 0x4010
[ 77.571661] {1}[Hardware Error]: device_id: 0000:c9:02.0
[ 77.571703] {1}[Hardware Error]: slot: 25
[ 77.571736] {1}[Hardware Error]: secondary_bus: 0xca
[ 77.571773] {1}[Hardware Error]: vendor_id: 0x8086, device_id: 0x347a
[ 77.571821] {1}[Hardware Error]: class_code: 060400
[ 77.571858] {1}[Hardware Error]: bridge: secondary_status: 0x2800, control: 0x0013
[ 77.572490] pcieport 0000:c9:02.0: AER: aer_status: 0x00200000, aer_mask: 0x00100020
[ 77.572870] pcieport 0000:c9:02.0: [21] ACSViol (First)
[ 77.573222] pcieport 0000:c9:02.0: AER: aer_layer=Transaction Layer, aer_agent=Receiver ID
[ 77.573554] pcieport 0000:c9:02.0: AER: aer_uncor_severity: 0x00463010
[ 77.691273] {2}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1
[ 77.691738] {2}[Hardware Error]: event severity: recoverable
[ 77.691971] {2}[Hardware Error]: Error 0, type: recoverable
[ 77.692192] {2}[Hardware Error]: section_type: PCIe error
[ 77.692403] {2}[Hardware Error]: port_type: 4, root port
[ 77.692616] {2}[Hardware Error]: version: 3.0
[ 77.692825] {2}[Hardware Error]: command: 0x0547, status: 0x4010
[ 77.693032] {2}[Hardware Error]: device_id: 0000:c9:02.0
[ 77.693238] {2}[Hardware Error]: slot: 25
[ 77.693440] {2}[Hardware Error]: secondary_bus: 0xca
[ 77.693641] {2}[Hardware Error]: vendor_id: 0x8086, device_id: 0x347a
[ 77.693853] {2}[Hardware Error]: class_code: 060400
[ 77.694054] {2}[Hardware Error]: bridge: secondary_status: 0x0800, control: 0x0013
[ 77.719115] pci 0000:ca:00.1: AER: can't recover (no error_detected callback)
[ 77.719140] pcieport 0000:c9:02.0: AER: device recovery failed
[ 77.719216] pcieport 0000:c9:02.0: AER: aer_status: 0x00200000, aer_mask: 0x00100020
[ 77.719390] pcieport 0000:c9:02.0: [21] ACSViol (First)
[ 77.719557] pcieport 0000:c9:02.0: AER: aer_layer=Transaction Layer, aer_agent=Receiver ID
[ 77.719723] pcieport 0000:c9:02.0: AER: aer_uncor_severity: 0x00463010
Fixes: 5b246e533d01 ("ice: split probe into smaller functions")
Signed-off-by: Jakub Buchocki <jakubx.buchocki@intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Link: https://lore.kernel.org/r/20230612171421.21570-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-06-12 17:14:21 +00:00
	/* Service task is already stopped, so call reset directly. */
	ice_reset(&pf->hw, ICE_RESET_PFR);
	pci_wait_for_pending_transaction(pf->pdev);
	ice_clear_interrupt_scheme(pf);
2022-12-21 11:38:18 +00:00
}
2018-03-20 14:58:10 +00:00
2022-12-21 11:38:18 +00:00
static void ice_init_features(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

2018-03-20 14:58:10 +00:00
2022-12-21 11:38:18 +00:00
	if (ice_is_safe_mode(pf))
		return;
ice: Add support for switch filter programming
A VSI needs traffic directed towards it. This is done by programming
filter rules on the switch (embedded vSwitch) element in the hardware,
which connects the VSI to the ingress/egress port.
This patch introduces data structures and functions necessary to add,
remove, or update switch rules on the switch element. This is a pretty
low level function that is generic enough to add a whole range of filters.
This patch also introduces two top level functions, ice_add_mac and
ice_remove_mac, which through a series of intermediate helper functions
eventually call ice_aq_sw_rules to add/delete simple MAC based filters.
It's worth noting that one invocation of ice_add_mac/ice_remove_mac
is capable of adding/deleting multiple MAC filters.
Also worth noting is the fact that the driver maintains a list of currently
active filters, so every filter addition/removal causes an update to this
list. This is done for a couple of reasons:
1) If two VSIs try to add the same filters, we need to detect it and do
things a little differently (i.e. use VSI lists, described below) as
the same filter can't be added more than once.
2) In the event of a hardware reset we can simply walk through this list
and restore the filters.
VSI Lists:
In a multi-VSI situation, it's possible that multiple VSIs want to add the
same filter rule. For example, two VSIs that want to receive broadcast
traffic would both add a filter for destination MAC ff:ff:ff:ff:ff:ff.
This can become cumbersome to maintain and so this is handled using a
VSI list.
A VSI list is a resource that can be allocated in the hardware using the
ice_aq_alloc_free_res admin queue command. Simply put, a VSI list can
be thought of as a subscription list containing a set of VSIs to which
the packet should be forwarded, should the filter match.
For example, if VSI-0 has already added a broadcast filter, and VSI-1
wants to do the same thing, the filter creation flow will detect this,
allocate a VSI list and update the switch rule so that broadcast traffic
will now be forwarded to the VSI list which contains VSI-0 and VSI-1.
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
2018-03-20 14:58:12 +00:00
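A hedged usage sketch of that API: forwarding broadcast traffic to a VSI
through a single MAC filter. The ice_fltr_add_mac wrapper and the
ICE_FWD_TO_VSI action come from the driver's filter layer; their use here
is an assumption for illustration, not a quote of its code.

	u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	err = ice_fltr_add_mac(vsi, bcast, ICE_FWD_TO_VSI);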
2022-12-21 11:38:18 +00:00
	/* initialize DDP driven features */
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_init(pf);
2018-03-20 14:58:12 +00:00
2022-12-21 11:38:18 +00:00
	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_init(pf);

2023-09-13 20:49:41 +00:00
	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
		ice_dpll_init(pf);

2022-12-21 11:38:18 +00:00
	/* Note: Flow director init failure is non-fatal to load */
	if (ice_init_fdir(pf))
		dev_err(dev, "could not initialize flow director\n");

	/* Note: DCB init failure is non-fatal to load */
	if (ice_init_pf_dcb(pf, false)) {
		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	} else {
		ice_cfg_lldp_mib_change(&pf->hw, true);
2019-09-09 13:47:42 +00:00
	}

2022-12-21 11:38:18 +00:00
	if (ice_init_lag(pf))
		dev_warn(dev, "Failed to init link aggregation support\n");

2023-12-01 18:08:39 +00:00
	ice_hwmon_init(pf);
2022-12-21 11:38:18 +00:00
}
static void ice_deinit_features(struct ice_pf *pf)
{
2023-10-11 23:33:34 +00:00
	if (ice_is_safe_mode(pf))
		return;

2022-12-21 11:38:18 +00:00
	ice_deinit_lag(pf);
	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
		ice_cfg_lldp_mib_change(&pf->hw, false);
	ice_deinit_fdir(pf);
	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_release(pf);
2023-09-13 20:49:41 +00:00
	if (test_bit(ICE_FLAG_DPLL, pf->flags))
		ice_dpll_deinit(pf);
2023-10-24 11:09:20 +00:00
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		xa_destroy(&pf->eswitch.reprs);
2022-12-21 11:38:18 +00:00
}
static void ice_init_wakeup(struct ice_pf *pf)
{
	/* Save wakeup reason register for later use */
	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
	/* check for a power management event */
	ice_print_wake_reason(pf);
	/* clear wake status, all bits */
	wr32(&pf->hw, PFPM_WUS, U32_MAX);
	/* Disable WoL at init, wait for user to enable */
	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
}
static int ice_init_link(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;
2018-03-20 14:58:12 +00:00
2019-02-27 00:35:23 +00:00
	err = ice_init_link_events(pf->hw.port_info);
	if (err) {
		dev_err(dev, "ice_init_link_events failed: %d\n", err);
2022-12-21 11:38:18 +00:00
		return err;
2019-02-27 00:35:23 +00:00
	}

2021-02-26 21:19:22 +00:00
	/* not a fatal error if this fails */
2020-07-09 16:16:06 +00:00
	err = ice_init_nvm_phy_type(pf->hw.port_info);
2021-02-26 21:19:22 +00:00
	if (err)
2020-07-09 16:16:06 +00:00
		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);

2021-02-26 21:19:22 +00:00
	/* not a fatal error if this fails */
2020-07-09 16:16:06 +00:00
	err = ice_update_link_info(pf->hw.port_info);
2021-02-26 21:19:22 +00:00
	if (err)
2020-07-09 16:16:06 +00:00
		dev_err(dev, "ice_update_link_info failed: %d\n", err);

2020-07-09 16:16:07 +00:00
	ice_init_link_dflt_override(pf->hw.port_info);

2021-10-13 16:02:19 +00:00
	ice_check_link_cfg_err(pf,
			       pf->hw.port_info->phy.link_info.link_cfg_err);

2021-05-06 15:40:01 +00:00
2020-07-09 16:16:06 +00:00
	/* if media available, initialize PHY settings */
	if (pf->hw.port_info->phy.link_info.link_info &
	    ICE_AQ_MEDIA_AVAILABLE) {
2021-02-26 21:19:22 +00:00
		/* not a fatal error if this fails */
2020-07-09 16:16:06 +00:00
		err = ice_init_phy_user_cfg(pf->hw.port_info);
2021-02-26 21:19:22 +00:00
		if (err)
2020-07-09 16:16:06 +00:00
			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);

		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
			struct ice_vsi *vsi = ice_get_main_vsi(pf);

			if (vsi)
				ice_configure_phy(vsi);
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
	}

2022-12-21 11:38:18 +00:00
	return err;
}
2018-10-26 17:40:58 +00:00
2022-12-21 11:38:18 +00:00
static int ice_init_pf_sw(struct ice_pf *pf)
{
	bool dvm = ice_is_dvm_ena(&pf->hw);
	struct ice_vsi *vsi;
	int err;

2020-07-09 16:16:03 +00:00
2022-12-21 11:38:18 +00:00
	/* create switch struct for the switch element created by FW on boot */
	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
	if (!pf->first_sw)
		return -ENOMEM;

2020-07-09 16:16:03 +00:00
2022-12-21 11:38:18 +00:00
	if (pf->hw.evb_veb)
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
	else
		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;

2020-07-09 16:16:03 +00:00
2022-12-21 11:38:18 +00:00
	pf->first_sw->pf = pf;

2020-07-09 16:16:03 +00:00
2022-12-21 11:38:18 +00:00
	/* record the sw_id available for later use */
	pf->first_sw->sw_id = pf->hw.port_info->sw_id;

	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
	if (err)
		goto err_aq_set_port_params;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi) {
		err = -ENOMEM;
		goto err_pf_vsi_setup;
2020-07-13 20:53:14 +00:00
	}

2019-09-09 13:47:46 +00:00
2022-12-21 11:38:18 +00:00
	return 0;

2019-09-09 13:47:46 +00:00
2022-12-21 11:38:18 +00:00
err_pf_vsi_setup:
err_aq_set_port_params:
	kfree(pf->first_sw);
	return err;
}
2022-03-01 18:38:03 +00:00
2022-12-21 11:38:18 +00:00
static void ice_deinit_pf_sw(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);

2020-05-12 01:01:40 +00:00
2022-12-21 11:38:18 +00:00
	if (!vsi)
		return;

	ice_vsi_release(vsi);
	kfree(pf->first_sw);
}
static int ice_alloc_vsis(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
	if (!pf->num_alloc_vsi)
		return -EIO;

	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
		dev_warn(dev,
			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
2019-09-09 13:47:46 +00:00
	}

2022-12-21 11:38:18 +00:00
	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
			       GFP_KERNEL);
	if (!pf->vsi)
		return -ENOMEM;

2020-11-21 00:39:26 +00:00
2022-12-21 11:38:18 +00:00
	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
				     sizeof(*pf->vsi_stats), GFP_KERNEL);
	if (!pf->vsi_stats) {
		devm_kfree(dev, pf->vsi);
		return -ENOMEM;
	}

2019-10-09 14:09:50 +00:00
2022-12-21 11:38:18 +00:00
	return 0;
}

static void ice_dealloc_vsis(struct ice_pf *pf)
{
	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
	pf->vsi_stats = NULL;

	pf->num_alloc_vsi = 0;
	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
	pf->vsi = NULL;
}
static int ice_init_devlink(struct ice_pf *pf)
{
	int err;

	err = ice_devlink_register_params(pf);
2023-01-24 00:57:14 +00:00
	if (err)
2022-12-21 11:38:18 +00:00
		return err;

2023-01-24 00:57:14 +00:00
2022-12-21 11:38:18 +00:00
	ice_devlink_init_regions(pf);
	ice_devlink_register(pf);

2023-01-24 00:57:14 +00:00
2022-12-21 11:38:18 +00:00
	return 0;
}

static void ice_deinit_devlink(struct ice_pf *pf)
{
	ice_devlink_unregister(pf);
	ice_devlink_destroy_regions(pf);
	ice_devlink_unregister_params(pf);
}
2023-01-24 00:57:14 +00:00
2022-12-21 11:38:18 +00:00
static int ice_init(struct ice_pf *pf)
{
	int err;

	err = ice_init_dev(pf);
2021-03-02 18:12:03 +00:00
	if (err)
2022-12-21 11:38:18 +00:00
		return err;

2021-03-02 18:12:03 +00:00
2022-12-21 11:38:18 +00:00
	err = ice_alloc_vsis(pf);
	if (err)
		goto err_alloc_vsis;

	err = ice_init_pf_sw(pf);
	if (err)
		goto err_init_pf_sw;

	ice_init_wakeup(pf);

	err = ice_init_link(pf);
2021-10-18 23:16:02 +00:00
	if (err)
2022-12-21 11:38:18 +00:00
		goto err_init_link;

	err = ice_send_version(pf);
	if (err)
		goto err_init_link;

	ice_verify_cacheline_size(pf);

	if (ice_is_safe_mode(pf))
		ice_set_safe_mode_vlan_cfg(pf);
	else
		/* print PCI link speed and width */
		pcie_print_link_status(pf->pdev);

2021-10-18 23:16:02 +00:00
2020-05-08 00:41:03 +00:00
	/* ready to go, so clear down state bit */
2021-03-02 18:15:38 +00:00
	clear_bit(ICE_DOWN, pf->state);
2022-12-21 11:38:18 +00:00
	clear_bit(ICE_SERVICE_DIS, pf->state);

	/* since everything is good, start the service timer */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;

err_init_link:
	ice_deinit_pf_sw(pf);
err_init_pf_sw:
	ice_dealloc_vsis(pf);
err_alloc_vsis:
	ice_deinit_dev(pf);
	return err;
}

static void ice_deinit(struct ice_pf *pf)
{
	set_bit(ICE_SERVICE_DIS, pf->state);
	set_bit(ICE_DOWN, pf->state);

	ice_deinit_pf_sw(pf);
	ice_dealloc_vsis(pf);
	ice_deinit_dev(pf);
}
/**
 * ice_load - load PF by initializing HW and starting the VSI
 * @pf: pointer to the PF instance
2024-02-05 13:03:56 +00:00
 *
 * This function has to be called under devl_lock.
2022-12-21 11:38:18 +00:00
 */
int ice_load(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	int err;

2024-02-05 13:03:56 +00:00
	devl_assert_locked(priv_to_devlink(pf));

	vsi = ice_get_main_vsi(pf);

	/* init channel list */
	INIT_LIST_HEAD(&vsi->ch_list);

	err = ice_cfg_netdev(vsi);
2022-12-21 11:38:18 +00:00
	if (err)
		return err;

2024-02-05 13:03:56 +00:00
	/* Setup DCB netlink interface */
	ice_dcbnl_setup(vsi);
ice: refactor VSI setup to use parameter structure
The ice_vsi_setup, ice_vsi_alloc, and ice_vsi_cfg functions have
grown a large number of parameters. These parameters are used to initialize
a new VSI, as well as re-configure an existing VSI.
Any time we want to add a new parameter to this function chain, even if it
will usually be unset, we have to change many call sites due to changing
the function signature.
A future change is going to refactor ice_vsi_alloc and ice_vsi_cfg to move
the VSI configuration and initialization all into ice_vsi_cfg.
Before this, refactor the VSI setup flow to use a new ice_vsi_cfg_params
structure. This will contain the configuration (mainly pointers) used to
initialize a VSI.
Pass this from ice_vsi_setup into the related functions such as
ice_vsi_alloc, ice_vsi_cfg, and ice_vsi_cfg_def.
Introduce a helper, ice_vsi_to_params to convert an existing VSI to the
parameters used to initialize it. This will aid in the flows where we
rebuild an existing VSI.
Since we also pass the ICE_VSI_FLAG_INIT to more functions which do not
need (or cannot yet have) the VSI parameters, lets make this clear by
renaming the function parameter to vsi_flags and using a u32 instead of a
signed integer. The name vsi_flags also makes it clear that we may extend
the flags in the future.
This change will make it easier to refactor the setup flow in the future,
and will reduce the complexity required to add a new parameter for
configuration in the future.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
2023-01-19 01:16:43 +00:00
2024-02-05 13:03:56 +00:00

	err = ice_init_mac_fltr(pf);
	if (err)
		goto err_init_mac_fltr;

	err = ice_devlink_create_pf_port(pf);
	if (err)
		goto err_devlink_create_pf_port;

	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);

	err = ice_register_netdev(vsi);
	if (err)
		goto err_register_netdev;

	err = ice_tc_indir_block_register(vsi);
	if (err)
		goto err_tc_indir_block_register;

	ice_napi_add(vsi);

	err = ice_init_rdma(pf);
	if (err)
		goto err_init_rdma;

	ice_init_features(pf);
	ice_service_task_restart(pf);

	clear_bit(ICE_DOWN, pf->state);

	return 0;

err_init_rdma:
	ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
	ice_unregister_netdev(vsi);
err_register_netdev:
	ice_devlink_destroy_pf_port(pf);
err_devlink_create_pf_port:
err_init_mac_fltr:
	ice_decfg_netdev(vsi);
	return err;
}
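
/* A minimal caller sketch: ice_load()/ice_unload() expect the devlink
 * instance lock to be held, as ice_probe() and ice_remove() do below:
 *
 *	devl_lock(priv_to_devlink(pf));
 *	err = ice_load(pf);
 *	devl_unlock(priv_to_devlink(pf));
 */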

/**
 * ice_unload - unload PF by stopping the main VSI and deinitializing the HW
 * @pf: pointer to the pf instance
 *
 * This function has to be called under devl_lock.
 */
void ice_unload(struct ice_pf *pf)
{
	struct ice_vsi *vsi = ice_get_main_vsi(pf);

	devl_assert_locked(priv_to_devlink(pf));

	ice_deinit_features(pf);
	ice_deinit_rdma(pf);
	ice_tc_indir_block_unregister(vsi);
	ice_unregister_netdev(vsi);
	ice_devlink_destroy_pf_port(pf);
	ice_decfg_netdev(vsi);
}

/**
 * ice_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in ice_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	if (pdev->is_virtfn) {
		dev_err(dev, "can't probe a virtual function\n");
		return -EINVAL;
	}

	/* when under a kdump kernel initiate a reset before enabling the
	 * device in order to clear out any pending DMA transactions. These
	 * transactions can cause some systems to machine check when doing
	 * the pcim_enable_device() below.
	 */
	if (is_kdump_kernel()) {
		pci_save_state(pdev);
		pci_clear_master(pdev);
		err = pcie_flr(pdev);
		if (err)
			return err;
		pci_restore_state(pdev);
	}

	/* this driver uses devres, see
	 * Documentation/driver-api/driver-model/devres.rst
	 */
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
	if (err) {
		dev_err(dev, "BAR0 I/O map error %d\n", err);
		return err;
	}

	pf = ice_allocate_pf(dev);
	if (!pf)
		return -ENOMEM;

	/* initialize Auxiliary index to invalid value */
	pf->aux_idx = -1;

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
		return err;
	}

	pci_set_master(pdev);

	pf->pdev = pdev;
	pci_set_drvdata(pdev, pf);
	set_bit(ICE_DOWN, pf->state);
	/* Disable service task until DOWN bit is cleared */
	set_bit(ICE_SERVICE_DIS, pf->state);

	hw = &pf->hw;
	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
	pci_save_state(pdev);

	hw->back = pf;
	hw->port_info = NULL;
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	ice_set_ctrlq_len(hw);

	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (debug < -1)
		hw->debug_mask = debug;
#endif

	err = ice_init(pf);
	if (err)
		goto err_init;

	devl_lock(priv_to_devlink(pf));
	err = ice_load(pf);
	devl_unlock(priv_to_devlink(pf));
	if (err)
		goto err_load;

	err = ice_init_devlink(pf);
	if (err)
		goto err_init_devlink;

	return 0;

err_init_devlink:
	devl_lock(priv_to_devlink(pf));
	ice_unload(pf);
	devl_unlock(priv_to_devlink(pf));
err_load:
	ice_deinit(pf);
err_init:
	pci_disable_device(pdev);
	return err;
}

/**
 * ice_set_wake - enable or disable Wake on LAN
 * @pf: pointer to the PF struct
 *
 * Simple helper for WoL control
 */
static void ice_set_wake(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool wol = pf->wol_ena;

	/* clear wake state, otherwise new wake events won't fire */
	wr32(hw, PFPM_WUS, U32_MAX);

	/* enable / disable APM wake up, no RMW needed */
	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);

	/* set magic packet filter enabled */
	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
}
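
/* pf->wol_ena is normally toggled through the ethtool WoL interface
 * (e.g. "ethtool -s <iface> wol g" to request magic-packet wake);
 * ice_set_wake() only programs the currently requested state into the
 * PFPM wake registers on the teardown and suspend paths.
 */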

/**
 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
 * @pf: pointer to the PF struct
 *
 * Issue firmware command to enable multicast magic wake, making
 * sure that any locally administered address (LAA) is used for
 * wake, and that PF reset doesn't undo the LAA.
 */
static void ice_setup_mc_magic_wake(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 mac_addr[ETH_ALEN];
	struct ice_vsi *vsi;
	int status;
	u8 flags;

	if (!pf->wol_ena)
		return;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Get current MAC address in case it's an LAA */
	if (vsi->netdev)
		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
	else
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);

	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;

	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
	if (status)
		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
			status, ice_aq_str(hw->adminq.sq_last_status));
}

/**
 * ice_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void ice_remove(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
		if (!ice_is_reset_in_progress(pf->state))
			break;
		msleep(100);
	}

	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
		ice_free_vfs(pf);
	}

	ice_hwmon_exit(pf);

	ice_service_task_stop(pf);

	ice_aq_cancel_waiting_tasks(pf);
	set_bit(ICE_DOWN, pf->state);

	if (!ice_is_safe_mode(pf))
		ice_remove_arfs(pf);

	ice_deinit_devlink(pf);

	devl_lock(priv_to_devlink(pf));
	ice_unload(pf);
	devl_unlock(priv_to_devlink(pf));

	ice_deinit(pf);
	ice_vsi_release_all(pf);

	ice_setup_mc_magic_wake(pf);
	ice_set_wake(pf);

	pci_disable_device(pdev);
}

/**
 * ice_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 */
static void ice_shutdown(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	ice_remove(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_ena);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
/**
 * ice_prepare_for_shutdown - prep for PCI shutdown
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for PCI device shutdown
 */
static void ice_prepare_for_shutdown(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 v;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			pf->vsi[v]->vsi_num = 0;

	ice_shutdown_all_ctrlq(hw);
}

/**
 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
 * @pf: board private structure to reinitialize
 *
 * This routine reinitializes the interrupt scheme that was cleared during
 * the power management suspend callback.
 *
 * This should be called during resume routine to re-allocate the q_vectors
 * and reacquire interrupts.
 */
static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret, v;

	/* Since we clear MSIX flag during suspend, we need to
	 * set it back during resume...
	 */
	ret = ice_init_interrupt_scheme(pf);
	if (ret) {
		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
		return ret;
	}

	/* Remap vectors and rings, after successful re-init interrupts */
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;

		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
		if (ret)
			goto err_reinit;
		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
		ice_vsi_set_napi_queues(pf->vsi[v]);
	}

	ret = ice_req_irq_msix_misc(pf);
	if (ret) {
		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
			ret);
		goto err_reinit;
	}

	return 0;

err_reinit:
	while (v--)
		if (pf->vsi[v])
			ice_vsi_free_q_vectors(pf->vsi[v]);

	return ret;
}

/**
 * ice_suspend - PM callback to quiesce the device and prepare for D3 transition
 * @dev: generic device information structure
 */
static int __maybe_unused ice_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ice_pf *pf;
	int disabled, v;

	pf = pci_get_drvdata(pdev);

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Device is not ready, no need to suspend it\n");
		return -EBUSY;
	}

	/* Stop watchdog tasks until resume completion.
	 * Even though it is most likely that the service task is
	 * disabled if the device is suspended or down, the service task's
	 * state is controlled by a different state bit, and we should
	 * store and honor whatever state that bit is in at this point.
	 */
	disabled = ice_service_task_stop(pf);

	ice_unplug_aux_dev(pf);

	/* Already suspended? Then there is nothing to do */
	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	if (test_bit(ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "can't suspend device in reset or already down\n");
		if (!disabled)
			ice_service_task_restart(pf);
		return 0;
	}

	ice_setup_mc_magic_wake(pf);

	ice_prepare_for_shutdown(pf);

	ice_set_wake(pf);

	/* Free vectors, clear the interrupt scheme and release IRQs
	 * for proper hibernation, especially with large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	ice_free_irq_msix_misc(pf);
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;
		ice_vsi_free_q_vectors(pf->vsi[v]);
	}
	ice_clear_interrupt_scheme(pf);

	pci_save_state(pdev);
	pci_wake_from_d3(pdev, pf->wol_ena);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
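
/* On the way back up, ice_resume() below reads PFPM_WUS to capture the
 * wake reason recorded by the wake configuration done here.
 */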

/**
 * ice_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 */
static int __maybe_unused ice_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	enum ice_reset_req reset_type;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;

	ret = pci_enable_device_mem(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable device after suspend\n");
		return ret;
	}

	pf = pci_get_drvdata(pdev);
	hw = &pf->hw;

	pf->wakeup_reason = rd32(hw, PFPM_WUS);
	ice_print_wake_reason(pf);

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	ret = ice_reinit_interrupt_scheme(pf);
	if (ret)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);

	clear_bit(ICE_DOWN, pf->state);
	/* Now perform PF reset and rebuild */
	reset_type = ICE_RESET_PFR;
	/* re-enable service task for reset, but allow reset to schedule it */
	clear_bit(ICE_SERVICE_DIS, pf->state);

	if (ice_schedule_reset(pf, reset_type))
		dev_err(dev, "Reset during resume failed.\n");

	clear_bit(ICE_SUSPENDED, pf->state);
	ice_service_task_restart(pf);

	/* Restart the service task */
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));

	return 0;
}
#endif /* CONFIG_PM */

/**
 * ice_pci_err_detected - warning that PCI error has been detected
 * @pdev: PCI device information struct
 * @err: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and the error handling
 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
 */
static pci_ers_result_t
ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (!test_bit(ICE_SUSPENDED, pf->state)) {
		ice_service_task_stop(pf);

		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
			set_bit(ICE_PFR_REQ, pf->state);
			ice_prepare_for_reset(pf, ICE_RESET_PFR);
		}
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ice_pci_err_slot_reset - a PCI slot reset has just happened
 * @pdev: PCI device information struct
 *
 * Called to determine if the driver can recover from the PCI slot reset by
 * using a register read to determine if the device is recoverable.
 */
static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
			err);
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		/* Check for life */
		reg = rd32(&pf->hw, GLGEN_RTRIG);
		if (!reg)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}

/**
 * ice_pci_err_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error and/or
 * reset recovery have finished
 */
static void ice_pci_err_resume(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!pf) {
		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
			__func__);
		return;
	}

	if (test_bit(ICE_SUSPENDED, pf->state)) {
		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
			__func__);
		return;
	}

	ice_restore_all_vfs_msi_state(pf);

	ice_do_reset(pf, ICE_RESET_PFR);
	ice_service_task_restart(pf);
	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
}

/**
 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	if (!test_bit(ICE_SUSPENDED, pf->state)) {
		ice_service_task_stop(pf);

		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
			set_bit(ICE_PFR_REQ, pf->state);
			ice_prepare_for_reset(pf, ICE_RESET_PFR);
		}
	}
}

/**
 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */
static void ice_pci_err_reset_done(struct pci_dev *pdev)
{
	ice_pci_err_resume(pdev);
}
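
/* For reference, the AER core invokes the callbacks implemented here in
 * the order error_detected -> slot_reset -> resume, while
 * reset_prepare/reset_done bracket function-level resets; see
 * Documentation/PCI/pci-error-recovery.rst for the full state machine.
 */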

/* ice_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ice_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
	/* required last entry */
	{ }
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);

static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);

static const struct pci_error_handlers ice_pci_err_handler = {
	.error_detected = ice_pci_err_detected,
	.slot_reset = ice_pci_err_slot_reset,
	.reset_prepare = ice_pci_err_reset_prepare,
	.reset_done = ice_pci_err_reset_done,
	.resume = ice_pci_err_resume
};

static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ice_pci_tbl,
	.probe = ice_probe,
	.remove = ice_remove,
#ifdef CONFIG_PM
	.driver.pm = &ice_pm_ops,
#endif /* CONFIG_PM */
	.shutdown = ice_shutdown,
	.sriov_configure = ice_sriov_configure,
	.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
	.err_handler = &ice_pci_err_handler
};

/**
 * ice_module_init - Driver registration routine
 *
 * ice_module_init is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init ice_module_init(void)
{
	int status = -ENOMEM;

	pr_info("%s\n", ice_driver_string);
	pr_info("%s\n", ice_copyright);

	ice_adv_lnk_speed_maps_init();

	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
	if (!ice_wq) {
		pr_err("Failed to create workqueue\n");
		return status;
	}

	ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
	if (!ice_lag_wq) {
		pr_err("Failed to create LAG workqueue\n");
		goto err_dest_wq;
	}

	ice_debugfs_init();

	status = pci_register_driver(&ice_driver);
	if (status) {
		pr_err("failed to register PCI driver, err %d\n", status);
		goto err_dest_lag_wq;
	}

	return 0;

err_dest_lag_wq:
	destroy_workqueue(ice_lag_wq);
	ice_debugfs_exit();
err_dest_wq:
	destroy_workqueue(ice_wq);
	return status;
}
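
/* Ordering note: the workqueues and debugfs root are created before
 * pci_register_driver() because probe may run synchronously from within
 * it and can already queue service-task work on ice_wq.
 */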
module_init(ice_module_init);

/**
 * ice_module_exit - Driver exit cleanup routine
 *
 * ice_module_exit is called just before the driver is removed
 * from memory.
 */
static void __exit ice_module_exit(void)
{
	pci_unregister_driver(&ice_driver);
	ice_debugfs_exit();
	destroy_workqueue(ice_wq);
	destroy_workqueue(ice_lag_wq);
	pr_info("module unloaded\n");
}
module_exit(ice_module_exit);

/**
 * ice_set_mac_address - NDO callback to set MAC address
 * @netdev: network interface device structure
 * @pi: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int ice_set_mac_address(struct net_device *netdev, void *pi)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct sockaddr *addr = pi;
	u8 old_mac[ETH_ALEN];
	u8 flags = 0;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	if (test_bit(ICE_DOWN, pf->state) ||
	    ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't set mac %pM. device not ready\n",
			   mac);
		return -EBUSY;
	}

	if (ice_chnl_dmac_fltr_cnt(pf)) {
		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
			   mac);
		return -EAGAIN;
	}

	netif_addr_lock_bh(netdev);
	ether_addr_copy(old_mac, netdev->dev_addr);

	/* change the netdev's MAC address */
	eth_hw_addr_set(netdev, mac);
	netif_addr_unlock_bh(netdev);

	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
	if (err && err != -ENOENT) {
		err = -EADDRNOTAVAIL;
		goto err_update_filters;
	}

	/* Add filter for new MAC. If filter exists, return success */
	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
	if (err == -EEXIST) {
		/* Although this MAC filter is already present in hardware it's
		 * possible in some cases (e.g. bonding) that dev_addr was
		 * modified outside of the driver and needs to be restored back
		 * to this value.
		 */
		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
		return 0;
	} else if (err) {
		/* error if the new filter addition failed */
		err = -EADDRNOTAVAIL;
	}

err_update_filters:
	if (err) {
		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
			   mac);
		netif_addr_lock_bh(netdev);
		eth_hw_addr_set(netdev, old_mac);
		netif_addr_unlock_bh(netdev);
		return err;
	}

	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
		   netdev->dev_addr);

	/* write new MAC address to the firmware */
	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
	if (err) {
		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
			   mac, err);
	}
	return 0;
}
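
/* This ndo is reached through the core dev_set_mac_address() path,
 * e.g. "ip link set dev <iface> address 00:11:22:33:44:55"; the core
 * handles CHANGEADDR notifiers, while the driver only swaps the VSI
 * filter and writes the new LAA to firmware.
 */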

/**
 * ice_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 */
static void ice_set_rx_mode(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (!vsi || ice_is_switchdev_running(vsi->back))
		return;

	/* Set the flags to synchronize filters
	 * ndo_set_rx_mode may be triggered even without a change in netdev
	 * flags
	 */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	ice_service_task_schedule(vsi->back);
}

/**
 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Queue ID
 * @maxrate: maximum bandwidth in Mbps
 */
static int
ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	u16 q_handle;
	int status;
	u8 tc;

	/* Validate maxrate requested is within permitted range */
	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
			   maxrate, queue_index);
		return -EINVAL;
	}

	q_handle = vsi->tx_rings[queue_index]->q_handle;
	tc = ice_dcb_get_tc(vsi, queue_index);

	vsi = ice_locate_vsi_using_queue(vsi, queue_index);
	if (!vsi) {
		netdev_err(netdev, "Invalid VSI for given queue %d\n",
			   queue_index);
		return -EINVAL;
	}

	/* Set BW back to default, when user set maxrate to 0 */
	if (!maxrate)
		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
					       q_handle, ICE_MAX_BW);
	else
		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
					  q_handle, ICE_MAX_BW, maxrate * 1000);
	if (status)
		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
			   status);

	return status;
}
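
/* The per-queue rate limit is exercised from user space via the netdev
 * sysfs attribute, e.g.:
 *
 *	echo 500 > /sys/class/net/<iface>/queues/tx-0/tx_maxrate
 *
 * which the core translates into this ndo_set_tx_maxrate() call (rate in
 * Mbps, 0 restores the default).
 */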

/**
 * ice_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 * @extack: netlink extended ack
 */
static int
ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
	    struct net_device *dev, const unsigned char *addr, u16 vid,
	    u16 flags, struct netlink_ext_ack __always_unused *extack)
{
	int err;

	if (vid) {
		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
		return -EINVAL;
	}
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_err(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}

/**
 * ice_fdb_del - delete an entry from the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being removed
 * @vid: VLAN ID
 * @extack: netlink extended ack
 */
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
	    struct net_device *dev, const unsigned char *addr,
	    __always_unused u16 vid, struct netlink_ext_ack *extack)
{
	int err;

	if (ndm->ndm_state & NUD_PERMANENT) {
		netdev_err(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);
	else
		err = -EINVAL;

	return err;
}
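
/* These FDB ops back the "bridge fdb" interface, e.g.:
 *
 *	bridge fdb add 01:00:5e:00:00:42 dev <iface> self static
 *
 * Only static entries are accepted, and VLAN-scoped entries are rejected
 * by ice_fdb_add() above.
 */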

#define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_CTAG_TX | \
					 NETIF_F_HW_VLAN_STAG_RX | \
					 NETIF_F_HW_VLAN_STAG_TX)

#define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
					 NETIF_F_HW_VLAN_STAG_RX)

#define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
					 NETIF_F_HW_VLAN_STAG_FILTER)

/**
 * ice_fix_features - fix the netdev features flags based on device limitations
 * @netdev: ptr to the netdev that flags are being fixed on
 * @features: features that need to be checked and possibly fixed
 *
 * Make sure any fixups are made to features in this callback. This enables the
 * driver to not have to check unsupported configurations throughout the driver
 * because that's the responsibility of this callback.
 *
 * Single VLAN Mode (SVM) Supported Features:
 *	NETIF_F_HW_VLAN_CTAG_FILTER
 *	NETIF_F_HW_VLAN_CTAG_RX
 *	NETIF_F_HW_VLAN_CTAG_TX
 *
 * Double VLAN Mode (DVM) Supported Features:
 *	NETIF_F_HW_VLAN_CTAG_FILTER
 *	NETIF_F_HW_VLAN_CTAG_RX
 *	NETIF_F_HW_VLAN_CTAG_TX
 *
 *	NETIF_F_HW_VLAN_STAG_FILTER
 *	NETIF_F_HW_VLAN_STAG_RX
 *	NETIF_F_HW_VLAN_STAG_TX
 *
 * Features that need fixing:
 *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
 *	These are mutually exclusive as the VSI context cannot support multiple
 *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
 *	is not done, then default to clearing the requested STAG offload
 *	settings.
 *
 *	All supported filtering has to be enabled or disabled together. For
 *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
 *	together. If this is not done, then default to VLAN filtering disabled.
 *	These are mutually exclusive as there is currently no way to
 *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
 *	prune rules.
 */
static netdev_features_t
ice_fix_features(struct net_device *netdev, netdev_features_t features)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
	bool cur_ctag, cur_stag, req_ctag, req_stag;

	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;

	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;

	if (req_vlan_fltr != cur_vlan_fltr) {
		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
			if (req_ctag && req_stag) {
				features |= NETIF_VLAN_FILTERING_FEATURES;
			} else if (!req_ctag && !req_stag) {
				features &= ~NETIF_VLAN_FILTERING_FEATURES;
			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
				   (!cur_stag && req_stag && !cur_ctag)) {
				features |= NETIF_VLAN_FILTERING_FEATURES;
				netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
			} else if ((cur_ctag && !req_ctag && cur_stag) ||
				   (cur_stag && !req_stag && cur_ctag)) {
				features &= ~NETIF_VLAN_FILTERING_FEATURES;
				netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
			}
		} else {
			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");

			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
}

	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
			      NETIF_F_HW_VLAN_STAG_TX);
	}

	if (!(netdev->features & NETIF_F_RXFCS) &&
	    (features & NETIF_F_RXFCS) &&
	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
	}

	return features;
}
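
/* Illustrative wiring sketch -- an assumption for exposition, not a verbatim
 * excerpt of this driver: an ndo_fix_features callback such as
 * ice_fix_features() is only reached once it is hooked into the netdev's ops
 * table, where the stack calls it to sanitize a requested feature set before
 * ndo_set_features commits it. A minimal, hypothetical example:
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_fix_features	= ice_fix_features,
 *		.ndo_set_features	= ice_set_features,
 *	};
 *
 *	netdev->netdev_ops = &example_netdev_ops;
 */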

/**
 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
 * @vsi: PF's VSI
 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
 *
 * Store current stripped VLAN proto in ring packet context,
 * so it can be accessed more efficiently by packet processing code.
 */
static void
ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
{
	u16 i;

	ice_for_each_alloc_rxq(vsi, i)
		vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
}

/**
 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
 * @vsi: PF's VSI
 * @features: features used to determine VLAN offload settings
 *
 * First, determine the vlan_ethertype based on the VLAN offload bits in
 * features. Then determine if stripping and insertion should be enabled or
 * disabled. Finally enable or disable VLAN stripping and insertion.
 */
static int
ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
{
	bool enable_stripping = true, enable_insertion = true;
	struct ice_vsi_vlan_ops *vlan_ops;
	int strip_err = 0, insert_err = 0;
	u16 vlan_ethertype = 0;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
		vlan_ethertype = ETH_P_8021AD;
	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		vlan_ethertype = ETH_P_8021Q;

	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
		enable_stripping = false;
	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
		enable_insertion = false;

	if (enable_stripping)
		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
	else
		strip_err = vlan_ops->dis_stripping(vsi);

	if (enable_insertion)
		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
	else
		insert_err = vlan_ops->dis_insertion(vsi);

	if (strip_err || insert_err)
		return -EIO;

	ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
				    htons(vlan_ethertype) : 0);

	return 0;
}
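
/* Worked example (illustrative values, not driver code): given
 * features = NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX, the first
 * branch above selects vlan_ethertype = ETH_P_8021AD, stripping and insertion
 * both stay enabled, and the Rx rings record htons(ETH_P_8021AD). With no
 * VLAN offload bits set, both paths are disabled and the rings record a
 * vlan_proto of 0. Mixed CTAG/STAG offload requests should never reach this
 * point because ice_fix_features() clears the STAG bits in that case.
 */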

/**
 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
 * @vsi: PF's VSI
 * @features: features used to determine VLAN filtering settings
 *
 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
 * features.
 */
static int
ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	int err = 0;

	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
	 * if either bit is set
	 */
	if (features &
	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
		err = vlan_ops->ena_rx_filtering(vsi);
	else
		err = vlan_ops->dis_rx_filtering(vsi);

	return err;
}
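
/* Usage sketch (assumes the generic netdev feature strings "rx-vlan-filter"
 * and "rx-vlan-stag-filter"; illustrative only): either of
 *
 *	ethtool -K eth0 rx-vlan-filter on
 *	ethtool -K eth0 rx-vlan-stag-filter on
 *
 * enables Rx VLAN filtering here. The hardware filter is shared between the
 * two ethertypes, which is why ice_fix_features() forces both bits on or off
 * together in DVM.
 */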

/**
 * ice_set_vlan_features - set VLAN settings based on suggested feature set
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 *
 * Only update VLAN settings if the requested_vlan_features are different than
 * the current_vlan_features.
 */
static int
ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
{
	netdev_features_t current_vlan_features, requested_vlan_features;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int err;

	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
	if (current_vlan_features ^ requested_vlan_features) {
		if ((features & NETIF_F_RXFCS) &&
		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
			dev_err(ice_pf_to_dev(vsi->back),
				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
			return -EIO;
		}

		err = ice_set_vlan_offload_features(vsi, features);
		if (err)
			return err;
	}

	current_vlan_features = netdev->features &
		NETIF_VLAN_FILTERING_FEATURES;
	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
	if (current_vlan_features ^ requested_vlan_features) {
		err = ice_set_vlan_filtering_features(vsi, features);
		if (err)
			return err;
	}

	return 0;
}

/**
 * ice_set_loopback - turn on/off loopback mode on underlying PF
 * @vsi: ptr to VSI
 * @ena: flag to indicate the on/off setting
 */
static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
{
	bool if_running = netif_running(vsi->netdev);
	int ret;

	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
		ret = ice_down(vsi);
		if (ret) {
			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
			return ret;
		}
	}
	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
	if (ret)
		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
	if (if_running)
		ret = ice_up(vsi);

	return ret;
}
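
/* Usage sketch (assumes the generic "loopback" ethtool feature string for
 * NETIF_F_LOOPBACK; illustrative only): MAC loopback is toggled from user
 * space with
 *
 *	ethtool -K eth0 loopback on
 *
 * which reaches ice_set_loopback() via ice_set_features(). If the interface
 * was running, it is brought down around the admin queue call and brought
 * back up afterwards.
 */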

/**
 * ice_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 */
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int ret = 0;

	/* Don't set any netdev advanced features with device in Safe Mode */
	if (ice_is_safe_mode(pf)) {
		dev_err(ice_pf_to_dev(pf),
			"Device is in Safe Mode - not enabling advanced netdev features\n");
		return ret;
	}

	/* Do not change setting during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(pf),
			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
		return -EBUSY;
	}

	/* Multiple features can be changed in one call so keep features in
	 * separate if/else statements to guarantee each feature is checked
	 */
	if (changed & NETIF_F_RXHASH)
		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));

	ret = ice_set_vlan_features(netdev, features);
	if (ret)
		return ret;

	/* Turn on receive of FCS aka CRC, and after setting this
	 * flag the packet data will have the 4 byte CRC appended
	 */
	if (changed & NETIF_F_RXFCS) {
		if ((features & NETIF_F_RXFCS) &&
		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
			dev_err(ice_pf_to_dev(vsi->back),
				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
			return -EIO;
		}

		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
		ret = ice_down_up(vsi);
		if (ret)
			return ret;
	}

	if (changed & NETIF_F_NTUPLE) {
		bool ena = !!(features & NETIF_F_NTUPLE);

		ice_vsi_manage_fdir(vsi, ena);
		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
	}

	/* don't turn off hw_tc_offload when ADQ is already enabled */
	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
		return -EACCES;
	}

	if (changed & NETIF_F_HW_TC) {
		bool ena = !!(features & NETIF_F_HW_TC);

		ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
		      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
	}

	if (changed & NETIF_F_LOOPBACK)
		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));

	return ret;
}

/**
 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
 * @vsi: VSI to setup VLAN properties for
 */
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
	int err;

	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
	if (err)
		return err;

	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
	if (err)
		return err;

	return ice_vsi_add_vlan_zero(vsi);
}

/**
 * ice_vsi_cfg_lan - Setup the VSI LAN related config
 * @vsi: the VSI being configured
 *
 * Return 0 on success and negative value on error
 */
int ice_vsi_cfg_lan(struct ice_vsi *vsi)
{
	int err;

	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		ice_set_rx_mode(vsi->netdev);

		err = ice_vsi_vlan_setup(vsi);
		if (err)
			return err;
	}
	ice_vsi_cfg_dcb_rings(vsi);

	err = ice_vsi_cfg_lan_txqs(vsi);
	if (!err && ice_is_xdp_ena_vsi(vsi))
		err = ice_vsi_cfg_xdp_txqs(vsi);
	if (!err)
		err = ice_vsi_cfg_rxqs(vsi);

	return err;
}

/* THEORY OF MODERATION:
* The ice driver hardware works differently than the hardware that DIMLIB was
 * originally made for. ice hardware doesn't have packet count limits that
 * can trigger an interrupt, but it *does* have interrupt rate limit support,
 * which is hard-coded to a limit of 250,000 ints/second.
 * If not using dynamic moderation, the INTRL value can be modified
 * by ethtool rx-usecs-high.
*/

struct ice_dim {
	/* the throttle rate for interrupts, basically worst case delay before
	 * an initial interrupt fires, value is stored in microseconds.
	 */
	u16 itr;
};

/* Make a different profile for Rx that doesn't allow quite so aggressive
 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
 * second).
*/
static const struct ice_dim rx_profile[] = {
	{ 2 },	/* 500,000 ints/s, capped at 250K by INTRL */
	{ 8 },	/* 125,000 ints/s */
	{ 16 },	/* 62,500 ints/s */
	{ 62 },	/* 16,129 ints/s */
	{ 126 }	/* 7,936 ints/s */
};
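
/* Sanity check on the slot values: with ITR expressed in microseconds, the
 * worst-case rate is roughly 1,000,000 / itr interrupts per second, e.g.
 * 1,000,000 / 8 = 125,000 and 1,000,000 / 126 ~= 7,936. The itr = 2 slot
 * would allow 500,000 ints/s on its own but is clamped to 250,000 by the
 * hard-coded INTRL limit described above.
 */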
/* The transmit profile, which has the same sorts of values
* as the previous struct
*/
static const struct ice_dim tx_profile[] = {
	{ 2 },	/* 500,000 ints/s, capped at 250K by INTRL */
	{ 8 },	/* 125,000 ints/s */
	{ 40 },	/* 25,000 ints/s */
	{ 128 },	/* 7,812 ints/s */
	{ 256 }	/* 3,906 ints/s */
};

static void ice_tx_dim_work(struct work_struct *work)
{
	struct ice_ring_container *rc;
	struct dim *dim;
	u16 itr;

	dim = container_of(work, struct dim, work);
	rc = dim->priv;

	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));

	/* look up the values in our local table */
	itr = tx_profile[dim->profile_ix].itr;

	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
ice_write_itr(rc, itr);
dim->state = DIM_START_MEASURE;
}
static void ice_rx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
struct dim *dim;
2021-09-20 19:30:12 +00:00
u16 itr;
2021-03-31 21:16:57 +00:00
dim = container_of(work, struct dim, work);
2023-07-17 03:11:54 +00:00
rc = dim->priv;
2021-03-31 21:16:57 +00:00
2021-09-20 19:30:12 +00:00
WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
2021-03-31 21:16:57 +00:00
/* look up the values in our local table */
itr = rx_profile[dim->profile_ix].itr;
2021-09-20 19:30:12 +00:00
ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
2021-03-31 21:16:57 +00:00
ice_write_itr(rc, itr);
dim->state = DIM_START_MEASURE;
}
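For context on how these work items get scheduled: the driver feeds a
sample of its interrupt/packet/byte counters to dimlib from the NAPI
poll path, and dimlib queues dim.work when it decides the profile slot
should change. A minimal sketch of that feeding step (the helper name is
hypothetical; dim_update_sample() and net_dim() are the dimlib entry
points, with the sample passed by value on kernels of this vintage):
static void example_feed_dim(struct ice_ring_container *rc, u16 events,
			     u64 packets, u64 bytes)
{
struct dim_sample sample = {};
/* snapshot the counters seen so far on this vector... */
dim_update_sample(events, packets, bytes, &sample);
/* ...and let dimlib compare against its previous sample; if the slot
 * changes it schedules rc->dim.work, which runs
 * ice_tx_dim_work()/ice_rx_dim_work() above
 */
net_dim(&rc->dim, sample);
}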
2021-09-20 19:30:12 +00:00
#define ICE_DIM_DEFAULT_PROFILE_IX 1
/**
 * ice_init_moderation - set up interrupt moderation
 * @q_vector: the vector containing rings to be configured
 *
 * Set up interrupt moderation registers, with the intent to do the right thing
 * when called from reset or from probe, whether or not dynamic moderation is
 * enabled. Take special care to write all the registers in both dynamic and
 * non-dynamic moderation mode to make sure hardware is in a known state.
 */
static void ice_init_moderation(struct ice_q_vector *q_vector)
{
struct ice_ring_container *rc;
bool tx_dynamic, rx_dynamic;
rc = &q_vector->tx;
INIT_WORK(&rc->dim.work, ice_tx_dim_work);
rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
rc->dim.priv = rc;
tx_dynamic = ITR_IS_DYNAMIC(rc);
/* set the initial TX ITR to match the above */
ice_write_itr(rc, tx_dynamic ?
tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
rc = &q_vector->rx;
INIT_WORK(&rc->dim.work, ice_rx_dim_work);
rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
rc->dim.priv = rc;
rx_dynamic = ITR_IS_DYNAMIC(rc);
/* set the initial RX ITR to match the above */
ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
rc->itr_setting);
ice_set_q_vector_intrl(q_vector);
}
2018-03-20 14:58:14 +00:00
/**
 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 */
static void ice_napi_enable_all(struct ice_vsi *vsi)
{
int q_idx;
if (!vsi->netdev)
return;
2019-04-16 17:21:28 +00:00
ice_for_each_q_vector(vsi, q_idx) {
2018-11-29 01:54:10 +00:00
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
2021-09-20 19:30:12 +00:00
ice_init_moderation(q_vector);
2021-03-31 21:16:57 +00:00
2021-08-19 11:59:58 +00:00
if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
2018-11-29 01:54:10 +00:00
napi_enable(&q_vector->napi);
}
2018-03-20 14:58:14 +00:00
}
2018-03-20 14:58:13 +00:00
/**
 * ice_up_complete - Finish the last steps of bringing up a connection
 * @vsi: The VSI being configured
 *
 * Return 0 on success and negative value on error
 */
static int ice_up_complete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int err;
2019-06-26 09:20:25 +00:00
ice_vsi_cfg_msix(vsi);
2018-03-20 14:58:13 +00:00
/* Enable only Rx rings, Tx rings were enabled by the FW when the
 * Tx queue group list was configured and the context bits were
 * programmed using ice_vsi_cfg_txqs
 */
2020-01-22 15:21:29 +00:00
err = ice_vsi_start_all_rx_rings(vsi);
2018-03-20 14:58:13 +00:00
if (err)
return err;
2021-03-02 18:15:37 +00:00
clear_bit(ICE_VSI_DOWN, vsi->state);
2018-03-20 14:58:14 +00:00
ice_napi_enable_all(vsi);
2018-03-20 14:58:13 +00:00
ice_vsi_ena_irq(vsi);
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
2022-12-14 00:01:31 +00:00
vsi->netdev && vsi->type == ICE_VSI_PF) {
2018-03-20 14:58:13 +00:00
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
2022-12-05 19:52:43 +00:00
ice_ptp_link_change(pf, pf->hw.pf_id, true);
2018-03-20 14:58:13 +00:00
}
2022-04-28 21:11:42 +00:00
/* Perform an initial read of the statistics registers now to
 * set the baseline so counters are ready when interface is up
 */
ice_update_eth_stats(vsi);
2022-12-14 00:01:31 +00:00
if (vsi->type == ICE_VSI_PF)
ice_service_task_schedule(pf);
2018-03-20 14:58:13 +00:00
2019-02-27 00:35:07 +00:00
return 0;
2018-03-20 14:58:13 +00:00
}
2018-03-20 14:58:16 +00:00
/**
 * ice_up - Bring the connection back up after being down
 * @vsi: VSI being configured
 */
int ice_up(struct ice_vsi *vsi)
{
int err;
2022-12-21 11:38:15 +00:00
err = ice_vsi_cfg_lan(vsi);
2018-03-20 14:58:16 +00:00
if (!err)
err = ice_up_complete(vsi);
return err;
}
/**
 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
2021-08-19 11:59:58 +00:00
 * @syncp: pointer to u64_stats_sync
 * @stats: stats that pkts and bytes count will be taken from
2018-03-20 14:58:16 +00:00
 * @pkts: packets stats counter
 * @bytes: bytes stats counter
 *
 * This function fetches stats from the ring considering the atomic operations
 * that need to be performed to read u64 values on 32-bit machines.
 */
2022-01-27 15:04:26 +00:00
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
struct ice_q_stats stats, u64 *pkts, u64 *bytes)
2018-03-20 14:58:16 +00:00
{
unsigned int start;
do {
2022-10-26 13:22:14 +00:00
start = u64_stats_fetch_begin(syncp);
2021-08-19 11:59:58 +00:00
*pkts = stats.pkts;
*bytes = stats.bytes;
2022-10-26 13:22:14 +00:00
} while (u64_stats_fetch_retry(syncp, start));
2018-03-20 14:58:16 +00:00
}
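A short usage sketch of the helper above (the caller is hypothetical):
the seqcount retry loop inside makes it safe to read the two u64
counters together even on 32-bit machines where a writer may update
them mid-read:
static void example_read_ring_counters(struct ice_tx_ring *ring,
				       u64 *pkts, u64 *bytes)
{
/* retries internally until it sees a consistent pkts/bytes pair */
ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
			     ring->ring_stats->stats, pkts, bytes);
}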
2020-05-16 00:42:16 +00:00
/**
 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
 * @vsi: the VSI to be updated
2021-11-13 01:06:02 +00:00
 * @vsi_stats: the stats struct to be updated
2020-05-16 00:42:16 +00:00
 * @rings: rings to work on
 * @count: number of rings
 */
static void
2021-11-13 01:06:02 +00:00
ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
struct rtnl_link_stats64 *vsi_stats,
struct ice_tx_ring **rings, u16 count)
2020-05-16 00:42:16 +00:00
{
u16 i;
for (i = 0; i < count; i++) {
2021-08-19 11:59:58 +00:00
struct ice_tx_ring *ring;
u64 pkts = 0, bytes = 0;
2020-05-16 00:42:16 +00:00
ring = READ_ONCE(rings[i]);
ice: Accumulate ring statistics over reset
Resets may occur with or without user interaction. For example, a TX hang
or reconfiguration of parameters will result in a reset. During reset, the
VSI is freed, freeing any statistics structures inside as well. This would
create an issue for the user where a reset happens in the background,
statistics are set to zero, and the user then checks ring statistics
expecting them to be populated.
To ensure this doesn't happen, accumulate ring statistics over reset.
Define a new ring statistics structure, ice_ring_stats. The new structure
lives in the VSI's parent, preserving ring statistics when VSI is freed.
1. Define a new structure vsi_ring_stats in the PF scope
2. Allocate/free stats only during probe, unload, or change in ring size
3. Replace previous ring statistics functionality with new structure
Signed-off-by: Benjamin Mikailenko <benjamin.mikailenko@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
2022-11-18 21:20:02 +00:00
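A minimal sketch of the container this commit describes, based on the
fields the code below touches (syncp, stats, tx_stats/rx_stats); the
exact layout, including the RCU head and the union, is an assumption
about the real structure:
struct ice_ring_stats {
struct rcu_head rcu;	/* defer freeing until RCU readers finish */
struct ice_q_stats stats;	/* pkts/bytes, guarded by syncp */
struct u64_stats_sync syncp;	/* consistent 64-bit reads on 32-bit */
union {
struct ice_txq_stats tx_stats;	/* restart_q, tx_busy, tx_linearize */
struct ice_rxq_stats rx_stats;	/* alloc_buf/alloc_page failures */
};
};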
if (!ring || !ring->ring_stats)
2022-03-07 17:47:39 +00:00
continue;
2022-11-18 21:20:02 +00:00
ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
ring->ring_stats->stats, &pkts,
&bytes);
2020-05-16 00:42:16 +00:00
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
2022-11-18 21:20:02 +00:00
vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
2020-05-16 00:42:16 +00:00
}
}
2018-03-20 14:58:16 +00:00
/**
 * ice_update_vsi_ring_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
2022-11-18 21:20:01 +00:00
struct rtnl_link_stats64 *net_stats, *stats_prev;
2021-11-13 01:06:02 +00:00
struct rtnl_link_stats64 *vsi_stats;
2024-02-27 14:31:06 +00:00
struct ice_pf *pf = vsi->back;
2018-03-20 14:58:16 +00:00
u64 pkts, bytes;
int i;
2021-11-13 01:06:02 +00:00
vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
if (!vsi_stats)
return;
2018-03-20 14:58:16 +00:00
/* reset non-netdev (extended) stats */
vsi->tx_restart = 0;
vsi->tx_busy = 0;
vsi->tx_linearize = 0;
vsi->rx_buf_failed = 0;
vsi->rx_page_failed = 0;
rcu_read_lock();
/* update Tx rings counters */
2021-11-13 01:06:02 +00:00
ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
vsi->num_txq);
2018-03-20 14:58:16 +00:00
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
2021-08-19 11:59:58 +00:00
struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
2022-11-18 21:20:02 +00:00
struct ice_ring_stats *ring_stats;
2021-05-06 15:40:07 +00:00
2022-11-18 21:20:02 +00:00
ring_stats = ring->ring_stats;
ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
ring_stats->stats, &pkts,
&bytes);
2018-03-20 14:58:16 +00:00
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
2022-11-18 21:20:02 +00:00
vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
2018-03-20 14:58:16 +00:00
}
2020-05-16 00:42:16 +00:00
/* update XDP Tx rings counters */
if (ice_is_xdp_ena_vsi(vsi))
2021-11-13 01:06:02 +00:00
ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
2020-05-16 00:42:16 +00:00
vsi->num_xdp_txq);
2018-03-20 14:58:16 +00:00
rcu_read_unlock();
2021-11-13 01:06:02 +00:00
2022-11-18 21:20:01 +00:00
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
2024-02-27 14:31:06 +00:00
/* Update netdev counters, but keep in mind that values could start at
 * a random value after PF reset. And as we increase the reported stat
 * by the diff of Cur - Prev, we need to be sure that Prev is valid. If
 * it's not, let's skip this round.
 */
if (likely(pf->stat_prev_loaded)) {
net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
2022-11-18 21:20:01 +00:00
}
stats_prev->tx_packets = vsi_stats->tx_packets;
stats_prev->tx_bytes = vsi_stats->tx_bytes;
stats_prev->rx_packets = vsi_stats->rx_packets;
stats_prev->rx_bytes = vsi_stats->rx_bytes;
2021-11-13 01:06:02 +00:00
kfree(vsi_stats);
2018-03-20 14:58:16 +00:00
}
/**
 * ice_update_vsi_stats - Update VSI stats counters
 * @vsi: the VSI to be updated
 */
2019-07-25 09:53:50 +00:00
void ice_update_vsi_stats(struct ice_vsi *vsi)
2018-03-20 14:58:16 +00:00
{
struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
struct ice_eth_stats *cur_es = &vsi->eth_stats;
struct ice_pf *pf = vsi->back;
2021-03-02 18:15:37 +00:00
if (test_bit(ICE_VSI_DOWN, vsi->state) ||
2021-03-02 18:15:38 +00:00
test_bit(ICE_CFG_BUSY, pf->state))
2018-03-20 14:58:16 +00:00
return;
/* get stats as recorded by Tx/Rx rings */
ice_update_vsi_ring_stats(vsi);
/* get VSI stats as recorded by the hardware */
ice_update_eth_stats(vsi);
cur_ns->tx_errors = cur_es->tx_errors;
2021-03-25 22:35:16 +00:00
cur_ns->rx_dropped = cur_es->rx_discards;
2018-03-20 14:58:16 +00:00
cur_ns->tx_dropped = cur_es->tx_discards;
cur_ns->multicast = cur_es->rx_multicast;
/* update some more netdev stats if this is main VSI */
if (vsi->type == ICE_VSI_PF) {
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
2020-05-16 00:36:44 +00:00
pf->stats.illegal_bytes +
pf->stats.rx_undersize +
pf->hw_csum_rx_error +
pf->stats.rx_jabber +
pf->stats.rx_fragments +
pf->stats.rx_oversize;
2019-06-26 09:20:22 +00:00
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
2018-03-20 14:58:16 +00:00
}
}
/**
 * ice_update_pf_stats - Update PF port stats counters
 * @pf: PF whose stats need to be updated
 */
2019-07-25 09:53:50 +00:00
void ice_update_pf_stats(struct ice_pf *pf)
2018-03-20 14:58:16 +00:00
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
2020-05-12 01:01:41 +00:00
u16 fd_ctr_base;
2019-07-25 09:53:53 +00:00
u8 port;
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
port = hw->port_info->lport;
2018-03-20 14:58:16 +00:00
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
2022-11-18 21:20:01 +00:00
if (ice_is_reset_in_progress(pf->state))
pf->stat_prev_loaded = false;
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->eth.rx_bytes,
2018-03-20 14:58:16 +00:00
&cur_ps->eth.rx_bytes);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->eth.rx_unicast,
2018-03-20 14:58:16 +00:00
&cur_ps->eth.rx_unicast);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->eth.rx_multicast,
2018-03-20 14:58:16 +00:00
&cur_ps->eth.rx_multicast);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->eth.rx_broadcast,
2018-03-20 14:58:16 +00:00
&cur_ps->eth.rx_broadcast);
2019-06-26 09:20:22 +00:00
ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
&prev_ps->eth.rx_discards,
&cur_ps->eth.rx_discards);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->eth.tx_bytes,
2018-03-20 14:58:16 +00:00
&cur_ps->eth.tx_bytes);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->eth.tx_unicast,
2018-03-20 14:58:16 +00:00
&cur_ps->eth.tx_unicast);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->eth.tx_multicast,
2018-03-20 14:58:16 +00:00
&cur_ps->eth.tx_multicast);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->eth.tx_broadcast,
2018-03-20 14:58:16 +00:00
&cur_ps->eth.tx_broadcast);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->tx_dropped_link_down,
&cur_ps->tx_dropped_link_down);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->rx_size_64, &cur_ps->rx_size_64);
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->rx_size_127, &cur_ps->rx_size_127);
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->rx_size_255, &cur_ps->rx_size_255);
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->rx_size_511, &cur_ps->rx_size_511);
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->rx_size_big, &cur_ps->rx_size_big);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->tx_size_64, &cur_ps->tx_size_64);
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->tx_size_127, &cur_ps->tx_size_127);
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->tx_size_255, &cur_ps->tx_size_255);
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
2019-06-26 09:20:13 +00:00
&prev_ps->tx_size_511, &cur_ps->tx_size_511);
2018-03-20 14:58:16 +00:00
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
2019-07-25 09:53:53 +00:00
ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
2020-05-12 01:01:41 +00:00
fd_ctr_base = hw->fd_ctr_base;
ice_stat_update40(hw,
GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
pf->stat_prev_loaded, &prev_ps->fd_sb_match,
&cur_ps->fd_sb_match);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
2019-02-28 23:24:29 +00:00
ice_update_dcb_stats(pf);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->crc_errors, &cur_ps->crc_errors);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->mac_local_faults,
&cur_ps->mac_local_faults);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->rx_fragments, &cur_ps->rx_fragments);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->rx_oversize, &cur_ps->rx_oversize);
2019-07-25 09:53:53 +00:00
ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
2018-03-20 14:58:16 +00:00
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
2020-05-12 01:01:41 +00:00
cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
2018-03-20 14:58:16 +00:00
pf->stat_prev_loaded = true;
}
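The ice_stat_update40()/ice_stat_update32() calls above accumulate
deltas from free-running hardware counters that are not cleared on PF
reset. A minimal sketch of the 40-bit variant's core logic (a sketch of
the idea, not necessarily the driver's exact implementation):
static void example_stat_update40(u64 new_data, bool prev_loaded,
				  u64 *prev_stat, u64 *cur_stat)
{
/* the first read after load/reset only establishes the baseline, so
 * counts from before the driver started are not reported
 */
if (!prev_loaded) {
*prev_stat = new_data;
return;
}
/* accumulate the delta, handling rollover of the 40-bit counter */
if (new_data >= *prev_stat)
*cur_stat += new_data - *prev_stat;
else
*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
*prev_stat = new_data;
}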
/**
 * ice_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */
static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct rtnl_link_stats64 *vsi_stats;
struct ice_vsi *vsi = np->vsi;
vsi_stats = &vsi->net_stats;
2019-08-08 14:39:28 +00:00
if (!vsi->num_txq || !vsi->num_rxq)
2018-03-20 14:58:16 +00:00
return;
2019-08-08 14:39:28 +00:00
2018-03-20 14:58:16 +00:00
/* netdev packet/byte stats come from ring counters. These are obtained
 * by summing up ring counters (done by ice_update_vsi_ring_stats).
2019-08-08 14:39:28 +00:00
 * But, only call the update routine and read the registers if VSI is
 * not down.
2018-03-20 14:58:16 +00:00
 */
2021-03-02 18:15:37 +00:00
if (!test_bit(ICE_VSI_DOWN, vsi->state))
2019-08-08 14:39:28 +00:00
ice_update_vsi_ring_stats(vsi);
2018-03-20 14:58:16 +00:00
stats->tx_packets = vsi_stats->tx_packets;
stats->tx_bytes = vsi_stats->tx_bytes;
stats->rx_packets = vsi_stats->rx_packets;
stats->rx_bytes = vsi_stats->rx_bytes;
/* The rest of the stats can be read from the hardware but instead we
 * just return values that the watchdog task has already obtained from
 * the hardware.
 */
stats->multicast = vsi_stats->multicast;
stats->tx_errors = vsi_stats->tx_errors;
stats->tx_dropped = vsi_stats->tx_dropped;
stats->rx_errors = vsi_stats->rx_errors;
stats->rx_dropped = vsi_stats->rx_dropped;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
}
2018-03-20 14:58:14 +00:00
/**
 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: VSI having NAPI disabled
 */
static void ice_napi_disable_all(struct ice_vsi *vsi)
{
int q_idx;
if (!vsi->netdev)
return;
2019-02-28 23:25:53 +00:00
ice_for_each_q_vector(vsi, q_idx) {
2018-11-29 01:54:10 +00:00
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
2021-08-19 11:59:58 +00:00
if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
2018-11-29 01:54:10 +00:00
napi_disable(&q_vector->napi);
2021-03-31 21:16:57 +00:00
cancel_work_sync(&q_vector->tx.dim.work);
cancel_work_sync(&q_vector->rx.dim.work);
2018-11-29 01:54:10 +00:00
}
2018-03-20 14:58:14 +00:00
}
2024-02-23 16:06:27 +00:00
/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
static void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 val;
int i;
/* disable interrupt causation from each Rx queue; Tx queues are
 * handled in ice_vsi_stop_tx_ring()
 */
if (vsi->rx_rings) {
ice_for_each_rxq(vsi, i) {
if (vsi->rx_rings[i]) {
u16 reg;
reg = vsi->rx_rings[i]->reg_idx;
val = rd32(hw, QINT_RQCTL(reg));
val &= ~QINT_RQCTL_CAUSE_ENA_M;
wr32(hw, QINT_RQCTL(reg), val);
}
}
}
/* disable each interrupt */
ice_for_each_q_vector(vsi, i) {
if (!vsi->q_vectors[i])
continue;
wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
}
ice_flush(hw);
/* don't call synchronize_irq() for VF's from the host */
if (vsi->type == ICE_VSI_VF)
return;
ice_for_each_q_vector(vsi, i)
synchronize_irq(vsi->q_vectors[i]->irq.virq);
}
2018-03-20 14:58:13 +00:00
/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
2021-10-26 00:08:25 +00:00
 *
 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
2018-03-20 14:58:13 +00:00
 */
2018-03-20 14:58:16 +00:00
int ice_down(struct ice_vsi *vsi)
2018-03-20 14:58:13 +00:00
{
2022-08-26 08:31:23 +00:00
int i, tx_err, rx_err, vlan_err = 0;
2018-03-20 14:58:13 +00:00
2021-10-26 00:08:25 +00:00
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
2021-08-20 00:08:57 +00:00
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
2021-12-02 16:38:46 +00:00
vlan_err = ice_vsi_del_vlan_zero(vsi);
2022-12-05 19:52:43 +00:00
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
2018-03-20 14:58:13 +00:00
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
2021-08-20 00:08:57 +00:00
} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
ice_eswitch_stop_all_tx_queues(vsi->back);
2018-03-20 14:58:13 +00:00
}
ice_vsi_dis_irq(vsi);
2018-12-19 18:03:27 +00:00
tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
2018-09-20 00:23:05 +00:00
if (tx_err)
2020-02-06 09:20:10 +00:00
netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
2018-09-20 00:23:05 +00:00
vsi->vsi_num, tx_err);
2019-11-04 17:38:56 +00:00
if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
if (tx_err)
2020-02-06 09:20:10 +00:00
netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
2019-11-04 17:38:56 +00:00
vsi->vsi_num, tx_err);
}
2018-09-20 00:23:05 +00:00
2020-01-22 15:21:29 +00:00
rx_err = ice_vsi_stop_all_rx_rings(vsi);
2018-09-20 00:23:05 +00:00
if (rx_err)
2020-02-06 09:20:10 +00:00
netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
2018-09-20 00:23:05 +00:00
vsi->vsi_num, rx_err);
2018-03-20 14:58:14 +00:00
ice_napi_disable_all(vsi);
2018-03-20 14:58:13 +00:00
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
2023-06-06 10:33:58 +00:00
if (ice_is_xdp_ena_vsi(vsi))
ice_for_each_xdp_txq(vsi, i)
ice_clean_tx_ring(vsi->xdp_rings[i]);
2018-03-20 14:58:13 +00:00
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
2022-08-26 08:31:23 +00:00
if (tx_err || rx_err || vlan_err) {
2020-02-06 09:20:10 +00:00
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
2018-03-20 14:58:13 +00:00
vsi->vsi_num, vsi->vsw->sw_id);
2018-09-20 00:23:05 +00:00
return -EIO;
}
return 0;
2018-03-20 14:58:13 +00:00
}
2022-07-27 07:24:05 +00:00
/**
 * ice_down_up - shutdown the VSI connection and bring it up
 * @vsi: the VSI to be reconnected
 */
int ice_down_up(struct ice_vsi *vsi)
{
int ret;
/* if DOWN already set, nothing to do */
if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
return 0;
ret = ice_down(vsi);
if (ret)
return ret;
ret = ice_up(vsi);
if (ret) {
netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
return ret;
}
return 0;
}
2018-03-20 14:58:13 +00:00
/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
2019-04-16 17:30:43 +00:00
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
2018-03-20 14:58:13 +00:00
{
2018-08-09 13:29:01 +00:00
int i, err = 0;
2018-03-20 14:58:13 +00:00
if (!vsi->num_txq) {
2020-02-06 09:20:09 +00:00
dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
2018-03-20 14:58:13 +00:00
vsi->vsi_num);
return -EINVAL;
}
ice_for_each_txq(vsi, i) {
2021-08-19 11:59:58 +00:00
struct ice_tx_ring *ring = vsi->tx_rings[i];
2019-10-09 14:09:47 +00:00
if (!ring)
return -EINVAL;
2021-08-20 00:08:56 +00:00
if (vsi->netdev)
ring->netdev = vsi->netdev;
2019-10-09 14:09:47 +00:00
err = ice_setup_tx_ring(ring);
2018-03-20 14:58:13 +00:00
if (err)
break;
}
return err;
}
/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
2019-04-16 17:30:43 +00:00
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
2018-03-20 14:58:13 +00:00
{
2018-08-09 13:29:01 +00:00
int i, err = 0;
2018-03-20 14:58:13 +00:00
if (!vsi->num_rxq) {
2020-02-06 09:20:09 +00:00
dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
2018-03-20 14:58:13 +00:00
vsi->vsi_num);
return -EINVAL;
}
ice_for_each_rxq(vsi, i) {
2021-08-19 11:59:58 +00:00
struct ice_rx_ring *ring = vsi->rx_rings[i];
2019-10-09 14:09:47 +00:00
if (!ring)
return -EINVAL;
2021-08-20 00:08:56 +00:00
if (vsi->netdev)
ring->netdev = vsi->netdev;
2019-10-09 14:09:47 +00:00
err = ice_setup_rx_ring(ring);
2018-03-20 14:58:13 +00:00
if (err)
break;
}
return err;
}
2020-05-12 01:01:40 +00:00
/**
 * ice_vsi_open_ctrl - open control VSI for use
 * @vsi: the VSI to open
 *
 * Initialization of the Control VSI
 *
 * Returns 0 on success, negative value on error
 */
int ice_vsi_open_ctrl(struct ice_vsi *vsi)
{
char int_name[ICE_INT_NAME_STR_LEN];
struct ice_pf *pf = vsi->back;
struct device *dev;
int err;
dev = ice_pf_to_dev(pf);
/* allocate descriptors */
err = ice_vsi_setup_tx_rings(vsi);
if (err)
goto err_setup_tx;
err = ice_vsi_setup_rx_rings(vsi);
if (err)
goto err_setup_rx;
2022-12-21 11:38:15 +00:00
err = ice_vsi_cfg_lan(vsi);
2020-05-12 01:01:40 +00:00
if (err)
goto err_setup_rx;
snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
dev_driver_string(dev), dev_name(dev));
err = ice_vsi_req_irq_msix(vsi, int_name);
if (err)
goto err_setup_rx;
ice_vsi_cfg_msix(vsi);
err = ice_vsi_start_all_rx_rings(vsi);
if (err)
goto err_up_complete;
2021-03-02 18:15:37 +00:00
clear_bit(ICE_VSI_DOWN, vsi->state);
2020-05-12 01:01:40 +00:00
ice_vsi_ena_irq(vsi);
return 0;
err_up_complete:
ice_down(vsi);
err_setup_rx:
ice_vsi_free_rx_rings(vsi);
err_setup_tx:
ice_vsi_free_tx_rings(vsi);
return err;
}

/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg_lan(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);

	if (vsi->type == ICE_VSI_PF) {
		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
		if (err)
			goto err_set_qs;

		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
		if (err)
			goto err_set_qs;
	}

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}
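
/* Both open paths above use the kernel's reverse-unwind goto idiom: each
 * setup step jumps to a label that releases only what was acquired before
 * it, so the labels run in reverse order of acquisition. A minimal sketch
 * of the pattern (alloc_a/alloc_b and the labels are hypothetical, not
 * driver symbols):
 *
 *	err = alloc_a();
 *	if (err)
 *		goto err_a;
 *	err = alloc_b();
 *	if (err)
 *		goto err_b;
 *	return 0;
 * err_b:
 *	free_a();
 * err_a:
 *	return err;
 */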

/**
 * ice_vsi_release_all - Delete all VSIs
 * @pf: PF from which all VSIs are being removed
 */
static void ice_vsi_release_all(struct ice_pf *pf)
{
	int err, i;

	if (!pf->vsi)
		return;

	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;

		if (pf->vsi[i]->type == ICE_VSI_CHNL)
			continue;

		err = ice_vsi_release(pf->vsi[i]);
		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
				i, err, pf->vsi[i]->vsi_num);
	}
}

/**
 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
 * @pf: pointer to the PF instance
 * @type: VSI type to rebuild
 *
 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
 */
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int i, err;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi || vsi->type != type)
			continue;

		/* rebuild the VSI */
		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
		if (err) {
			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		/* Re-map HW VSI number, using VSI handle that has been
		 * previously validated in ice_replay_vsi() call above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* enable the VSI */
		err = ice_ena_vsi(vsi, false);
		if (err) {
			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
				err, vsi->idx, ice_vsi_type_str(type));
			return err;
		}

		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
			 ice_vsi_type_str(type));
	}

	return 0;
}

/**
 * ice_update_pf_netdev_link - Update PF netdev link status
 * @pf: pointer to the PF instance
 */
static void ice_update_pf_netdev_link(struct ice_pf *pf)
{
	bool link_up;
	int i;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi || vsi->type != ICE_VSI_PF)
			return;

		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
		if (link_up) {
			netif_carrier_on(pf->vsi[i]->netdev);
			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
		} else {
			netif_carrier_off(pf->vsi[i]->netdev);
			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
		}
	}
}

/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 * @reset_type: type of reset
 *
 * Do not rebuild VF VSI in this flow because that is already handled via
 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
 * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
 * to reset/rebuild all the VF VSI twice.
 */
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool dvm;
	int err;

	if (test_bit(ICE_DOWN, pf->state))
		goto clear_recovery;

	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);

#define ICE_EMP_RESET_SLEEP_MS 5000
	if (reset_type == ICE_RESET_EMPR) {
		/* If an EMP reset has occurred, any previously pending flash
		 * update will have completed. We no longer know whether or
		 * not the NVM update EMP reset is restricted.
		 */
		pf->fw_emp_reset_disabled = false;

		msleep(ICE_EMP_RESET_SLEEP_MS);
	}
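
	/* Operator-side sketch of the flow that ends in this EMPR path
	 * (devlink commands as documented for the reload interface; the
	 * PCI address and file name are illustrative):
	 *
	 *   $ devlink dev flash pci/0000:af:00.0 file firmware.bin
	 *   $ devlink dev reload pci/0000:af:00.0 action fw_activate
	 *
	 * The reload triggers the EMP reset handled above, and the sleep
	 * gives firmware time to settle before the control queues are
	 * reinitialized below.
	 */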

	err = ice_init_all_ctrlq(hw);
	if (err) {
		dev_err(dev, "control queues init failed %d\n", err);
		goto err_init_ctrlq;
	}

	/* if DDP was previously loaded successfully */
	if (!ice_is_safe_mode(pf)) {
		/* reload the SW DB of filter tables */
		if (reset_type == ICE_RESET_PFR)
			ice_fill_blk_tbls(hw);
		else
			/* Reload DDP Package after CORER/GLOBR reset */
			ice_load_pkg(NULL, pf);
	}

	err = ice_clear_pf_cfg(hw);
	if (err) {
		dev_err(dev, "clear PF configuration failed %d\n", err);
		goto err_init_ctrlq;
	}

	ice_clear_pxe_mode(hw);

	err = ice_init_nvm(hw);
	if (err) {
		dev_err(dev, "ice_init_nvm failed %d\n", err);
		goto err_init_ctrlq;
	}

	err = ice_get_caps(hw);
	if (err) {
		dev_err(dev, "ice_get_caps failed %d\n", err);
		goto err_init_ctrlq;
	}

	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (err) {
		dev_err(dev, "set_mac_cfg failed %d\n", err);
		goto err_init_ctrlq;
	}

	dvm = ice_is_dvm_ena(hw);

	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
	if (err)
		goto err_init_ctrlq;

	err = ice_sched_init_port(hw->port_info);
	if (err)
		goto err_sched_init_port;

	/* start misc vector */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "misc vector setup failed: %d\n", err);
		goto err_sched_init_port;
	}

	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
		if (!rd32(hw, PFQF_FD_SIZE)) {
			u16 unused, guar, b_effort;

			guar = hw->func_caps.fd_fltr_guar;
			b_effort = hw->func_caps.fd_fltr_best_effort;

			/* force guaranteed filter pool for PF */
			ice_alloc_fd_guar_item(hw, &unused, guar);
			/* force shared filter pool for PF */
			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
		}
	}

	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		ice_dcb_rebuild(pf);

	/* If the PF previously had enabled PTP, PTP init needs to happen before
	 * the VSI rebuild. If not, this causes the PTP link status events to
	 * fail.
	 */
	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_rebuild(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_init(pf);

	/* rebuild PF VSI */
	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
	if (err) {
		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
		goto err_vsi_rebuild;
	}

	err = ice_eswitch_rebuild(pf);
	if (err) {
		dev_err(dev, "Switchdev rebuild failed: %d\n", err);
		goto err_vsi_rebuild;
	}

	if (reset_type == ICE_RESET_PFR) {
		err = ice_rebuild_channels(pf);
		if (err) {
			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
				err);
			goto err_vsi_rebuild;
		}
	}

	/* If Flow Director is active */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
		if (err) {
			dev_err(dev, "control VSI rebuild failed: %d\n", err);
			goto err_vsi_rebuild;
		}

		/* replay HW Flow Director recipes */
		if (hw->fdir_prof)
			ice_fdir_replay_flows(hw);

		/* replay Flow Director filters */
		ice_fdir_replay_fltrs(pf);

		ice_rebuild_arfs(pf);
	}

	ice_update_pf_netdev_link(pf);

	/* tell the firmware we are up */
	err = ice_send_version(pf);
	if (err) {
		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
			err);
		goto err_vsi_rebuild;
	}

	ice_replay_post(hw);

	/* if we get here, reset flow is successful */
	clear_bit(ICE_RESET_FAILED, pf->state);

	ice_plug_aux_dev(pf);

	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
		ice_lag_rebuild(pf);

	/* Restore timestamp mode settings after VSI rebuild */
	ice_ptp_restore_timestamp_mode(pf);

	return;

err_vsi_rebuild:
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw);
	set_bit(ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}

/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct bpf_prog *prog;
	u8 count = 0;
	int err = 0;

	if (new_mtu == (int)netdev->mtu) {
		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
		return 0;
	}

	prog = vsi->xdp_prog;
	if (prog && !prog->aux->xdp_has_frags) {
		int frame_size = ice_max_xdp_frame_size(vsi);

		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
			netdev_err(netdev, "max MTU for XDP usage is %d\n",
				   frame_size - ICE_ETH_PKT_HDR_PAD);
			return -EINVAL;
		}
	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
			return -EINVAL;
		}
	}

	/* if a reset is in progress, wait for some time for it to complete */
	do {
		if (ice_is_reset_in_progress(pf->state)) {
			count++;
			usleep_range(1000, 2000);
		} else {
			break;
		}
	} while (count < 100);

	if (count == 100) {
		netdev_err(netdev, "can't change MTU. Device is busy\n");
		return -EBUSY;
	}

	netdev->mtu = (unsigned int)new_mtu;
	err = ice_down_up(vsi);
	if (err)
		return err;

	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);

	return err;
}
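
/* Userspace reaches this handler through the stack's ndo_change_mtu hook;
 * a typical invocation (interface name illustrative) is:
 *
 *   $ ip link set dev eth0 mtu 3000
 *
 * which, if the checks above pass, bounces the interface via ice_down_up()
 * so the rings are re-sized for the new frame size.
 */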

/**
 * ice_eth_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 */
static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ice_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return ice_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
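
/* A minimal userspace sketch of exercising this handler with the standard
 * SIOCSHWTSTAMP API from <linux/net_tstamp.h> (the socket fd and interface
 * name are illustrative assumptions, not driver code):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 *
 * The request lands in ice_ptp_set_ts_config(), which applies the requested
 * Tx/Rx timestamping mode to the hardware.
 */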

/**
 * ice_aq_str - convert AQ err code to a string
 * @aq_err: the AQ error code to convert
 */
const char *ice_aq_str(enum ice_aq_err aq_err)
{
	switch (aq_err) {
	case ICE_AQ_RC_OK:
		return "OK";
	case ICE_AQ_RC_EPERM:
		return "ICE_AQ_RC_EPERM";
	case ICE_AQ_RC_ENOENT:
		return "ICE_AQ_RC_ENOENT";
	case ICE_AQ_RC_ENOMEM:
		return "ICE_AQ_RC_ENOMEM";
	case ICE_AQ_RC_EBUSY:
		return "ICE_AQ_RC_EBUSY";
	case ICE_AQ_RC_EEXIST:
		return "ICE_AQ_RC_EEXIST";
	case ICE_AQ_RC_EINVAL:
		return "ICE_AQ_RC_EINVAL";
	case ICE_AQ_RC_ENOSPC:
		return "ICE_AQ_RC_ENOSPC";
	case ICE_AQ_RC_ENOSYS:
		return "ICE_AQ_RC_ENOSYS";
	case ICE_AQ_RC_EMODE:
		return "ICE_AQ_RC_EMODE";
	case ICE_AQ_RC_ENOSEC:
		return "ICE_AQ_RC_ENOSEC";
	case ICE_AQ_RC_EBADSIG:
		return "ICE_AQ_RC_EBADSIG";
	case ICE_AQ_RC_ESVN:
		return "ICE_AQ_RC_ESVN";
	case ICE_AQ_RC_EBADMAN:
		return "ICE_AQ_RC_EBADMAN";
	case ICE_AQ_RC_EBADBUF:
		return "ICE_AQ_RC_EBADBUF";
	}

	return "ICE_AQ_RC_UNKNOWN";
}
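
/* Call-site pattern: pair the numeric status with ice_aq_str() on the last
 * AdminQ send-queue status, exactly as the RSS helpers below do:
 *
 *	dev_err(dev, "... err %d aq_err %s\n",
 *		status, ice_aq_str(hw->adminq.sq_last_status));
 */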
2018-03-20 14:58:15 +00:00
/**
2021-03-02 18:15:36 +00:00
* ice_set_rss_lut - Set RSS LUT
2018-03-20 14:58:15 +00:00
* @ vsi : Pointer to VSI structure
* @ lut : Lookup table
* @ lut_size : Lookup table size
*
* Returns 0 on success , negative on failure
*/
2021-03-02 18:15:36 +00:00
int ice_set_rss_lut ( struct ice_vsi * vsi , u8 * lut , u16 lut_size )
2018-03-20 14:58:15 +00:00
{
2021-03-02 18:15:36 +00:00
struct ice_aq_get_set_rss_lut_params params = { } ;
struct ice_hw * hw = & vsi - > back - > hw ;
2021-10-07 22:56:57 +00:00
int status ;
2018-03-20 14:58:15 +00:00
2021-03-02 18:15:36 +00:00
if ( ! lut )
return - EINVAL ;
2018-03-20 14:58:15 +00:00
2021-03-02 18:15:36 +00:00
params . vsi_handle = vsi - > idx ;
params . lut_size = lut_size ;
params . lut_type = vsi - > rss_lut_type ;
params . lut = lut ;
2018-03-20 14:58:15 +00:00
2021-03-02 18:15:36 +00:00
status = ice_aq_set_rss_lut ( hw , & params ) ;
2021-10-07 23:01:58 +00:00
if ( status )
2021-10-07 22:56:02 +00:00
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Cannot set RSS lut, err %d aq_err %s \n " ,
2021-10-07 22:59:03 +00:00
status , ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
2018-03-20 14:58:15 +00:00
2021-10-07 23:01:58 +00:00
return status ;
2021-03-02 18:15:36 +00:00
}
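For illustration, a typical caller spreads the active queues across the
table round-robin before applying it. A sketch under assumed names
("num_qs" is supplied by the caller and must be non-zero); the driver's
own ice_fill_rss_lut() in ice_lib.c performs the equivalent loop:

/* Sketch: fill a LUT round-robin across num_qs queues, then apply it */
static int example_apply_default_lut(struct ice_vsi *vsi, u8 *lut,
				     u16 lut_size, u16 num_qs)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % num_qs;	/* queue index for each hash bucket */

	return ice_set_rss_lut(vsi, lut, lut_size);
}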
2021-03-02 18:15:35 +00:00
2021-03-02 18:15:36 +00:00
/**
* ice_set_rss_key - Set RSS key
* @ vsi : Pointer to the VSI structure
* @ seed : RSS hash seed
*
* Returns 0 on success , negative on failure
*/
int ice_set_rss_key ( struct ice_vsi * vsi , u8 * seed )
{
struct ice_hw * hw = & vsi - > back - > hw ;
2021-10-07 22:56:57 +00:00
int status ;
2021-03-02 18:15:36 +00:00
if ( ! seed )
return - EINVAL ;
status = ice_aq_set_rss_key ( hw , vsi - > idx , ( struct ice_aqc_get_set_rss_keys * ) seed ) ;
2021-10-07 23:01:58 +00:00
if ( status )
2021-10-07 22:56:02 +00:00
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Cannot set RSS key, err %d aq_err %s \n " ,
2021-10-07 22:59:03 +00:00
status , ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
2018-03-20 14:58:15 +00:00
2021-10-07 23:01:58 +00:00
return status ;
2018-03-20 14:58:15 +00:00
}
/**
2021-03-02 18:15:36 +00:00
* ice_get_rss_lut - Get RSS LUT
2018-03-20 14:58:15 +00:00
* @ vsi : Pointer to VSI structure
* @ lut : Buffer to store the lookup table entries
* @ lut_size : Size of buffer to store the lookup table entries
*
* Returns 0 on success , negative on failure
*/
2021-03-02 18:15:36 +00:00
int ice_get_rss_lut ( struct ice_vsi * vsi , u8 * lut , u16 lut_size )
2018-03-20 14:58:15 +00:00
{
2021-03-02 18:15:36 +00:00
struct ice_aq_get_set_rss_lut_params params = { } ;
struct ice_hw * hw = & vsi - > back - > hw ;
2021-10-07 22:56:57 +00:00
int status ;
2018-03-20 14:58:15 +00:00
2021-03-02 18:15:36 +00:00
if ( ! lut )
return - EINVAL ;
2018-03-20 14:58:15 +00:00
2021-03-02 18:15:36 +00:00
params . vsi_handle = vsi - > idx ;
params . lut_size = lut_size ;
params . lut_type = vsi - > rss_lut_type ;
params . lut = lut ;
status = ice_aq_get_rss_lut ( hw , & params ) ;
2021-10-07 23:01:58 +00:00
if ( status )
2021-10-07 22:56:02 +00:00
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Cannot get RSS lut, err %d aq_err %s \n " ,
2021-10-07 22:59:03 +00:00
status , ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
2018-03-20 14:58:15 +00:00
2021-10-07 23:01:58 +00:00
return status ;
2021-03-02 18:15:36 +00:00
}
2021-03-02 18:15:35 +00:00
2021-03-02 18:15:36 +00:00
/**
* ice_get_rss_key - Get RSS key
* @ vsi : Pointer to VSI structure
* @ seed : Buffer to store the key in
*
* Returns 0 on success , negative on failure
*/
int ice_get_rss_key ( struct ice_vsi * vsi , u8 * seed )
{
struct ice_hw * hw = & vsi - > back - > hw ;
2021-10-07 22:56:57 +00:00
int status ;
2021-03-02 18:15:36 +00:00
if ( ! seed )
return - EINVAL ;
status = ice_aq_get_rss_key ( hw , vsi - > idx , ( struct ice_aqc_get_set_rss_keys * ) seed ) ;
2021-10-07 23:01:58 +00:00
if ( status )
2021-10-07 22:56:02 +00:00
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Cannot get RSS key, err %d aq_err %s \n " ,
2021-10-07 22:59:03 +00:00
status , ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
2018-03-20 14:58:15 +00:00
2021-10-07 23:01:58 +00:00
return status ;
2018-03-20 14:58:15 +00:00
}
2023-12-13 00:33:20 +00:00
/**
* ice_set_rss_hfunc - Set RSS HASH function
* @ vsi : Pointer to VSI structure
* @ hfunc : hash function ( ICE_AQ_VSI_Q_OPT_RSS_ * )
*
* Returns 0 on success , negative on failure
*/
int ice_set_rss_hfunc ( struct ice_vsi * vsi , u8 hfunc )
{
struct ice_hw * hw = & vsi - > back - > hw ;
struct ice_vsi_ctx * ctx ;
bool symm ;
int err ;
if ( hfunc = = vsi - > rss_hfunc )
return 0 ;
if ( hfunc ! = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ & &
hfunc ! = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ )
return - EOPNOTSUPP ;
ctx = kzalloc ( sizeof ( * ctx ) , GFP_KERNEL ) ;
if ( ! ctx )
return - ENOMEM ;
ctx - > info . valid_sections = cpu_to_le16 ( ICE_AQ_VSI_PROP_Q_OPT_VALID ) ;
ctx - > info . q_opt_rss = vsi - > info . q_opt_rss ;
ctx - > info . q_opt_rss & = ~ ICE_AQ_VSI_Q_OPT_RSS_HASH_M ;
ctx - > info . q_opt_rss | =
FIELD_PREP ( ICE_AQ_VSI_Q_OPT_RSS_HASH_M , hfunc ) ;
ctx - > info . q_opt_tc = vsi - > info . q_opt_tc ;
ctx - > info . q_opt_flags = vsi - > info . q_opt_rss ;
err = ice_update_vsi ( hw , vsi - > idx , ctx , NULL ) ;
if ( err ) {
dev_err ( ice_pf_to_dev ( vsi - > back ) , " Failed to configure RSS hash for VSI %d, error %d \n " ,
vsi - > vsi_num , err ) ;
} else {
vsi - > info . q_opt_rss = ctx - > info . q_opt_rss ;
vsi - > rss_hfunc = hfunc ;
netdev_info ( vsi - > netdev , " Hash function set to: %sToeplitz \n " ,
hfunc = = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
" Symmetric " : " " ) ;
}
kfree ( ctx ) ;
if ( err )
return err ;
/* Fix the symmetry setting for all existing RSS configurations */
symm = ! ! ( hfunc = = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ) ;
return ice_set_rss_cfg_symm ( hw , vsi , symm ) ;
}
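FIELD_PREP() from <linux/bitfield.h> shifts a value into the position
described by a mask, which is what keeps the read-modify-write of
q_opt_rss above from disturbing the other bits of the field. A
self-contained illustration with a made-up two-bit mask:

#include <linux/bitfield.h>

#define EXAMPLE_HASH_M	GENMASK(7, 6)	/* hypothetical 2-bit field */

/* FIELD_PREP(EXAMPLE_HASH_M, 0x2) == (0x2 << 6) == 0x80 */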
2018-08-09 13:29:54 +00:00
/**
* ice_bridge_getlink - Get the hardware bridge mode
* @ skb : skb buff
2019-02-19 23:04:13 +00:00
* @ pid : process ID
2018-08-09 13:29:54 +00:00
* @ seq : RTNL message seq
* @ dev : the netdev being configured
* @ filter_mask : filter mask passed in
* @ nlflags : netlink flags passed in
*
* Return the bridge mode ( VEB / VEPA )
*/
static int
ice_bridge_getlink ( struct sk_buff * skb , u32 pid , u32 seq ,
struct net_device * dev , u32 filter_mask , int nlflags )
{
struct ice_netdev_priv * np = netdev_priv ( dev ) ;
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
u16 bmode ;
bmode = pf - > first_sw - > bridge_mode ;
return ndo_dflt_bridge_getlink ( skb , pid , seq , dev , bmode , 0 , 0 , nlflags ,
filter_mask , NULL ) ;
}
/**
* ice_vsi_update_bridge_mode - Update VSI for switching bridge mode ( VEB / VEPA )
* @ vsi : Pointer to VSI structure
* @ bmode : Hardware bridge mode ( VEB / VEPA )
*
* Returns 0 on success , negative on failure
*/
static int ice_vsi_update_bridge_mode ( struct ice_vsi * vsi , u16 bmode )
{
struct ice_aqc_vsi_props * vsi_props ;
struct ice_hw * hw = & vsi - > back - > hw ;
2019-02-08 20:50:32 +00:00
struct ice_vsi_ctx * ctxt ;
2021-10-07 23:00:23 +00:00
int ret ;
2018-08-09 13:29:54 +00:00
vsi_props = & vsi - > info ;
2019-02-08 20:50:32 +00:00
2019-11-08 14:23:25 +00:00
ctxt = kzalloc ( sizeof ( * ctxt ) , GFP_KERNEL ) ;
2019-02-08 20:50:32 +00:00
if ( ! ctxt )
return - ENOMEM ;
ctxt - > info = vsi - > info ;
2018-08-09 13:29:54 +00:00
if ( bmode = = BRIDGE_MODE_VEB )
/* change from VEPA to VEB mode */
2019-02-08 20:50:32 +00:00
ctxt - > info . sw_flags | = ICE_AQ_VSI_SW_FLAG_ALLOW_LB ;
2018-08-09 13:29:54 +00:00
else
/* change from VEB to VEPA mode */
2019-02-08 20:50:32 +00:00
ctxt - > info . sw_flags & = ~ ICE_AQ_VSI_SW_FLAG_ALLOW_LB ;
ctxt - > info . valid_sections = cpu_to_le16 ( ICE_AQ_VSI_PROP_SW_VALID ) ;
2018-09-20 00:23:12 +00:00
2021-10-07 23:00:23 +00:00
ret = ice_update_vsi ( hw , vsi - > idx , ctxt , NULL ) ;
if ( ret ) {
2021-10-07 22:56:02 +00:00
dev_err ( ice_pf_to_dev ( vsi - > back ) , " update VSI for bridge mode failed, bmode = %d err %d aq_err %s \n " ,
2021-10-07 23:00:23 +00:00
bmode , ret , ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
2019-02-08 20:50:32 +00:00
goto out ;
2018-08-09 13:29:54 +00:00
}
/* Update sw flags for book keeping */
2019-02-08 20:50:32 +00:00
vsi_props - > sw_flags = ctxt - > info . sw_flags ;
2018-08-09 13:29:54 +00:00
2019-02-08 20:50:32 +00:00
out :
2019-11-08 14:23:25 +00:00
kfree ( ctxt ) ;
2019-02-08 20:50:32 +00:00
return ret ;
2018-08-09 13:29:54 +00:00
}
/**
* ice_bridge_setlink - Set the hardware bridge mode
* @ dev : the netdev being configured
* @ nlh : RTNL message
* @ flags : bridge setlink flags
2018-12-12 17:02:48 +00:00
* @ extack : netlink extended ack
2018-08-09 13:29:54 +00:00
*
* Sets the bridge mode ( VEB / VEPA ) of the switch to which the netdev ( VSI ) is
* hooked up . Iterates through the PF VSI list and sets the loopback mode ( if
* not already set ) for all VSIs connected to this switch . Also updates the
* unicast switch filter rules for the corresponding switch of the netdev .
*/
static int
ice_bridge_setlink ( struct net_device * dev , struct nlmsghdr * nlh ,
2018-12-19 18:03:20 +00:00
u16 __always_unused flags ,
struct netlink_ext_ack __always_unused * extack )
2018-08-09 13:29:54 +00:00
{
struct ice_netdev_priv * np = netdev_priv ( dev ) ;
struct ice_pf * pf = np - > vsi - > back ;
struct nlattr * attr , * br_spec ;
struct ice_hw * hw = & pf - > hw ;
struct ice_sw * pf_sw ;
int rem , v , err = 0 ;
pf_sw = pf - > first_sw ;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr ( nlh , sizeof ( struct ifinfomsg ) , IFLA_AF_SPEC ) ;
2024-02-28 15:54:48 +00:00
if ( ! br_spec )
return - EINVAL ;
2018-08-09 13:29:54 +00:00
nla_for_each_nested ( attr , br_spec , rem ) {
__u16 mode ;
if ( nla_type ( attr ) ! = IFLA_BRIDGE_MODE )
continue ;
mode = nla_get_u16 ( attr ) ;
if ( mode ! = BRIDGE_MODE_VEPA & & mode ! = BRIDGE_MODE_VEB )
return - EINVAL ;
/* Continue if bridge mode is not being flipped */
if ( mode = = pf_sw - > bridge_mode )
continue ;
/* Iterates through the PF VSI list and update the loopback
* mode of the VSI
*/
ice_for_each_vsi ( pf , v ) {
if ( ! pf - > vsi [ v ] )
continue ;
err = ice_vsi_update_bridge_mode ( pf - > vsi [ v ] , mode ) ;
if ( err )
return err ;
}
hw - > evb_veb = ( mode = = BRIDGE_MODE_VEB ) ;
/* Update the unicast switch filter rules for the corresponding
* switch of the netdev
*/
2021-10-07 23:00:23 +00:00
err = ice_update_sw_rule_bridge_mode ( hw ) ;
if ( err ) {
2021-10-07 22:56:02 +00:00
netdev_err ( dev , " switch rule update failed, mode = %d err %d aq_err %s \n " ,
2021-10-07 23:00:23 +00:00
mode , err ,
2020-05-08 00:41:04 +00:00
ice_aq_str ( hw - > adminq . sq_last_status ) ) ;
2018-08-09 13:29:54 +00:00
/* revert hw->evb_veb */
hw - > evb_veb = ( pf_sw - > bridge_mode = = BRIDGE_MODE_VEB ) ;
2021-10-07 23:01:58 +00:00
return err ;
2018-08-09 13:29:54 +00:00
}
pf_sw - > bridge_mode = mode ;
}
return 0 ;
}
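For reference, these two NDOs back the standard iproute2 bridge commands:
"bridge link show dev <uplink>" reports the mode via ice_bridge_getlink(),
and "bridge link set dev <uplink> hwmode vepa" (or "hwmode veb") flips it
via ice_bridge_setlink(); the device name here is a placeholder.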
2018-08-09 13:29:53 +00:00
/**
* ice_tx_timeout - Respond to a Tx Hang
* @ netdev : network interface device structure
2020-01-22 15:21:37 +00:00
* @ txqueue : Tx queue
2018-08-09 13:29:53 +00:00
*/
netdev: pass the stuck queue to the timeout handler
This allows incrementing the correct timeout statistic without any mess.
Down the road, devices can learn to reset just the specific queue.
The patch was generated with the following script:
use strict;
use warnings;
our $^I = '.bak';
my @work = (
["arch/m68k/emu/nfeth.c", "nfeth_tx_timeout"],
["arch/um/drivers/net_kern.c", "uml_net_tx_timeout"],
["arch/um/drivers/vector_kern.c", "vector_net_tx_timeout"],
["arch/xtensa/platforms/iss/network.c", "iss_net_tx_timeout"],
["drivers/char/pcmcia/synclink_cs.c", "hdlcdev_tx_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/message/fusion/mptlan.c", "mpt_lan_tx_timeout"],
["drivers/misc/sgi-xp/xpnet.c", "xpnet_dev_tx_timeout"],
["drivers/net/appletalk/cops.c", "cops_timeout"],
["drivers/net/arcnet/arcdevice.h", "arcnet_timeout"],
["drivers/net/arcnet/arcnet.c", "arcnet_timeout"],
["drivers/net/arcnet/com20020.c", "arcnet_timeout"],
["drivers/net/ethernet/3com/3c509.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c515.c", "corkscrew_timeout"],
["drivers/net/ethernet/3com/3c574_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c589_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/typhoon.c", "typhoon_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "eip_tx_timeout"],
["drivers/net/ethernet/8390/8390.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390p.c", "eip_tx_timeout"],
["drivers/net/ethernet/8390/ax88796.c", "ax_ei_tx_timeout"],
["drivers/net/ethernet/8390/axnet_cs.c", "axnet_tx_timeout"],
["drivers/net/ethernet/8390/etherh.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/hydra.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mac8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mcf8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/lib8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/ne2k-pci.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/pcnet_cs.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/smc-ultra.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/wd.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/zorro8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/adaptec/starfire.c", "tx_timeout"],
["drivers/net/ethernet/agere/et131x.c", "et131x_tx_timeout"],
["drivers/net/ethernet/allwinner/sun4i-emac.c", "emac_timeout"],
["drivers/net/ethernet/alteon/acenic.c", "ace_watchdog"],
["drivers/net/ethernet/amazon/ena/ena_netdev.c", "ena_tx_timeout"],
["drivers/net/ethernet/amd/7990.h", "lance_tx_timeout"],
["drivers/net/ethernet/amd/7990.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/a2065.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/am79c961a.c", "am79c961_timeout"],
["drivers/net/ethernet/amd/amd8111e.c", "amd8111e_tx_timeout"],
["drivers/net/ethernet/amd/ariadne.c", "ariadne_tx_timeout"],
["drivers/net/ethernet/amd/atarilance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/au1000_eth.c", "au1000_tx_timeout"],
["drivers/net/ethernet/amd/declance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/lance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/mvme147.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/ni65.c", "ni65_timeout"],
["drivers/net/ethernet/amd/nmclan_cs.c", "mace_tx_timeout"],
["drivers/net/ethernet/amd/pcnet32.c", "pcnet32_tx_timeout"],
["drivers/net/ethernet/amd/sunlance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/xgbe/xgbe-drv.c", "xgbe_tx_timeout"],
["drivers/net/ethernet/apm/xgene-v2/main.c", "xge_timeout"],
["drivers/net/ethernet/apm/xgene/xgene_enet_main.c", "xgene_enet_timeout"],
["drivers/net/ethernet/apple/macmace.c", "mace_tx_timeout"],
["drivers/net/ethernet/atheros/ag71xx.c", "ag71xx_tx_timeout"],
["drivers/net/ethernet/atheros/alx/main.c", "alx_tx_timeout"],
["drivers/net/ethernet/atheros/atl1c/atl1c_main.c", "atl1c_tx_timeout"],
["drivers/net/ethernet/atheros/atl1e/atl1e_main.c", "atl1e_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl1.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl2.c", "atl2_tx_timeout"],
["drivers/net/ethernet/broadcom/b44.c", "b44_tx_timeout"],
["drivers/net/ethernet/broadcom/bcmsysport.c", "bcm_sysport_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2.c", "bnx2_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnxt/bnxt.c", "bnxt_tx_timeout"],
["drivers/net/ethernet/broadcom/genet/bcmgenet.c", "bcmgenet_timeout"],
["drivers/net/ethernet/broadcom/sb1250-mac.c", "sbmac_tx_timeout"],
["drivers/net/ethernet/broadcom/tg3.c", "tg3_tx_timeout"],
["drivers/net/ethernet/calxeda/xgmac.c", "xgmac_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c", "lio_vf_rep_tx_timeout"],
["drivers/net/ethernet/cavium/thunder/nicvf_main.c", "nicvf_tx_timeout"],
["drivers/net/ethernet/cirrus/cs89x0.c", "net_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cortina/gemini.c", "gmac_tx_timeout"],
["drivers/net/ethernet/davicom/dm9000.c", "dm9000_timeout"],
["drivers/net/ethernet/dec/tulip/de2104x.c", "de_tx_timeout"],
["drivers/net/ethernet/dec/tulip/tulip_core.c", "tulip_tx_timeout"],
["drivers/net/ethernet/dec/tulip/winbond-840.c", "tx_timeout"],
["drivers/net/ethernet/dlink/dl2k.c", "rio_tx_timeout"],
["drivers/net/ethernet/dlink/sundance.c", "tx_timeout"],
["drivers/net/ethernet/emulex/benet/be_main.c", "be_tx_timeout"],
["drivers/net/ethernet/ethoc.c", "ethoc_tx_timeout"],
["drivers/net/ethernet/faraday/ftgmac100.c", "ftgmac100_tx_timeout"],
["drivers/net/ethernet/fealnx.c", "fealnx_tx_timeout"],
["drivers/net/ethernet/freescale/dpaa/dpaa_eth.c", "dpaa_tx_timeout"],
["drivers/net/ethernet/freescale/fec_main.c", "fec_timeout"],
["drivers/net/ethernet/freescale/fec_mpc52xx.c", "mpc52xx_fec_tx_timeout"],
["drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c", "fs_timeout"],
["drivers/net/ethernet/freescale/gianfar.c", "gfar_timeout"],
["drivers/net/ethernet/freescale/ucc_geth.c", "ucc_geth_timeout"],
["drivers/net/ethernet/fujitsu/fmvj18x_cs.c", "fjn_tx_timeout"],
["drivers/net/ethernet/google/gve/gve_main.c", "gve_tx_timeout"],
["drivers/net/ethernet/hisilicon/hip04_eth.c", "hip04_timeout"],
["drivers/net/ethernet/hisilicon/hix5hd2_gmac.c", "hix5hd2_net_timeout"],
["drivers/net/ethernet/hisilicon/hns/hns_enet.c", "hns_nic_net_timeout"],
["drivers/net/ethernet/hisilicon/hns3/hns3_enet.c", "hns3_nic_net_timeout"],
["drivers/net/ethernet/huawei/hinic/hinic_main.c", "hinic_tx_timeout"],
["drivers/net/ethernet/i825xx/82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/ether1.c", "ether1_timeout"],
["drivers/net/ethernet/i825xx/lib82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/sun3_82586.c", "sun3_82586_timeout"],
["drivers/net/ethernet/ibm/ehea/ehea_main.c", "ehea_tx_watchdog"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/ibmvnic.c", "ibmvnic_tx_timeout"],
["drivers/net/ethernet/intel/e100.c", "e100_tx_timeout"],
["drivers/net/ethernet/intel/e1000/e1000_main.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/e1000e/netdev.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/fm10k/fm10k_netdev.c", "fm10k_tx_timeout"],
["drivers/net/ethernet/intel/i40e/i40e_main.c", "i40e_tx_timeout"],
["drivers/net/ethernet/intel/iavf/iavf_main.c", "iavf_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/igb/igb_main.c", "igb_tx_timeout"],
["drivers/net/ethernet/intel/igbvf/netdev.c", "igbvf_tx_timeout"],
["drivers/net/ethernet/intel/ixgb/ixgb_main.c", "ixgb_tx_timeout"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c", "adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", "ixgbe_tx_timeout"],
["drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c", "ixgbevf_tx_timeout"],
["drivers/net/ethernet/jme.c", "jme_tx_timeout"],
["drivers/net/ethernet/korina.c", "korina_tx_timeout"],
["drivers/net/ethernet/lantiq_etop.c", "ltq_etop_tx_timeout"],
["drivers/net/ethernet/marvell/mv643xx_eth.c", "mv643xx_eth_tx_timeout"],
["drivers/net/ethernet/marvell/pxa168_eth.c", "pxa168_eth_tx_timeout"],
["drivers/net/ethernet/marvell/skge.c", "skge_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/mediatek/mtk_eth_soc.c", "mtk_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx5/core/en_main.c", "mlx5e_tx_timeout"],
["drivers/net/ethernet/micrel/ks8842.c", "ks8842_tx_timeout"],
["drivers/net/ethernet/micrel/ksz884x.c", "netdev_tx_timeout"],
["drivers/net/ethernet/microchip/enc28j60.c", "enc28j60_tx_timeout"],
["drivers/net/ethernet/microchip/encx24j600.c", "encx24j600_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.h", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/jazzsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/macsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/natsemi.c", "ns_tx_timeout"],
["drivers/net/ethernet/natsemi/ns83820.c", "ns83820_tx_timeout"],
["drivers/net/ethernet/natsemi/xtsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/neterion/s2io.h", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/s2io.c", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/vxge/vxge-main.c", "vxge_tx_watchdog"],
["drivers/net/ethernet/netronome/nfp/nfp_net_common.c", "nfp_net_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c", "pch_gbe_tx_timeout"],
["drivers/net/ethernet/packetengines/hamachi.c", "hamachi_tx_timeout"],
["drivers/net/ethernet/packetengines/yellowfin.c", "yellowfin_tx_timeout"],
["drivers/net/ethernet/pensando/ionic/ionic_lif.c", "ionic_tx_timeout"],
["drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c", "netxen_tx_timeout"],
["drivers/net/ethernet/qlogic/qla3xxx.c", "ql3xxx_tx_timeout"],
["drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c", "qlcnic_tx_timeout"],
["drivers/net/ethernet/qualcomm/emac/emac.c", "emac_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_spi.c", "qcaspi_netdev_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_uart.c", "qcauart_netdev_tx_timeout"],
["drivers/net/ethernet/rdc/r6040.c", "r6040_tx_timeout"],
["drivers/net/ethernet/realtek/8139cp.c", "cp_tx_timeout"],
["drivers/net/ethernet/realtek/8139too.c", "rtl8139_tx_timeout"],
["drivers/net/ethernet/realtek/atp.c", "tx_timeout"],
["drivers/net/ethernet/realtek/r8169_main.c", "rtl8169_tx_timeout"],
["drivers/net/ethernet/renesas/ravb_main.c", "ravb_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c", "sxgbe_tx_timeout"],
["drivers/net/ethernet/seeq/ether3.c", "ether3_timeout"],
["drivers/net/ethernet/seeq/sgiseeq.c", "timeout"],
["drivers/net/ethernet/sfc/efx.c", "efx_watchdog"],
["drivers/net/ethernet/sfc/falcon/efx.c", "ef4_watchdog"],
["drivers/net/ethernet/sgi/ioc3-eth.c", "ioc3_timeout"],
["drivers/net/ethernet/sgi/meth.c", "meth_tx_timeout"],
["drivers/net/ethernet/silan/sc92031.c", "sc92031_tx_timeout"],
["drivers/net/ethernet/sis/sis190.c", "sis190_tx_timeout"],
["drivers/net/ethernet/sis/sis900.c", "sis900_tx_timeout"],
["drivers/net/ethernet/smsc/epic100.c", "epic_tx_timeout"],
["drivers/net/ethernet/smsc/smc911x.c", "smc911x_timeout"],
["drivers/net/ethernet/smsc/smc9194.c", "smc_timeout"],
["drivers/net/ethernet/smsc/smc91c92_cs.c", "smc_tx_timeout"],
["drivers/net/ethernet/smsc/smc91x.c", "smc_timeout"],
["drivers/net/ethernet/stmicro/stmmac/stmmac_main.c", "stmmac_tx_timeout"],
["drivers/net/ethernet/sun/cassini.c", "cas_tx_timeout"],
["drivers/net/ethernet/sun/ldmvsw.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/niu.c", "niu_tx_timeout"],
["drivers/net/ethernet/sun/sunbmac.c", "bigmac_tx_timeout"],
["drivers/net/ethernet/sun/sungem.c", "gem_tx_timeout"],
["drivers/net/ethernet/sun/sunhme.c", "happy_meal_tx_timeout"],
["drivers/net/ethernet/sun/sunqe.c", "qe_tx_timeout"],
["drivers/net/ethernet/sun/sunvnet.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.h", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/synopsys/dwc-xlgmac-net.c", "xlgmac_tx_timeout"],
["drivers/net/ethernet/ti/cpmac.c", "cpmac_tx_timeout"],
["drivers/net/ethernet/ti/cpsw.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.h", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/davinci_emac.c", "emac_dev_tx_timeout"],
["drivers/net/ethernet/ti/netcp_core.c", "netcp_ndo_tx_timeout"],
["drivers/net/ethernet/ti/tlan.c", "tlan_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.h", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_wireless.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/spider_net.c", "spider_net_tx_timeout"],
["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"],
["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"],
["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"],
["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"],
["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"],
["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"],
["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"],
["drivers/net/slip/slip.c", "sl_tx_timeout"],
["include/linux/usb/usbnet.h", "usbnet_tx_timeout"],
["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"],
["drivers/net/usb/catc.c", "catc_tx_timeout"],
["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"],
["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"],
["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"],
["drivers/net/usb/hso.c", "hso_net_tx_timeout"],
["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"],
["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"],
["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"],
["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"],
["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"],
["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"],
["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"],
["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"],
["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"],
["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"],
["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"],
["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"],
["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"],
["drivers/net/wan/cosa.c", "cosa_net_timeout"],
["drivers/net/wan/farsync.c", "fst_tx_timeout"],
["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"],
["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"],
["drivers/net/wan/x25_asy.c", "x25_asy_timeout"],
["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"],
["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"],
["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.h", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"],
["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"],
["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"],
["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"],
["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"],
["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"],
["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"],
["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"],
["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"],
["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"],
["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"],
["drivers/tty/synclink.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"],
["net/atm/lec.c", "lec_tx_timeout"],
["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"]
);
for my $p (@work) {
my @pair = @$p;
my $file = $pair[0];
my $func = $pair[1];
print STDERR $file , ": ", $func,"\n";
our @ARGV = ($file);
while (<ARGV>) {
if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) {
print STDERR "found $1+$2 in $file\n";
}
if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) {
print STDERR "$func found in $file\n";
}
print;
}
}
where the list of files and functions is simply from:
git grep ndo_tx_timeout, with manual addition of headers
in the rare cases where the function is from a header,
then manually changing the few places which actually
call ndo_tx_timeout.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Heiner Kallweit <hkallweit1@gmail.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Shannon Nelson <snelson@pensando.io>
Reviewed-by: Martin Habets <mhabets@solarflare.com>
changes from v9:
fixup a forward declaration
changes from v9:
more leftovers from v3 change
changes from v8:
fix up a missing direct call to timeout
rebased on net-next
changes from v7:
fixup leftovers from v3 change
changes from v6:
fix typo in rtl driver
changes from v5:
add missing files (allow any net device argument name)
changes from v4:
add a missing driver header
changes from v3:
change queue # to unsigned
Changes from v2:
added headers
Changes from v1:
Fix errors found by kbuild:
generalize the pattern a bit, to pick up
a couple of instances missed by the previous
version.
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-12-10 14:23:51 +00:00
static void ice_tx_timeout ( struct net_device * netdev , unsigned int txqueue )
2018-08-09 13:29:53 +00:00
{
struct ice_netdev_priv * np = netdev_priv ( netdev ) ;
2021-08-19 11:59:58 +00:00
struct ice_tx_ring * tx_ring = NULL ;
2018-08-09 13:29:53 +00:00
struct ice_vsi * vsi = np - > vsi ;
struct ice_pf * pf = vsi - > back ;
2018-10-26 17:41:00 +00:00
u32 i ;
2018-08-09 13:29:53 +00:00
pf - > tx_timeout_count + + ;
2020-05-08 00:41:00 +00:00
/* Check if PFC is enabled for the TC to which the queue belongs .
* If so , the Tx timeout is not caused by a hung queue and there is
* no need to reset and rebuild
*/
if ( ice_is_pfc_causing_hung_q ( pf , txqueue ) ) {
dev_info ( ice_pf_to_dev ( pf ) , " Fake Tx hang detected on queue %u, timeout caused by PFC storm \n " ,
txqueue ) ;
return ;
}
2019-12-18 18:38:45 +00:00
/* now that we have an index, find the tx_ring struct */
2021-08-19 12:00:04 +00:00
ice_for_each_txq ( vsi , i )
2019-12-18 18:38:45 +00:00
if ( vsi - > tx_rings [ i ] & & vsi - > tx_rings [ i ] - > desc )
if ( txqueue = = vsi - > tx_rings [ i ] - > q_index ) {
tx_ring = vsi - > tx_rings [ i ] ;
break ;
}
2018-08-09 13:29:53 +00:00
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period .
*/
if ( time_after ( jiffies , ( pf - > tx_timeout_last_recovery + HZ * 20 ) ) )
pf - > tx_timeout_recovery_level = 1 ;
else if ( time_before ( jiffies , ( pf - > tx_timeout_last_recovery +
netdev - > watchdog_timeo ) ) )
return ;
if ( tx_ring ) {
2018-10-26 17:41:00 +00:00
struct ice_hw * hw = & pf - > hw ;
u32 head , val = 0 ;
2023-12-06 01:01:12 +00:00
head = FIELD_GET ( QTX_COMM_HEAD_HEAD_M ,
rd32 ( hw , QTX_COMM_HEAD ( vsi - > txq_map [ txqueue ] ) ) ) ;
2018-08-09 13:29:53 +00:00
/* Read interrupt register */
2019-06-26 09:20:25 +00:00
val = rd32 ( hw , GLINT_DYN_CTL ( tx_ring - > q_vector - > reg_idx ) ) ;
2018-08-09 13:29:53 +00:00
2020-02-27 18:15:02 +00:00
netdev_info ( netdev , " tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x \n " ,
2019-12-18 18:38:45 +00:00
vsi - > vsi_num , txqueue , tx_ring - > next_to_clean ,
2018-10-26 17:41:00 +00:00
head , tx_ring - > next_to_use , val ) ;
2018-08-09 13:29:53 +00:00
}
pf - > tx_timeout_last_recovery = jiffies ;
2020-02-27 18:15:02 +00:00
netdev_info ( netdev , " tx_timeout recovery level %d, txqueue %u \n " ,
2019-12-18 18:38:45 +00:00
pf - > tx_timeout_recovery_level , txqueue ) ;
2018-08-09 13:29:53 +00:00
switch ( pf - > tx_timeout_recovery_level ) {
case 1 :
2021-03-02 18:15:38 +00:00
set_bit ( ICE_PFR_REQ , pf - > state ) ;
2018-08-09 13:29:53 +00:00
break ;
case 2 :
2021-03-02 18:15:38 +00:00
set_bit ( ICE_CORER_REQ , pf - > state ) ;
2018-08-09 13:29:53 +00:00
break ;
case 3 :
2021-03-02 18:15:38 +00:00
set_bit ( ICE_GLOBR_REQ , pf - > state ) ;
2018-08-09 13:29:53 +00:00
break ;
default :
netdev_err ( netdev , " tx_timeout recovery unsuccessful, device is in unrecoverable state. \n " ) ;
2021-03-02 18:15:38 +00:00
set_bit ( ICE_DOWN , pf - > state ) ;
2021-03-02 18:15:37 +00:00
set_bit ( ICE_VSI_NEEDS_RESTART , vsi - > state ) ;
2021-03-02 18:15:38 +00:00
set_bit ( ICE_SERVICE_DIS , pf - > state ) ;
2018-08-09 13:29:53 +00:00
break ;
}
ice_service_task_schedule ( pf ) ;
pf - > tx_timeout_recovery_level + + ;
}
2021-08-06 08:49:05 +00:00
/**
* ice_setup_tc_cls_flower - flower classifier offloads
* @ np : net device to configure
* @ filter_dev : device on which filter is added
* @ cls_flower : offload data
*/
static int
ice_setup_tc_cls_flower ( struct ice_netdev_priv * np ,
struct net_device * filter_dev ,
struct flow_cls_offload * cls_flower )
{
struct ice_vsi * vsi = np - > vsi ;
if ( cls_flower - > common . chain_index )
return - EOPNOTSUPP ;
switch ( cls_flower - > command ) {
case FLOW_CLS_REPLACE :
return ice_add_cls_flower ( filter_dev , vsi , cls_flower ) ;
case FLOW_CLS_DESTROY :
return ice_del_cls_flower ( vsi , cls_flower ) ;
default :
return - EINVAL ;
}
}
/**
* ice_setup_tc_block_cb - callback handler registered for TC block
* @ type : TC SETUP type
* @ type_data : TC flower offload data that contains user input
* @ cb_priv : netdev private data
*/
static int
ice_setup_tc_block_cb ( enum tc_setup_type type , void * type_data , void * cb_priv )
{
struct ice_netdev_priv * np = cb_priv ;
switch ( type ) {
case TC_SETUP_CLSFLOWER :
return ice_setup_tc_cls_flower ( np , np - > vsi - > netdev ,
type_data ) ;
default :
return - EOPNOTSUPP ;
}
}
2021-10-15 23:35:16 +00:00
/**
* ice_validate_mqprio_qopt - Validate TCF input parameters
* @ vsi : Pointer to VSI
* @ mqprio_qopt : input parameters for mqprio queue configuration
*
* This function validates MQPRIO params , such as qcount ( power of 2 wherever
* needed ) , and makes sure the user doesn ' t specify a qcount or BW rate
* limit for TCs beyond " num_tc "
*/
static int
ice_validate_mqprio_qopt ( struct ice_vsi * vsi ,
struct tc_mqprio_qopt_offload * mqprio_qopt )
{
int non_power_of_2_qcount = 0 ;
struct ice_pf * pf = vsi - > back ;
int max_rss_q_cnt = 0 ;
2023-06-10 00:40:23 +00:00
u64 sum_min_rate = 0 ;
2021-10-15 23:35:16 +00:00
struct device * dev ;
int i , speed ;
u8 num_tc ;
if ( vsi - > type ! = ICE_VSI_PF )
return - EINVAL ;
if ( mqprio_qopt - > qopt . offset [ 0 ] ! = 0 | |
mqprio_qopt - > qopt . num_tc < 1 | |
mqprio_qopt - > qopt . num_tc > ICE_CHNL_MAX_TC )
return - EINVAL ;
dev = ice_pf_to_dev ( pf ) ;
vsi - > ch_rss_size = 0 ;
num_tc = mqprio_qopt - > qopt . num_tc ;
2023-06-10 00:40:23 +00:00
speed = ice_get_link_speed_kbps ( vsi ) ;
2021-10-15 23:35:16 +00:00
for ( i = 0 ; num_tc ; i + + ) {
int qcount = mqprio_qopt - > qopt . count [ i ] ;
u64 max_rate , min_rate , rem ;
if ( ! qcount )
return - EINVAL ;
if ( is_power_of_2 ( qcount ) ) {
if ( non_power_of_2_qcount & &
qcount > non_power_of_2_qcount ) {
dev_err ( dev , " qcount[%d] cannot be greater than non power of 2 qcount[%d] \n " ,
qcount , non_power_of_2_qcount ) ;
return - EINVAL ;
}
if ( qcount > max_rss_q_cnt )
max_rss_q_cnt = qcount ;
} else {
if ( non_power_of_2_qcount & &
qcount ! = non_power_of_2_qcount ) {
dev_err ( dev , " Only one non power of 2 qcount allowed[%d,%d] \n " ,
qcount , non_power_of_2_qcount ) ;
return - EINVAL ;
}
if ( qcount < max_rss_q_cnt ) {
dev_err ( dev , " non power of 2 qcount[%d] cannot be less than other qcount[%d] \n " ,
qcount , max_rss_q_cnt ) ;
return - EINVAL ;
}
max_rss_q_cnt = qcount ;
non_power_of_2_qcount = qcount ;
}
/* TC command takes input in K/M/Gbps or K/M/Gbit etc but
* converts the bandwidth rate limit into Bytes / s when
* passing it down to the driver . So convert input bandwidth
* from Bytes / s to Kbps
*/
max_rate = mqprio_qopt - > max_rate [ i ] ;
max_rate = div_u64 ( max_rate , ICE_BW_KBPS_DIVISOR ) ;
/* min_rate is minimum guaranteed rate and it can't be zero */
min_rate = mqprio_qopt - > min_rate [ i ] ;
min_rate = div_u64 ( min_rate , ICE_BW_KBPS_DIVISOR ) ;
sum_min_rate + = min_rate ;
if ( min_rate & & min_rate < ICE_MIN_BW_LIMIT ) {
dev_err ( dev , " TC%d: min_rate(%llu Kbps) < %u Kbps \n " , i ,
min_rate , ICE_MIN_BW_LIMIT ) ;
return - EINVAL ;
}
2023-06-10 00:40:23 +00:00
if ( max_rate & & max_rate > speed ) {
dev_err ( dev , " TC%d: max_rate(%llu Kbps) > link speed of %u Kbps \n " ,
i , max_rate , speed ) ;
return - EINVAL ;
}
2021-10-15 23:35:16 +00:00
iter_div_u64_rem ( min_rate , ICE_MIN_BW_LIMIT , & rem ) ;
if ( rem ) {
dev_err ( dev , " TC%d: Min Rate not multiple of %u Kbps " ,
i , ICE_MIN_BW_LIMIT ) ;
return - EINVAL ;
}
iter_div_u64_rem ( max_rate , ICE_MIN_BW_LIMIT , & rem ) ;
if ( rem ) {
dev_err ( dev , " TC%d: Max Rate not multiple of %u Kbps " ,
i , ICE_MIN_BW_LIMIT ) ;
return - EINVAL ;
}
/* min_rate can't be more than max_rate, except when max_rate
* is zero ( implies max_rate sought is max line rate ) . In such
* a case min_rate can be more than max .
*/
if ( max_rate & & min_rate > max_rate ) {
dev_err ( dev , " min_rate %llu Kbps can't be more than max_rate %llu Kbps \n " ,
min_rate , max_rate ) ;
return - EINVAL ;
}
if ( i > = mqprio_qopt - > qopt . num_tc - 1 )
break ;
if ( mqprio_qopt - > qopt . offset [ i + 1 ] ! =
( mqprio_qopt - > qopt . offset [ i ] + qcount ) )
return - EINVAL ;
}
if ( vsi - > num_rxq <
( mqprio_qopt - > qopt . offset [ i ] + mqprio_qopt - > qopt . count [ i ] ) )
return - EINVAL ;
if ( vsi - > num_txq <
( mqprio_qopt - > qopt . offset [ i ] + mqprio_qopt - > qopt . count [ i ] ) )
return - EINVAL ;
if ( sum_min_rate & & sum_min_rate > ( u64 ) speed ) {
dev_err ( dev , " Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified \n " ,
sum_min_rate , speed ) ;
return - EINVAL ;
}
/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
vsi - > ch_rss_size = max_rss_q_cnt ;
return 0 ;
}
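For reference, the parameters validated here arrive from a channel-mode
mqprio qdisc, e.g. (interface name, queue counts and rates below are
placeholders):

tc qdisc add dev <ethX> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
	queues 4@0 4@4 hw 1 mode channel \
	shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 4Gbit 5Gbit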
2021-12-29 18:54:33 +00:00
/**
* ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
* @ pf : ptr to PF device
* @ vsi : ptr to VSI
*/
static int ice_add_vsi_to_fdir ( struct ice_pf * pf , struct ice_vsi * vsi )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
bool added = false ;
struct ice_hw * hw ;
int flow ;
if ( ! ( vsi - > num_gfltr | | vsi - > num_bfltr ) )
return - EINVAL ;
hw = & pf - > hw ;
for ( flow = 0 ; flow < ICE_FLTR_PTYPE_MAX ; flow + + ) {
struct ice_fd_hw_prof * prof ;
int tun , status ;
u64 entry_h ;
if ( ! ( hw - > fdir_prof & & hw - > fdir_prof [ flow ] & &
hw - > fdir_prof [ flow ] - > cnt ) )
continue ;
for ( tun = 0 ; tun < ICE_FD_HW_SEG_MAX ; tun + + ) {
enum ice_flow_priority prio ;
/* add this VSI to FDir profile for this flow */
prio = ICE_FLOW_PRIO_NORMAL ;
prof = hw - > fdir_prof [ flow ] ;
2023-12-13 00:33:19 +00:00
status = ice_flow_add_entry ( hw , ICE_BLK_FD ,
prof - > prof_id [ tun ] ,
2021-12-29 18:54:33 +00:00
prof - > vsi_h [ 0 ] , vsi - > idx ,
prio , prof - > fdir_seg [ tun ] ,
& entry_h ) ;
if ( status ) {
dev_err ( dev , " channel VSI idx %d, not able to add to group %d \n " ,
vsi - > idx , flow ) ;
continue ;
}
prof - > entry_h [ prof - > cnt ] [ tun ] = entry_h ;
}
/* store VSI for filter replay and delete */
prof - > vsi_h [ prof - > cnt ] = vsi - > idx ;
prof - > cnt + + ;
added = true ;
dev_dbg ( dev , " VSI idx %d added to fdir group %d \n " , vsi - > idx ,
flow ) ;
}
if ( ! added )
dev_dbg ( dev , " VSI idx %d not added to fdir groups \n " , vsi - > idx ) ;
return 0 ;
}
2021-10-15 23:35:16 +00:00
/**
* ice_add_channel - add a channel by adding VSI
* @ pf : ptr to PF device
* @ sw_id : underlying HW switching element ID
* @ ch : ptr to channel structure
*
* Add a channel ( VSI ) using add_vsi and queue_map
*/
static int ice_add_channel ( struct ice_pf * pf , u16 sw_id , struct ice_channel * ch )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
struct ice_vsi * vsi ;
if ( ch - > type ! = ICE_VSI_CHNL ) {
dev_err ( dev , " add new VSI failed, ch->type %d \n " , ch - > type ) ;
return - EINVAL ;
}
vsi = ice_chnl_vsi_setup ( pf , pf - > hw . port_info , ch ) ;
if ( ! vsi | | vsi - > type ! = ICE_VSI_CHNL ) {
dev_err ( dev , " create chnl VSI failure \n " ) ;
return - EINVAL ;
}
2021-12-29 18:54:33 +00:00
ice_add_vsi_to_fdir ( pf , vsi ) ;
2021-10-15 23:35:16 +00:00
ch - > sw_id = sw_id ;
ch - > vsi_num = vsi - > vsi_num ;
ch - > info . mapping_flags = vsi - > info . mapping_flags ;
ch - > ch_vsi = vsi ;
/* set the back pointer of channel for newly created VSI */
vsi - > ch = ch ;
memcpy ( & ch - > info . q_mapping , & vsi - > info . q_mapping ,
sizeof ( vsi - > info . q_mapping ) ) ;
memcpy ( & ch - > info . tc_mapping , vsi - > info . tc_mapping ,
sizeof ( vsi - > info . tc_mapping ) ) ;
return 0 ;
}
/**
* ice_chnl_cfg_res
* @ vsi : the VSI being setup
* @ ch : ptr to channel structure
*
* Configure channel specific resources such as rings and vectors .
*/
static void ice_chnl_cfg_res ( struct ice_vsi * vsi , struct ice_channel * ch )
{
int i ;
for ( i = 0 ; i < ch - > num_txq ; i + + ) {
struct ice_q_vector * tx_q_vector , * rx_q_vector ;
struct ice_ring_container * rc ;
struct ice_tx_ring * tx_ring ;
struct ice_rx_ring * rx_ring ;
tx_ring = vsi - > tx_rings [ ch - > base_q + i ] ;
rx_ring = vsi - > rx_rings [ ch - > base_q + i ] ;
if ( ! tx_ring | | ! rx_ring )
continue ;
/* setup ring being channel enabled */
tx_ring - > ch = ch ;
rx_ring - > ch = ch ;
/* following code block sets up vector specific attributes */
tx_q_vector = tx_ring - > q_vector ;
rx_q_vector = rx_ring - > q_vector ;
if ( ! tx_q_vector & & ! rx_q_vector )
continue ;
if ( tx_q_vector ) {
tx_q_vector - > ch = ch ;
/* setup Tx and Rx ITR setting if DIM is off */
rc = & tx_q_vector - > tx ;
if ( ! ITR_IS_DYNAMIC ( rc ) )
ice_write_itr ( rc , rc - > itr_setting ) ;
}
if ( rx_q_vector ) {
rx_q_vector - > ch = ch ;
/* setup Tx and Rx ITR setting if DIM is off */
rc = & rx_q_vector - > rx ;
if ( ! ITR_IS_DYNAMIC ( rc ) )
ice_write_itr ( rc , rc - > itr_setting ) ;
}
}
/* it is safe to assume that, if channel has non-zero num_t[r]xq, then
* GLINT_ITR register would have been written to perform an in - context
* update , hence perform a flush
*/
if ( ch - > num_txq | | ch - > num_rxq )
ice_flush ( & vsi - > back - > hw ) ;
}
/**
* ice_cfg_chnl_all_res - configure channel resources
* @ vsi : ptr to main VSI
* @ ch : ptr to channel structure
*
* This function configures channel specific resources such as flow - director
* counter index , and other resources such as queues , vectors , ITR settings
*/
static void
ice_cfg_chnl_all_res ( struct ice_vsi * vsi , struct ice_channel * ch )
{
/* configure channel (aka ADQ) resources such as queues, vectors,
* ITR settings for channel specific vectors and anything else
*/
ice_chnl_cfg_res ( vsi , ch ) ;
}
/**
* ice_setup_hw_channel - setup new channel
* @ pf : ptr to PF device
* @ vsi : the VSI being setup
* @ ch : ptr to channel structure
* @ sw_id : underlying HW switching element ID
* @ type : type of channel to be created ( VMDq2 / VF )
*
* Setup new channel ( VSI ) based on specified type ( VMDq2 / VF )
* and configures Tx rings accordingly
*/
static int
ice_setup_hw_channel ( struct ice_pf * pf , struct ice_vsi * vsi ,
struct ice_channel * ch , u16 sw_id , u8 type )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
int ret ;
ch - > base_q = vsi - > next_base_q ;
ch - > type = type ;
ret = ice_add_channel ( pf , sw_id , ch ) ;
if ( ret ) {
dev_err ( dev , " failed to add_channel using sw_id %u \n " , sw_id ) ;
return ret ;
}
/* configure/setup ADQ specific resources */
ice_cfg_chnl_all_res ( vsi , ch ) ;
/* make sure to update the next_base_q so that subsequent channel's
* ( aka ADQ ) VSI queue map is correct
*/
vsi - > next_base_q = vsi - > next_base_q + ch - > num_rxq ;
dev_dbg ( dev , " added channel: vsi_num %u, num_rxq %u \n " , ch - > vsi_num ,
ch - > num_rxq ) ;
return 0 ;
}
/**
* ice_setup_channel - setup new channel using uplink element
* @ pf : ptr to PF device
* @ vsi : the VSI being setup
* @ ch : ptr to channel structure
*
* Setup new channel ( VSI ) based on specified type ( VMDq2 / VF )
* and uplink switching element
*/
static bool
ice_setup_channel ( struct ice_pf * pf , struct ice_vsi * vsi ,
struct ice_channel * ch )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
u16 sw_id ;
int ret ;
if ( vsi - > type ! = ICE_VSI_PF ) {
dev_err ( dev , " unsupported parent VSI type(%d) \n " , vsi - > type ) ;
return false ;
}
sw_id = pf - > first_sw - > sw_id ;
/* create channel (VSI) */
ret = ice_setup_hw_channel ( pf , vsi , ch , sw_id , ICE_VSI_CHNL ) ;
if ( ret ) {
dev_err ( dev , " failed to setup hw_channel \n " ) ;
return false ;
}
dev_dbg ( dev , " successfully created channel() \n " ) ;
return ch - > ch_vsi ? true : false ;
}
/**
* ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @ vsi : VSI to be configured
* @ max_tx_rate : max Tx rate in Kbps to be configured as maximum BW limit
* @ min_tx_rate : min Tx rate in Kbps to be configured as minimum BW limit
*/
static int
ice_set_bw_limit ( struct ice_vsi * vsi , u64 max_tx_rate , u64 min_tx_rate )
{
int err ;
err = ice_set_min_bw_limit ( vsi , min_tx_rate ) ;
if ( err )
return err ;
return ice_set_max_bw_limit ( vsi , max_tx_rate ) ;
}
/**
* ice_create_q_channel - function to create channel
* @ vsi : VSI to be configured
* @ ch : ptr to channel ( it contains channel specific params )
*
* This function creates a channel ( VSI ) using the num_queues specified by the
* user and reconfigures RSS if needed .
*/
static int ice_create_q_channel ( struct ice_vsi * vsi , struct ice_channel * ch )
{
struct ice_pf * pf = vsi - > back ;
struct device * dev ;
if ( ! ch )
return - EINVAL ;
dev = ice_pf_to_dev ( pf ) ;
if ( ! ch - > num_txq | | ! ch - > num_rxq ) {
dev_err ( dev , " Invalid num_queues requested: %d \n " , ch - > num_rxq ) ;
return - EINVAL ;
}
if ( ! vsi - > cnt_q_avail | | vsi - > cnt_q_avail < ch - > num_txq ) {
dev_err ( dev , " cnt_q_avail (%u) less than num_queues %d \n " ,
vsi - > cnt_q_avail , ch - > num_txq ) ;
return - EINVAL ;
}
if ( ! ice_setup_channel ( pf , vsi , ch ) ) {
dev_info ( dev , " Failed to setup channel \n " ) ;
return - EINVAL ;
}
/* configure BW rate limit */
if ( ch - > ch_vsi & & ( ch - > max_tx_rate | | ch - > min_tx_rate ) ) {
int ret ;
ret = ice_set_bw_limit ( ch - > ch_vsi , ch - > max_tx_rate ,
ch - > min_tx_rate ) ;
if ( ret )
dev_err ( dev , " failed to set Tx rate of %llu Kbps for VSI(%u) \n " ,
ch - > max_tx_rate , ch - > ch_vsi - > vsi_num ) ;
else
dev_dbg ( dev , " set Tx rate of %llu Kbps for VSI(%u) \n " ,
ch - > max_tx_rate , ch - > ch_vsi - > vsi_num ) ;
}
vsi - > cnt_q_avail - = ch - > num_txq ;
return 0 ;
}
2021-10-15 23:35:17 +00:00
/**
* ice_rem_all_chnl_fltrs - removes all channel filters
* @ pf : ptr to PF , TC - flower based filters are tracked at the PF level
*
* Remove all advanced switch filters , but only if they are channel - specific
* tc - flower based filters
*/
static void ice_rem_all_chnl_fltrs ( struct ice_pf * pf )
{
struct ice_tc_flower_fltr * fltr ;
struct hlist_node * node ;
/* to remove all channel filters, iterate an ordered list of filters */
hlist_for_each_entry_safe ( fltr , node ,
& pf - > tc_flower_fltr_list ,
tc_flower_node ) {
struct ice_rule_query_data rule ;
int status ;
/* for now process only channel specific filters */
if ( ! ice_is_chnl_fltr ( fltr ) )
continue ;
rule . rid = fltr - > rid ;
rule . rule_id = fltr - > rule_id ;
2022-10-21 07:58:45 +00:00
rule . vsi_handle = fltr - > dest_vsi_handle ;
2021-10-15 23:35:17 +00:00
status = ice_rem_adv_rule_by_id ( & pf - > hw , & rule ) ;
if ( status ) {
if ( status = = - ENOENT )
dev_dbg ( ice_pf_to_dev ( pf ) , " TC flower filter (rule_id %u) does not exist \n " ,
rule . rule_id ) ;
else
dev_err ( ice_pf_to_dev ( pf ) , " failed to delete TC flower filter, status %d \n " ,
status ) ;
} else if ( fltr - > dest_vsi ) {
/* update advanced switch filter count */
if ( fltr - > dest_vsi - > type = = ICE_VSI_CHNL ) {
u32 flags = fltr - > flags ;
fltr - > dest_vsi - > num_chnl_fltr - - ;
if ( flags & ( ICE_TC_FLWR_FIELD_DST_MAC |
ICE_TC_FLWR_FIELD_ENC_DST_MAC ) )
pf - > num_dmac_chnl_fltrs - - ;
}
}
hlist_del ( & fltr - > tc_flower_node ) ;
kfree ( fltr ) ;
}
}
2021-10-15 23:35:16 +00:00
/**
* ice_remove_q_channels - Remove queue channels for the TCs
* @ vsi : VSI to be configured
* @ rem_fltr : delete advanced switch filter or not
*
* Remove queue channels for the TCs
*/
2021-10-15 23:35:17 +00:00
static void ice_remove_q_channels ( struct ice_vsi * vsi , bool rem_fltr )
2021-10-15 23:35:16 +00:00
{
struct ice_channel * ch , * ch_tmp ;
2021-10-15 23:35:17 +00:00
struct ice_pf * pf = vsi - > back ;
2021-10-15 23:35:16 +00:00
int i ;
2021-10-15 23:35:17 +00:00
/* remove all tc-flower based filters if they are channel filters only */
if ( rem_fltr )
ice_rem_all_chnl_fltrs ( pf ) ;
2021-12-29 18:54:33 +00:00
/* remove ntuple filters since queue configuration is being changed */
if ( vsi - > netdev - > features & NETIF_F_NTUPLE ) {
struct ice_hw * hw = & pf - > hw ;
mutex_lock ( & hw - > fdir_fltr_lock ) ;
ice_fdir_del_all_fltrs ( vsi ) ;
mutex_unlock ( & hw - > fdir_fltr_lock ) ;
}
2021-10-15 23:35:16 +00:00
/* perform cleanup for channels if they exist */
list_for_each_entry_safe ( ch , ch_tmp , & vsi - > ch_list , list ) {
struct ice_vsi * ch_vsi ;
list_del ( & ch - > list ) ;
ch_vsi = ch - > ch_vsi ;
if ( ! ch_vsi ) {
kfree ( ch ) ;
continue ;
}
/* Reset queue contexts */
for ( i = 0 ; i < ch - > num_rxq ; i + + ) {
struct ice_tx_ring * tx_ring ;
struct ice_rx_ring * rx_ring ;
tx_ring = vsi - > tx_rings [ ch - > base_q + i ] ;
rx_ring = vsi - > rx_rings [ ch - > base_q + i ] ;
if ( tx_ring ) {
tx_ring - > ch = NULL ;
if ( tx_ring - > q_vector )
tx_ring - > q_vector - > ch = NULL ;
}
if ( rx_ring ) {
rx_ring - > ch = NULL ;
if ( rx_ring - > q_vector )
rx_ring - > q_vector - > ch = NULL ;
}
}
2021-12-29 18:54:33 +00:00
/* Release FD resources for the channel VSI */
ice_fdir_rem_adq_chnl ( & pf - > hw , ch - > ch_vsi - > idx ) ;
2021-10-15 23:35:16 +00:00
/* clear the VSI from scheduler tree */
ice_rm_vsi_lan_cfg ( ch - > ch_vsi - > port_info , ch - > ch_vsi - > idx ) ;
2022-12-21 11:38:20 +00:00
/* Delete VSI from FW, PF and HW VSI arrays */
2021-10-15 23:35:16 +00:00
ice_vsi_delete ( ch - > ch_vsi ) ;
/* free the channel */
kfree ( ch ) ;
}
/* clear the channel VSI map which is stored in main VSI */
ice_for_each_chnl_tc ( i )
vsi - > tc_map_vsi [ i ] = NULL ;
/* reset main VSI's all TC information */
vsi - > all_enatc = 0 ;
vsi - > all_numtc = 0 ;
}
/**
* ice_rebuild_channels - rebuild channel
* @ pf : ptr to PF
*
* Recreate channel VSIs and replay filters
*/
static int ice_rebuild_channels ( struct ice_pf * pf )
{
struct device * dev = ice_pf_to_dev ( pf ) ;
struct ice_vsi * main_vsi ;
bool rem_adv_fltr = true ;
struct ice_channel * ch ;
struct ice_vsi * vsi ;
int tc_idx = 1 ;
int i , err ;
main_vsi = ice_get_main_vsi ( pf ) ;
if ( ! main_vsi )
return 0 ;
if ( ! test_bit ( ICE_FLAG_TC_MQPRIO , pf - > flags ) | |
main_vsi - > old_numtc = = 1 )
return 0 ; /* nothing to be done */
/* reconfigure main VSI based on old value of TC and cached values
* for MQPRIO opts
*/
err = ice_vsi_cfg_tc ( main_vsi , main_vsi - > old_ena_tc ) ;
if ( err ) {
dev_err ( dev , " failed configuring TC(ena_tc:0x%02x) for HW VSI=%u \n " ,
main_vsi - > old_ena_tc , main_vsi - > vsi_num ) ;
return err ;
}
/* rebuild ADQ VSIs */
ice_for_each_vsi ( pf , i ) {
enum ice_vsi_type type ;
vsi = pf - > vsi [ i ] ;
if ( ! vsi | | vsi - > type ! = ICE_VSI_CHNL )
continue ;
type = vsi - > type ;
/* rebuild ADQ VSI */
2022-12-21 11:38:16 +00:00
err = ice_vsi_rebuild ( vsi , ICE_VSI_FLAG_INIT ) ;
2021-10-15 23:35:16 +00:00
if ( err ) {
dev_err ( dev , " VSI (type:%s) at index %d rebuild failed, err %d \n " ,
ice_vsi_type_str ( type ) , vsi - > idx , err ) ;
goto cleanup ;
}
/* Re-map HW VSI number, using VSI handle that has been
* previously validated in ice_replay_vsi ( ) call above
*/
vsi - > vsi_num = ice_get_hw_vsi_num ( & pf - > hw , vsi - > idx ) ;
/* replay filters for the VSI */
err = ice_replay_vsi ( & pf - > hw , vsi - > idx ) ;
if ( err ) {
dev_err ( dev , " VSI (type:%s) replay failed, err %d, VSI index %d \n " ,
ice_vsi_type_str ( type ) , err , vsi - > idx ) ;
rem_adv_fltr = false ;
goto cleanup ;
}
dev_info ( dev , " VSI (type:%s) at index %d rebuilt successfully \n " ,
ice_vsi_type_str ( type ) , vsi - > idx ) ;
/* store ADQ VSI at correct TC index in main VSI's
* map of TC to VSI
*/
main_vsi - > tc_map_vsi [ tc_idx + + ] = vsi ;
}
/* ADQ VSI(s) have been rebuilt successfully, so set up
* channels for the main VSI ' s Tx and Rx rings
*/
list_for_each_entry ( ch , & main_vsi - > ch_list , list ) {
struct ice_vsi * ch_vsi ;
ch_vsi = ch - > ch_vsi ;
if ( ! ch_vsi )
continue ;
/* reconfig channel resources */
ice_cfg_chnl_all_res ( main_vsi , ch ) ;
/* replay BW rate limit if it is non-zero */
if ( ! ch - > max_tx_rate & & ! ch - > min_tx_rate )
continue ;
err = ice_set_bw_limit ( ch_vsi , ch - > max_tx_rate ,
ch - > min_tx_rate ) ;
if ( err )
dev_err ( dev , " failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u) \n " ,
err , ch - > max_tx_rate , ch - > min_tx_rate ,
ch_vsi - > vsi_num ) ;
else
dev_dbg ( dev , " successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u) \n " ,
ch - > max_tx_rate , ch - > min_tx_rate ,
ch_vsi - > vsi_num ) ;
}
/* reconfig RSS for main VSI */
if ( main_vsi - > ch_rss_size )
ice_vsi_cfg_rss_lut_key ( main_vsi ) ;
return 0 ;
cleanup :
ice_remove_q_channels ( main_vsi , rem_adv_fltr ) ;
return err ;
}

/**
 * ice_create_q_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 */
static int ice_create_q_channels(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_channel *ch;
	int ret = 0, i;

	ice_for_each_chnl_tc(i) {
		if (!(vsi->all_enatc & BIT(i)))
			continue;

		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			ret = -ENOMEM;
			goto err_free;
		}
		INIT_LIST_HEAD(&ch->list);
		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];

		/* convert to Kbits/s */
		if (ch->max_tx_rate)
			ch->max_tx_rate = div_u64(ch->max_tx_rate,
						  ICE_BW_KBPS_DIVISOR);
		if (ch->min_tx_rate)
			ch->min_tx_rate = div_u64(ch->min_tx_rate,
						  ICE_BW_KBPS_DIVISOR);

		ret = ice_create_q_channel(vsi, ch);
		if (ret) {
			dev_err(ice_pf_to_dev(pf),
				"failed creating channel TC:%d\n", i);
			kfree(ch);
			goto err_free;
		}
		list_add_tail(&ch->list, &vsi->ch_list);
		vsi->tc_map_vsi[i] = ch->ch_vsi;
		dev_dbg(ice_pf_to_dev(pf),
			"successfully created channel: VSI %pK\n", ch->ch_vsi);
	}
	return 0;

err_free:
	ice_remove_q_channels(vsi, false);

	return ret;
}

/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 */
static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 mode, ena_tc_qdisc = 0;
	int cur_txq, cur_rxq;
	u8 hw = 0, num_tcf;
	struct device *dev;
	int ret, i;

	dev = ice_pf_to_dev(pf);
	num_tcf = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		vsi->ch_rss_size = 0;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tcf;
	}

	/* Generate queue region map for number of TCF requested */
	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= BIT(i);

	switch (mode) {
	case TC_MQPRIO_MODE_CHANNEL:
		if (pf->hw.port_info->is_custom_tx_enabled) {
			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
			return -EBUSY;
		}
		ice_tear_down_devlink_rate_tree(pf);

		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret) {
			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
				   ret);
			return ret;
		}
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload
		 * already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
		break;
	default:
		return -EINVAL;
	}

config_tcf:
	/* Requesting same TCF configuration as already enabled */
	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Pause VSI queues */
	ice_dis_vsi(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ice_remove_q_channels(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
				     num_online_cpus());
		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
				     num_online_cpus());
	} else {
		/* logic to rebuild VSI, similar to ethtool -L */
		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;

		for (i = 0; i < num_tcf; i++) {
			if (!(ena_tc_qdisc & BIT(i)))
				continue;

			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
		vsi->req_txq = offset + qcount_tx;
		vsi->req_rxq = offset + qcount_rx;

		/* store away original rss_size info, so that it gets reused
		 * from ice_vsi_rebuild during tc-qdisc delete stage - to
		 * determine what the rss_size for the main VSI should be
		 */
		vsi->orig_rss_size = vsi->rss_size;
	}

	/* save current values of Tx and Rx queues before calling VSI rebuild
	 * for fallback option
	 */
	cur_txq = vsi->num_txq;
	cur_rxq = vsi->num_rxq;

	/* proceed with rebuild main VSI using correct number of queues */
	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
	if (ret) {
		/* fallback to current number of queues */
		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
		vsi->req_txq = cur_txq;
		vsi->req_rxq = cur_rxq;
		clear_bit(ICE_RESET_FAILED, pf->state);
		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
			dev_err(dev, "Rebuild of main VSI failed again\n");
			return ret;
		}
	}

	vsi->all_numtc = num_tcf;
	vsi->all_enatc = ena_tc_qdisc;
	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
	if (ret) {
		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
			   vsi->vsi_num);
		goto exit;
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

		/* set TC0 rate limit if specified */
		if (max_tx_rate || min_tx_rate) {
			/* convert to Kbits/s */
			if (max_tx_rate)
				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
			if (min_tx_rate)
				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);

			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
			if (!ret) {
				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
			} else {
				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
				goto exit;
			}
		}
		ret = ice_create_q_channels(vsi);
		if (ret) {
			netdev_err(netdev, "failed configuring queue channels\n");
			goto exit;
		} else {
			netdev_dbg(netdev, "successfully configured channels\n");
		}
	}

	if (vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(vsi);

exit:
	/* if error, reset the all_numtc and all_enatc */
	if (ret) {
		vsi->all_numtc = 0;
		vsi->all_enatc = 0;
	}

	/* resume VSI */
	ice_ena_vsi(vsi, true);

	return ret;
}
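
/* For reference, a request that lands in the TC_MQPRIO_MODE_CHANNEL (ADQ)
 * branch above is typically produced from userspace roughly as follows
 * (illustrative iproute2 invocation; exact options vary by version):
 *
 *	tc qdisc add dev <iface> root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 *
 * "num_tc"/"queues" populate mqprio_qopt->qopt (count/offset per TC), and
 * "hw 1" with "mode channel" selects the offloaded path handled above;
 * deleting the qdisc arrives with hw == 0 and tears the channels down.
 */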

static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	bool locked = false;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
			return -EOPNOTSUPP;
		}

		if (pf->adev) {
			mutex_lock(&pf->adev_mutex);
			device_lock(&pf->adev->dev);
			locked = true;
			if (pf->adev->dev.driver) {
				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
				err = -EBUSY;
				goto adev_unlock;
			}
		}

		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);

adev_unlock:
		if (locked) {
			device_unlock(&pf->adev->dev);
			mutex_unlock(&pf->adev_mutex);
		}
		return err;
	default:
		return -EOPNOTSUPP;
	}
	return -EOPNOTSUPP;
}

static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}
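
/* ice_indr_setup_tc_cb() is the driver's hook into the core indirect
 * flow-block machinery. A minimal sketch of how such a callback is
 * registered, assuming np is the PF netdev's ice_netdev_priv (the actual
 * call site lives elsewhere in the driver, outside this excerpt):
 *
 *	err = flow_indr_dev_register(ice_indr_setup_tc_cb, np);
 *
 * Registration is what allows TC offload requests made on foreign netdevs
 * (tunnel devices, VLAN uppers of the PF) to be steered into
 * ice_indr_setup_tc_block() above.
 */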

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be used directly except
 * by ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			if (link_err == -ENOMEDIUM)
				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
					    vsi->vsi_num);
			else
				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
					   vsi->vsi_num, link_err);

			ice_vsi_close(vsi);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}
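
/* The forced link-down path above is gated by a driver private flag that
 * userspace can toggle, e.g. (illustrative invocation; flag name as exposed
 * through this driver's ethtool private flags):
 *
 *	ethtool --set-priv-flags <iface> link-down-on-close on
 */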

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;

out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};