Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-03-06 (iavf, i40e, ixgbe)

This series contains updates to iavf, i40e, and ixgbe drivers.

Alexey Kodanev removes duplicate calls related to cloud filters on iavf
and unnecessary null checks on i40e.

Maciej adds helper functions for the common code that updates Tx and Rx
ring statistics in ixgbe.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committer: David S. Miller (2024-03-08 11:43:21 +00:00)
Commit: e3eec34977
6 changed files with 53 additions and 42 deletions


@@ -148,8 +148,6 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
                 u32 reg_idx;
                 qv_info = &qvlist_info->qv_info[i];
-                if (!qv_info)
-                        continue;
                 reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
                 wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
         }
@@ -576,8 +574,6 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
         for (i = 0; i < qvlist_info->num_vectors; i++) {
                 qv_info = &qvlist_info->qv_info[i];
-                if (!qv_info)
-                        continue;
                 v_idx = qv_info->v_idx;
                 /* Validate vector id belongs to this client */


@@ -491,8 +491,6 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
                 u32 v_idx, reg_idx, reg;
                 qv_info = &qvlist_info->qv_info[i];
-                if (!qv_info)
-                        continue;
                 v_idx = qv_info->v_idx;
                 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
                         /* Figure out the queue after CEQ and make that the
@@ -562,8 +560,6 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
         msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
         for (i = 0; i < qvlist_info->num_vectors; i++) {
                 qv_info = &qvlist_info->qv_info[i];
-                if (!qv_info)
-                        continue;
                 /* Validate vector id belongs to this vf */
                 if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
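
Background on the four i40e hunks above: qv_info is assigned the address of an
element of the qv_info[] array embedded in qvlist_info, so it can never be NULL,
which is why the "if (!qv_info) continue;" guards are removed as dead code. A
minimal stand-alone C sketch of that point, using hypothetical names rather than
the driver's structures:

#include <stdio.h>

struct qv_entry {
        int v_idx;
};

struct qv_list {
        unsigned int num_vectors;
        struct qv_entry entries[4];     /* embedded array, not a pointer */
};

int main(void)
{
        struct qv_list list = { .num_vectors = 4 };

        for (unsigned int i = 0; i < list.num_vectors; i++) {
                /* &list.entries[i] is list.entries + i: always a valid,
                 * non-NULL pointer as long as 'list' itself is valid,
                 * so a NULL check on it can never fire.
                 */
                struct qv_entry *e = &list.entries[i];

                if (!e)         /* dead code, mirrors the removed guard */
                        continue;
                printf("entry %u, v_idx=%d\n", i, e->v_idx);
        }
        return 0;
}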


@@ -2170,19 +2170,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
                 iavf_add_cloud_filter(adapter);
                 return 0;
         }
         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
                 iavf_del_cloud_filter(adapter);
                 return 0;
         }
-        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
-                iavf_del_cloud_filter(adapter);
-                return 0;
-        }
-        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
-                iavf_add_cloud_filter(adapter);
-                return 0;
-        }
         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
                 iavf_add_fdir_filter(adapter);
                 return IAVF_SUCCESS;
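
The iavf hunk above drops the duplicated cloud-filter blocks mentioned in the
cover letter. For background: aq_required is a bitmask of pending admin-queue
operations, and iavf_process_aq_command() services at most one of them per call,
returning as soon as a set bit is handled; a second, identical test of the same
bit further down the chain can therefore only be reached when that bit is clear
and never does any work. A simplified, stand-alone sketch of that dispatch
pattern with hypothetical names (not the driver's code):

#include <stdio.h>

#define OP_DEL_CLOUD_FILTER     0x1u
#define OP_ADD_CLOUD_FILTER     0x2u
#define OP_ADD_FDIR_FILTER      0x4u

/* Service at most one pending operation per call, mimicking the aq_required
 * dispatch: once a set bit is handled we return, so repeating the same test
 * later in the chain could never take the true branch.
 */
static int process_one_pending(unsigned int *pending)
{
        if (*pending & OP_DEL_CLOUD_FILTER) {
                *pending &= ~OP_DEL_CLOUD_FILTER;
                puts("del cloud filter");
                return 0;
        }
        if (*pending & OP_ADD_CLOUD_FILTER) {
                *pending &= ~OP_ADD_CLOUD_FILTER;
                puts("add cloud filter");
                return 0;
        }
        if (*pending & OP_ADD_FDIR_FILTER) {
                *pending &= ~OP_ADD_FDIR_FILTER;
                puts("add fdir filter");
                return 0;
        }
        return -1;      /* nothing pending */
}

int main(void)
{
        unsigned int pending = OP_ADD_CLOUD_FILTER | OP_ADD_FDIR_FILTER;

        while (process_one_pending(&pending) == 0)
                ;
        return 0;
}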


@@ -1105,6 +1105,44 @@ static int ixgbe_tx_maxrate(struct net_device *netdev,
         return 0;
 }
 
+/**
+ * ixgbe_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+                                struct ixgbe_q_vector *q_vector, u64 pkts,
+                                u64 bytes)
+{
+        u64_stats_update_begin(&tx_ring->syncp);
+        tx_ring->stats.bytes += bytes;
+        tx_ring->stats.packets += pkts;
+        u64_stats_update_end(&tx_ring->syncp);
+        q_vector->tx.total_bytes += bytes;
+        q_vector->tx.total_packets += pkts;
+}
+
+/**
+ * ixgbe_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+                                struct ixgbe_q_vector *q_vector, u64 pkts,
+                                u64 bytes)
+{
+        u64_stats_update_begin(&rx_ring->syncp);
+        rx_ring->stats.bytes += bytes;
+        rx_ring->stats.packets += pkts;
+        u64_stats_update_end(&rx_ring->syncp);
+        q_vector->rx.total_bytes += bytes;
+        q_vector->rx.total_packets += pkts;
+}
+
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
@@ -1207,12 +1245,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
         i += tx_ring->count;
         tx_ring->next_to_clean = i;
-        u64_stats_update_begin(&tx_ring->syncp);
-        tx_ring->stats.bytes += total_bytes;
-        tx_ring->stats.packets += total_packets;
-        u64_stats_update_end(&tx_ring->syncp);
-        q_vector->tx.total_bytes += total_bytes;
-        q_vector->tx.total_packets += total_packets;
+        ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+                                   total_bytes);
         adapter->tx_ipsec += total_ipsec;
         if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -2429,12 +2463,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 ixgbe_xdp_ring_update_tail_locked(ring);
         }
-        u64_stats_update_begin(&rx_ring->syncp);
-        rx_ring->stats.packets += total_rx_packets;
-        rx_ring->stats.bytes += total_rx_bytes;
-        u64_stats_update_end(&rx_ring->syncp);
-        q_vector->rx.total_packets += total_rx_packets;
-        q_vector->rx.total_bytes += total_rx_bytes;
+        ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+                                   total_rx_bytes);
         return total_rx_packets;
 }
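
The two helpers introduced above wrap the writer side of the kernel's u64_stats
API: u64_stats_update_begin()/u64_stats_update_end() mark a write section so the
64-bit byte and packet counters stay consistent on 32-bit kernels, while the
per-vector totals feed interrupt moderation. Readers, such as a driver's
get_stats64 path, take a snapshot with the matching fetch/retry loop. A minimal
reader-side sketch, assuming a hypothetical ring type with the same stats/syncp
layout (not the driver's actual code):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_ring_stats {
        u64 packets;
        u64 bytes;
};

struct demo_ring {
        struct demo_ring_stats stats;
        struct u64_stats_sync syncp;
};

/* Snapshot {packets, bytes} consistently even if a writer is between
 * u64_stats_update_begin() and u64_stats_update_end() on another CPU.
 */
static void demo_ring_read_stats(const struct demo_ring *ring,
                                 u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&ring->syncp);
                *packets = ring->stats.packets;
                *bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry(&ring->syncp, start));
}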


@@ -46,4 +46,11 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
 void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+                                struct ixgbe_q_vector *q_vector, u64 pkts,
+                                u64 bytes);
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+                                struct ixgbe_q_vector *q_vector, u64 pkts,
+                                u64 bytes);
 #endif /* #define _IXGBE_TXRX_COMMON_H_ */


@@ -359,12 +359,8 @@ construct_skb:
                 ixgbe_xdp_ring_update_tail_locked(ring);
         }
-        u64_stats_update_begin(&rx_ring->syncp);
-        rx_ring->stats.packets += total_rx_packets;
-        rx_ring->stats.bytes += total_rx_bytes;
-        u64_stats_update_end(&rx_ring->syncp);
-        q_vector->rx.total_packets += total_rx_packets;
-        q_vector->rx.total_bytes += total_rx_bytes;
+        ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+                                   total_rx_bytes);
         if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
                 if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
@@ -499,13 +495,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
         }
         tx_ring->next_to_clean = ntc;
-        u64_stats_update_begin(&tx_ring->syncp);
-        tx_ring->stats.bytes += total_bytes;
-        tx_ring->stats.packets += total_packets;
-        u64_stats_update_end(&tx_ring->syncp);
-        q_vector->tx.total_bytes += total_bytes;
-        q_vector->tx.total_packets += total_packets;
+        ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+                                   total_bytes);
         if (xsk_frames)
                 xsk_tx_completed(pool, xsk_frames);