ice: reserve number of CP queues

Rebuilding the CP VSI each time a PR is created drastically increases
the time needed to create the maximum number of VFs. Add a function
that reserves the needed number of CP queues up front to deal with
this problem.

Use the same function to decrease the number of queues when VFs are
removed. Assume that the caller of ice_eswitch_reserve_cp_queues()
will also call ice_eswitch_attach/detach() the correct number of
times.

Adding PRs one by one is still handy for the VF reset routine.

Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
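
For illustration only (not part of the patch): a minimal sketch of the calling
contract described above. example_create_vfs() and example_remove_vfs() are
hypothetical helpers with simplified error handling; only the reserve/attach/
detach calls mirror the API added by this commit, and the driver types come
from the ice driver headers. The real call sites are in the ice_sriov.c hunks
at the end of the diff.

/* Illustrative sketch, not part of the patch. */
static int example_create_vfs(struct ice_pf *pf, struct ice_vf **vf, int num_vfs)
{
        int i, err;

        /* grow the control plane VSI once instead of rebuilding it per VF */
        ice_eswitch_reserve_cp_queues(pf, num_vfs);

        for (i = 0; i < num_vfs; i++) {
                err = ice_eswitch_attach(pf, vf[i]);
                if (err)
                        return err;
        }

        return 0;
}

static void example_remove_vfs(struct ice_pf *pf, struct ice_vf **vf, int num_vfs)
{
        int i;

        /* shrink the control plane VSI once on teardown */
        ice_eswitch_reserve_cp_queues(pf, -num_vfs);

        for (i = 0; i < num_vfs; i++)
                ice_eswitch_detach(pf, vf[i]);
}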
Author:    Michal Swiatkowski
Date:      2023-10-24 13:09:29 +02:00
Committer: Tony Nguyen
Commit:    19b39caec0 (parent c9663f79cd)
4 changed files with 58 additions and 7 deletions


@@ -528,6 +528,12 @@ struct ice_eswitch {
        struct ice_esw_br_offloads *br_offloads;
        struct xarray reprs;
        bool is_running;
        /* struct to allow cp queues management optimization */
        struct {
                int to_reach;
                int value;
                bool is_reaching;
        } qs;
};

struct ice_agg_node {


@@ -176,7 +176,7 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
                repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
                               XA_PRESENT);
                if (WARN_ON(!repr))
                if (!repr)
                        break;

                repr_id += 1;

@@ -455,6 +455,8 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
                return -ENODEV;

        ctrl_vsi = pf->eswitch.control_vsi;
        /* cp VSI is created with 1 queue as default */
        pf->eswitch.qs.value = 1;
        pf->eswitch.uplink_vsi = uplink_vsi;

        if (ice_eswitch_setup_env(pf))

@@ -487,6 +489,7 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
        ice_vsi_release(ctrl_vsi);

        pf->eswitch.is_running = false;
        pf->eswitch.qs.is_reaching = false;
}

/**

@@ -615,15 +618,33 @@ static void
ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
{
        struct ice_vsi *cp = eswitch->control_vsi;
        int queues = 0;

        ice_vsi_close(cp);

        if (eswitch->qs.is_reaching) {
                if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
                        queues = eswitch->qs.to_reach;
                        eswitch->qs.is_reaching = false;
                } else {
                        queues = 0;
                }
        } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
                   change < 0) {
                queues = cp->alloc_txq + change;
        }

        cp->req_txq = cp->alloc_txq + change;
        cp->req_rxq = cp->alloc_rxq + change;
        ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);

        if (queues) {
                cp->req_txq = queues;
                cp->req_rxq = queues;
                ice_vsi_close(cp);
                ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
                ice_vsi_open(cp);
        } else if (!change) {
                /* change == 0 means that VSI wasn't open, open it here */
                ice_vsi_open(cp);
        }

        eswitch->qs.value += change;
        ice_eswitch_remap_rings_to_vectors(eswitch);
        ice_vsi_open(cp);
}

int

@@ -641,6 +662,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
                if (err)
                        return err;

                /* Control plane VSI is created with 1 queue as default */
                pf->eswitch.qs.to_reach -= 1;
                change = 0;
        }

@@ -732,3 +754,19 @@ int ice_eswitch_rebuild(struct ice_pf *pf)

        return 0;
}

/**
 * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
 * @pf: pointer to PF structure
 * @change: how many more (or fewer) queues are needed
 *
 * Remember to call ice_eswitch_attach/detach() "change" times.
 */
void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
{
        if (pf->eswitch.qs.value + change < 0)
                return;

        pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
        pf->eswitch.qs.is_reaching = true;
}


@@ -26,6 +26,7 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
                                struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
#else /* CONFIG_ICE_SWITCHDEV */
static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }

@@ -76,5 +77,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        return NETDEV_TX_BUSY;
}

static inline void
ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */


@@ -172,6 +172,8 @@ void ice_free_vfs(struct ice_pf *pf)
        else
                dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

        ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));

        mutex_lock(&vfs->table_lock);

        ice_for_each_vf(pf, bkt, vf) {

@@ -930,6 +932,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
                goto err_unroll_sriov;
        }

        ice_eswitch_reserve_cp_queues(pf, num_vfs);
        ret = ice_start_vfs(pf);
        if (ret) {
                dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);