mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 21:03:32 +00:00
net/mlx5: Verify support for scheduling element and TSAR type
Before creating a scheduling element in a NIC or E-Switch scheduler, ensure that the requested element type is supported. If the element is of type Transmit Scheduling Arbiter (TSAR), also verify that the specific TSAR type is supported.

Fixes: 214baf2287 ("net/mlx5e: Support HTB offload")
Fixes: 85c5f7c920 ("net/mlx5: E-switch, Create QoS on demand")
Fixes: 0fe132eac3 ("net/mlx5: E-switch, Allow to add vports to rate groups")
Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
This commit is contained in:
parent
452ef7f860
commit
861cd9b9cb
2 changed files with 31 additions and 20 deletions
|
@@ -312,6 +312,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
|
|||
return err;
|
||||
}
|
||||
|
||||
static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
|
||||
{
|
||||
switch (type) {
|
||||
case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
|
||||
return MLX5_CAP_QOS(dev, esw_element_type) &
|
||||
ELEMENT_TYPE_CAP_MASK_TSAR;
|
||||
case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
|
||||
return MLX5_CAP_QOS(dev, esw_element_type) &
|
||||
ELEMENT_TYPE_CAP_MASK_VPORT;
|
||||
case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
|
||||
return MLX5_CAP_QOS(dev, esw_element_type) &
|
||||
ELEMENT_TYPE_CAP_MASK_VPORT_TC;
|
||||
case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
|
||||
return MLX5_CAP_QOS(dev, esw_element_type) &
|
||||
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
|
||||
struct mlx5_vport *vport,
|
||||
u32 max_rate, u32 bw_share)
|
||||
|
@@ -323,6 +342,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
|
|||
void *vport_elem;
|
||||
int err;
|
||||
|
||||
if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
|
||||
MLX5_SET(scheduling_context, sched_ctx, element_type,
|
||||
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
|
||||
|
@@ -533,25 +555,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
|
|||
return err;
|
||||
}
|
||||
|
||||
/* Report whether the device's E-Switch scheduler supports creating a
 * scheduling element of @type.
 *
 * Each supported element type is advertised as a bit in the
 * esw_element_type QoS capability; any type not listed below is
 * considered unsupported.
 */
static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
{
	switch (type) {
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_TSAR;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
	}
	/* Unknown element type: not supported. */
	return false;
}
|
||||
|
||||
static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
|
||||
{
|
||||
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
|
||||
|
@@ -562,7 +565,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
|
|||
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
|
||||
if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
|
||||
!(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
MLX5_SET(scheduling_context, tsar_ctx, element_type,
|
||||
|
|
|
@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
|
|||
{
|
||||
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
|
||||
|
||||
if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
|
||||
MLX5_SET(scheduling_context, sched_ctx, element_type,
|
||||
SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
|
||||
|
@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
|
|||
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
|
||||
void *attr;
|
||||
|
||||
if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
|
||||
!(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
|
||||
MLX5_SET(scheduling_context, sched_ctx, element_type,
|
||||
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
|
||||
|
|
Loading…
Reference in a new issue