net/mlx5: Shift control IRQ to the last index

Control IRQ is the first IRQ vector. This complicates the handling of
completion IRQs, as we need to offset them by one.

In the next patch, there are scenarios where completion and control EQs
will share the same IRQ, for example on functions with a single IRQ. To
ease such scenarios, shift the control IRQ to the end of the IRQ array.
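
Schematically, with N IRQ vectors the layout changes as follows (an
illustrative sketch, not part of the patch):

    Before: vector 0       -> control EQs (cmd, async, pages, IB page fault)
            vectors 1..N-1 -> completion EQs (vecidx = i + MLX5_IRQ_VEC_COMP_BASE)
    After:  vectors 0..N-2 -> completion EQs (vecidx = i)
            vector N-1     -> control EQs, requested via the new
                              MLX5_IRQ_EQ_CTRL alias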

Signed-off-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>

--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c

@@ -1559,6 +1559,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 
 	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_IB_NUM_PF_EQE,
 	};
 	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;

--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c

@@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	mlx5_eq_notifier_register(dev, &table->cq_err_nb);
 
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_NUM_CMD_EQE,
 		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
 	};
@@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_NUM_ASYNC_EQE,
 	};
@@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 		goto err2;
 
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = /* TODO: sriov max_vf + */ 1,
 		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
 	};
@@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	ncomp_eqs = table->num_comp_eqs;
 	nent = MLX5_COMP_EQ_SIZE;
 	for (i = 0; i < ncomp_eqs; i++) {
-		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
 		struct mlx5_eq_param param = {};
+		int vecidx = i;
 
 		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 		if (!eq) {
@@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev)
 		goto err_out;
 	}
 
-	vecidx = MLX5_IRQ_VEC_COMP_BASE;
-	for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
-	     vecidx++) {
+	for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
 		err = irq_cpu_rmap_add(eq_table->rmap,
 				       pci_irq_vector(mdev->pdev, vecidx));
 		if (err) {

--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h

@@ -8,8 +8,6 @@
 
 #define MLX5_COMP_EQS_PER_SF 8
 
-#define MLX5_IRQ_EQ_CTRL (0)
-
 struct mlx5_irq;
 
 int mlx5_irq_table_init(struct mlx5_core_dev *dev);

--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c

@@ -194,15 +194,14 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 	snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
 }
 
-static void irq_set_name(char *name, int vecidx)
+static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 {
-	if (vecidx == 0) {
+	if (vecidx == pool->xa_num_irqs.max) {
 		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
 		return;
 	}
 
-	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
-		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
+	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
 }
 
 static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
@@ -217,7 +216,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 		return ERR_PTR(-ENOMEM);
 	irq->irqn = pci_irq_vector(dev->pdev, i);
 	if (!pool->name[0])
-		irq_set_name(name, i);
+		irq_set_name(pool, name, i);
 	else
 		irq_sf_set_name(pool, name, i);
 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -440,6 +439,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	}
 pf_irq:
 	pool = irq_table->pf_pool;
+	vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
 	irq = irq_pool_request_vector(pool, vecidx, affinity);
 out:
 	if (IS_ERR(irq))

--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h

@@ -59,6 +59,8 @@
 
 #define MLX5_ADEV_NAME "mlx5_core"
 
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
 enum {
 	MLX5_BOARD_ID_LEN = 64,
 };
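
For reference, a minimal caller-side sketch of the resulting convention
(based on the hunks above; the locals i and ncomp_eqs are stand-ins, and
fields and error handling unrelated to the IRQ index are omitted):

	/* Control EQs request the MLX5_IRQ_EQ_CTRL alias (U8_MAX, a
	 * sentinel index); mlx5_irq_request() translates it to the
	 * pool's last vector, pool->xa_num_irqs.max.
	 */
	struct mlx5_eq_param param = {
		.irq_index = MLX5_IRQ_EQ_CTRL,
		.nent = MLX5_NUM_CMD_EQE,
	};

	/* Completion EQs now map 1:1 onto vectors 0..ncomp_eqs-1. */
	for (i = 0; i < ncomp_eqs; i++) {
		int vecidx = i;	/* was i + MLX5_IRQ_VEC_COMP_BASE */
		...
	}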