// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2

#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)

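/* One allocated IRQ vector. @nh is the notifier chain through which all
 * event queues sharing this vector are dispatched, and @refcount counts
 * the users currently attached to it.
 */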
struct mlx5_irq {
        struct atomic_notifier_head nh;
        cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_irq_pool *pool;
        int refcount;
        u32 index;
        int irqn;
};

struct mlx5_irq_table {
        struct mlx5_irq_pool *pf_pool;
        struct mlx5_irq_pool *sf_ctrl_pool;
        struct mlx5_irq_pool *sf_comp_pool;
};

/**
 * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
 *                                   to be assigned to each VF.
 * @dev: PF to work on
 * @num_vfs: Number of enabled VFs
 */
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
{
        int num_vf_msix, min_msix, max_msix;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        /* Limit maximum number of MSI-X vectors so the default configuration
         * has some available in the pool. This will allow the user to increase
         * the number of vectors in a VF without having to first size-down other
         * VFs.
         */
        return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
}

/**
 * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
 * @dev: PF to work on
 * @function_id: Internal PCI VF function ID
 * @msix_vec_count: Number of MSI-X vectors to set
 */
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
                            int msix_vec_count)
{
        int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        void *hca_cap = NULL, *query_cap = NULL, *cap;
        int num_vf_msix, min_msix, max_msix;
        int ret;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
                return -EOPNOTSUPP;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        if (msix_vec_count < min_msix)
                return -EINVAL;

        if (msix_vec_count > max_msix)
                return -EOVERFLOW;

        query_cap = kvzalloc(query_sz, GFP_KERNEL);
        hca_cap = kvzalloc(set_sz, GFP_KERNEL);
        if (!hca_cap || !query_cap) {
                ret = -ENOMEM;
                goto out;
        }

        ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
        if (ret)
                goto out;

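        /* Read the VF's current caps, modify only the dynamic MSI-X table
         * size, and write the whole capability block back.
         */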
        cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
        memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
               MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);

        MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
        MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);

        MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
                 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
        ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
out:
        kvfree(hca_cap);
        kvfree(query_cap);
        return ret;
}
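
/* Fully tear down an IRQ: drop it from the pool's xarray, clear the
 * affinity hint, release the Linux interrupt and free the descriptor.
 */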
static void irq_release(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;

        xa_erase(&pool->irqs, irq->index);
        /* free_irq requires that affinity_hint and rmap be cleared before
         * calling it. This is why there is asymmetry with set_rmap, which
         * should be called after alloc_irq but before request_irq.
         */
        irq_update_affinity_hint(irq->irqn, NULL);
        free_cpumask_var(irq->mask);
        free_irq(irq->irqn, &irq->nh);
        kfree(irq);
}

int mlx5_irq_put(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;
        int ret = 0;

        mutex_lock(&pool->lock);
        irq->refcount--;
        if (!irq->refcount) {
                irq_release(irq);
                ret = 1;
        }
        mutex_unlock(&pool->lock);
        return ret;
}

int mlx5_irq_read_locked(struct mlx5_irq *irq)
{
        lockdep_assert_held(&irq->pool->lock);
        return irq->refcount;
}

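/* Take an additional reference on @irq. Returns 0 if the IRQ has already
 * been released (refcount is zero), in which case it must not be used.
 */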
int mlx5_irq_get_locked(struct mlx5_irq *irq)
{
        lockdep_assert_held(&irq->pool->lock);
        if (WARN_ON_ONCE(!irq->refcount))
                return 0;
        irq->refcount++;
        return 1;
}

static int irq_get(struct mlx5_irq *irq)
{
        int err;

        mutex_lock(&irq->pool->lock);
        err = mlx5_irq_get_locked(irq);
        mutex_unlock(&irq->pool->lock);
        return err;
}

static irqreturn_t irq_int_handler(int irq, void *nh)
{
        atomic_notifier_call_chain(nh, 0, NULL);
        return IRQ_HANDLED;
}

static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
        snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}

static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
        if (!pool->xa_num_irqs.max) {
                /* in case we only have a single irq for the device */
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
                return;
        }

        if (vecidx == pool->xa_num_irqs.max) {
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
                return;
        }

        snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}

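/* Allocate the IRQ at index @i in @pool, request the Linux interrupt and,
 * if @affinity is provided, pin the vector to those CPUs. The returned IRQ
 * starts with refcount 1; ERR_PTR() is returned on failure.
 */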
struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
                                const struct cpumask *affinity)
{
        struct mlx5_core_dev *dev = pool->dev;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_irq *irq;
        int err;

        irq = kzalloc(sizeof(*irq), GFP_KERNEL);
        if (!irq)
                return ERR_PTR(-ENOMEM);
        irq->irqn = pci_irq_vector(dev->pdev, i);
        if (!mlx5_irq_pool_is_sf_pool(pool))
                irq_set_name(pool, name, i);
        else
                irq_sf_set_name(pool, name, i);
        ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
        snprintf(irq->name, MLX5_MAX_IRQ_NAME,
                 "%s@pci:%s", name, pci_name(dev->pdev));
        err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
                          &irq->nh);
        if (err) {
                mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
                goto err_req_irq;
        }
        if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
                mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
                err = -ENOMEM;
                goto err_cpumask;
        }
        if (affinity) {
                cpumask_copy(irq->mask, affinity);
                irq_set_affinity_and_hint(irq->irqn, irq->mask);
        }
        irq->pool = pool;
        irq->refcount = 1;
        irq->index = i;
        err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
        if (err) {
                mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
                              irq->index, err);
                goto err_xa;
        }
        return irq;
err_xa:
        irq_update_affinity_hint(irq->irqn, NULL);
        free_cpumask_var(irq->mask);
err_cpumask:
        free_irq(irq->irqn, &irq->nh);
err_req_irq:
        kfree(irq);
        return ERR_PTR(err);
}

int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
        int ret;

        ret = irq_get(irq);
        if (!ret)
                /* Something went very wrong: we are enabling an EQ
                 * on a non-existing IRQ.
                 */
                return -ENOENT;
        ret = atomic_notifier_chain_register(&irq->nh, nb);
        if (ret)
                mlx5_irq_put(irq);
        return ret;
}

int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
        int err = 0;

        err = atomic_notifier_chain_unregister(&irq->nh, nb);
        mlx5_irq_put(irq);
        return err;
}

struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
{
        return irq->mask;
}

int mlx5_irq_get_index(struct mlx5_irq *irq)
{
        return irq->index;
}

/* irq_pool API */

/* request an irq from a given pool according to a given index */
static struct mlx5_irq *
irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
                        struct cpumask *affinity)
{
        struct mlx5_irq *irq;

        mutex_lock(&pool->lock);
        irq = xa_load(&pool->irqs, vecidx);
        if (irq) {
                mlx5_irq_get_locked(irq);
                goto unlock;
        }
        irq = mlx5_irq_alloc(pool, vecidx, affinity);
unlock:
        mutex_unlock(&pool->lock);
        return irq;
}

static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table)
{
        return irq_table->sf_ctrl_pool;
}

static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
{
        return irq_table->sf_comp_pool;
}

struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool = NULL;

        if (mlx5_core_is_sf(dev))
                pool = sf_irq_pool_get(irq_table);

        /* In some configs, there won't be a pool of SF IRQs. Hence, return
         * the PF IRQ pool in case the SF pool doesn't exist.
         */
        return pool ? pool : irq_table->pf_pool;
}

static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool = NULL;

        if (mlx5_core_is_sf(dev))
                pool = sf_ctrl_irq_pool_get(irq_table);

        /* In some configs, there won't be a pool of SF IRQs. Hence, return
         * the PF IRQ pool in case the SF pool doesn't exist.
         */
        return pool ? pool : irq_table->pf_pool;
}

/**
 * mlx5_irqs_release - release one or more IRQs back to the system.
 * @irqs: IRQs to be released.
 * @nirqs: number of IRQs to be released.
 */
static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
{
        int i;

        for (i = 0; i < nirqs; i++) {
                synchronize_irq(irqs[i]->irqn);
                mlx5_irq_put(irqs[i]);
        }
}

/**
 * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
 * @ctrl_irq: ctrl IRQ to be released.
 */
void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
        mlx5_irqs_release(&ctrl_irq, 1);
}

/**
 * mlx5_ctrl_irq_request - request a ctrl IRQ for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQ.
 *
 * This function returns a pointer to an IRQ, or ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
        cpumask_var_t req_mask;
        struct mlx5_irq *irq;

        if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
                return ERR_PTR(-ENOMEM);
        cpumask_copy(req_mask, cpu_online_mask);
        if (!mlx5_irq_pool_is_sf_pool(pool)) {
                /* In case we are allocating a control IRQ for PF/VF */
                if (!pool->xa_num_irqs.max) {
                        cpumask_clear(req_mask);
                        /* In case we only have a single IRQ for PF/VF */
                        cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
                }
                /* Allocate the IRQ in the last index of the pool */
                irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
        } else {
                irq = mlx5_irq_affinity_request(pool, req_mask);
        }

        free_cpumask_var(req_mask);
        return irq;
}

/**
 * mlx5_irq_request - request an IRQ for mlx5 PF/VF device.
 * @dev: mlx5 device that is requesting the IRQ.
 * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
 * provided.
 * @affinity: cpumask requested for this IRQ.
 *
 * This function returns a pointer to an IRQ, or ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
                                  struct cpumask *affinity)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool;
        struct mlx5_irq *irq;

        pool = irq_table->pf_pool;
        irq = irq_pool_request_vector(pool, vecidx, affinity);
        if (IS_ERR(irq))
                return irq;
        mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
                      irq->irqn, cpumask_pr_args(affinity),
                      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
        return irq;
}

/**
 * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
 * @irqs: IRQs to be released.
 * @nirqs: number of IRQs to be released.
 */
void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
{
        mlx5_irqs_release(irqs, nirqs);
}

/**
 * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQs.
 * @cpus: CPUs array for binding the IRQs
 * @nirqs: number of IRQs to request.
 * @irqs: an output array of IRQ pointers.
 *
 * Each IRQ is bound to at most 1 CPU.
 * This function requests @nirqs IRQs, starting from index 0.
 *
 * This function returns the number of IRQs requested (which might be smaller
 * than @nirqs) on success, or a negative error code in case of an error.
 */
int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
                              struct mlx5_irq **irqs)
{
        cpumask_var_t req_mask;
        struct mlx5_irq *irq;
        int i;

        if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
                return -ENOMEM;
        for (i = 0; i < nirqs; i++) {
                cpumask_set_cpu(cpus[i], req_mask);
                irq = mlx5_irq_request(dev, i, req_mask);
                if (IS_ERR(irq))
                        break;
                cpumask_clear(req_mask);
                irqs[i] = irq;
        }

        free_cpumask_var(req_mask);
        return i ? i : PTR_ERR(irq);
}

static struct mlx5_irq_pool *
irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
               u32 min_threshold, u32 max_threshold)
{
        struct mlx5_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return ERR_PTR(-ENOMEM);
        pool->dev = dev;
        mutex_init(&pool->lock);
        xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
        pool->xa_num_irqs.min = start;
        pool->xa_num_irqs.max = start + size - 1;
        if (name)
                snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
                         "%s", name);
        pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
        pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
        mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
                      name, size, start);
        return pool;
}

static void irq_pool_free(struct mlx5_irq_pool *pool)
{
        struct mlx5_irq *irq;
        unsigned long index;

        /* There are cases in which we are destroying the irq_table before
         * freeing all the IRQs, fast teardown for example. Hence, free the irqs
         * which might not have been freed.
         */
        xa_for_each(&pool->irqs, index, irq)
                irq_release(irq);
        xa_destroy(&pool->irqs);
        mutex_destroy(&pool->lock);
        kfree(pool->irqs_per_cpu);
        kvfree(pool);
}
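
/* Carve the MSI-X vectors into up to three pools: one for the PF/VF and,
 * when SFs are supported and enough vectors remain, separate control and
 * completion pools for the SFs.
 */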
static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;
        int num_sf_ctrl_by_msix;
        int num_sf_ctrl_by_sfs;
        int num_sf_ctrl;
        int err;

        /* init pf_pool */
        table->pf_pool = irq_pool_alloc(dev, 0, pf_vec, NULL,
                                        MLX5_EQ_SHARE_IRQ_MIN_COMP,
                                        MLX5_EQ_SHARE_IRQ_MAX_COMP);
        if (IS_ERR(table->pf_pool))
                return PTR_ERR(table->pf_pool);
        if (!mlx5_sf_max_functions(dev))
                return 0;
        if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
                mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
                return 0;
        }

        /* init sf_ctrl_pool */
        num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
        num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
                                          MLX5_SFS_PER_CTRL_IRQ);
        num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
        num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
        table->sf_ctrl_pool = irq_pool_alloc(dev, pf_vec, num_sf_ctrl,
                                             "mlx5_sf_ctrl",
                                             MLX5_EQ_SHARE_IRQ_MIN_CTRL,
                                             MLX5_EQ_SHARE_IRQ_MAX_CTRL);
        if (IS_ERR(table->sf_ctrl_pool)) {
                err = PTR_ERR(table->sf_ctrl_pool);
                goto err_pf;
        }
        /* init sf_comp_pool */
        table->sf_comp_pool = irq_pool_alloc(dev, pf_vec + num_sf_ctrl,
                                             sf_vec - num_sf_ctrl, "mlx5_sf_comp",
                                             MLX5_EQ_SHARE_IRQ_MIN_COMP,
                                             MLX5_EQ_SHARE_IRQ_MAX_COMP);
        if (IS_ERR(table->sf_comp_pool)) {
                err = PTR_ERR(table->sf_comp_pool);
                goto err_sf_ctrl;
        }

        table->sf_comp_pool->irqs_per_cpu = kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL);
        if (!table->sf_comp_pool->irqs_per_cpu) {
                err = -ENOMEM;
                goto err_irqs_per_cpu;
        }

        return 0;

err_irqs_per_cpu:
        irq_pool_free(table->sf_comp_pool);
err_sf_ctrl:
        irq_pool_free(table->sf_ctrl_pool);
err_pf:
        irq_pool_free(table->pf_pool);
        return err;
}

static void irq_pools_destroy(struct mlx5_irq_table *table)
{
        if (table->sf_ctrl_pool) {
                irq_pool_free(table->sf_comp_pool);
                irq_pool_free(table->sf_ctrl_pool);
        }
        irq_pool_free(table->pf_pool);
}

/* irq_table API */

int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table;

        if (mlx5_core_is_sf(dev))
                return 0;

        irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL,
                                  dev->priv.numa_node);
        if (!irq_table)
                return -ENOMEM;

        dev->priv.irq_table = irq_table;
        return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
        if (mlx5_core_is_sf(dev))
                return;

        kvfree(dev->priv.irq_table);
}

int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
        if (!table->pf_pool->xa_num_irqs.max)
                return 1;
        return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}
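
/* Size the MSI-X budget: roughly one vector per online CPU per port plus
 * one for async events, capped by the device's max EQs, with extra room
 * for SF control and completion vectors when SFs are supported.
 */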
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
        int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
                      MLX5_CAP_GEN(dev, max_num_eqs) :
                      1 << MLX5_CAP_GEN(dev, log_max_eq);
        int total_vec;
        int pf_vec;
        int err;

        if (mlx5_core_is_sf(dev))
                return 0;

        pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
        pf_vec = min_t(int, pf_vec, num_eqs);

        total_vec = pf_vec;
        if (mlx5_sf_max_functions(dev))
                total_vec += MLX5_IRQ_CTRL_SF_MAX +
                             MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);

        total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX);
        if (total_vec < 0)
                return total_vec;
        pf_vec = min(pf_vec, total_vec);

        err = irq_pools_init(dev, total_vec - pf_vec, pf_vec);
        if (err)
                pci_free_irq_vectors(dev->pdev);

        return err;
}

void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;

        if (mlx5_core_is_sf(dev))
                return;

        /* There are cases where IRQs are still in use when we reach this
         * point. Hence, make sure all the irqs are released.
         */
        irq_pools_destroy(table);
        pci_free_irq_vectors(dev->pdev);
}

int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
        if (table->sf_comp_pool)
                return min_t(int, num_online_cpus(),
                             table->sf_comp_pool->xa_num_irqs.max -
                             table->sf_comp_pool->xa_num_irqs.min + 1);
        else
                return mlx5_irq_table_get_num_comp(table);
}

struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_MLX5_SF
        if (mlx5_core_is_sf(dev))
                return dev->priv.parent_mdev->priv.irq_table;
#endif
        return dev->priv.irq_table;
}