IB/mlx5: Move to fully dynamic UAR mode once user space supports it

Move to fully dynamic UAR mode once user space indicates that it supports
it.  In that case, reject any legacy (static) UAR usage on the allocated
context and skip the now-redundant allocation of the static UAR pages.

Link: https://lore.kernel.org/r/20200324060143.1569116-6-leon@kernel.org
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Reviewed-by: Michael Guralnik <michaelgur@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 0a2fd01c28
parent 2152862298
Author:    Yishai Hadas <yishaih@mellanox.com>
Date:      2020-03-24 08:01:43 +02:00
Committer: Jason Gunthorpe <jgg@mellanox.com>

 5 files changed, 26 insertions(+), 3 deletions(-)
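What "user space supports it" means in practice: the provider library (e.g. the mlx5 provider in rdma-core) opts in through the driver-private ucontext request, and the kernel then reports zero statically allocated bfregs back.  The sketch below is illustrative only and is not part of this patch; the helper names and the include path are assumptions, while lib_caps, tot_bfregs, MLX5_LIB_CAP_DYN_UAR and the struct names come from the uapi header and main.c hunks shown further down.

/*
 * Illustrative sketch of the user-space side (not part of this patch).
 * Assumes the kernel uapi header touched below is installed as
 * <rdma/mlx5-ib-abi.h>; the helper names are made up for illustration.
 */
#include <stdbool.h>
#include <rdma/mlx5-ib-abi.h>

/* Advertise that the library manages all of its UARs dynamically. */
static void mlx5_request_dyn_uar(struct mlx5_ib_alloc_ucontext_req_v2 *req)
{
	req->lib_caps |= MLX5_LIB_CAP_DYN_UAR;
}

/*
 * When the kernel accepts MLX5_LIB_CAP_DYN_UAR it skips the static
 * bfreg/UAR allocation for the context and reports tot_bfregs == 0;
 * every UAR the library needs must then be allocated explicitly.
 */
static bool mlx5_context_is_dyn_uar(const struct mlx5_ib_alloc_ucontext_resp *resp)
{
	return resp->tot_bfregs == 0;
}

The request/response pair rides in the udata of the ucontext allocation command; once a context is in dynamic mode, legacy mappings of the static UAR pages are rejected, as the uar_mmap() and alloc_bfreg() hunks below show.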

drivers/infiniband/hw/mlx5/cq.c

@@ -741,10 +741,14 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	MLX5_SET(cqc, cqc, log_page_size,
 		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
-	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)
+	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
 		*index = ucmd.uar_page_index;
-	else
+	} else if (context->bfregi.lib_uar_dyn) {
+		err = -EINVAL;
+		goto err_cqb;
+	} else {
 		*index = context->bfregi.sys_pages[0];
+	}
 
 	if (ucmd.cqe_comp_en == 1) {
 		int mini_cqe_format;

drivers/infiniband/hw/mlx5/main.c

@@ -1786,6 +1786,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
 				     max_cqe_version);
 	u32 dump_fill_mkey;
 	bool lib_uar_4k;
+	bool lib_uar_dyn;
 
 	if (!dev->ib_active)
 		return -EAGAIN;
@@ -1844,8 +1845,14 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
 	}
 
 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
+	lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
 	bfregi = &context->bfregi;
 
+	if (lib_uar_dyn) {
+		bfregi->lib_uar_dyn = lib_uar_dyn;
+		goto uar_done;
+	}
+
 	/* updates req->total_num_bfregs */
 	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
 	if (err)
@@ -1872,6 +1879,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
 	if (err)
 		goto out_sys_pages;
 
+uar_done:
 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
 		err = mlx5_ib_devx_create(dev, true);
 		if (err < 0)
@@ -1893,7 +1901,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
 
-	resp.tot_bfregs = req.total_num_bfregs;
+	resp.tot_bfregs = lib_uar_dyn ? 0 : req.total_num_bfregs;
 	resp.num_ports = dev->num_ports;
 
 	if (offsetofend(typeof(resp), cqe_version) <= udata->outlen)
@@ -2141,6 +2149,9 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
 				bfregi->num_static_sys_pages;
 
+	if (bfregi->lib_uar_dyn)
+		return -EINVAL;
+
 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 		return -EINVAL;

drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -139,6 +139,7 @@ struct mlx5_bfreg_info {
 	struct mutex lock;
 	u32 ver;
 	u8 lib_uar_4k : 1;
+	u8 lib_uar_dyn : 1;
 	u32 num_sys_pages;
 	u32 num_static_sys_pages;
 	u32 total_num_bfregs;

drivers/infiniband/hw/mlx5/qp.c

@@ -697,6 +697,9 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev,
 {
 	int bfregn = -ENOMEM;
 
+	if (bfregi->lib_uar_dyn)
+		return -EINVAL;
+
 	mutex_lock(&bfregi->lock);
 	if (bfregi->ver >= 2) {
 		bfregn = alloc_high_class_bfreg(dev, bfregi);
@@ -768,6 +771,9 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
 	u32 index_of_sys_page;
 	u32 offset;
 
+	if (bfregi->lib_uar_dyn)
+		return -EINVAL;
+
 	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
 				MLX5_NON_FP_BFREGS_PER_UAR;
 	index_of_sys_page = bfregn / bfregs_per_sys_page;

include/uapi/rdma/mlx5-ib-abi.h

@@ -79,6 +79,7 @@ struct mlx5_ib_alloc_ucontext_req {
 
 enum mlx5_lib_caps {
 	MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
+	MLX5_LIB_CAP_DYN_UAR = (__u64)1 << 1,
 };
 
 enum mlx5_ib_alloc_uctx_v2_flags {