RDMA/bnxt_re: Add support for dmabuf pinned memory regions

Add support for the new reg_user_mr_dmabuf verb, which indicates dmabuf
support to userspace. bnxt_re does not support ODP, so use the pinned
version of the dmabuf APIs to let bnxt_re devices work as dmabuf importers.

Link: https://lore.kernel.org/r/1690790473-25850-2-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
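
For reference, userspace reaches this path through rdma-core's
ibv_reg_dmabuf_mr() verb. A minimal importer sketch, assuming the dma-buf
fd comes from some exporter (e.g. a GPU allocator) and trimming error
handling; nothing below is taken from this patch:

    #include <infiniband/verbs.h>

    /* Register a dma-buf backed MR. "fd" is a dma-buf file descriptor
     * obtained from an exporter. With this patch applied, bnxt_re
     * services the call by pinning the buffer through
     * ib_umem_dmabuf_get_pinned() instead of relying on ODP faulting.
     */
    static struct ibv_mr *reg_dmabuf(struct ibv_pd *pd, int fd, size_t len)
    {
            return ibv_reg_dmabuf_mr(pd, 0 /* offset */, len, 0 /* iova */, fd,
                                     IBV_ACCESS_LOCAL_WRITE |
                                     IBV_ACCESS_REMOTE_READ |
                                     IBV_ACCESS_REMOTE_WRITE);
    }

The MR is torn down with the usual ibv_dereg_mr(), which on the kernel side
releases the umem and unpins the dma-buf attachment.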
3 files changed, 62 insertions(+), 26 deletions(-)

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3974,16 +3974,13 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 	return rc;
 }
 
-/* uverbs */
-struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
-				  u64 virt_addr, int mr_access_flags,
-				  struct ib_udata *udata)
+static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
+					   int mr_access_flags, struct ib_umem *umem)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
-	struct bnxt_re_mr *mr;
-	struct ib_umem *umem;
 	unsigned long page_size;
+	struct bnxt_re_mr *mr;
 	int umem_pgs, rc;
 	u32 active_mrs;
 
@@ -3993,6 +3990,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
+	if (!page_size) {
+		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
+		return ERR_PTR(-EINVAL);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
@@ -4004,36 +4007,23 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
 	if (rc) {
-		ibdev_err(&rdev->ibdev, "Failed to allocate MR");
+		ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
 		rc = -EIO;
 		goto free_mr;
 	}
 	/* The fixed portion of the rkey is the same as the lkey */
 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
-	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
-	if (IS_ERR(umem)) {
-		ibdev_err(&rdev->ibdev, "Failed to get umem");
-		rc = -EFAULT;
-		goto free_mrw;
-	}
 	mr->ib_umem = umem;
 
 	mr->qplib_mr.va = virt_addr;
-	page_size = ib_umem_find_best_pgsz(
-		umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
-	if (!page_size) {
-		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
-		rc = -EFAULT;
-		goto free_umem;
-	}
 	mr->qplib_mr.total_size = length;
 
 	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
 			       umem_pgs, page_size);
 	if (rc) {
-		ibdev_err(&rdev->ibdev, "Failed to register user MR");
-		goto free_umem;
+		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
+		rc = -EIO;
+		goto free_mrw;
 	}
 
 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
@@ -4043,8 +4033,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	rdev->stats.res.mr_watermark = active_mrs;
 
 	return &mr->ib_mr;
-free_umem:
-	ib_umem_release(umem);
+
 free_mrw:
 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
 free_mr:
@@ -4052,6 +4041,48 @@ free_mr:
 	return ERR_PTR(rc);
 }
 
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+				  u64 virt_addr, int mr_access_flags,
+				  struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem *umem;
+	struct ib_mr *ib_mr;
+
+	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+	if (IS_ERR(umem))
+		return ERR_CAST(umem);
+
+	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+	if (IS_ERR(ib_mr))
+		ib_umem_release(umem);
+	return ib_mr;
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+					 u64 length, u64 virt_addr, int fd,
+					 int mr_access_flags, struct ib_udata *udata)
+{
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem_dmabuf *umem_dmabuf;
+	struct ib_umem *umem;
+	struct ib_mr *ib_mr;
+
+	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+						fd, mr_access_flags);
+	if (IS_ERR(umem_dmabuf))
+		return ERR_CAST(umem_dmabuf);
+
+	umem = &umem_dmabuf->umem;
+
+	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+	if (IS_ERR(ib_mr))
+		ib_umem_release(umem);
+	return ib_mr;
+}
+
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ctx->device;

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -229,6 +229,10 @@ int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int mr_access_flags,
 				  struct ib_udata *udata);
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+					 u64 length, u64 virt_addr,
+					 int fd, int mr_access_flags,
+					 struct ib_udata *udata);
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
 void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -861,6 +861,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	.query_qp = bnxt_re_query_qp,
 	.query_srq = bnxt_re_query_srq,
 	.reg_user_mr = bnxt_re_reg_user_mr,
+	.reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
 	.req_notify_cq = bnxt_re_req_notify_cq,
 	.resize_cq = bnxt_re_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
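
Note that populating .reg_user_mr_dmabuf in bnxt_re_dev_ops is also what
advertises the capability: uverbs dispatches REG_DMABUF_MR only when the op
is non-NULL and otherwise fails the call with EOPNOTSUPP. A probe sketch,
assuming a valid dma-buf fd is already at hand (the helper name is
illustrative, not part of this patch):

    #include <errno.h>
    #include <stdbool.h>
    #include <infiniband/verbs.h>

    /* Returns false only when the kernel/driver reports the verb itself
     * as unsupported; any other failure says something about this
     * particular buffer, not about device support.
     */
    static bool dmabuf_mr_supported(struct ibv_pd *pd, int fd, size_t len)
    {
            struct ibv_mr *mr = ibv_reg_dmabuf_mr(pd, 0, len, 0, fd,
                                                  IBV_ACCESS_LOCAL_WRITE);
            if (!mr)
                    return errno != EOPNOTSUPP;
            ibv_dereg_mr(mr);
            return true;
    }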