RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq

Split the CQ-handling code in irdma_reg_user_mr() out into a new function, irdma_reg_user_mr_type_cq().

Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Link: https://lore.kernel.org/r/20230116193502.66540-5-yanjun.zhu@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>

@@ -2867,6 +2867,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
 	return 0;
 }
 
+static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+				     struct ib_udata *udata,
+				     struct irdma_mr *iwmr)
+{
+	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+	struct irdma_ucontext *ucontext = NULL;
+	u8 shadow_pgcnt = 1;
+	unsigned long flags;
+	bool use_pbles;
+	u32 total;
+	int err;
+
+	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+		shadow_pgcnt = 0;
+	total = req.cq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	use_pbles = req.cq_pages > 1;
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+	return 0;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2882,16 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 {
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
-	struct irdma_ucontext *ucontext;
-	struct irdma_pbl *iwpbl;
-	struct irdma_mr *iwmr;
-	struct ib_umem *region;
-	struct irdma_mem_reg_req req;
-	u32 total;
-	u8 shadow_pgcnt = 1;
-	bool use_pbles = false;
-	unsigned long flags;
-	int err = -EINVAL;
+	struct irdma_mem_reg_req req = {};
+	struct ib_umem *region = NULL;
+	struct irdma_mr *iwmr = NULL;
+	int err;
 
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
@@ -2918,8 +2946,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return (struct ib_mr *)iwmr;
 	}
 
-	iwpbl = &iwmr->iwpbl;
-
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
 		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
@@ -2928,25 +2954,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
-		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
-			shadow_pgcnt = 0;
-		total = req.cq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-
-		use_pbles = (req.cq_pages > 1);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
 		if (err)
 			goto error;
-
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
 		err = irdma_reg_user_mr_type_mem(iwmr, access);
@@ -2955,6 +2965,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	default:
+		err = -EINVAL;
 		goto error;
 	}