RDMA/rxe: Add support for bind MW work requests

Add support for bind MW work requests from user space. Since rdma/core
does not support bind MW in ib_send_wr, there is no way to support bind
MW from kernel space.
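
For context, a user-space consumer binds a type 2 MW by posting an ordinary
send work request. The following is a minimal sketch against the rdma-core
verbs API and is not part of this commit; post_bind_mw() is a hypothetical
helper, and the qp, mw, and mr objects are assumed to have been created
elsewhere:

#include <infiniband/verbs.h>

/* Hypothetical example: post a type 2 MW bind as a send WR.
 * Assumes qp, mw (IBV_MW_TYPE_2), and mr already exist and that mr was
 * registered with IBV_ACCESS_MW_BIND (plus IBV_ACCESS_LOCAL_WRITE if
 * the window allows remote writes).
 */
static int post_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
                        struct ibv_mr *mr, uint64_t addr, uint64_t length)
{
        struct ibv_send_wr wr = {0}, *bad_wr;

        wr.opcode = IBV_WR_BIND_MW;
        wr.send_flags = IBV_SEND_SIGNALED;
        wr.bind_mw.mw = mw;
        /* choose a new rkey; only the low 8 "key" bits may change */
        wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);
        wr.bind_mw.bind_info.mr = mr;
        wr.bind_mw.bind_info.addr = addr;
        wr.bind_mw.bind_info.length = length;
        wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_READ |
                                               IBV_ACCESS_REMOTE_WRITE;

        return ibv_post_send(qp, &wr, &bad_wr);
}

On success the consumer waits for the bind completion and then advertises
wr.bind_mw.rkey to the remote peer.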

Added the bind_mw local operation in rxe_req.c, the bind_mw WR operation
in rxe_opcode.c, and the bind_mw WC in rxe_comp.c. Added additional fields
to rxe_mw in rxe_verbs.h. Added the rxe_do_dealloc_mw() subroutine to
clean up an MW when rxe_dealloc_mw() is called, and code to implement the
bind_mw operation in rxe_mw.c.

Link: https://lore.kernel.org/r/20210608042552.33275-8-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

drivers/infiniband/sw/rxe/rxe_comp.c

@@ -103,6 +103,7 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
        case IB_WR_RDMA_READ_WITH_INV:  return IB_WC_RDMA_READ;
        case IB_WR_LOCAL_INV:           return IB_WC_LOCAL_INV;
        case IB_WR_REG_MR:              return IB_WC_REG_MR;
        case IB_WR_BIND_MW:             return IB_WC_BIND_MW;
        default:
                return 0xff;

drivers/infiniband/sw/rxe/rxe_loc.h

@@ -110,6 +110,7 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
int rxe_dealloc_mw(struct ib_mw *ibmw);
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
void rxe_mw_cleanup(struct rxe_pool_entry *arg);

/* rxe_net.c */

drivers/infiniband/sw/rxe/rxe_mw.c

@@ -29,6 +29,29 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
        return 0;
}

static void rxe_do_dealloc_mw(struct rxe_mw *mw)
{
        if (mw->mr) {
                struct rxe_mr *mr = mw->mr;

                mw->mr = NULL;
                atomic_dec(&mr->num_mw);
                rxe_drop_ref(mr);
        }

        if (mw->qp) {
                struct rxe_qp *qp = mw->qp;

                mw->qp = NULL;
                rxe_drop_ref(qp);
        }

        mw->access = 0;
        mw->addr = 0;
        mw->length = 0;
        mw->state = RXE_MW_STATE_INVALID;
}

int rxe_dealloc_mw(struct ib_mw *ibmw)
{
        struct rxe_mw *mw = to_rmw(ibmw);
@@ -36,7 +59,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
        unsigned long flags;

        spin_lock_irqsave(&mw->lock, flags);
        rxe_do_dealloc_mw(mw);
        spin_unlock_irqrestore(&mw->lock, flags);

        rxe_drop_ref(mw);
@@ -45,6 +68,183 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
        return 0;
}

static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                             struct rxe_mw *mw, struct rxe_mr *mr)
{
        if (mw->ibmw.type == IB_MW_TYPE_1) {
                if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
                        pr_err_once(
                                "attempt to bind a type 1 MW not in the valid state\n");
                        return -EINVAL;
                }

                /* o10-36.2.2 */
                if (unlikely((mw->access & IB_ZERO_BASED))) {
                        pr_err_once("attempt to bind a zero based type 1 MW\n");
                        return -EINVAL;
                }
        }

        if (mw->ibmw.type == IB_MW_TYPE_2) {
                /* o10-37.2.30 */
                if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
                        pr_err_once(
                                "attempt to bind a type 2 MW not in the free state\n");
                        return -EINVAL;
                }

                /* C10-72 */
                if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
                        pr_err_once(
                                "attempt to bind type 2 MW with qp with different PD\n");
                        return -EINVAL;
                }

                /* o10-37.2.40 */
                if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
                        pr_err_once(
                                "attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
                        return -EINVAL;
                }
        }

        if (unlikely((wqe->wr.wr.mw.rkey & 0xff) == (mw->ibmw.rkey & 0xff))) {
                pr_err_once("attempt to bind MW with same key\n");
                return -EINVAL;
        }

        /* remaining checks only apply to a nonzero MR */
        if (!mr)
                return 0;

        if (unlikely(mr->access & IB_ZERO_BASED)) {
                pr_err_once("attempt to bind MW to zero based MR\n");
                return -EINVAL;
        }

        /* C10-73 */
        if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
                pr_err_once(
                        "attempt to bind an MW to an MR without bind access\n");
                return -EINVAL;
        }

        /* C10-74 */
        if (unlikely((mw->access &
                      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
                     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
                pr_err_once(
                        "attempt to bind a writable MW to an MR without local write access\n");
                return -EINVAL;
        }

        /* C10-75 */
        if (mw->access & IB_ZERO_BASED) {
                if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
                        pr_err_once(
                                "attempt to bind a ZB MW outside of the MR\n");
                        return -EINVAL;
                }
        } else {
                if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
                             ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
                              (mr->iova + mr->length)))) {
                        pr_err_once(
                                "attempt to bind a VA MW outside of the MR\n");
                        return -EINVAL;
                }
        }

        return 0;
}

static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
                           struct rxe_mw *mw, struct rxe_mr *mr)
{
        u32 rkey;
        u32 new_rkey;

        /* keep the pool index in the upper 24 bits; the consumer
         * supplies the new low 8 key bits in the WR
         */
        rkey = mw->ibmw.rkey;
        new_rkey = (rkey & 0xffffff00) | (wqe->wr.wr.mw.rkey & 0x000000ff);

        mw->ibmw.rkey = new_rkey;
        mw->access = wqe->wr.wr.mw.access;
        mw->state = RXE_MW_STATE_VALID;
        mw->addr = wqe->wr.wr.mw.addr;
        mw->length = wqe->wr.wr.mw.length;

        if (mw->mr) {
                struct rxe_mr *old_mr = mw->mr;

                /* clear the link and drop the reference last so the MR
                 * is not touched after it is potentially freed
                 */
                mw->mr = NULL;
                atomic_dec(&old_mr->num_mw);
                rxe_drop_ref(old_mr);
        }

        if (mw->length) {
                mw->mr = mr;
                atomic_inc(&mr->num_mw);
                rxe_add_ref(mr);
        }

        if (mw->ibmw.type == IB_MW_TYPE_2) {
                rxe_add_ref(qp);
                mw->qp = qp;
        }
}

int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        int ret;
        struct rxe_mw *mw;
        struct rxe_mr *mr;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        unsigned long flags;

        mw = rxe_pool_get_index(&rxe->mw_pool,
                                wqe->wr.wr.mw.mw_rkey >> 8);
        if (unlikely(!mw)) {
                ret = -EINVAL;
                goto err;
        }

        if (unlikely(mw->ibmw.rkey != wqe->wr.wr.mw.mw_rkey)) {
                ret = -EINVAL;
                goto err_drop_mw;
        }

        if (likely(wqe->wr.wr.mw.length)) {
                mr = rxe_pool_get_index(&rxe->mr_pool,
                                        wqe->wr.wr.mw.mr_lkey >> 8);
                if (unlikely(!mr)) {
                        ret = -EINVAL;
                        goto err_drop_mw;
                }

                if (unlikely(mr->ibmr.lkey != wqe->wr.wr.mw.mr_lkey)) {
                        ret = -EINVAL;
                        goto err_drop_mr;
                }
        } else {
                mr = NULL;
        }

        spin_lock_irqsave(&mw->lock, flags);

        ret = rxe_check_bind_mw(qp, wqe, mw, mr);
        if (ret)
                goto err_unlock;

        rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
        spin_unlock_irqrestore(&mw->lock, flags);
err_drop_mr:
        if (mr)
                rxe_drop_ref(mr);
err_drop_mw:
        rxe_drop_ref(mw);
err:
        return ret;
}

void rxe_mw_cleanup(struct rxe_pool_entry *elem)
{
        struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
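
A note on the key arithmetic used above (illustration only, not part of the
patch): rxe packs a pool index into the upper 24 bits of each rkey/lkey and
leaves the low 8 bits as a consumer-chosen key, which is why the lookups in
rxe_bind_mw() shift by 8 and rxe_do_bind_mw() splices in only the low byte.
The helper names below are hypothetical:

#include <stdint.h>

/* The upper 24 bits locate the object in its pool; this mirrors the
 * "rkey >> 8" passed to rxe_pool_get_index().
 */
static inline uint32_t rxe_key_to_index(uint32_t rkey)
{
        return rkey >> 8;
}

/* A bind keeps the pool index and replaces only the consumer-owned low
 * byte; rxe_check_bind_mw() insists the new byte differs from the
 * current one.
 */
static inline uint32_t rxe_splice_key(uint32_t old_rkey, uint32_t wr_rkey)
{
        return (old_rkey & 0xffffff00) | (wr_rkey & 0x000000ff);
}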

drivers/infiniband/sw/rxe/rxe_opcode.c

@@ -96,6 +96,13 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
                        [IB_QPT_RC]     = WR_LOCAL_OP_MASK,
                },
        },
        [IB_WR_BIND_MW] = {
                .name   = "IB_WR_BIND_MW",
                .mask   = {
                        [IB_QPT_RC]     = WR_LOCAL_OP_MASK,
                        [IB_QPT_UC]     = WR_LOCAL_OP_MASK,
                },
        },
};

struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {

drivers/infiniband/sw/rxe/rxe_req.c

@@ -584,6 +584,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
        struct rxe_dev *rxe;
        struct rxe_mr *mr;
        u32 rkey;
        int ret;

        switch (opcode) {
        case IB_WR_LOCAL_INV:
@@ -609,6 +610,13 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
                mr->iova = wqe->wr.wr.reg.mr->iova;
                rxe_drop_ref(mr);
                break;
        case IB_WR_BIND_MW:
                ret = rxe_bind_mw(qp, wqe);
                if (unlikely(ret)) {
                        wqe->status = IB_WC_MW_BIND_ERR;
                        return ret;
                }
                break;
        default:
                pr_err("Unexpected send wqe opcode %d\n", opcode);
                wqe->status = IB_WC_LOC_QP_OP_ERR;
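
When rxe_bind_mw() fails, the WQE completes with IB_WC_MW_BIND_ERR, which
user space observes as IBV_WC_MW_BIND_ERR. A minimal sketch of the
consumer-side check, not part of this commit; check_bind_completion() is a
hypothetical helper, and it assumes the bind WR was posted signaled and cq
was created elsewhere:

#include <errno.h>
#include <infiniband/verbs.h>

/* Hypothetical helper: poll one completion and classify a bind result. */
static int check_bind_completion(struct ibv_cq *cq)
{
        struct ibv_wc wc;
        int n = ibv_poll_cq(cq, 1, &wc);

        if (n < 0)
                return n;               /* CQ poll error */
        if (n == 0)
                return -EAGAIN;         /* nothing completed yet */
        if (wc.status == IBV_WC_MW_BIND_ERR)
                return -EPERM;          /* the bind itself was rejected */
        if (wc.status != IBV_WC_SUCCESS)
                return -EIO;            /* some other completion error */

        return wc.opcode == IBV_WC_BIND_MW ? 0 : -EINVAL;
}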

drivers/infiniband/sw/rxe/rxe_verbs.h

@@ -315,6 +315,8 @@ struct rxe_mr {
        u32                     num_map;
        struct rxe_map          **map;

        atomic_t                num_mw;
};

enum rxe_mw_state {
@@ -324,10 +326,15 @@ enum rxe_mw_state {
};

struct rxe_mw {
        struct ib_mw            ibmw;
        struct rxe_pool_entry   pelem;
        spinlock_t              lock;
        enum rxe_mw_state       state;
        struct rxe_qp           *qp; /* Type 2 only */
        struct rxe_mr           *mr;
        int                     access;
        u64                     addr;
        u64                     length;
};
struct rxe_mc_grp {