RDMA/rxe: Implement memory access through MWs

Add code to implement memory access through memory windows: when the rkey in an incoming RDMA read, write or atomic request names a memory window rather than a memory region, the responder looks up and validates the MW, redirects the access to the MW's underlying MR, and, for zero-based windows, applies the window's base address as an offset into the MR.
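
For context, what this enables from the consumer side (an illustrative sketch, not part of this patch: pd, qp, mr, buf and len are assumed to already exist, and error handling is omitted) is the usual ibverbs flow of binding a window over a registered region and handing its rkey to the peer:

	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_1);
	struct ibv_mw_bind bind = {
		.wr_id = 1,
		.send_flags = IBV_SEND_SIGNALED,
		.bind_info = {
			.mr = mr,		/* MR the window exposes */
			.addr = (uintptr_t)buf,
			.length = len,
			.mw_access_flags = IBV_ACCESS_REMOTE_READ |
					   IBV_ACCESS_REMOTE_WRITE,
		},
	};

	ibv_bind_mw(qp, mw, &bind);	/* on success, mw->rkey is usable */

A peer that issues RDMA operations with that rkey is now matched by the MW path added to check_rkey() below; type 2 windows work the same way except that the bind is posted as an IBV_WR_BIND_MW work request.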

Link: https://lore.kernel.org/r/20210608042552.33275-10-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Author: Bob Pearson <rpearsonhpe@gmail.com>, 2021-06-07 23:25:52 -05:00
Committer: Jason Gunthorpe <jgg@nvidia.com>
commit cdd0b85675 (parent 3902b429ca)
4 changed files with 76 additions and 16 deletions

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h

@@ -94,6 +94,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
 int rxe_dealloc_mw(struct ib_mw *ibmw);
 int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
 int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
+struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
 void rxe_mw_cleanup(struct rxe_pool_entry *arg);
 
 /* rxe_net.c */

diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c

@@ -312,6 +312,29 @@ err:
 	return ret;
 }
 
+struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
+{
+	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
+	struct rxe_mw *mw;
+	int index = rkey >> 8;
+
+	mw = rxe_pool_get_index(&rxe->mw_pool, index);
+	if (!mw)
+		return NULL;
+
+	if (unlikely((rxe_mw_rkey(mw) != rkey) || rxe_mw_pd(mw) != pd ||
+		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
+		     (mw->length == 0) ||
+		     (access && !(access & mw->access)) ||
+		     mw->state != RXE_MW_STATE_VALID)) {
+		rxe_drop_ref(mw);
+		return NULL;
+	}
+
+	return mw;
+}
+
 void rxe_mw_cleanup(struct rxe_pool_entry *elem)
 {
 	struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c

@@ -413,6 +413,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 				   struct rxe_pkt_info *pkt)
 {
 	struct rxe_mr *mr = NULL;
+	struct rxe_mw *mw = NULL;
 	u64 va;
 	u32 rkey;
 	u32 resid;
@@ -424,6 +425,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
 		if (pkt->mask & RXE_RETH_MASK) {
 			qp->resp.va = reth_va(pkt);
+			qp->resp.offset = 0;
 			qp->resp.rkey = reth_rkey(pkt);
 			qp->resp.resid = reth_len(pkt);
 			qp->resp.length = reth_len(pkt);
@@ -432,6 +434,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 			: IB_ACCESS_REMOTE_WRITE;
 	} else if (pkt->mask & RXE_ATOMIC_MASK) {
 		qp->resp.va = atmeth_va(pkt);
+		qp->resp.offset = 0;
 		qp->resp.rkey = atmeth_rkey(pkt);
 		qp->resp.resid = sizeof(u64);
 		access = IB_ACCESS_REMOTE_ATOMIC;
@@ -451,18 +454,36 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 	resid = qp->resp.resid;
 	pktlen = payload_size(pkt);
 
-	mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
-	if (!mr) {
-		state = RESPST_ERR_RKEY_VIOLATION;
-		goto err;
+	if (rkey_is_mw(rkey)) {
+		mw = rxe_lookup_mw(qp, access, rkey);
+		if (!mw) {
+			pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
+			state = RESPST_ERR_RKEY_VIOLATION;
+			goto err;
+		}
+
+		mr = mw->mr;
+		if (!mr) {
+			pr_err("%s: MW doesn't have an MR\n", __func__);
+			state = RESPST_ERR_RKEY_VIOLATION;
+			goto err;
+		}
+
+		if (mw->access & IB_ZERO_BASED)
+			qp->resp.offset = mw->addr;
+
+		rxe_drop_ref(mw);
+		rxe_add_ref(mr);
+	} else {
+		mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
+		if (!mr) {
+			pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
+			state = RESPST_ERR_RKEY_VIOLATION;
+			goto err;
+		}
 	}
 
 	if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
 		state = RESPST_ERR_RKEY_VIOLATION;
 		goto err;
 	}
 
-	if (mr_check_range(mr, va, resid)) {
+	if (mr_check_range(mr, va + qp->resp.offset, resid)) {
 		state = RESPST_ERR_RKEY_VIOLATION;
 		goto err;
 	}
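
The qp->resp.offset handling above is what makes zero-based windows work: a peer addressing an IB_ZERO_BASED MW uses virtual addresses relative to the start of the window, and mw->addr records where the window begins inside the underlying MR. A worked example with illustrative values:

	/* A zero-based MW bound over part of an MR starting at iova
	 * 0x10000; the peer issues an RDMA WRITE with RETH va = 0x200
	 * (window-relative, because the window is zero based):
	 *
	 *	qp->resp.va     = 0x200;	// reth_va(pkt)
	 *	qp->resp.offset = 0x10000;	// mw->addr, set above
	 *
	 * write_data_in(), process_atomic() and read_reply() below all
	 * use va + offset = 0x10200 as the iova into the MR.  For plain
	 * MR rkeys and for non-zero-based windows, offset stays 0 and
	 * the sum reduces to the old behavior.
	 */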
@@ -496,6 +517,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 err:
 	if (mr)
 		rxe_drop_ref(mr);
+	if (mw)
+		rxe_drop_ref(mw);
+
 	return state;
 }
@@ -520,8 +544,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
 	int err;
 	int data_len = payload_size(pkt);
 
-	err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
-			RXE_TO_MR_OBJ, NULL);
+	err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
+			  payload_addr(pkt), data_len, RXE_TO_MR_OBJ, NULL);
 	if (err) {
 		rc = RESPST_ERR_RKEY_VIOLATION;
 		goto out;
@@ -540,7 +564,6 @@ static DEFINE_SPINLOCK(atomic_ops_lock);
 static enum resp_states process_atomic(struct rxe_qp *qp,
 				       struct rxe_pkt_info *pkt)
 {
-	u64 iova = atmeth_va(pkt);
 	u64 *vaddr;
 	enum resp_states ret;
 	struct rxe_mr *mr = qp->resp.mr;
@@ -550,7 +573,7 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
 		goto out;
 	}
 
-	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));
+	vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));
 
 	/* check vaddr is 8 bytes aligned. */
 	if (!vaddr || (uintptr_t)vaddr & 7) {
@@ -674,8 +697,10 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 		res->type = RXE_READ_MASK;
 		res->replay = 0;
 
-		res->read.va = qp->resp.va;
-		res->read.va_org = qp->resp.va;
+		res->read.va = qp->resp.va +
+				qp->resp.offset;
+		res->read.va_org = qp->resp.va +
+				qp->resp.offset;
 
 		res->first_psn = req_pkt->psn;

diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h

@@ -186,6 +186,7 @@ struct rxe_resp_info {
 	/* RDMA read / atomic only */
 	u64			va;
+	u64			offset;
 	struct rxe_mr		*mr;
 	u32			resid;
 	u32			rkey;
@@ -483,6 +484,16 @@ static inline u32 mr_rkey(struct rxe_mr *mr)
 	return mr->ibmr.rkey;
 }
 
+static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
+{
+	return to_rpd(mw->ibmw.pd);
+}
+
+static inline u32 rxe_mw_rkey(struct rxe_mw *mw)
+{
+	return mw->ibmw.rkey;
+}
+
 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
 void rxe_mc_cleanup(struct rxe_pool_entry *arg);