RDMA/rxe: Fix over copying in get_srq_wqe

Currently get_srq_wqe() in rxe_resp.c copies the maximum possible number
of bytes from the wqe into the QP's copy of the SRQ wqe. This is usually
wasted work, and it risks reading past the end of the SRQ circular buffer
if the SRQ is configured with fewer than the maximum possible number of SGEs.

Check that the number of SGEs is not too large.
Compute the actual number of bytes in the WR and copy only those.
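
For illustration only, a minimal standalone sketch of the size arithmetic
behind the fix. The structs below are simplified stand-ins for the rxe
receive WQE layout (not the kernel/UAPI definitions), and max_sge is an
assumed limit; the point is that a WQE ending in a flexible SGE array
occupies sizeof(*wqe) + num_sge * sizeof(struct sge) bytes, so copying a
maximum-sized buffer is unnecessary and can read past the posted entry.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct sge {			/* one scatter/gather element (simplified) */
		uint64_t addr;
		uint32_t length;
		uint32_t lkey;
	};

	struct recv_wqe {		/* fixed header followed by a variable SGE array */
		uint64_t wr_id;
		uint32_t num_sge;
		uint32_t pad;
		struct sge sge[];	/* flexible array member */
	};

	/* mirrors the patch's sanity check: num_sge must not exceed the SRQ limit */
	static int num_sge_ok(const struct recv_wqe *wqe, uint32_t max_sge)
	{
		return wqe->num_sge <= max_sge;
	}

	/* bytes actually occupied by a WQE carrying num_sge SGEs */
	static size_t wqe_bytes(const struct recv_wqe *wqe)
	{
		return sizeof(*wqe) + (size_t)wqe->num_sge * sizeof(struct sge);
	}

	int main(void)
	{
		const uint32_t max_sge = 32;		/* assumed SRQ limit */
		size_t max_bytes = sizeof(struct recv_wqe) + max_sge * sizeof(struct sge);
		struct recv_wqe *wqe = calloc(1, max_bytes);	/* posted WQE */
		void *copy = malloc(max_bytes);			/* QP-side copy buffer */

		if (!wqe || !copy)
			return 1;
		wqe->num_sge = 2;
		if (!num_sge_ok(wqe, max_sge))
			return 1;				/* malformed WQE */

		printf("old copy size: %zu bytes\n", max_bytes);	/* always the maximum */
		printf("new copy size: %zu bytes\n", wqe_bytes(wqe));	/* only what is used */
		memcpy(copy, wqe, wqe_bytes(wqe));

		free(copy);
		free(wqe);
		return 0;
	}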

Fixes: 8700e3e7c4 ("Soft RoCE driver")
Link: https://lore.kernel.org/r/20210618045742.204195-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit ec0fa2445c, parent 1993cbed65
Author: Bob Pearson, 2021-06-17 23:57:41 -05:00; committed by Jason Gunthorpe
1 changed file with 8 additions and 2 deletions

drivers/infiniband/sw/rxe/rxe_resp.c

@@ -296,6 +296,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
 	struct rxe_recv_wqe *wqe;
 	struct ib_event ev;
 	unsigned int count;
+	size_t size;
 
 	if (srq->error)
 		return RESPST_ERR_RNR;
@@ -311,8 +312,13 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
 		return RESPST_ERR_RNR;
 	}
 
-	/* note kernel and user space recv wqes have same size */
-	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));
+	/* don't trust user space data */
+	if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
+		pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
+		return RESPST_ERR_MALFORMED_WQE;
+	}
+	size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
+	memcpy(&qp->resp.srq_wqe, wqe, size);
 	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
 
 	if (qp->is_user) {
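
Design note (paraphrasing the patch's own "don't trust user space data"
comment, not additional upstream text): for user-space QPs the SRQ queue
buffer is mapped into the application, so wqe->dma.num_sge is untrusted by
the time the responder reads it. Validating it against srq->rq.max_sge
before it is used to size the memcpy() both avoids the out-of-bounds read
and reports a malformed WQE as RESPST_ERR_MALFORMED_WQE instead of silently
over-copying; the num_sge_ok() helper in the sketch above plays the same
role as the new unlikely() test here.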