xprtrdma: Move unmap-safe logic to rpcrdma_marshal_req

Clean up. This logic is related to marshaling the request, and I'd
like to keep everything that touches req->rl_registered close
together, for CPU cache efficiency.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Author: Chuck Lever
Date: 2017-12-14 20:57:14 -05:00
Committed-by: Anna Schumaker
parent 20035edf3c
commit a2b6470b1c
2 changed files with 11 additions and 5 deletions

net/sunrpc/xprtrdma/rpc_rdma.c

@@ -821,6 +821,17 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
 		rtype = rpcrdma_areadch;
 	}
 
+	/* If this is a retransmit, discard previously registered
+	 * chunks. Very likely the connection has been replaced,
+	 * so these registrations are invalid and unusable.
+	 */
+	while (unlikely(!list_empty(&req->rl_registered))) {
+		struct rpcrdma_mw *mw;
+
+		mw = rpcrdma_pop_mw(&req->rl_registered);
+		rpcrdma_defer_mr_recovery(mw);
+	}
+
 	/* This implementation supports the following combinations
 	 * of chunk lists in one RPC-over-RDMA Call message:
 	 *
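
The discard loop relies on rpcrdma_pop_mw to detach one MW at a time from rl_registered. That helper is not part of this diff; below is a minimal sketch of what such a list-pop helper looks like, assuming the usual list_head embedding via a mw_list member (the member name is an assumption, since the struct layout is not shown here):

/* Illustrative pop helper: detach and return the first MW on a list.
 * The mw_list member name is assumed; it does not appear in this diff.
 */
static inline struct rpcrdma_mw *
rpcrdma_pop_mw(struct list_head *list)
{
	struct rpcrdma_mw *mw;

	mw = list_first_entry(list, struct rpcrdma_mw, mw_list);
	list_del_init(&mw->mw_list);
	return mw;
}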

net/sunrpc/xprtrdma/transport.c

@@ -731,11 +731,6 @@ xprt_rdma_send_request(struct rpc_task *task)
 	if (!xprt_connected(xprt))
 		goto drop_connection;
 
-	/* On retransmit, remove any previously registered chunks */
-	if (unlikely(!list_empty(&req->rl_registered)))
-		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
-						    &req->rl_registered);
-
 	rc = rpcrdma_marshal_req(r_xprt, rqst);
 	if (rc < 0)
 		goto failed_marshal;
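
Note the design shift: the old send path invalidated stale registrations inline with ro_unmap_sync, which waits for the invalidation to complete before returning, while the marshaling path now hands each stale MW to rpcrdma_defer_mr_recovery. A plausible sketch of that deferral pattern follows, assuming a per-buffer stale list and a recovery worker (the mw_xprt, rb_stale_mrs, rb_recovery_lock, and rb_recovery_worker names are assumptions, not taken from this diff):

/* Sketch of deferred MR recovery: park the stale MW on a list and
 * kick a background worker instead of invalidating it inline.
 * Field names (mw_xprt, rb_*) are assumptions for illustration.
 */
void
rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &mw->mw_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	list_add_tail(&mw->mw_list, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

Deferring this way keeps the retransmit path from blocking on hardware invalidation while still guaranteeing that stale registrations are eventually recovered.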