NFSv4.1: Use the nfs_client's rpc timeouts for backchannel

For backchannel requests that look up the appropriate nfs_client, use the
state-management rpc_clnt's rpc_timeout parameters for the backchannel's
response.  When the nfs_client cannot be found, fall back to using the
xprt's default timeout parameters.

Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
Tested-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 57331a59ac
parent e6f533b615
Author:    Benjamin Coddington <bcodding@redhat.com>, 2024-01-04 09:58:46 -05:00
Committer: Anna Schumaker
8 changed files with 45 additions and 19 deletions
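
Before the per-file hunks, here is a small, self-contained userspace sketch of the fallback this patch implements: when the callback service can associate the request with an nfs_client, the backchannel reply uses that client's state-management timeout values stashed in the svc_rqst (bc_to_initval/bc_to_retries); otherwise it falls back to the transport's defaults. The names bc_timeout, fake_rqst, fake_xprt, and select_bc_timeout are illustrative stand-ins for this sketch, not kernel API.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by this patch. */
struct bc_timeout {
	unsigned long to_initval;	/* initial timeout (jiffies in the kernel) */
	unsigned int  to_retries;	/* max # of retries */
};

struct fake_rqst {
	/* Filled in by the callback service when the nfs_client is found,
	 * mirroring rqstp->bc_to_initval / rqstp->bc_to_retries. */
	unsigned long bc_to_initval;
	unsigned int  bc_to_retries;
};

struct fake_xprt {
	/* Transport-wide defaults, mirroring req->rq_xprt->timeout. */
	struct bc_timeout timeout;
};

/*
 * Mirror of the selection done in svc_process_bc(): prefer the per-client
 * values stashed in the svc_rqst, otherwise fall back to the transport's
 * default timeout parameters.
 */
static struct bc_timeout select_bc_timeout(const struct fake_rqst *rqstp,
					   const struct fake_xprt *xprt)
{
	struct bc_timeout t;

	if (rqstp->bc_to_initval > 0) {
		t.to_initval = rqstp->bc_to_initval;
		t.to_retries = rqstp->bc_to_retries;
	} else {
		t.to_initval = xprt->timeout.to_initval;
		t.to_retries = xprt->timeout.to_retries;
	}
	return t;
}

int main(void)
{
	struct fake_xprt xprt = { .timeout = { .to_initval = 60, .to_retries = 2 } };
	struct fake_rqst found = { .bc_to_initval = 15, .bc_to_retries = 5 };
	struct fake_rqst not_found = { 0 };	/* nfs_client lookup failed */

	struct bc_timeout a = select_bc_timeout(&found, &xprt);
	struct bc_timeout b = select_bc_timeout(&not_found, &xprt);

	printf("client found:   initval=%lu retries=%u\n", a.to_initval, a.to_retries);
	printf("client missing: initval=%lu retries=%u\n", b.to_initval, b.to_retries);
	return 0;
}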

fs/nfs/callback_xdr.c

@@ -967,6 +967,11 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
 		nops--;
 	}
 
+	if (svc_is_backchannel(rqstp) && cps.clp) {
+		rqstp->bc_to_initval = cps.clp->cl_rpcclient->cl_timeout->to_initval;
+		rqstp->bc_to_retries = cps.clp->cl_rpcclient->cl_timeout->to_retries;
+	}
+
 	*hdr_res.status = status;
 	*hdr_res.nops = htonl(nops);
 	nfs4_cb_free_slot(&cps);

include/linux/sunrpc/bc_xprt.h

@@ -20,7 +20,8 @@
 #ifdef CONFIG_SUNRPC_BACKCHANNEL
 struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
 void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
-void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
+void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
+			  const struct rpc_timeout *to);
 void xprt_free_bc_request(struct rpc_rqst *req);
 int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
 void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);

include/linux/sunrpc/sched.h

@@ -37,6 +37,17 @@ struct rpc_wait {
 	struct list_head	timer_list;	/* Timer list */
 };
 
+/*
+ * This describes a timeout strategy
+ */
+struct rpc_timeout {
+	unsigned long		to_initval,		/* initial timeout */
+				to_maxval,		/* max timeout */
+				to_increment;		/* if !exponential */
+	unsigned int		to_retries;		/* max # of retries */
+	unsigned char		to_exponential;
+};
+
 /*
  * This is the RPC task struct
  */
@@ -205,7 +216,8 @@ struct rpc_wait_queue {
  */
 struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
 struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+		struct rpc_timeout *timeout);
 void rpc_put_task(struct rpc_task *);
 void rpc_put_task_async(struct rpc_task *);
 bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);

include/linux/sunrpc/svc.h

@@ -250,6 +250,8 @@ struct svc_rqst {
 	struct net		*rq_bc_net;	/* pointer to backchannel's
 						 * net namespace
 						 */
+	unsigned long		bc_to_initval;
+	unsigned int		bc_to_retries;
 	void **			rq_lease_breaker; /* The v4 client breaking a lease */
 	unsigned int		rq_status_counter; /* RPC processing counter */
 };

include/linux/sunrpc/xprt.h

@@ -30,17 +30,6 @@
 #define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
 #define RPCXPRT_CONGESTED(xprt)	((xprt)->cong >= (xprt)->cwnd)
 
-/*
- * This describes a timeout strategy
- */
-struct rpc_timeout {
-	unsigned long		to_initval,		/* initial timeout */
-				to_maxval,		/* max timeout */
-				to_increment;		/* if !exponential */
-	unsigned int		to_retries;		/* max # of retries */
-	unsigned char		to_exponential;
-};
-
 enum rpc_display_format_t {
 	RPC_DISPLAY_ADDR = 0,
 	RPC_DISPLAY_PORT,

net/sunrpc/clnt.c

@@ -1311,8 +1311,10 @@ static void call_bc_encode(struct rpc_task *task);
  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
  * rpc_execute against it
  * @req: RPC request
+ * @timeout: timeout values to use for this task
  */
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+		struct rpc_timeout *timeout)
 {
 	struct rpc_task *task;
 	struct rpc_task_setup task_setup_data = {
@@ -1331,7 +1333,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
 		return task;
 	}
 
-	xprt_init_bc_request(req, task);
+	xprt_init_bc_request(req, task, timeout);
 	task->tk_action = call_bc_encode;
 	atomic_inc(&task->tk_count);

net/sunrpc/svc.c

@@ -1557,6 +1557,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
 {
 	struct rpc_task *task;
 	int proc_error;
+	struct rpc_timeout timeout;
 
 	/* Build the svc_rqst used by the common processing routine */
 	rqstp->rq_xid = req->rq_xid;
@@ -1602,8 +1603,16 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
 		return;
 	}
 
 	/* Finally, send the reply synchronously */
+	if (rqstp->bc_to_initval > 0) {
+		timeout.to_initval = rqstp->bc_to_initval;
+		timeout.to_retries = rqstp->bc_to_retries;
+	} else {
+		timeout.to_initval = req->rq_xprt->timeout->to_initval;
+		timeout.to_retries = req->rq_xprt->timeout->to_retries;
+	}
+
 	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
-	task = rpc_run_bc_task(req);
+	task = rpc_run_bc_task(req, &timeout);
 	if (IS_ERR(task))
 		return;

net/sunrpc/xprt.c

@@ -1986,7 +1986,8 @@ void xprt_release(struct rpc_task *task)
 #ifdef CONFIG_SUNRPC_BACKCHANNEL
 void
-xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
+xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
+		const struct rpc_timeout *to)
 {
 	struct xdr_buf *xbufp = &req->rq_snd_buf;
@@ -1999,8 +2000,13 @@ xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
 	 */
 	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
 		xbufp->tail[0].iov_len;
 
-	xprt_init_majortimeo(task, req, req->rq_xprt->timeout);
+	/*
+	 * Backchannel Replies are sent with !RPC_TASK_SOFT and
+	 * RPC_TASK_NO_RETRANS_TIMEOUT. The major timeout setting
+	 * affects only how long each Reply waits to be sent when
+	 * a transport connection cannot be established.
+	 */
+	xprt_init_majortimeo(task, req, to);
 }
 #endif
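
The new comment in xprt_init_bc_request() notes that the major timeout only governs how long a backchannel Reply waits to be sent when the transport cannot connect. As a rough illustration of how the fields of a struct rpc_timeout combine into that overall wait, here is a hedged userspace model: it assumes the usual SUNRPC behavior of doubling the base value per retry when to_exponential is set, otherwise adding to_increment per retry, capped at to_maxval. The names bc_rpc_timeout and model_major_timeout and the sample 15s/5-retry and 60s/2-retry values are hypothetical; this is not a copy of xprt_init_majortimeo().

#include <stdio.h>

/* Same minimal rpc_timeout shape as in the patch; the kernel fills these
 * fields from the transport defaults or the client's cl_timeout. */
struct bc_rpc_timeout {
	unsigned long to_initval, to_maxval, to_increment;
	unsigned int  to_retries;
	unsigned char to_exponential;
};

/*
 * Rough model of deriving an overall ("major") timeout from a timeout
 * strategy: exponential strategies double the base value per retry,
 * linear ones add to_increment per retry, and the result is capped at
 * to_maxval.  Assumed behavior for illustration only.
 */
static unsigned long model_major_timeout(const struct bc_rpc_timeout *to)
{
	unsigned long major = to->to_initval;

	if (to->to_exponential)
		major <<= to->to_retries;
	else
		major += to->to_increment * to->to_retries;
	if (to->to_maxval && major > to->to_maxval)
		major = to->to_maxval;
	return major;
}

int main(void)
{
	/* Hypothetical values: a client-supplied 15s base with 5 retries
	 * versus a transport default of 60s with 2 retries (both linear). */
	struct bc_rpc_timeout client = { .to_initval = 15, .to_increment = 15,
					 .to_maxval = 600, .to_retries = 5 };
	struct bc_rpc_timeout xprt_default = { .to_initval = 60, .to_increment = 60,
					       .to_maxval = 600, .to_retries = 2 };

	printf("client timeouts:    major=%lus\n", model_major_timeout(&client));
	printf("transport defaults: major=%lus\n", model_major_timeout(&xprt_default));
	return 0;
}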