staging: lustre: replace l_wait_event_exclusive_head() with wait_event_idle_exclusive

This use of l_wait_event_exclusive_head() waits indefinitely
if the timeout is zero.  If it does wait with a timeout
and times out, the timeout for the next wait is set to zero.

This can be mapped to a call to either
 wait_event_idle_exclusive()
or
 wait_event_idle_exclusive_timeout()
depending on the timeout setting.
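
For reference, the shape of the conversion in ptlrpc_wait_event() below
(abridged; "cond" stands in for the full stopping/incoming/pending/at-check
condition, and the _lifo names are the FIFO fallbacks this patch defines):

	if (svcpt->scp_rqbd_timeout == 0)
		/* zero timeout: wait indefinitely, as before */
		wait_event_idle_exclusive_lifo(svcpt->scp_waitq, cond);
	else if (wait_event_idle_exclusive_lifo_timeout(svcpt->scp_waitq, cond,
							svcpt->scp_rqbd_timeout) == 0)
		/* timed out with cond still false: make the next wait indefinite */
		svcpt->scp_rqbd_timeout = 0;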

The current code arranges for LIFO queuing of waiters,
but include/linux/wait.h doesn't support that yet.
Until it does, fall back on FIFO ordering with
wait_event_idle_exclusive{,_timeout}().
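
The fallback itself is just a pair of aliases (the same #ifndef block that
the second hunk below adds), so call sites keep naming the LIFO variants and
will pick up real LIFO queuing automatically once wait.h provides it:

	#ifndef wait_event_idle_exclusive_lifo
	#define wait_event_idle_exclusive_lifo wait_event_idle_exclusive
	#define wait_event_idle_exclusive_lifo_timeout wait_event_idle_exclusive_timeout
	#endif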

Reviewed-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: NeilBrown <neilb@suse.com>
Reviewed-by: Patrick Farrell <paf@cray.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
NeilBrown 2018-02-13 10:47:59 +11:00 committed by Greg Kroah-Hartman
parent cda3520497
commit 1c6ce08297


@@ -1897,15 +1897,6 @@ ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
 	}
 }
 
-static int
-ptlrpc_retry_rqbds(void *arg)
-{
-	struct ptlrpc_service_part *svcpt = arg;
-
-	svcpt->scp_rqbd_timeout = 0;
-	return -ETIMEDOUT;
-}
-
 static inline int
 ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
 {
@@ -1968,13 +1959,17 @@ ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
 	return !list_empty(&svcpt->scp_req_incoming);
 }
 
+/* We prefer LIFO queuing, but the kernel doesn't provide that yet. */
+#ifndef wait_event_idle_exclusive_lifo
+#define wait_event_idle_exclusive_lifo wait_event_idle_exclusive
+#define wait_event_idle_exclusive_lifo_timeout wait_event_idle_exclusive_timeout
+#endif
+
 static __attribute__((__noinline__)) int
 ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
 		  struct ptlrpc_thread *thread)
 {
 	/* Don't exit while there are replies to be handled */
-	struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
-					     ptlrpc_retry_rqbds, svcpt);
 
 	/* XXX: Add this back when libcfs watchdog is merged upstream
 	lc_watchdog_disable(thread->t_watchdog);
@@ -1982,13 +1977,25 @@ ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
 
 	cond_resched();
 
-	l_wait_event_exclusive_head(svcpt->scp_waitq,
-				    ptlrpc_thread_stopping(thread) ||
-				    ptlrpc_server_request_incoming(svcpt) ||
-				    ptlrpc_server_request_pending(svcpt,
-								  false) ||
-				    ptlrpc_rqbd_pending(svcpt) ||
-				    ptlrpc_at_check(svcpt), &lwi);
+	if (svcpt->scp_rqbd_timeout == 0)
+		wait_event_idle_exclusive_lifo(
+			svcpt->scp_waitq,
+			ptlrpc_thread_stopping(thread) ||
+			ptlrpc_server_request_incoming(svcpt) ||
+			ptlrpc_server_request_pending(svcpt,
+						      false) ||
+			ptlrpc_rqbd_pending(svcpt) ||
+			ptlrpc_at_check(svcpt));
+	else if (0 == wait_event_idle_exclusive_lifo_timeout(
+			svcpt->scp_waitq,
+			ptlrpc_thread_stopping(thread) ||
+			ptlrpc_server_request_incoming(svcpt) ||
+			ptlrpc_server_request_pending(svcpt,
+						      false) ||
+			ptlrpc_rqbd_pending(svcpt) ||
+			ptlrpc_at_check(svcpt),
+			svcpt->scp_rqbd_timeout))
+		svcpt->scp_rqbd_timeout = 0;
 
 	if (ptlrpc_thread_stopping(thread))
 		return -EINTR;
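
For context on the "0 ==" check above: wait_event_idle_exclusive_timeout()
(and therefore the _lifo_timeout alias) follows the usual wait_event_timeout()
return convention, returning 0 only when the timeout elapsed with the condition
still false, so that branch is what replaces the deleted ptlrpc_retry_rqbds()
callback.  A minimal sketch of the convention, with placeholder wait queue "q",
condition "done", timeout variable "timeout" and a 10-second wait:

	long remaining;

	/* 0 means the full timeout elapsed and "done" was still false;
	 * otherwise the remaining jiffies (at least 1) are returned. */
	remaining = wait_event_idle_exclusive_timeout(q, done, 10 * HZ);
	if (remaining == 0)
		timeout = 0;	/* next wait is indefinite */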