rcu/nocb: Don't invoke local rcu core on callback overload from nocb kthread
rcu_core() tries to ensure that its self-invocation in case of callback
overload only happens in softirq/rcuc mode. Indeed it doesn't make sense
to trigger the local RCU core from a nocb_cb kthread, since that kthread
can execute on a CPU different from the target rdp's. Also, in case of
overload, the nocb_cb kthread simply iterates a new loop of callback
processing.

However the "offloaded" check that aims at preventing misplaced
rcu_core() invocations is wrong. First of all, that state is volatile,
and second, softirq/rcuc can execute while the target rdp is offloaded.
As a result, rcu_core() can be invoked on the wrong CPU while in the
process of (de-)offloading.

Fix that by moving the rcu_core() self-invocation to rcu_core() itself,
irrespective of the rdp offloaded state.

Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
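To make the race concrete, here is a minimal standalone sketch of the
before/after control flow. It is NOT the kernel code: the structures and
helper signatures below are stubbed assumptions for illustration, and only
the function names and the placement of the self-invocation mirror
kernel/rcu/tree.c.

/*
 * Hedged sketch: stubbed data structures and helpers; only the control
 * flow of the fix is the point here.
 */
#include <stdbool.h>
#include <stdio.h>

struct rcu_data {
        bool offloaded;         /* stub: the volatile state the old check sampled */
        int ready_cbs;          /* stub: number of callbacks ready to invoke */
};

static bool rcu_segcblist_ready_cbs(struct rcu_data *rdp)
{
        return rdp->ready_cbs > 0;      /* stub of the real segcblist query */
}

static void invoke_rcu_core(void)
{
        printf("raising RCU softirq on the local CPU\n");
}

static void rcu_do_batch(struct rcu_data *rdp)
{
        rdp->ready_cbs -= 3;    /* stub: invoke one batch of callbacks */
        /*
         * Old placement: the core was re-invoked here, gated on a single
         * sample of the offloaded state taken at function entry.  That
         * sample can go stale during (de-)offloading, and rcu_do_batch()
         * may be running in a nocb_cb kthread on a CPU other than rdp's,
         * so the softirq could be raised on the wrong CPU.
         */
}

static void rcu_core(void)
{
        struct rcu_data rdp = { .offloaded = false, .ready_cbs = 5 };

        rcu_do_batch(&rdp);
        /*
         * New placement: rcu_core() only ever runs in softirq/rcuc
         * context on rdp's own CPU, so re-invoking the core here is
         * always local and needs no offloaded check at all.
         */
        if (rcu_segcblist_ready_cbs(&rdp))
                invoke_rcu_core();
}

int main(void)
{
        rcu_core();
        return 0;
}

The design point is that rcu_core() is guaranteed to execute on the rdp's
own CPU, so the overload self-invocation no longer depends on a snapshot
of the volatile offloaded state.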
parent a554ba2888
commit 0598a4d442

1 changed file with 5 additions and 5 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2460,7 +2460,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	int div;
 	bool __maybe_unused empty;
 	unsigned long flags;
-	const bool offloaded = rcu_rdp_is_offloaded(rdp);
 	struct rcu_head *rhp;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	long bl, count = 0;
@@ -2582,9 +2581,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
 
 	rcu_nocb_unlock_irqrestore(rdp, flags);
 
-	/* Re-invoke RCU core processing if there are callbacks remaining. */
-	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
-		invoke_rcu_core();
 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
 }
 
@@ -2771,8 +2767,12 @@ static __latent_entropy void rcu_core(void)
 
 	/* If there are callbacks ready, invoke them. */
 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
-	    likely(READ_ONCE(rcu_scheduler_fully_active)))
+	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
 		rcu_do_batch(rdp);
+		/* Re-invoke RCU core processing if there are callbacks remaining. */
+		if (rcu_segcblist_ready_cbs(&rdp->cblist))
+			invoke_rcu_core();
+	}
 
 	/* Do any needed deferred wakeups of rcuo kthreads. */
 	do_nocb_deferred_wakeup(rdp);