irq_work: Also rcuwait for !IRQ_WORK_HARD_IRQ on PREEMPT_RT

On PREEMPT_RT most irq_work items are processed as LAZY via softirq context.
Avoid spin-waiting for them because irq_work_sync() could run at a higher
priority and prevent the irq_work from ever being completed.

Wait additionally for !IRQ_WORK_HARD_IRQ irq_work items on PREEMPT_RT.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20211006111852.1514359-5-bigeasy@linutronix.de
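A minimal usage sketch of the two kinds of items this affects (illustrative
only: the callbacks my_lazy_cb()/my_hard_cb() and example() are hypothetical;
the irq_work API and init macros are the ones from <linux/irq_work.h>):

#include <linux/irq_work.h>

static void my_lazy_cb(struct irq_work *w) { /* on PREEMPT_RT: runs outside hard IRQ context */ }
static void my_hard_cb(struct irq_work *w) { /* always runs in hard IRQ context */ }

/* Plain item: no IRQ_WORK_HARD_IRQ flag, so PREEMPT_RT defers it. */
static struct irq_work lazy_work = IRQ_WORK_INIT(my_lazy_cb);
/* Hard item: carries IRQ_WORK_HARD_IRQ and keeps the old spin-wait sync. */
static struct irq_work hard_work = IRQ_WORK_INIT_HARD(my_hard_cb);

static void example(void)
{
	irq_work_queue(&lazy_work);
	irq_work_queue(&hard_work);

	/*
	 * With this change, syncing the !IRQ_WORK_HARD_IRQ item on
	 * PREEMPT_RT sleeps on work->irqwait via rcuwait instead of
	 * spinning, so a high-priority caller can no longer starve the
	 * context that still has to run the item.
	 */
	irq_work_sync(&lazy_work);
	irq_work_sync(&hard_work);
}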


--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -49,6 +49,11 @@ static inline bool irq_work_is_busy(struct irq_work *work)
 	return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
 }
 
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
+
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);


--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -217,7 +217,8 @@ void irq_work_single(void *arg)
 	 */
 	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
 
-	if (!arch_irq_work_has_interrupt())
+	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
+	    !arch_irq_work_has_interrupt())
 		rcuwait_wake_up(&work->irqwait);
 }
@@ -277,7 +278,8 @@ void irq_work_sync(struct irq_work *work)
 	lockdep_assert_irqs_enabled();
 	might_sleep();
 
-	if (!arch_irq_work_has_interrupt()) {
+	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
+	    !arch_irq_work_has_interrupt()) {
 		rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
 				   TASK_UNINTERRUPTIBLE);
 		return;