mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-01 22:54:01 +00:00
cgroup: use irqsave in cgroup_rstat_flush_locked().
All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock either with spin_lock_irq() or spin_lock_irqsave(). cgroup_rstat_flush_locked() itself acquires cgroup_rstat_cpu_lock which is a raw_spinlock_t. This lock is also acquired in cgroup_rstat_updated() in IRQ context and therefore requires the _irqsave() locking suffix in cgroup_rstat_flush_locked(). Since there is no difference between spinlock_t and raw_spinlock_t on !RT, lockdep does not complain here. On RT lockdep complains because the interrupts were not disabled here and a deadlock is possible. Acquire the raw_spinlock_t with disabled interrupts. Link: https://lkml.kernel.org/r/20220301122143.1521823-2-bigeasy@linutronix.de Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Acked-by: Tejun Heo <tj@kernel.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Zefan Li <lizefan.x@bytedance.com> From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Subject: cgroup: add a comment to cgroup_rstat_flush_locked(). Add a comment explaining why the spin_lock_irq() -> raw_spin_lock_irqsave() combination is needed. Link: https://lkml.kernel.org/r/Yh+DOK73hfVV5ThX@linutronix.de Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Acked-by: Tejun Heo <tj@kernel.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Zefan Li <lizefan.x@bytedance.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
2cd50532ce
commit
b1e2c8df0f
1 changed file with 11 additions and 2 deletions
|
@ -153,8 +153,17 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
|
||||||
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
|
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
|
||||||
cpu);
|
cpu);
|
||||||
struct cgroup *pos = NULL;
|
struct cgroup *pos = NULL;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
raw_spin_lock(cpu_lock);
|
/*
|
||||||
|
* The _irqsave() is needed because cgroup_rstat_lock is
|
||||||
|
* spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
|
||||||
|
* this lock with the _irq() suffix only disables interrupts on
|
||||||
|
* a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
|
||||||
|
* interrupts on both configurations. The _irqsave() ensures
|
||||||
|
* that interrupts are always disabled and later restored.
|
||||||
|
*/
|
||||||
|
raw_spin_lock_irqsave(cpu_lock, flags);
|
||||||
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
|
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
|
||||||
struct cgroup_subsys_state *css;
|
struct cgroup_subsys_state *css;
|
||||||
|
|
||||||
|
@ -166,7 +175,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
|
||||||
css->ss->css_rstat_flush(css, cpu);
|
css->ss->css_rstat_flush(css, cpu);
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
}
|
}
|
||||||
raw_spin_unlock(cpu_lock);
|
raw_spin_unlock_irqrestore(cpu_lock, flags);
|
||||||
|
|
||||||
/* if @may_sleep, play nice and yield if necessary */
|
/* if @may_sleep, play nice and yield if necessary */
|
||||||
if (may_sleep && (need_resched() ||
|
if (may_sleep && (need_resched() ||
|
||||||
|
|
Loading…
Reference in a new issue