drop_monitor: replace spin_lock by raw_spin_lock
[ Upstream commit f1e197a665 ]
trace_drop_common() is called with preemption disabled, and it acquires
a spin_lock. This is problematic for RT kernels because spin_locks are
sleeping locks in this configuration, which causes the following splat:
BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:48
in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 449, name: rcuc/47
preempt_count: 1, expected: 0
RCU nest depth: 2, expected: 2
5 locks held by rcuc/47/449:
#0: ff1100086ec30a60 ((softirq_ctrl.lock)){+.+.}-{2:2}, at: __local_bh_disable_ip+0x105/0x210
#1: ffffffffb394a280 (rcu_read_lock){....}-{1:2}, at: rt_spin_lock+0xbf/0x130
#2: ffffffffb394a280 (rcu_read_lock){....}-{1:2}, at: __local_bh_disable_ip+0x11c/0x210
#3: ffffffffb394a160 (rcu_callback){....}-{0:0}, at: rcu_do_batch+0x360/0xc70
#4: ff1100086ee07520 (&data->lock){+.+.}-{2:2}, at: trace_drop_common.constprop.0+0xb5/0x290
irq event stamp: 139909
hardirqs last enabled at (139908): [<ffffffffb1df2b33>] _raw_spin_unlock_irqrestore+0x63/0x80
hardirqs last disabled at (139909): [<ffffffffb19bd03d>] trace_drop_common.constprop.0+0x26d/0x290
softirqs last enabled at (139892): [<ffffffffb07a1083>] __local_bh_enable_ip+0x103/0x170
softirqs last disabled at (139898): [<ffffffffb0909b33>] rcu_cpu_kthread+0x93/0x1f0
Preemption disabled at:
[<ffffffffb1de786b>] rt_mutex_slowunlock+0xab/0x2e0
CPU: 47 PID: 449 Comm: rcuc/47 Not tainted 6.9.0-rc2-rt1+ #7
Hardware name: Dell Inc. PowerEdge R650/0Y2G81, BIOS 1.6.5 04/15/2022
Call Trace:
<TASK>
dump_stack_lvl+0x8c/0xd0
dump_stack+0x14/0x20
__might_resched+0x21e/0x2f0
rt_spin_lock+0x5e/0x130
? trace_drop_common.constprop.0+0xb5/0x290
? skb_queue_purge_reason.part.0+0x1bf/0x230
trace_drop_common.constprop.0+0xb5/0x290
? preempt_count_sub+0x1c/0xd0
? _raw_spin_unlock_irqrestore+0x4a/0x80
? __pfx_trace_drop_common.constprop.0+0x10/0x10
? rt_mutex_slowunlock+0x26a/0x2e0
? skb_queue_purge_reason.part.0+0x1bf/0x230
? __pfx_rt_mutex_slowunlock+0x10/0x10
? skb_queue_purge_reason.part.0+0x1bf/0x230
trace_kfree_skb_hit+0x15/0x20
trace_kfree_skb+0xe9/0x150
kfree_skb_reason+0x7b/0x110
skb_queue_purge_reason.part.0+0x1bf/0x230
? __pfx_skb_queue_purge_reason.part.0+0x10/0x10
? mark_lock.part.0+0x8a/0x520
...
trace_drop_common() also disables interrupts, but this is a minor issue
because we could easily replace it with a local_lock.
Replace the spin_lock with raw_spin_lock to avoid sleeping in atomic
context.
Signed-off-by: Wander Lairson Costa <wander@redhat.com>
Reported-by: Hu Chunyu <chuhu@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
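For readers less familiar with PREEMPT_RT locking: on RT kernels, spinlock_t is backed by an rt_mutex and may sleep, while raw_spinlock_t always busy-waits with preemption disabled, so it remains legal in non-sleepable context. Below is a minimal sketch of the before/after pattern; the struct and function are illustrative stand-ins, not the actual drop_monitor code.

#include <linux/spinlock.h>
#include <linux/skbuff.h>

/* Illustrative stand-in for per_cpu_dm_data; not the real definition. */
struct dm_data_sketch {
	raw_spinlock_t lock;	/* was: spinlock_t lock; sleeps on RT */
	struct sk_buff *skb;
};

/* Runs from a tracepoint with preemption already disabled. */
static void dm_hit_sketch(struct dm_data_sketch *data)
{
	unsigned long flags;

	/* spin_lock_irqsave() here would end up in rt_spin_lock() on
	 * PREEMPT_RT and trigger the might_sleep() splat shown above;
	 * the raw_ variant keeps true spinning semantics everywhere. */
	raw_spin_lock_irqsave(&data->lock, flags);
	/* ... inspect/replace data->skb ... */
	raw_spin_unlock_irqrestore(&data->lock, flags);
}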
parent 63310043ac
commit f251ccef1d
1 changed file with 10 additions and 10 deletions
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -72,7 +72,7 @@ struct net_dm_hw_entries {
 };
 
 struct per_cpu_dm_data {
-	spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
+	raw_spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
 					 * 'send_timer'
 					 */
 	union {
@@ -166,9 +166,9 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 err:
 	mod_timer(&data->send_timer, jiffies + HZ / 10);
 out:
-	spin_lock_irqsave(&data->lock, flags);
+	raw_spin_lock_irqsave(&data->lock, flags);
 	swap(data->skb, skb);
-	spin_unlock_irqrestore(&data->lock, flags);
+	raw_spin_unlock_irqrestore(&data->lock, flags);
 
 	if (skb) {
 		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
@@ -223,7 +223,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 
 	local_irq_save(flags);
 	data = this_cpu_ptr(&dm_cpu_data);
-	spin_lock(&data->lock);
+	raw_spin_lock(&data->lock);
 	dskb = data->skb;
 
 	if (!dskb)
@@ -257,7 +257,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	}
 
 out:
-	spin_unlock_irqrestore(&data->lock, flags);
+	raw_spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
@@ -312,9 +312,9 @@ net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
 		mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
 	}
 
-	spin_lock_irqsave(&hw_data->lock, flags);
+	raw_spin_lock_irqsave(&hw_data->lock, flags);
 	swap(hw_data->hw_entries, hw_entries);
-	spin_unlock_irqrestore(&hw_data->lock, flags);
+	raw_spin_unlock_irqrestore(&hw_data->lock, flags);
 
 	return hw_entries;
 }
@@ -446,7 +446,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
 		return;
 
 	hw_data = this_cpu_ptr(&dm_hw_cpu_data);
-	spin_lock_irqsave(&hw_data->lock, flags);
+	raw_spin_lock_irqsave(&hw_data->lock, flags);
 	hw_entries = hw_data->hw_entries;
 
 	if (!hw_entries)
@@ -475,7 +475,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
 	}
 
 out:
-	spin_unlock_irqrestore(&hw_data->lock, flags);
+	raw_spin_unlock_irqrestore(&hw_data->lock, flags);
 }
 
 static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
@@ -1658,7 +1658,7 @@ static struct notifier_block dropmon_net_notifier = {
 
 static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
 {
-	spin_lock_init(&data->lock);
+	raw_spin_lock_init(&data->lock);
 	skb_queue_head_init(&data->drop_queue);
 	u64_stats_init(&data->stats.syncp);
 }
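The commit message above notes that the explicit local_irq_save() in trace_drop_common() could instead be expressed with a local_lock. That change is not part of this patch; the following is only a hedged sketch of what such a variant might look like. The lock name dm_local_lock and the function shape are hypothetical.

#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

/* Hypothetical, NOT part of this commit: a local_lock_t maps to plain
 * IRQ disabling on !RT and to a per-CPU sleeping lock on PREEMPT_RT,
 * so it could stand in for the local_irq_save() that guards the
 * per-CPU data today. */
static DEFINE_PER_CPU(local_lock_t, dm_local_lock) =
	INIT_LOCAL_LOCK(dm_local_lock);

static void trace_drop_common_sketch(struct sk_buff *skb, void *location)
{
	struct per_cpu_dm_data *data;
	unsigned long flags;

	local_lock_irqsave(&dm_local_lock, flags);
	data = this_cpu_ptr(&dm_cpu_data);
	/* ... record the drop as trace_drop_common() does today ... */
	local_unlock_irqrestore(&dm_local_lock, flags);
}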