net: do not provide hard irq safety for sd->defer_lock
kfree_skb() can be called from hard irq handlers, but skb_attempt_defer_free() is meant to be used from process or BH contexts, and skb_defer_free_flush() is meant to be called from BH contexts. Not having to mask hard irq can save some cycles.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent: e8e1ce8454
commit: 931e93bdf8
@ -6632,11 +6632,11 @@ static void skb_defer_free_flush(struct softnet_data *sd)
|
||||||
if (!READ_ONCE(sd->defer_list))
|
if (!READ_ONCE(sd->defer_list))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irq(&sd->defer_lock);
|
spin_lock(&sd->defer_lock);
|
||||||
skb = sd->defer_list;
|
skb = sd->defer_list;
|
||||||
sd->defer_list = NULL;
|
sd->defer_list = NULL;
|
||||||
sd->defer_count = 0;
|
sd->defer_count = 0;
|
||||||
spin_unlock_irq(&sd->defer_lock);
|
spin_unlock(&sd->defer_lock);
|
||||||
|
|
||||||
while (skb != NULL) {
|
while (skb != NULL) {
|
||||||
next = skb->next;
|
next = skb->next;
|
||||||
|
|
|
@ -6870,7 +6870,6 @@ void skb_attempt_defer_free(struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
int cpu = skb->alloc_cpu;
|
int cpu = skb->alloc_cpu;
|
||||||
struct softnet_data *sd;
|
struct softnet_data *sd;
|
||||||
unsigned long flags;
|
|
||||||
unsigned int defer_max;
|
unsigned int defer_max;
|
||||||
bool kick;
|
bool kick;
|
||||||
|
|
||||||
|
@ -6889,7 +6888,7 @@ nodefer: __kfree_skb(skb);
|
||||||
if (READ_ONCE(sd->defer_count) >= defer_max)
|
if (READ_ONCE(sd->defer_count) >= defer_max)
|
||||||
goto nodefer;
|
goto nodefer;
|
||||||
|
|
||||||
spin_lock_irqsave(&sd->defer_lock, flags);
|
spin_lock_bh(&sd->defer_lock);
|
||||||
/* Send an IPI every time queue reaches half capacity. */
|
/* Send an IPI every time queue reaches half capacity. */
|
||||||
kick = sd->defer_count == (defer_max >> 1);
|
kick = sd->defer_count == (defer_max >> 1);
|
||||||
/* Paired with the READ_ONCE() few lines above */
|
/* Paired with the READ_ONCE() few lines above */
|
||||||
|
@ -6898,7 +6897,7 @@ nodefer: __kfree_skb(skb);
|
||||||
skb->next = sd->defer_list;
|
skb->next = sd->defer_list;
|
||||||
/* Paired with READ_ONCE() in skb_defer_free_flush() */
|
/* Paired with READ_ONCE() in skb_defer_free_flush() */
|
||||||
WRITE_ONCE(sd->defer_list, skb);
|
WRITE_ONCE(sd->defer_list, skb);
|
||||||
spin_unlock_irqrestore(&sd->defer_lock, flags);
|
spin_unlock_bh(&sd->defer_lock);
|
||||||
|
|
||||||
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
|
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
|
||||||
* if we are unlucky enough (this seems very unlikely).
|
* if we are unlucky enough (this seems very unlikely).
|
||||||
|
|
Loading…
Reference in New Issue