-rw-r--r--  net/core/dev.c    | 4 ++--
-rw-r--r--  net/core/skbuff.c | 5 ++---
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1551aabac343..d15568f5a44f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6632,11 +6632,11 @@ static void skb_defer_free_flush(struct softnet_data *sd)
 	if (!READ_ONCE(sd->defer_list))
 		return;
 
-	spin_lock_irq(&sd->defer_lock);
+	spin_lock(&sd->defer_lock);
 	skb = sd->defer_list;
 	sd->defer_list = NULL;
 	sd->defer_count = 0;
-	spin_unlock_irq(&sd->defer_lock);
+	spin_unlock(&sd->defer_lock);
 
 	while (skb != NULL) {
 		next = skb->next;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bd815a00d2af..304a966164d8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6870,7 +6870,6 @@ void skb_attempt_defer_free(struct sk_buff *skb)
 {
 	int cpu = skb->alloc_cpu;
 	struct softnet_data *sd;
-	unsigned long flags;
 	unsigned int defer_max;
 	bool kick;
 
@@ -6889,7 +6888,7 @@ nodefer:	__kfree_skb(skb);
 	if (READ_ONCE(sd->defer_count) >= defer_max)
 		goto nodefer;
 
-	spin_lock_irqsave(&sd->defer_lock, flags);
+	spin_lock_bh(&sd->defer_lock);
 	/* Send an IPI every time queue reaches half capacity. */
 	kick = sd->defer_count == (defer_max >> 1);
 	/* Paired with the READ_ONCE() few lines above */
@@ -6898,7 +6897,7 @@ nodefer:	__kfree_skb(skb);
 	skb->next = sd->defer_list;
 	/* Paired with READ_ONCE() in skb_defer_free_flush() */
 	WRITE_ONCE(sd->defer_list, skb);
-	spin_unlock_irqrestore(&sd->defer_lock, flags);
+	spin_unlock_bh(&sd->defer_lock);
 
 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
 	 * if we are unlucky enough (this seems very unlikely).
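The patch weakens the locking on defer_lock in both directions: skb_attempt_defer_free() moves from spin_lock_irqsave() to spin_lock_bh(), and skb_defer_free_flush() from spin_lock_irq() to a plain spin_lock(). As the conversion implies, defer_lock is never taken from hard-IRQ context, so disabling interrupts around the critical sections was stronger than needed; the flush side's plain spin_lock() further implies its callers already run with bottom halves disabled (softirq context), while the enqueue side can be reached from process context and must still disable BH to avoid deadlocking against a local softirq flush. Below is a minimal kernel-style sketch of that locking rule, under those assumptions; struct defer_queue, defer_enqueue() and defer_splice() are hypothetical names standing in for the softnet_data fields and the two patched functions, not kernel API.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the defer_* fields of softnet_data. */
struct defer_queue {
	spinlock_t	lock;	/* never taken from hard-IRQ context */
	struct sk_buff	*list;
	unsigned int	count;
};

/*
 * Producer, mirroring skb_attempt_defer_free(): may be called from
 * process context, so it disables BH while holding the lock. Without
 * this, the softirq consumer below could preempt the critical section
 * on the same CPU and spin on the lock forever.
 */
static void defer_enqueue(struct defer_queue *q, struct sk_buff *skb)
{
	spin_lock_bh(&q->lock);
	skb->next = q->list;
	q->list = skb;
	q->count++;
	spin_unlock_bh(&q->lock);
}

/*
 * Consumer, mirroring skb_defer_free_flush(): its callers already run
 * with BH disabled, so a plain spin_lock() suffices. Because no
 * hard-IRQ path ever takes the lock, neither side needs the
 * _irq/_irqsave variants this patch removes.
 */
static struct sk_buff *defer_splice(struct defer_queue *q)
{
	struct sk_buff *skb;

	spin_lock(&q->lock);
	skb = q->list;
	q->list = NULL;
	q->count = 0;
	spin_unlock(&q->lock);
	return skb;
}

The practical payoff is shorter IRQ-off windows: interrupts stay enabled across both critical sections, and the producer no longer needs the flags word that the patch deletes from skb_attempt_defer_free().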