@@ -3448,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	local_irq_save(flags);
 
 	rps_lock(sd);
+	if (!netif_running(skb->dev))
+		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
@@ -3469,6 +3471,7 @@ enqueue:
 			goto enqueue;
 	}
 
+drop:
 	sd->dropped++;
 	rps_unlock(sd);
 
@@ -6135,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
 		unlist_netdevice(dev);
 
 		dev->reg_state = NETREG_UNREGISTERING;
+		on_each_cpu(flush_backlog, dev, 1);
 	}
 
 	synchronize_net();
@@ -6770,8 +6774,6 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
-
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */