@@ -3448,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	local_irq_save(flags);
 
 	rps_lock(sd);
+	if (!netif_running(skb->dev))
+		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
@@ -3469,6 +3471,7 @@ enqueue:
 			goto enqueue;
 	}
 
+drop:
 	sd->dropped++;
 	rps_unlock(sd);
 
@@ -3771,8 +3774,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	pt_prev = NULL;
 
-	rcu_read_lock();
-
 another_round:
 	skb->skb_iif = skb->dev->ifindex;
 
@@ -3782,7 +3783,7 @@ another_round:
 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 		skb = skb_vlan_untag(skb);
 		if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3812,10 +3813,10 @@ skip_taps:
 	if (static_key_false(&ingress_needed)) {
 		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 		if (!skb)
-			goto unlock;
+			goto out;
 
 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
-			goto unlock;
+			goto out;
 	}
 #endif
 #ifdef CONFIG_NET_CLS_ACT
@@ -3833,7 +3834,7 @@ ncls:
 		if (vlan_do_receive(&skb))
 			goto another_round;
 		else if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3845,7 +3846,7 @@ ncls:
 		switch (rx_handler(&skb)) {
 		case RX_HANDLER_CONSUMED:
 			ret = NET_RX_SUCCESS;
-			goto unlock;
+			goto out;
 		case RX_HANDLER_ANOTHER:
 			goto another_round;
 		case RX_HANDLER_EXACT:
@@ -3899,8 +3900,7 @@ drop:
 		ret = NET_RX_DROP;
 	}
 
-unlock:
-	rcu_read_unlock();
+out:
 	return ret;
 }
 
@@ -3931,29 +3931,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
+	int ret;
+
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
 			return ret;
 		}
-		rcu_read_unlock();
 	}
 #endif
-	return __netif_receive_skb(skb);
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 
 /**
@@ -4498,8 +4499,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		struct sk_buff *skb;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
@@ -6135,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
 		unlist_netdevice(dev);
 
 		dev->reg_state = NETREG_UNREGISTERING;
+		on_each_cpu(flush_backlog, dev, 1);
 	}
 
 	synchronize_net();
@@ -6770,8 +6774,6 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
-
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
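
Notes on the change above (annotation, not part of the patch). Taken together, the hunks do three things. First, rcu_read_lock() moves out of __netif_receive_skb_core() into its callers, netif_receive_skb_internal() and process_backlog(), so the old "unlock" label collapses into a plain "out" label and every packet, including ones replayed from the per-CPU backlog, is handled inside a read-side critical section owned by the caller. Second, enqueue_to_backlog() re-checks netif_running() under rps_lock() and funnels the failure into the existing drop accounting via the new "drop:" label, so no packet can be queued for a device that has started unregistering. Third, flush_backlog() now runs on each CPU in rollback_registered_many(), immediately after the device is marked NETREG_UNREGISTERING, instead of much later in netdev_run_todo(); the netif_running() check guarantees nothing is re-queued behind that flush.

Below is a minimal userspace sketch, for illustration only, of the enqueue-side pattern: re-check device state under the queue lock, and send both failure modes (device going away, queue full) through one shared drop path. The names (struct device, struct backlog, enqueue) are invented for the sketch; this is pthread code, not kernel code.

/*
 * Minimal userspace sketch of the enqueue/drop pattern used in
 * enqueue_to_backlog() above. Names are illustrative, not kernel APIs.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct device {
	bool running;
};

struct backlog {
	pthread_mutex_t lock;
	unsigned int qlen;
	unsigned int max;
	unsigned int dropped;
};

/* Re-check device state under the queue lock, as the patch does. */
static int enqueue(struct backlog *b, struct device *dev)
{
	int ret = -1;

	pthread_mutex_lock(&b->lock);
	if (!dev->running)		/* analogue of the new netif_running() check */
		goto drop;
	if (b->qlen < b->max) {		/* analogue of the netdev_max_backlog test */
		b->qlen++;
		ret = 0;
		goto unlock;
	}
	/* queue full: fall through to the shared drop path */
drop:
	b->dropped++;			/* shared drop accounting, like sd->dropped++ */
unlock:
	pthread_mutex_unlock(&b->lock);
	return ret;
}

int main(void)
{
	struct device dev = { .running = false };
	struct backlog b = { PTHREAD_MUTEX_INITIALIZER, 0, 1000, 0 };

	enqueue(&b, &dev);	/* dropped: device not running */
	dev.running = true;
	enqueue(&b, &dev);	/* queued */
	printf("qlen=%u dropped=%u\n", b.qlen, b.dropped);
	return 0;
}

Compile with cc -pthread; the first enqueue() is dropped because the device is not running, the second succeeds, so it prints "qlen=1 dropped=1". The kernel version has the same shape, with rps_lock()/rps_unlock() in place of the mutex and netif_running(skb->dev) as the state check.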