|
@@ -656,7 +656,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
|
|
|
if (__skb_array_empty(q))
|
|
|
continue;
|
|
|
|
|
|
- skb = skb_array_consume_bh(q);
|
|
|
+ skb = __skb_array_consume(q);
|
|
|
}
|
|
|
if (likely(skb)) {
|
|
|
qdisc_qstats_cpu_backlog_dec(qdisc, skb);
|
|
@@ -697,7 +697,7 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
|
|
|
if (!q->ring.queue)
|
|
|
continue;
|
|
|
|
|
|
- while ((skb = skb_array_consume_bh(q)) != NULL)
|
|
|
+ while ((skb = __skb_array_consume(q)) != NULL)
|
|
|
kfree_skb(skb);
|
|
|
}
|
|
|
|
|
@@ -858,6 +858,11 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
|
|
|
lockdep_set_class(&sch->busylock,
|
|
|
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
|
|
|
|
|
|
+ /* seqlock has the same scope of busylock, for NOLOCK qdisc */
|
|
|
+ spin_lock_init(&sch->seqlock);
|
|
|
+ lockdep_set_class(&sch->seqlock,
|
|
|
+ dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
|
|
|
+
|
|
|
seqcount_init(&sch->running);
|
|
|
lockdep_set_class(&sch->running,
|
|
|
dev->qdisc_running_key ?: &qdisc_running_key);
|
|
@@ -1097,6 +1102,10 @@ static void dev_deactivate_queue(struct net_device *dev,
|
|
|
|
|
|
qdisc = rtnl_dereference(dev_queue->qdisc);
|
|
|
if (qdisc) {
|
|
|
+ bool nolock = qdisc->flags & TCQ_F_NOLOCK;
|
|
|
+
|
|
|
+ if (nolock)
|
|
|
+ spin_lock_bh(&qdisc->seqlock);
|
|
|
spin_lock_bh(qdisc_lock(qdisc));
|
|
|
|
|
|
if (!(qdisc->flags & TCQ_F_BUILTIN))
|
|
@@ -1106,6 +1115,8 @@ static void dev_deactivate_queue(struct net_device *dev,
|
|
|
qdisc_reset(qdisc);
|
|
|
|
|
|
spin_unlock_bh(qdisc_lock(qdisc));
|
|
|
+ if (nolock)
|
|
|
+ spin_unlock_bh(&qdisc->seqlock);
|
|
|
}
|
|
|
}
|
|
|
|