@@ -106,6 +106,14 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 
 	__skb_queue_tail(&q->skb_bad_txq, skb);
 
+	if (qdisc_is_percpu_stats(q)) {
+		qdisc_qstats_cpu_backlog_inc(q, skb);
+		qdisc_qstats_cpu_qlen_inc(q);
+	} else {
+		qdisc_qstats_backlog_inc(q, skb);
+		q->q.qlen++;
+	}
+
 	if (lock)
 		spin_unlock(lock);
 }
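For reference, the per-CPU helpers used in the new branch are thin wrappers around this_cpu_* operations; a rough sketch of their definitions (believed to live in include/net/sch_generic.h, shown here as an approximation, not verbatim) is:

	/* Approximate definitions of the helpers used above */
	static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
							const struct sk_buff *skb)
	{
		/* note: dereferences skb via qdisc_pkt_len() */
		this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
	}

	static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
	{
		this_cpu_inc(sch->cpu_qstats->qlen);
	}

The detail that qdisc_qstats_cpu_backlog_inc() reads qdisc_pkt_len(skb) is what the pfifo_fast_enqueue() change further below has to work around.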
@@ -196,14 +204,6 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 			break;
 		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
 			qdisc_enqueue_skb_bad_txq(q, nskb);
-
-			if (qdisc_is_percpu_stats(q)) {
-				qdisc_qstats_cpu_backlog_inc(q, nskb);
-				qdisc_qstats_cpu_qlen_inc(q);
-			} else {
-				qdisc_qstats_backlog_inc(q, nskb);
-				q->q.qlen++;
-			}
 			break;
 		}
 		skb->next = nskb;
@@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 	int band = prio2band[skb->priority & TC_PRIO_MAX];
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	struct skb_array *q = band2list(priv, band);
+	unsigned int pkt_len = qdisc_pkt_len(skb);
 	int err;
 
 	err = skb_array_produce(q, skb);
@@ -636,7 +637,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 		return qdisc_drop_cpu(skb, qdisc, to_free);
 
 	qdisc_qstats_cpu_qlen_inc(qdisc);
-	qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+	/* Note: skb cannot be used after skb_array_produce(),
+	 * so we must not use qdisc_qstats_cpu_backlog_inc() here.
+	 */
+	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
 	return NET_XMIT_SUCCESS;
 }
 
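Putting the two pfifo_fast_enqueue() hunks together, the resulting flow looks roughly like the sketch below (abridged, not a verbatim copy of the function): the packet length is snapshotted while this CPU still owns the skb, because once skb_array_produce() succeeds a consumer on another CPU may dequeue and free it, so any later skb dereference (such as the qdisc_pkt_len(skb) read inside qdisc_qstats_cpu_backlog_inc()) would be a potential use-after-free.

	/* Sketch of the fixed enqueue path (abridged, not verbatim) */
	unsigned int pkt_len = qdisc_pkt_len(skb);	/* read while skb is still ours */

	err = skb_array_produce(q, skb);		/* ownership may pass to a consumer here */
	if (unlikely(err))
		return qdisc_drop_cpu(skb, qdisc, to_free);	/* produce failed: skb still ours */

	qdisc_qstats_cpu_qlen_inc(qdisc);
	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);	/* no skb dereference */
	return NET_XMIT_SUCCESS;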