@@ -397,7 +397,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
  * when we statistically choose to corrupt one, we instead segment it, returning
  * the first packet to be corrupted, and re-enqueue the remaining frames
  */
-static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
+				     struct sk_buff **to_free)
 {
 	struct sk_buff *segs;
 	netdev_features_t features = netif_skb_features(skb);
@@ -405,7 +406,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 
 	if (IS_ERR_OR_NULL(segs)) {
-		qdisc_drop(skb, sch);
+		qdisc_drop(skb, sch, to_free);
 		return NULL;
 	}
 	consume_skb(skb);
@@ -418,7 +419,8 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
  * 	NET_XMIT_DROP: queue length didn't change.
  * 	NET_XMIT_SUCCESS: one skb was queued.
  */
-static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			 struct sk_buff **to_free)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	/* We don't fill cb now as skb_unshare() may invalidate it */
@@ -443,7 +445,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	if (count == 0) {
 		qdisc_qstats_drop(sch);
-		kfree_skb(skb);
+		__qdisc_drop(skb, to_free);
 		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
@@ -463,7 +465,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 
 		q->duplicate = 0;
-		rootq->enqueue(skb2, rootq);
+		rootq->enqueue(skb2, rootq, to_free);
 		q->duplicate = dupsave;
 	}
 
@@ -475,7 +477,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 */
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (skb_is_gso(skb)) {
-			segs = netem_segment(skb, sch);
+			segs = netem_segment(skb, sch, to_free);
 			if (!segs)
 				return NET_XMIT_DROP;
 		} else {
@@ -488,7 +490,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
 		     skb_checksum_help(skb))) {
-			rc = qdisc_drop(skb, sch);
+			rc = qdisc_drop(skb, sch, to_free);
 			goto finish_segs;
 		}
 
@@ -497,7 +499,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
-		return qdisc_drop(skb, sch);
+		return qdisc_drop(skb, sch, to_free);
 
 	qdisc_qstats_backlog_inc(sch, skb);
 
@@ -557,7 +559,7 @@ finish_segs:
 			segs->next = NULL;
 			qdisc_skb_cb(segs)->pkt_len = segs->len;
 			last_len = segs->len;
-			rc = qdisc_enqueue(segs, sch);
+			rc = qdisc_enqueue(segs, sch, to_free);
 			if (rc != NET_XMIT_SUCCESS) {
 				if (net_xmit_drop_count(rc))
 					qdisc_qstats_drop(sch);
@@ -615,8 +617,11 @@ deliver:
 #endif
 
 			if (q->qdisc) {
-				int err = qdisc_enqueue(skb, q->qdisc);
+				struct sk_buff *to_free = NULL;
+				int err;
+
+				err = qdisc_enqueue(skb, q->qdisc, &to_free);
+				kfree_skb_list(to_free);
 				if (unlikely(err != NET_XMIT_SUCCESS)) {
 					if (net_xmit_drop_count(err)) {
 						qdisc_qstats_drop(sch);