@@ -78,9 +78,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
 	}
 
 	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
-		u32 from = skb->tc_from;
-
-		skb_reset_tc(skb);
+		skb->tc_redirected = 0;
 		skb->tc_skip_classify = 1;
 
 		u64_stats_update_begin(&txp->tsync);
@@ -101,13 +99,12 @@ static void ifb_ri_tasklet(unsigned long _txp)
 		rcu_read_unlock();
 		skb->skb_iif = txp->dev->ifindex;
 
-		if (from & AT_EGRESS) {
+		if (!skb->tc_from_ingress) {
 			dev_queue_xmit(skb);
-		} else if (from & AT_INGRESS) {
+		} else {
 			skb_pull(skb, skb->mac_len);
 			netif_receive_skb(skb);
-		} else
-			BUG();
+		}
 	}
 
 	if (__netif_tx_trylock(txq)) {
@@ -246,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 	txp->rx_bytes += skb->len;
 	u64_stats_update_end(&txp->rsync);
 
-	if (skb->tc_from == AT_STACK || !skb->skb_iif) {
+	if (!skb->tc_redirected || !skb->skb_iif) {
 		dev_kfree_skb(skb);
 		dev->stats.rx_dropped++;
 		return NETDEV_TX_OK;
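
For reference, a sketch of how the two call sites read after this change, assembled
from the hunks above; the stats update, RCU, and tx-queue locking code of
ifb_ri_tasklet() is elided, so this is illustrative rather than the full function.
Note that the old BUG() branch disappears: tc_from_ingress is a single bit, so
"egress" and "ingress" are the only two possible origins.

	/* Illustrative fragments of ifb_ri_tasklet() and ifb_xmit() after
	 * converting skb->tc_from to the tc_redirected / tc_from_ingress bits.
	 */

	/* ifb_ri_tasklet(): drain the per-queue tx list */
	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		/* Clear the redirect mark so the skb is not bounced back to
		 * ifb again, and skip classification on the way out.
		 */
		skb->tc_redirected = 0;
		skb->tc_skip_classify = 1;

		skb->skb_iif = txp->dev->ifindex;

		if (!skb->tc_from_ingress) {
			/* Redirected on egress: transmit normally. */
			dev_queue_xmit(skb);
		} else {
			/* Redirected on ingress: strip the MAC header and
			 * hand the skb back to the receive path.
			 */
			skb_pull(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	/* ifb_xmit(): only redirected packets are accepted; anything coming
	 * straight from the stack is dropped and counted as rx_dropped.
	 */
	if (!skb->tc_redirected || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}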