@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 	return dev_forward_skb(dev, skb);
 }
 
+static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
+				      struct sk_buff *skb)
+{
+	int ret = ____dev_forward_skb(dev, skb);
+
+	if (likely(!ret)) {
+		skb->dev = dev;
+		ret = netif_rx(skb);
+	}
+
+	return ret;
+}
+
 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int ret;
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 	return ret;
 }
 
+static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
+				 u32 flags)
+{
+	/* skb->mac_len is not set on normal egress */
+	unsigned int mlen = skb->network_header - skb->mac_header;
+
+	__skb_pull(skb, mlen);
+
+	/* At ingress, the mac header has already been pulled once.
+	 * At egress, skb_postpull_rcsum has to be done in case that
+	 * the skb originated from ingress (i.e. a forwarded skb)
+	 * to ensure that rcsum starts at the net header.
+	 */
+	if (!skb_at_tc_ingress(skb))
+		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+	skb_pop_mac_header(skb);
+	skb_reset_mac_len(skb);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
+				 u32 flags)
+{
+	bpf_push_mac_rcsum(skb);
+	return flags & BPF_F_INGRESS ?
+	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+}
+
+static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
+			  u32 flags)
+{
+	switch (dev->type) {
+	case ARPHRD_TUNNEL:
+	case ARPHRD_TUNNEL6:
+	case ARPHRD_SIT:
+	case ARPHRD_IPGRE:
+	case ARPHRD_VOID:
+	case ARPHRD_NONE:
+		return __bpf_redirect_no_mac(skb, dev, flags);
+	default:
+		return __bpf_redirect_common(skb, dev, flags);
+	}
+}
+
 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 {
 	struct net_device *dev;
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 		return -ENOMEM;
 	}
 
-	bpf_push_mac_rcsum(clone);
-
-	return flags & BPF_F_INGRESS ?
-	       __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
+	return __bpf_redirect(clone, dev, flags);
 }
 
 static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	bpf_push_mac_rcsum(skb);
-
-	return ri->flags & BPF_F_INGRESS ?
-	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+	return __bpf_redirect(skb, dev, ri->flags);
 }
 
 static const struct bpf_func_proto bpf_redirect_proto = {
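
For orientation only (not part of the patch): a minimal sketch of a tc classifier program that exercises the path changed above. Returning bpf_redirect(ifindex, BPF_F_INGRESS) from cls_bpf makes skb_do_redirect() go through the new __bpf_redirect() dispatcher, which selects __bpf_redirect_no_mac() when the target is a mac-less device (e.g. a tun or ipip device). The ifindex value, file name, and section name are placeholders, not taken from the patch.

/* sketch: redirect.bpf.c -- hypothetical example, assumes libbpf headers */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define TARGET_IFINDEX 42	/* hypothetical ifindex of the target device */

SEC("classifier")
int redirect_to_ingress(struct __sk_buff *skb)
{
	/* bpf_redirect() only records the target and flags; the actual
	 * forwarding is done later by skb_do_redirect() -> __bpf_redirect().
	 */
	return bpf_redirect(TARGET_IFINDEX, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";

Loaded with something like: clang -O2 -target bpf -c redirect.bpf.c -o redirect.o, then tc qdisc add dev eth0 clsact and tc filter add dev eth0 egress bpf da obj redirect.o sec classifier (device name hypothetical).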