@@ -341,6 +341,40 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
 	return skb;
 }
 
+/*
+ * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Pushes the VLAN tag from @skb->vlan_tci into the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+					vlan_tx_tag_get(skb));
+	if (likely(skb))
+		skb->vlan_tci = 0;
+	return skb;
+}
+
+/*
+ * vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Checks if a tag is present in @skb->vlan_tci and, if it is, pushes the
+ * VLAN tag from @skb->vlan_tci into the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+	if (vlan_tx_tag_present(skb))
+		skb = __vlan_hwaccel_push_inside(skb);
+	return skb;
+}
+
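As a rough usage sketch (not taken from the patch itself): a driver whose hardware cannot insert VLAN tags on transmit could call the new helper at the top of its xmit routine along the lines below. The names foo_start_xmit and foo_hw_xmit_frame are hypothetical, chosen only for illustration.

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	/* Hypothetical hardware-queueing helper, not a real kernel API. */
	static netdev_tx_t foo_hw_xmit_frame(struct net_device *dev,
					     struct sk_buff *skb);

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		/* Push any hw-accelerated VLAN tag into the packet data,
		 * since this (hypothetical) NIC cannot insert it itself.
		 */
		skb = vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb))
			return NETDEV_TX_OK;	/* skb already freed on error */

		return foo_hw_xmit_frame(dev, skb);
	}

Because the helper follows the skb_unshare() convention, the caller does not free the original skb on failure; it simply treats the packet as consumed.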
 /**
  * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
  * @skb: skbuff to tag