@@ -124,6 +124,7 @@
 #include <linux/if_ether.h>
 #include <linux/net_tstamp.h>
 #include <linux/phy.h>
+#include <net/vxlan.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -756,6 +757,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 						ADDMACADRSEL);
 	hw_feat->ts_src        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
 	hw_feat->sa_vlan_ins   = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+	hw_feat->vxn           = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);
 
 	/* Hardware feature register 1 */
 	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
@@ -860,6 +862,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 			(hw_feat->ts_src == 3) ? "internal/external" : "n/a");
 		dev_dbg(pdata->dev, "  SA/VLAN insertion     : %s\n",
 			hw_feat->sa_vlan_ins ? "yes" : "no");
+		dev_dbg(pdata->dev, "  VXLAN/NVGRE support   : %s\n",
+			hw_feat->vxn ? "yes" : "no");
 
 		/* Hardware feature register 1 */
 		dev_dbg(pdata->dev, "  RX fifo size          : %u\n",
@@ -903,6 +907,116 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 	}
 }
 
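+/* Clear the VXLAN-related bits from both the active and the encapsulated
+ * (hw_enc_features) feature sets so the stack stops requesting tunnel
+ * offloads from this device.
+ */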
+static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+
+	if (!pdata->vxlan_offloads_set)
+		return;
+
+	netdev_info(netdev, "disabling VXLAN offloads\n");
+
+	netdev->hw_enc_features &= ~(NETIF_F_SG |
+				     NETIF_F_IP_CSUM |
+				     NETIF_F_IPV6_CSUM |
+				     NETIF_F_RXCSUM |
+				     NETIF_F_TSO |
+				     NETIF_F_TSO6 |
+				     NETIF_F_GRO |
+				     NETIF_F_GSO_UDP_TUNNEL |
+				     NETIF_F_GSO_UDP_TUNNEL_CSUM);
+
+	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL |
+			      NETIF_F_GSO_UDP_TUNNEL_CSUM);
+
+	pdata->vxlan_offloads_set = 0;
+}
+
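+/* Tear down the hardware side: stop VXLAN packet detection and forget
+ * the UDP port that was programmed into the device.
+ */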
+static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->vxlan_port_set)
+		return;
+
+	pdata->hw_if.disable_vxlan(pdata);
+
+	pdata->vxlan_port_set = 0;
+	pdata->vxlan_port = 0;
+}
+
+static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata)
+{
+	xgbe_disable_vxlan_offloads(pdata);
+
+	xgbe_disable_vxlan_hw(pdata);
+}
+
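+/* Advertise the VXLAN offloads again, including whatever subset of the
+ * udp tunnel features (pdata->vxlan_features) survived xgbe_fix_features.
+ */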
+static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+
+	if (pdata->vxlan_offloads_set)
+		return;
+
+	netdev_info(netdev, "enabling VXLAN offloads\n");
+
+	netdev->hw_enc_features |= NETIF_F_SG |
+				   NETIF_F_IP_CSUM |
+				   NETIF_F_IPV6_CSUM |
+				   NETIF_F_RXCSUM |
+				   NETIF_F_TSO |
+				   NETIF_F_TSO6 |
+				   NETIF_F_GRO |
+				   pdata->vxlan_features;
+
+	netdev->features |= pdata->vxlan_features;
+
+	pdata->vxlan_offloads_set = 1;
+}
+
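+/* Only a single VXLAN UDP port appears to be programmable at a time
+ * (pdata->vxlan_port holds one value), so enable detection using the
+ * first port on the tracked list.
+ */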
+static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_vxlan_data *vdata;
+
+	if (pdata->vxlan_port_set)
+		return;
+
+	if (list_empty(&pdata->vxlan_ports))
+		return;
+
+	vdata = list_first_entry(&pdata->vxlan_ports,
+				 struct xgbe_vxlan_data, list);
+
+	pdata->vxlan_port_set = 1;
+	pdata->vxlan_port = be16_to_cpu(vdata->port);
+
+	pdata->hw_if.enable_vxlan(pdata);
+}
+
+static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata)
+{
+	/* VXLAN acceleration desired? */
+	if (!pdata->vxlan_features)
+		return;
+
+	/* VXLAN acceleration possible? */
+	if (pdata->vxlan_force_disable)
+		return;
+
+	xgbe_enable_vxlan_hw(pdata);
+
+	xgbe_enable_vxlan_offloads(pdata);
+}
+
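+/* Return to a clean state: clear the hardware port match, re-advertise
+ * the offloads if any are still configured, and drop any force-disable
+ * condition so acceleration can be re-enabled later.
+ */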
+static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata)
+{
+	xgbe_disable_vxlan_hw(pdata);
+
+	if (pdata->vxlan_features)
+		xgbe_enable_vxlan_offloads(pdata);
+
+	pdata->vxlan_force_disable = 0;
+}
+
 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
 {
 	struct xgbe_channel *channel;
@@ -1226,6 +1340,8 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 	hw_if->enable_tx(pdata);
 	hw_if->enable_rx(pdata);
 
+	udp_tunnel_get_rx_info(netdev);
+
 	netif_tx_start_all_queues(netdev);
 
 	xgbe_start_timers(pdata);
@@ -1267,6 +1383,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 	xgbe_stop_timers(pdata);
 	flush_workqueue(pdata->dev_workqueue);
 
+	xgbe_reset_vxlan_accel(pdata);
+
 	hw_if->disable_tx(pdata);
 	hw_if->disable_rx(pdata);
 
@@ -1555,10 +1673,18 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
 	if (ret)
 		return ret;
 
-	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-	packet->tcp_header_len = tcp_hdrlen(skb);
+	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
+		packet->header_len = skb_inner_transport_offset(skb) +
+				     inner_tcp_hdrlen(skb);
+		packet->tcp_header_len = inner_tcp_hdrlen(skb);
+	} else {
+		packet->header_len = skb_transport_offset(skb) +
+				     tcp_hdrlen(skb);
+		packet->tcp_header_len = tcp_hdrlen(skb);
+	}
 	packet->tcp_payload_len = skb->len - packet->header_len;
 	packet->mss = skb_shinfo(skb)->gso_size;
+
 	DBGPR("  packet->header_len=%u\n", packet->header_len);
 	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
 	      packet->tcp_header_len, packet->tcp_payload_len);
@@ -1573,6 +1699,49 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
 	return 0;
 }
 
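+/* Transmit-path check for VXLAN encapsulation: the skb must be marked as
+ * encapsulated, need a partial checksum, carry UDP in its outer header
+ * and be destined to one of the UDP ports registered with the driver.
+ */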
+static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
+{
+	struct xgbe_vxlan_data *vdata;
+
+	if (pdata->vxlan_force_disable)
+		return false;
+
+	if (!skb->encapsulation)
+		return false;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		if (ip_hdr(skb)->protocol != IPPROTO_UDP)
+			return false;
+		break;
+
+	case htons(ETH_P_IPV6):
+		if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+			return false;
+		break;
+
+	default:
+		return false;
+	}
+
+	/* See if we have the UDP port in our list */
+	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
+		if ((skb->protocol == htons(ETH_P_IP)) &&
+		    (vdata->sa_family == AF_INET) &&
+		    (vdata->port == udp_hdr(skb)->dest))
+			return true;
+		else if ((skb->protocol == htons(ETH_P_IPV6)) &&
+			 (vdata->sa_family == AF_INET6) &&
+			 (vdata->port == udp_hdr(skb)->dest))
+			return true;
+	}
+
+	return false;
+}
+
 static int xgbe_is_tso(struct sk_buff *skb)
 {
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1621,6 +1790,10 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
 			       CSUM_ENABLE, 1);
 
+	if (xgbe_is_vxlan(pdata, skb))
+		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+			       VXLAN, 1);
+
 	if (skb_vlan_tag_present(skb)) {
 		/* VLAN requires an extra descriptor if tag is different */
 		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
@@ -2050,18 +2223,83 @@ static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 	return 0;
 }
 
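+/* ndo_fix_features callback: keep the udp tunnel feature bits mutually
+ * consistent (tx implies rx, tunnel checksum offload follows IP checksum
+ * offload) and strip them entirely when the hardware lacks VXLAN support
+ * or acceleration has been force-disabled.
+ */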
+static netdev_features_t xgbe_fix_features(struct net_device *netdev,
+					   netdev_features_t features)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	netdev_features_t vxlan_base, vxlan_mask;
+
+	vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
+	vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+	pdata->vxlan_features = features & vxlan_mask;
+
+	/* Only fix VXLAN-related features */
+	if (!pdata->vxlan_features)
+		return features;
+
+	/* If VXLAN isn't supported then clear any features:
+	 *   This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets
+	 *   automatically set if ndo_udp_tunnel_add is set.
+	 */
+	if (!pdata->hw_feat.vxn)
+		return features & ~vxlan_mask;
+
+	/* VXLAN CSUM requires VXLAN base */
+	if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
+	    !(features & NETIF_F_GSO_UDP_TUNNEL)) {
+		netdev_notice(netdev,
+			      "forcing tx udp tunnel support\n");
+		features |= NETIF_F_GSO_UDP_TUNNEL;
+	}
+
+	/* Can't do one without doing the other */
+	if ((features & vxlan_base) != vxlan_base) {
+		netdev_notice(netdev,
+			      "forcing both tx and rx udp tunnel support\n");
+		features |= vxlan_base;
+	}
+
+	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+		if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
+			netdev_notice(netdev,
+				      "forcing tx udp tunnel checksumming on\n");
+			features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		}
+	} else {
+		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
+			netdev_notice(netdev,
+				      "forcing tx udp tunnel checksumming off\n");
+			features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		}
+	}
+
+	pdata->vxlan_features = features & vxlan_mask;
+
+	/* Adjust UDP Tunnel based on current state */
+	if (pdata->vxlan_force_disable) {
+		netdev_notice(netdev,
+			      "VXLAN acceleration disabled, turning off udp tunnel features\n");
+		features &= ~vxlan_mask;
+	}
+
+	return features;
+}
+
 static int xgbe_set_features(struct net_device *netdev,
 			     netdev_features_t features)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+	netdev_features_t udp_tunnel;
 	int ret = 0;
 
 	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
 	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
 	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
 	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+	udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL;
 
 	if ((features & NETIF_F_RXHASH) && !rxhash)
 		ret = hw_if->enable_rss(pdata);
@@ -2085,6 +2323,11 @@ static int xgbe_set_features(struct net_device *netdev,
 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
 		hw_if->disable_rx_vlan_filtering(pdata);
 
+	if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel)
+		xgbe_enable_vxlan_accel(pdata);
+	else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel)
+		xgbe_disable_vxlan_accel(pdata);
+
 	pdata->netdev_features = features;
 
 	DBGPR("<--xgbe_set_features\n");
@@ -2092,6 +2335,111 @@ static int xgbe_set_features(struct net_device *netdev,
 	return 0;
 }
 
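+/* ndo_udp_tunnel_add callback: track each VXLAN port the stack reports.
+ * GFP_ATOMIC is used since this may be called in atomic context; if the
+ * allocation fails, ports can no longer be tracked reliably, so
+ * acceleration is force-disabled.
+ */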
+static void xgbe_udp_tunnel_add(struct net_device *netdev,
+				struct udp_tunnel_info *ti)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_vxlan_data *vdata;
+
+	if (!pdata->hw_feat.vxn)
+		return;
+
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
+
+	pdata->vxlan_port_count++;
+
+	netif_dbg(pdata, drv, netdev,
+		  "adding VXLAN tunnel, family=%hx/port=%hx\n",
+		  ti->sa_family, be16_to_cpu(ti->port));
+
+	if (pdata->vxlan_force_disable)
+		return;
+
+	vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC);
+	if (!vdata) {
+		/* Can no longer properly track VXLAN ports */
+		pdata->vxlan_force_disable = 1;
+		netif_dbg(pdata, drv, netdev,
+			  "internal error, disabling VXLAN accelerations\n");
+
+		xgbe_disable_vxlan_accel(pdata);
+
+		return;
+	}
+	vdata->sa_family = ti->sa_family;
+	vdata->port = ti->port;
+
+	list_add_tail(&vdata->list, &pdata->vxlan_ports);
+
+	/* First port added? */
+	if (pdata->vxlan_port_count == 1) {
+		xgbe_enable_vxlan_accel(pdata);
+
+		return;
+	}
+}
+
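+/* ndo_udp_tunnel_del callback: remove the port from the tracked list,
+ * then either reset acceleration when no ports remain or reprogram the
+ * hardware if the first entry on the list has changed.
+ */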
+static void xgbe_udp_tunnel_del(struct net_device *netdev,
+				struct udp_tunnel_info *ti)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_vxlan_data *vdata;
+
+	if (!pdata->hw_feat.vxn)
+		return;
+
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
+
+	netif_dbg(pdata, drv, netdev,
+		  "deleting VXLAN tunnel, family=%hx/port=%hx\n",
+		  ti->sa_family, be16_to_cpu(ti->port));
+
+	/* Don't need safe version since loop terminates with deletion */
+	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
+		if (vdata->sa_family != ti->sa_family)
+			continue;
+
+		if (vdata->port != ti->port)
+			continue;
+
+		list_del(&vdata->list);
+		kfree(vdata);
+
+		break;
+	}
+
+	pdata->vxlan_port_count--;
+	if (!pdata->vxlan_port_count) {
+		xgbe_reset_vxlan_accel(pdata);
+
+		return;
+	}
+
+	if (pdata->vxlan_force_disable)
+		return;
+
+	/* See if VXLAN tunnel id needs to be changed */
+	vdata = list_first_entry(&pdata->vxlan_ports,
+				 struct xgbe_vxlan_data, list);
+	if (pdata->vxlan_port == be16_to_cpu(vdata->port))
+		return;
+
+	pdata->vxlan_port = be16_to_cpu(vdata->port);
+	pdata->hw_if.set_vxlan_id(pdata);
+}
+
+static netdev_features_t xgbe_features_check(struct sk_buff *skb,
+					     struct net_device *netdev,
+					     netdev_features_t features)
+{
+	features = vlan_features_check(skb, features);
+	features = vxlan_features_check(skb, features);
+
+	return features;
+}
+
 static const struct net_device_ops xgbe_netdev_ops = {
 	.ndo_open		= xgbe_open,
 	.ndo_stop		= xgbe_close,
@@ -2109,7 +2457,11 @@ static const struct net_device_ops xgbe_netdev_ops = {
 	.ndo_poll_controller	= xgbe_poll_controller,
 #endif
 	.ndo_setup_tc		= xgbe_setup_tc,
+	.ndo_fix_features	= xgbe_fix_features,
 	.ndo_set_features	= xgbe_set_features,
+	.ndo_udp_tunnel_add	= xgbe_udp_tunnel_add,
+	.ndo_udp_tunnel_del	= xgbe_udp_tunnel_del,
+	.ndo_features_check	= xgbe_features_check,
 };
 
 const struct net_device_ops *xgbe_get_netdev_ops(void)
@@ -2421,6 +2773,15 @@ skip_data:
 					      RX_PACKET_ATTRIBUTES, CSUM_DONE))
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+			if (XGMAC_GET_BITS(packet->attributes,
+					   RX_PACKET_ATTRIBUTES, TNP)) {
+				skb->encapsulation = 1;
+
+				if (XGMAC_GET_BITS(packet->attributes,
+						   RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
+					skb->csum_level = 1;
+			}
+
 			if (XGMAC_GET_BITS(packet->attributes,
 					   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),