@@ -24,7 +24,12 @@
 #include <linux/netdev_features.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
+#ifdef CONFIG_QEDE_VXLAN
 #include <net/vxlan.h>
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+#include <net/geneve.h>
+#endif
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
@@ -310,6 +315,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
 		*ipv6_ext = 1;
 
+	if (skb->encapsulation)
+		rc |= XMIT_ENC;
+
 	if (skb_is_gso(skb))
 		rc |= XMIT_LSO;
 
@@ -371,6 +379,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
 	return 0;
 }
 
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+	if (is_encap_pkt)
+		return (skb_inner_transport_header(skb) +
+			inner_tcp_hdrlen(skb) - skb->data);
+	else
+		return (skb_transport_header(skb) +
+			tcp_hdrlen(skb) - skb->data);
+}
+
 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
 static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -381,8 +399,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
 	if (xmit_type & XMIT_LSO) {
 		int hlen;
 
-		hlen = skb_transport_header(skb) +
-		       tcp_hdrlen(skb) - skb->data;
+		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
 
 		/* linear payload would require its own BD */
 		if (skb_headlen(skb) > hlen)
@@ -490,7 +507,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		first_bd->data.bd_flags.bitfields |=
 			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
 
-		first_bd->data.bitfields |= cpu_to_le16(temp);
+		if (xmit_type & XMIT_ENC) {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+		} else {
+			/* In cases when OS doesn't indicate for inner offloads
+			 * when packet is tunnelled, we need to override the HW
+			 * tunnel configuration so that packets are treated as
+			 * regular non tunnelled packets and no inner offloads
+			 * are done by the hardware.
+			 */
+			first_bd->data.bitfields |= cpu_to_le16(temp);
+		}
 
 		/* If the packet is IPv6 with extension header, indicate that
 		 * to FW and pass few params, since the device cracker doesn't
@@ -506,10 +534,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 		third_bd->data.lso_mss =
 			cpu_to_le16(skb_shinfo(skb)->gso_size);
 
-		first_bd->data.bd_flags.bitfields |=
-			1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-		hlen = skb_transport_header(skb) +
-		       tcp_hdrlen(skb) - skb->data;
+		if (unlikely(xmit_type & XMIT_ENC)) {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+			hlen = qede_get_skb_hlen(skb, true);
+		} else {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+			hlen = qede_get_skb_hlen(skb, false);
+		}
 
 		/* @@@TBD - if will not be removed need to check */
 		third_bd->data.bitfields |=
@@ -843,6 +876,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
 
 	if (csum_flag & QEDE_CSUM_UNNECESSARY)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+		skb->csum_level = 1;
 }
 
 static inline void qede_skb_receive(struct qede_dev *edev,
@@ -1132,13 +1168,47 @@ err:
 	tpa_info->skb = NULL;
 }
 
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+	u16 csum_flag = 0;
+	u8 tcsum = 0;
+
+	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
 {
 	u16 csum_flag = 0;
 	u8 csum = 0;
 
-	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
 		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
 			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
 		csum = QEDE_CSUM_UNNECESSARY;
@@ -1153,6 +1223,14 @@ static u8 qede_check_csum(u16 flag)
 	return csum;
 }
 
+static u8 qede_check_csum(u16 flag)
+{
+	if (!qede_tunn_exist(flag))
+		return qede_check_notunn_csum(flag);
+	else
+		return qede_check_tunn_csum(flag);
+}
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
 	struct qede_dev *edev = fp->edev;
@@ -1821,6 +1899,76 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
 	edev->accept_any_vlan = false;
 }
 
+#ifdef CONFIG_QEDE_VXLAN
+static void qede_add_vxlan_port(struct net_device *dev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (edev->vxlan_dst_port)
+		return;
+
+	edev->vxlan_dst_port = t_port;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+
+	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_vxlan_port(struct net_device *dev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (t_port != edev->vxlan_dst_port)
+		return;
+
+	edev->vxlan_dst_port = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
+
+	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
+#ifdef CONFIG_QEDE_GENEVE
+static void qede_add_geneve_port(struct net_device *dev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (edev->geneve_dst_port)
+		return;
+
+	edev->geneve_dst_port = t_port;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
+	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_geneve_port(struct net_device *dev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (t_port != edev->geneve_dst_port)
+		return;
+
+	edev->geneve_dst_port = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
+	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
 static const struct net_device_ops qede_netdev_ops = {
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
@@ -1832,6 +1980,14 @@ static const struct net_device_ops qede_netdev_ops = {
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
 	.ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QEDE_VXLAN
+	.ndo_add_vxlan_port = qede_add_vxlan_port,
+	.ndo_del_vxlan_port = qede_del_vxlan_port,
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+	.ndo_add_geneve_port = qede_add_geneve_port,
+	.ndo_del_geneve_port = qede_del_geneve_port,
+#endif
 };
 
 /* -------------------------------------------------------------------------
@@ -1904,6 +2060,14 @@ static void qede_init_ndev(struct qede_dev *edev)
 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		      NETIF_F_TSO | NETIF_F_TSO6;
 
+	/* Encap features */
+	hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+		       NETIF_F_TSO_ECN;
+	ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+				NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+				NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
 	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
 			      NETIF_F_HIGHDMA;
 	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2004,6 +2168,8 @@ static void qede_sp_task(struct work_struct *work)
 {
 	struct qede_dev *edev = container_of(work, struct qede_dev,
 					     sp_task.work);
+	struct qed_dev *cdev = edev->cdev;
+
 	mutex_lock(&edev->qede_lock);
 
 	if (edev->state == QEDE_STATE_OPEN) {
@@ -2011,6 +2177,24 @@ static void qede_sp_task(struct work_struct *work)
 			qede_config_rx_mode(edev->ndev);
 	}
 
+	if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+		struct qed_tunn_params tunn_params;
+
+		memset(&tunn_params, 0, sizeof(tunn_params));
+		tunn_params.update_vxlan_port = 1;
+		tunn_params.vxlan_port = edev->vxlan_dst_port;
+		qed_ops->tunn_config(cdev, &tunn_params);
+	}
+
+	if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+		struct qed_tunn_params tunn_params;
+
+		memset(&tunn_params, 0, sizeof(tunn_params));
+		tunn_params.update_geneve_port = 1;
+		tunn_params.geneve_port = edev->geneve_dst_port;
+		qed_ops->tunn_config(cdev, &tunn_params);
+	}
+
 	mutex_unlock(&edev->qede_lock);
 }
 
@@ -3149,12 +3333,24 @@ void qede_reload(struct qede_dev *edev,
 static int qede_open(struct net_device *ndev)
 {
 	struct qede_dev *edev = netdev_priv(ndev);
+	int rc;
 
 	netif_carrier_off(ndev);
 
 	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
 
-	return qede_load(edev, QEDE_LOAD_NORMAL);
+	rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_QEDE_VXLAN
+	vxlan_get_rx_port(ndev);
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+	geneve_get_rx_port(ndev);
+#endif
+	return 0;
 }
 
 static int qede_close(struct net_device *ndev)