@@ -191,8 +191,16 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
 		goto error;
 	}
 
-	if (ti->sa_family != AF_INET) {
-		netdev_info(netdev, "vxlan: only IPv4 offload supported");
+	switch (ti->sa_family) {
+	case AF_INET6:
+		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) {
+			netdev_info(netdev, "vxlan: only IPv4 offload supported");
+			goto error;
+		}
+		/* Fall through */
+	case AF_INET:
+		break;
+	default:
 		goto error;
 	}
 
@@ -204,6 +212,11 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
 
 		goto error;
 	}
+	if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) &&
+	    !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) {
+		netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter");
+		goto error;
+	}
 
 	err = vnic_dev_overlay_offload_cfg(enic->vdev,
 					   OVERLAY_CFG_VXLAN_PORT_UPDATE,
@@ -271,22 +284,37 @@ static netdev_features_t enic_features_check(struct sk_buff *skb,
 	struct enic *enic = netdev_priv(dev);
 	struct udphdr *udph;
 	u16 port = 0;
-	u16 proto;
+	u8 proto;
 
 	if (!skb->encapsulation)
 		return features;
 
 	features = vxlan_features_check(skb, features);
 
-	/* hardware only supports IPv4 vxlan tunnel */
-	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
+	switch (vlan_get_protocol(skb)) {
+	case htons(ETH_P_IPV6):
+		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
+			goto out;
+		proto = ipv6_hdr(skb)->nexthdr;
+		break;
+	case htons(ETH_P_IP):
+		proto = ip_hdr(skb)->protocol;
+		break;
+	default:
 		goto out;
+	}
 
-	/* hardware does not support offload of ipv6 inner pkt */
-	if (eth->h_proto != ntohs(ETH_P_IP))
+	switch (eth->h_proto) {
+	case ntohs(ETH_P_IPV6):
+		if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
+			goto out;
+		/* Fall through */
+	case ntohs(ETH_P_IP):
+		break;
+	default:
 		goto out;
+	}
 
-	proto = ip_hdr(skb)->protocol;
 
 	if (proto == IPPROTO_UDP) {
 		udph = udp_hdr(skb);
@@ -635,12 +663,25 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
 
 static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
 {
-	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
+
+	switch (eth->h_proto) {
+	case ntohs(ETH_P_IP):
 		inner_ip_hdr(skb)->check = 0;
 		inner_tcp_hdr(skb)->check =
 			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
 					   inner_ip_hdr(skb)->daddr, 0,
 					   IPPROTO_TCP, 0);
+		break;
+	case ntohs(ETH_P_IPV6):
+		inner_tcp_hdr(skb)->check =
+			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
+					 &inner_ipv6_hdr(skb)->daddr, 0,
+					 IPPROTO_TCP, 0);
+		break;
+	default:
+		WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload");
+		break;
 	}
 }
 
@@ -1898,6 +1939,8 @@ static int enic_open(struct net_device *netdev)
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
+		/* enable rq before updating rq desc */
+		vnic_rq_enable(&enic->rq[i]);
 		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
 		/* Need at least one buffer on ring to get going */
 		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
@@ -1909,8 +1952,6 @@ static int enic_open(struct net_device *netdev)
 
 	for (i = 0; i < enic->wq_count; i++)
 		vnic_wq_enable(&enic->wq[i]);
-	for (i = 0; i < enic->rq_count; i++)
-		vnic_rq_enable(&enic->rq[i]);
 
 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_add_station_addr(enic);
@@ -1936,8 +1977,12 @@ static int enic_open(struct net_device *netdev)
 	return 0;
 
 err_out_free_rq:
-	for (i = 0; i < enic->rq_count; i++)
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_disable(&enic->rq[i]);
+		if (err)
+			return err;
 		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+	}
 	enic_dev_notify_unset(enic);
 err_out_free_intr:
 	enic_unset_affinity_hint(enic);
@@ -2151,9 +2196,10 @@ static int enic_dev_wait(struct vnic_dev *vdev,
 static int enic_dev_open(struct enic *enic)
 {
 	int err;
+	u32 flags = CMD_OPENF_IG_DESCCACHE;
 
 	err = enic_dev_wait(enic->vdev, vnic_dev_open,
-			    vnic_dev_open_done, 0);
+			    vnic_dev_open_done, flags);
 	if (err)
 		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
 			err);
@@ -2275,7 +2321,7 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
 {
 	struct device *dev = enic_get_dev(enic);
 	const u8 rss_default_cpu = 0;
-	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
+	u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
 		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
 		NIC_CFG_RSS_HASH_TYPE_IPV6 |
 		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -2283,6 +2329,8 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
 	const u8 rss_base_cpu = 0;
 	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
 
+	if (vnic_dev_capable_udp_rss(enic->vdev))
+		rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP;
 	if (rss_enable) {
 		if (!enic_set_rsskey(enic)) {
 			if (enic_set_rsscpu(enic, rss_hash_bits)) {
@@ -2901,9 +2949,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	netdev->hw_features |= NETIF_F_RXCSUM;
 	if (ENIC_SETTING(enic, VXLAN)) {
 		u64 patch_level;
+		u64 a1 = 0;
 
 		netdev->hw_enc_features |= NETIF_F_RXCSUM |
 					   NETIF_F_TSO |
+					   NETIF_F_TSO6 |
 					   NETIF_F_TSO_ECN |
 					   NETIF_F_GSO_UDP_TUNNEL |
 					   NETIF_F_HW_CSUM |
@@ -2922,9 +2972,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 */
 		err = vnic_dev_get_supported_feature_ver(enic->vdev,
 							 VIC_FEATURE_VXLAN,
-							 &patch_level);
+							 &patch_level, &a1);
 		if (err)
 			patch_level = 0;
+		enic->vxlan.flags = (u8)a1;
 		/* mask bits that are supported by driver
 		 */
 		patch_level &= BIT_ULL(0) | BIT_ULL(2);