
Merge tag 'mlx5-fixes-2017-09-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-fixes-2017-09-28

Misc. fixes for mlx5 drivers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 7 years ago
parent
commit
3e7e07288e

+ 2 - 2
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h

@@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
 	{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
 
 TRACE_EVENT(mlx5_fs_set_fte,
-	    TP_PROTO(const struct fs_fte *fte, bool new_fte),
+	    TP_PROTO(const struct fs_fte *fte, int new_fte),
 	    TP_ARGS(fte, new_fte),
 	    TP_STRUCT__entry(
 		__field(const struct fs_fte *, fte)
@@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte,
 		__field(u32, action)
 		__field(u32, flow_tag)
 		__field(u8,  mask_enable)
-		__field(bool, new_fte)
+		__field(int, new_fte)
 		__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
 		__array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
 		__array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))

+ 2 - 2
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c

@@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 	priv->fs.vlan.filter_disabled = false;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
-	mlx5e_del_any_vid_rules(priv);
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 }
 
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 	priv->fs.vlan.filter_disabled = true;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
-	mlx5e_add_any_vid_rules(priv);
+	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 }
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,

+ 5 - 8
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 	struct mlx5e_sw_stats temp, *s = &temp;
 	struct mlx5e_rq_stats *rq_stats;
 	struct mlx5e_sq_stats *sq_stats;
-	u64 tx_offload_none = 0;
 	int i, j;
 
 	memset(s, 0, sizeof(*s));
@@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->rx_lro_bytes	+= rq_stats->lro_bytes;
 		s->rx_csum_none	+= rq_stats->csum_none;
 		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
 		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 		s->rx_xdp_drop += rq_stats->xdp_drop;
 		s->rx_xdp_tx += rq_stats->xdp_tx;
@@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 			s->tx_queue_dropped	+= sq_stats->dropped;
 			s->tx_xmit_more		+= sq_stats->xmit_more;
 			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
-			tx_offload_none		+= sq_stats->csum_none;
+			s->tx_csum_none		+= sq_stats->csum_none;
+			s->tx_csum_partial	+= sq_stats->csum_partial;
 		}
 	}
 
-	/* Update calculated offload counters */
-	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
-	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
-
 	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
 				priv->stats.pport.phy_counters,
 				counter_set.phys_layer_cntrs.link_down_events);
@@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev,
 
 	err = feature_handler(netdev, enable);
 	if (err) {
-		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
-			   enable ? "Enable" : "Disable", feature, err);
+		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+			   enable ? "Enable" : "Disable", &feature, err);
 		return err;
 	}
 

+ 3 - 0
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 
 	if (lro) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		rq->stats.csum_unnecessary++;
 		return;
 	}
 
@@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 			skb->csum_level = 1;
 			skb->encapsulation = 1;
 			rq->stats.csum_unnecessary_inner++;
+			return;
 		}
+		rq->stats.csum_unnecessary++;
 		return;
 	}
 csum_none:

+ 6 - 0
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h

@@ -68,6 +68,7 @@ struct mlx5e_sw_stats {
 	u64 rx_xdp_drop;
 	u64 rx_xdp_tx;
 	u64 rx_xdp_tx_full;
+	u64 tx_csum_none;
 	u64 tx_csum_partial;
 	u64 tx_csum_partial_inner;
 	u64 tx_queue_stopped;
@@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -339,6 +341,7 @@ struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;
 	u64 csum_complete;
+	u64 csum_unnecessary;
 	u64 csum_unnecessary_inner;
 	u64 csum_none;
 	u64 lro_packets;
@@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
@@ -392,6 +396,7 @@ struct mlx5e_sq_stats {
 	u64 tso_bytes;
 	u64 tso_inner_packets;
 	u64 tso_inner_bytes;
+	u64 csum_partial;
 	u64 csum_partial_inner;
 	u64 nop;
 	/* less likely accessed in data path */
@@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },

+ 83 - 8
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -1317,6 +1317,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
 	return true;
 }
 
+static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+					  struct tcf_exts *exts)
+{
+	const struct tc_action *a;
+	bool modify_ip_header;
+	LIST_HEAD(actions);
+	u8 htype, ip_proto;
+	void *headers_v;
+	u16 ethertype;
+	int nkeys, i;
+
+	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
+
+	/* for non-IP we only re-write MACs, so we're okay */
+	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
+		goto out_ok;
+
+	modify_ip_header = false;
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
+		if (!is_tcf_pedit(a))
+			continue;
+
+		nkeys = tcf_pedit_nkeys(a);
+		for (i = 0; i < nkeys; i++) {
+			htype = tcf_pedit_htype(a, i);
+			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
+			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
+				modify_ip_header = true;
+				break;
+			}
+		}
+	}
+
+	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
+	if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+		return false;
+	}
+
+out_ok:
+	return true;
+}
+
+static bool actions_match_supported(struct mlx5e_priv *priv,
+				    struct tcf_exts *exts,
+				    struct mlx5e_tc_flow_parse_attr *parse_attr,
+				    struct mlx5e_tc_flow *flow)
+{
+	u32 actions;
+
+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+		actions = flow->esw_attr->action;
+	else
+		actions = flow->nic_attr->action;
+
+	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+		return modify_header_match_supported(&parse_attr->spec, exts);
+
+	return true;
+}
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				struct mlx5e_tc_flow_parse_attr *parse_attr,
 				struct mlx5e_tc_flow *flow)
@@ -1378,6 +1441,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		return -EINVAL;
 	}
 
+	if (!actions_match_supported(priv, exts, parse_attr, flow))
+		return -EOPNOTSUPP;
+
 	return 0;
 }
 
@@ -1564,7 +1630,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 		break;
 	default:
 		err = -EOPNOTSUPP;
-		goto out;
+		goto free_encap;
 	}
 	fl4.flowi4_tos = tun_key->tos;
 	fl4.daddr = tun_key->u.ipv4.dst;
@@ -1573,7 +1639,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
 				      &fl4, &n, &ttl);
 	if (err)
-		goto out;
+		goto free_encap;
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
 	 * entry in the neigh hash table when a user deletes a rule
@@ -1590,7 +1656,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	 */
 	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
 	if (err)
-		goto out;
+		goto free_encap;
 
 	read_lock_bh(&n->lock);
 	nud_state = n->nud_state;
@@ -1630,8 +1696,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 
 destroy_neigh_entry:
 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
 	kfree(encap_header);
+out:
 	if (n)
 		neigh_release(n);
 	return err;
@@ -1668,7 +1735,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 		break;
 	default:
 		err = -EOPNOTSUPP;
-		goto out;
+		goto free_encap;
 	}
 
 	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
@@ -1678,7 +1745,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
 				      &fl6, &n, &ttl);
 	if (err)
-		goto out;
+		goto free_encap;
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
 	 * entry in the neigh hash table when a user deletes a rule
@@ -1695,7 +1762,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	 */
 	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
 	if (err)
-		goto out;
+		goto free_encap;
 
 	read_lock_bh(&n->lock);
 	nud_state = n->nud_state;
@@ -1736,8 +1803,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 
 destroy_neigh_entry:
 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
 	kfree(encap_header);
+out:
 	if (n)
 		neigh_release(n);
 	return err;
@@ -1791,6 +1859,7 @@ vxlan_encap_offload_err:
 		}
 	}
 
+	/* must verify if encap is valid or not */
 	if (found)
 		goto attach_flow;
 
@@ -1817,6 +1886,8 @@ attach_flow:
 	*encap_dev = e->out_dev;
 	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
 		attr->encap_id = e->encap_id;
+	else
+		err = -EAGAIN;
 
 	return err;
 
@@ -1934,6 +2005,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
 		return -EINVAL;
 	}
+
+	if (!actions_match_supported(priv, exts, parse_attr, flow))
+		return -EOPNOTSUPP;
+
 	return err;
 }
 

+ 1 - 0
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

@@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
 			sq->stats.csum_partial_inner++;
 		} else {
 			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+			sq->stats.csum_partial++;
 		}
 	} else
 		sq->stats.csum_none++;
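
Taken together with the en_main.c, en_rx.c and en_stats.h hunks above, this makes the checksum accounting direct: each packet bumps exactly one per-ring counter, and the software totals become plain sums instead of values derived arithmetically from other counters. A minimal sketch of the resulting TX-side accounting, reduced to the counter updates only (the real function also sets the WQE checksum flags):

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		if (skb->encapsulation)
			sq->stats.csum_partial_inner++;	/* inner (tunnelled) checksum offload */
		else
			sq->stats.csum_partial++;	/* now counted directly per ring */
	} else {
		sq->stats.csum_none++;			/* no checksum offload requested */
	}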

+ 2 - 2
drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c

@@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
 	return 0;
 }
 
-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+int mlx5_fpga_caps(struct mlx5_core_dev *dev)
 {
 	u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
 
-	return mlx5_core_access_reg(dev, in, sizeof(in), caps,
+	return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga,
 				    MLX5_ST_SZ_BYTES(fpga_cap),
 				    MLX5_REG_FPGA_CAP, 0, 0);
 }

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h

@@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters {
 	u64 rx_total_drop;
 };
 
-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_caps(struct mlx5_core_dev *dev);
 int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
 int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
 int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,

+ 1 - 2
drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c

@@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
 	if (err)
 		goto out;
 
-	err = mlx5_fpga_caps(fdev->mdev,
-			     fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
+	err = mlx5_fpga_caps(fdev->mdev);
 	if (err)
 		goto out;
 

+ 8 - 0
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c

@@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 	}
 
 	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+					log_max_flow_counter,
+					ft->type));
 		int list_size = 0;
 
 		list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
 			list_size++;
 		}
+		if (list_size > max_list_size) {
+			err = -EINVAL;
+			goto err_out;
+		}
 
 		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
 			 list_size);
 	}
 
 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
 	kvfree(in);
 	return err;
 }

+ 11 - 0
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h

@@ -52,6 +52,7 @@ enum fs_flow_table_type {
 	FS_FT_FDB             = 0X4,
 	FS_FT_SNIFFER_RX	= 0X5,
 	FS_FT_SNIFFER_TX	= 0X6,
+	FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
 };
 
 enum fs_flow_table_op_mod {
@@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 #define fs_for_each_dst(pos, fte)			\
 	fs_list_for_each_entry(pos, &(fte)->node.children)
 
+#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) (		\
+	(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) :		\
+	(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) :		\
+	(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) :		\
+	(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) :		\
+	(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) :		\
+	(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) :		\
+	(BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
+	)
+
 #endif
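
For reference, a hedged usage sketch of the new macro (mirroring the fs_cmd.c hunk above): it picks the flow_table_properties layout that matches the table type and converts the log2 capability field into a concrete limit. The helper name below is made up purely for illustration.

	/* Sketch only: bound an FTE's flow-counter destination list by the
	 * per-table-type capability. */
	static bool fte_counter_list_fits(struct mlx5_core_dev *dev,
					  struct mlx5_flow_table *ft, int list_size)
	{
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
								log_max_flow_counter,
								ft->type));
		return list_size <= max_list_size;
	}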

+ 2 - 1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c

@@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
 {
 	struct mlx5e_priv          *priv    = mlx5i_epriv(netdev);
 	const struct mlx5e_profile *profile = priv->profile;
+	struct mlx5_core_dev       *mdev    = priv->mdev;
 
 	mlx5e_detach_netdev(priv);
 	profile->cleanup(priv);
 	destroy_workqueue(priv->wq);
 	free_netdev(netdev);
 
-	mlx5e_destroy_mdev_resources(priv->mdev);
+	mlx5e_destroy_mdev_resources(mdev);
 }
 EXPORT_SYMBOL(mlx5_rdma_netdev_free);
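
The mdev pointer is cached up front because priv lives inside the net_device's private area; dereferencing priv->mdev after free_netdev() would read freed memory, which is presumably what this hunk avoids. Illustrative pattern only, not driver code:

	struct mlx5_core_dev *mdev = priv->mdev;	/* cache before the netdev goes away */

	free_netdev(netdev);			/* priv (netdev_priv) is now invalid */
	mlx5e_destroy_mdev_resources(mdev);	/* safe: uses the cached pointer */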

+ 1 - 1
drivers/net/ethernet/mellanox/mlx5/core/sriov.c

@@ -109,7 +109,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 				mlx5_core_warn(dev,
 					       "failed to restore VF %d settings, err %d\n",
 					       vf, err);
-			continue;
+				continue;
 			}
 		}
 		mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);

+ 2 - 3
include/linux/mlx5/device.h

@@ -980,7 +980,6 @@ enum mlx5_cap_type {
 	MLX5_CAP_RESERVED,
 	MLX5_CAP_VECTOR_CALC,
 	MLX5_CAP_QOS,
-	MLX5_CAP_FPGA,
 	/* NUM OF CAP Types */
 	MLX5_CAP_NUM
 };
@@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups {
 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
 
 #define MLX5_CAP_FPGA(mdev, cap) \
-	MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
 
 #define MLX5_CAP64_FPGA(mdev, cap) \
-	MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
 
 enum {
 	MLX5_CMD_STAT_OK			= 0x0,

+ 1 - 0
include/linux/mlx5/driver.h

@@ -774,6 +774,7 @@ struct mlx5_core_dev {
 		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
 		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
 	} caps;
 	phys_addr_t		iseg_base;
 	struct mlx5_init_seg __iomem *iseg;

+ 2 - 1
include/linux/mlx5/mlx5_ifc.h

@@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8         reserved_at_80[0x18];
 	u8         log_max_destination[0x8];
 
-	u8         reserved_at_a0[0x18];
+	u8         log_max_flow_counter[0x8];
+	u8         reserved_at_a8[0x10];
 	u8         log_max_flow[0x8];
 
 	u8         reserved_at_c0[0x40];