@@ -66,11 +66,14 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
 	case TCP_V4_FLOW:
 	case UDP_V4_FLOW:
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
 		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
 		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
 		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
 		break;
 	case IP_USER_FLOW:
+	case IPV6_USER_FLOW:
 		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
 		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
 		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
@@ -115,29 +118,203 @@ static void mask_spec(u8 *mask, u8 *val, size_t size)
 	*((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
 }
 
-static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
-		    __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
+#define MLX5E_FTE_SET(header_p, fld, v)  \
+	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)
+
+#define MLX5E_FTE_ADDR_OF(header_p, fld) \
+	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
+
+static void
+set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
+	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
 {
 	if (ip4src_m) {
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
-				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
+		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
 		       &ip4src_v, sizeof(ip4src_v));
-		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
-				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
+		memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
 		       0xff, sizeof(ip4src_m));
 	}
 	if (ip4dst_m) {
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
-				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &ip4dst_v, sizeof(ip4dst_v));
-		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
-				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+		memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       0xff, sizeof(ip4dst_m));
 	}
-	MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
-		 ethertype, ETH_P_IP);
-	MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
-		 ethertype, 0xffff);
+
+	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
+	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
+}
+
+static void
+set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
+	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
+{
+	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
+
+	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
+		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       ip6src_v, ip6_sz);
+		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       ip6src_m, ip6_sz);
+	}
+	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
+		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       ip6dst_v, ip6_sz);
+		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       ip6dst_m, ip6_sz);
+	}
+
+	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
+	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
+}
+
+static void
+set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
+	__be16 pdst_m, __be16 pdst_v)
+{
+	if (psrc_m) {
+		MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
+		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
+	}
+	if (pdst_m) {
+		MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
+		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
+	}
+
+	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
+	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
+}
+
+static void
+set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
+	__be16 pdst_m, __be16 pdst_v)
+{
+	if (psrc_m) {
+		MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
+		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
+	}
+
+	if (pdst_m) {
+		MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
+		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
+	}
+
+	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
+	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
+}
+
+static void
+parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
+	struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;
+
+	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
+		l4_mask->ip4dst, l4_val->ip4dst);
+
+	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+		l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
+	struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;
+
+	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
+		l4_mask->ip4dst, l4_val->ip4dst);
+
+	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+		l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+	struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
+
+	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
+		l3_mask->ip4dst, l3_val->ip4dst);
+
+	if (l3_mask->proto) {
+		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
+		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
+	}
+}
+
+static void
+parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+	struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
+
+	set_ip6(headers_c, headers_v, l3_mask->ip6src,
+		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);
+
+	if (l3_mask->l4_proto) {
+		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
+		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
+	}
+}
+
+static void
+parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
+	struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;
+
+	set_ip6(headers_c, headers_v, l4_mask->ip6src,
+		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
+
+	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+		l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
+	struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;
+
+	set_ip6(headers_c, headers_v, l4_mask->ip6src,
+		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
+
+	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
+		l4_mask->pdst, l4_val->pdst);
+}
+
+static void
+parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
+	struct ethhdr *eth_val = &fs->h_u.ether_spec;
+
+	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
+	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
+	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
+	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
+	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
+	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
+	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
+}
+
+static void
+set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
+{
+	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
+	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
+	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
+	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
+}
+
+static void
+set_dmac(void *headers_c, void *headers_v,
+	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
+{
+	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
+	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
 }
 
 static int set_flow_attrs(u32 *match_c, u32 *match_v,
@@ -148,112 +325,42 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
 	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
 					     outer_headers);
 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
-	struct ethtool_tcpip4_spec *l4_mask;
-	struct ethtool_tcpip4_spec *l4_val;
-	struct ethtool_usrip4_spec *l3_mask;
-	struct ethtool_usrip4_spec *l3_val;
-	struct ethhdr *eth_val;
-	struct ethhdr *eth_mask;
 
 	switch (flow_type) {
 	case TCP_V4_FLOW:
-		l4_mask = &fs->m_u.tcp_ip4_spec;
-		l4_val = &fs->h_u.tcp_ip4_spec;
-		set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
-			l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
-		if (l4_mask->psrc) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
-				 0xffff);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
-				 ntohs(l4_val->psrc));
-		}
-		if (l4_mask->pdst) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
-				 0xffff);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
-				 ntohs(l4_val->pdst));
-		}
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
-			 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
-			 IPPROTO_TCP);
+		parse_tcp4(outer_headers_c, outer_headers_v, fs);
 		break;
 	case UDP_V4_FLOW:
-		l4_mask = &fs->m_u.tcp_ip4_spec;
-		l4_val = &fs->h_u.tcp_ip4_spec;
-		set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
-			l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
-		if (l4_mask->psrc) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
-				 0xffff);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
-				 ntohs(l4_val->psrc));
-		}
-		if (l4_mask->pdst) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
-				 0xffff);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
-				 ntohs(l4_val->pdst));
-		}
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
-			 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
-			 IPPROTO_UDP);
+		parse_udp4(outer_headers_c, outer_headers_v, fs);
 		break;
 	case IP_USER_FLOW:
-		l3_mask = &fs->m_u.usr_ip4_spec;
-		l3_val = &fs->h_u.usr_ip4_spec;
-		set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
-			l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
+		parse_ip4(outer_headers_c, outer_headers_v, fs);
+		break;
+	case TCP_V6_FLOW:
+		parse_tcp6(outer_headers_c, outer_headers_v, fs);
+		break;
+	case UDP_V6_FLOW:
+		parse_udp6(outer_headers_c, outer_headers_v, fs);
+		break;
+	case IPV6_USER_FLOW:
+		parse_ip6(outer_headers_c, outer_headers_v, fs);
 		break;
 	case ETHER_FLOW:
-		eth_mask = &fs->m_u.ether_spec;
-		eth_val = &fs->h_u.ether_spec;
-
-		mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_c, smac_47_16),
-				eth_mask->h_source);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_v, smac_47_16),
-				eth_val->h_source);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_c, dmac_47_16),
-				eth_mask->h_dest);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_v, dmac_47_16),
-				eth_val->h_dest);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
-			 ntohs(eth_mask->h_proto));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
-			 ntohs(eth_val->h_proto));
+		parse_ether(outer_headers_c, outer_headers_v, fs);
 		break;
 	default:
 		return -EINVAL;
 	}
 
 	if ((fs->flow_type & FLOW_EXT) &&
-	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
-			 cvlan_tag, 1);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
-			 cvlan_tag, 1);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
-			 first_vid, 0xfff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
-			 first_vid, ntohs(fs->h_ext.vlan_tci));
-	}
+	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
+		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);
+
 	if (fs->flow_type & FLOW_MAC_EXT &&
 	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
 		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_c, dmac_47_16),
-				fs->m_ext.h_dest);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_v, dmac_47_16),
-				fs->h_ext.h_dest);
+		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
+			 fs->h_ext.h_dest);
 	}
 
 	return 0;
@@ -379,16 +486,143 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
 #define all_zeros_or_all_ones(field)		\
 	((field) == 0 || (field) == (__force typeof(field))-1)
 
+static int validate_ethter(struct ethtool_rx_flow_spec *fs)
+{
+	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
+	int ntuples = 0;
+
+	if (!is_zero_ether_addr(eth_mask->h_dest))
+		ntuples++;
+	if (!is_zero_ether_addr(eth_mask->h_source))
+		ntuples++;
+	if (eth_mask->h_proto)
+		ntuples++;
+	return ntuples;
+}
+
+static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
+	int ntuples = 0;
+
+	if (l4_mask->tos)
+		return -EINVAL;
+
+	if (l4_mask->ip4src) {
+		if (!all_ones(l4_mask->ip4src))
+			return -EINVAL;
+		ntuples++;
+	}
+	if (l4_mask->ip4dst) {
+		if (!all_ones(l4_mask->ip4dst))
+			return -EINVAL;
+		ntuples++;
+	}
+	if (l4_mask->psrc) {
+		if (!all_ones(l4_mask->psrc))
+			return -EINVAL;
+		ntuples++;
+	}
+	if (l4_mask->pdst) {
+		if (!all_ones(l4_mask->pdst))
+			return -EINVAL;
+		ntuples++;
+	}
+	/* Flow is TCP/UDP */
+	return ++ntuples;
+}
+
+static int validate_ip4(struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+	int ntuples = 0;
+
+	if (l3_mask->l4_4_bytes || l3_mask->tos ||
+	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+		return -EINVAL;
+	if (l3_mask->ip4src) {
+		if (!all_ones(l3_mask->ip4src))
+			return -EINVAL;
+		ntuples++;
+	}
+	if (l3_mask->ip4dst) {
+		if (!all_ones(l3_mask->ip4dst))
+			return -EINVAL;
+		ntuples++;
+	}
+	if (l3_mask->proto)
+		ntuples++;
+	/* Flow is IPv4 */
+	return ++ntuples;
+}
+
+static int validate_ip6(struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+	int ntuples = 0;
+
+	if (l3_mask->l4_4_bytes || l3_mask->tclass)
+		return -EINVAL;
+	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
+		ntuples++;
+
+	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
+		ntuples++;
+	if (l3_mask->l4_proto)
+		ntuples++;
+	/* Flow is IPv6 */
+	return ++ntuples;
+}
+
+static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
+	int ntuples = 0;
+
+	if (l4_mask->tclass)
+		return -EINVAL;
+
+	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
+		ntuples++;
+
+	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
+		ntuples++;
+
+	if (l4_mask->psrc) {
+		if (!all_ones(l4_mask->psrc))
+			return -EINVAL;
+		ntuples++;
+	}
+	if (l4_mask->pdst) {
+		if (!all_ones(l4_mask->pdst))
+			return -EINVAL;
+		ntuples++;
+	}
+	/* Flow is TCP/UDP */
+	return ++ntuples;
+}
+
+static int validate_vlan(struct ethtool_rx_flow_spec *fs)
+{
+	if (fs->m_ext.vlan_etype ||
+	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+		return -EINVAL;
+
+	if (fs->m_ext.vlan_tci &&
+	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
+		return -EINVAL;
+
+	return 1;
+}
+
 static int validate_flow(struct mlx5e_priv *priv,
 			 struct ethtool_rx_flow_spec *fs)
 {
-	struct ethtool_tcpip4_spec *l4_mask;
-	struct ethtool_usrip4_spec *l3_mask;
-	struct ethhdr *eth_mask;
 	int num_tuples = 0;
+	int ret = 0;
 
 	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
-		return -EINVAL;
+		return -ENOSPC;
 
 	if (fs->ring_cookie >= priv->channels.params.num_channels &&
 	    fs->ring_cookie != RX_CLS_FLOW_DISC)
@@ -396,73 +630,42 @@ static int validate_flow(struct mlx5e_priv *priv,
 
 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
 	case ETHER_FLOW:
-		eth_mask = &fs->m_u.ether_spec;
-		if (!is_zero_ether_addr(eth_mask->h_dest))
-			num_tuples++;
-		if (!is_zero_ether_addr(eth_mask->h_source))
-			num_tuples++;
-		if (eth_mask->h_proto)
-			num_tuples++;
+		num_tuples += validate_ethter(fs);
 		break;
 	case TCP_V4_FLOW:
 	case UDP_V4_FLOW:
-		if (fs->m_u.tcp_ip4_spec.tos)
-			return -EINVAL;
-		l4_mask = &fs->m_u.tcp_ip4_spec;
-		if (l4_mask->ip4src) {
-			if (!all_ones(l4_mask->ip4src))
-				return -EINVAL;
-			num_tuples++;
-		}
-		if (l4_mask->ip4dst) {
-			if (!all_ones(l4_mask->ip4dst))
-				return -EINVAL;
-			num_tuples++;
-		}
-		if (l4_mask->psrc) {
-			if (!all_ones(l4_mask->psrc))
-				return -EINVAL;
-			num_tuples++;
-		}
-		if (l4_mask->pdst) {
-			if (!all_ones(l4_mask->pdst))
-				return -EINVAL;
-			num_tuples++;
-		}
-		/* Flow is TCP/UDP */
-		num_tuples++;
+		ret = validate_tcpudp4(fs);
+		if (ret < 0)
+			return ret;
+		num_tuples += ret;
 		break;
 	case IP_USER_FLOW:
-		l3_mask = &fs->m_u.usr_ip4_spec;
-		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
-		    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
-			return -EINVAL;
-		if (l3_mask->ip4src) {
-			if (!all_ones(l3_mask->ip4src))
-				return -EINVAL;
-			num_tuples++;
-		}
-		if (l3_mask->ip4dst) {
-			if (!all_ones(l3_mask->ip4dst))
-				return -EINVAL;
-			num_tuples++;
-		}
-		/* Flow is IPv4 */
-		num_tuples++;
+		ret = validate_ip4(fs);
+		if (ret < 0)
+			return ret;
+		num_tuples += ret;
+		break;
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+		ret = validate_tcpudp6(fs);
+		if (ret < 0)
+			return ret;
+		num_tuples += ret;
+		break;
+	case IPV6_USER_FLOW:
+		ret = validate_ip6(fs);
+		if (ret < 0)
+			return ret;
+		num_tuples += ret;
 		break;
 	default:
-		return -EINVAL;
+		return -ENOTSUPP;
 	}
 	if ((fs->flow_type & FLOW_EXT)) {
-		if (fs->m_ext.vlan_etype ||
-		    (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
-			return -EINVAL;
-
-		if (fs->m_ext.vlan_tci) {
-			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
-				return -EINVAL;
-		}
-		num_tuples++;
+		ret = validate_vlan(fs);
+		if (ret < 0)
+			return ret;
+		num_tuples += ret;
 	}
 
 	if (fs->flow_type & FLOW_MAC_EXT &&
@@ -472,8 +675,9 @@ static int validate_flow(struct mlx5e_priv *priv,
 	return num_tuples;
 }
 
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
-			       struct ethtool_rx_flow_spec *fs)
+static int
+mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+			   struct ethtool_rx_flow_spec *fs)
 {
 	struct mlx5e_ethtool_table *eth_ft;
 	struct mlx5e_ethtool_rule *eth_rule;
@@ -483,8 +687,9 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
 
 	num_tuples = validate_flow(priv, fs);
 	if (num_tuples <= 0) {
-		netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
-		return -EINVAL;
+		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
+			    __func__, num_tuples);
+		return num_tuples;
 	}
 
 	eth_ft = get_flow_table(priv, fs, num_tuples);
@@ -519,8 +724,8 @@ del_ethtool_rule:
 	return err;
 }
 
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
-			      int location)
+static int
+mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
 {
 	struct mlx5e_ethtool_rule *eth_rule;
 	int err = 0;
@@ -539,8 +744,9 @@ out:
 	return err;
 }
 
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
-			   int location)
+static int
+mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
+		       struct ethtool_rxnfc *info, int location)
 {
 	struct mlx5e_ethtool_rule *eth_rule;
 
@@ -557,8 +763,9 @@ int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
 	return -ENOENT;
 }
 
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
-				u32 *rule_locs)
+static int
+mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+			    struct ethtool_rxnfc *info, u32 *rule_locs)
 {
 	int location = 0;
 	int idx = 0;
@@ -587,3 +794,51 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
 {
 	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
 }
+
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	int err = 0;
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+int mlx5e_get_rxnfc(struct net_device *dev,
+		    struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = priv->channels.params.num_channels;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
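
Note: as a minimal user-space sketch (not part of the patch), the new IPv6 classification path can be exercised through the ETHTOOL_SRXCLSRLINS command that mlx5e_set_rxnfc() now dispatches to mlx5e_ethtool_flow_replace(). The interface name "eth0", destination port 443, RX ring 2, and rule slot 0 below are arbitrary placeholder values.

/* Hypothetical example: steer TCPv6 traffic with destination port 443 to
 * RX ring 2 via the ethtool flow-steering ioctl. Masks are full-ones at
 * this level, as validate_tcpudp6() in the patch requires. Error handling
 * is kept minimal; this is an illustrative sketch only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V6_FLOW;
	nfc.fs.h_u.tcp_ip6_spec.pdst = htons(443);	/* match destination port 443 */
	nfc.fs.m_u.tcp_ip6_spec.pdst = 0xffff;		/* full mask on the port field */
	nfc.fs.ring_cookie = 2;				/* deliver to RX ring 2 */
	nfc.fs.location = 0;				/* rule slot 0 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	if (fd >= 0)
		close(fd);
	return 0;
}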