@@ -127,7 +127,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
 
                 if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
                     && ibdev->ib_active) {
-                        struct ib_event ibev = {0};
+                        struct ib_event ibev = { };
 
                         ibev.device = &ibdev->ib_dev;
                         ibev.event = (event == NETDEV_UP) ?
@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
         struct mlx5_ib_dev *dev = to_mdev(ibdev);
         struct mlx5_core_dev *mdev = dev->mdev;
         int err = -ENOMEM;
+        int max_sq_desc;
         int max_rq_sg;
         int max_sq_sg;
         u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
         props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
         max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                      sizeof(struct mlx5_wqe_data_seg);
-        max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
-                     sizeof(struct mlx5_wqe_ctrl_seg)) /
-                     sizeof(struct mlx5_wqe_data_seg);
+        max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+        max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
+                     sizeof(struct mlx5_wqe_raddr_seg)) /
+                     sizeof(struct mlx5_wqe_data_seg);
         props->max_sge = min(max_rq_sg, max_sq_sg);
         props->max_sge_rd = MLX5_MAX_SGE_RD;
         props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
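
For reference, the arithmetic behind the new bound (assuming the usual 16-byte mlx5 WQE segments; mlx5_wqe_ctrl_seg, mlx5_wqe_raddr_seg and mlx5_wqe_data_seg are each 16 bytes): capping the send descriptor at 512 bytes gives max_sq_sg = (512 - 16 - 16) / 16 = 30. Budgeting for the RDMA address segment means the advertised max_sge is now also valid for RDMA read/write WQEs, not only plain sends.
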
@@ -643,6 +645,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                            props->max_mcast_grp;
         props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+        props->max_ah = INT_MAX;
         props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
         props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
 
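Since props is zeroed at the top of mlx5_ib_query_device(), max_ah previously read back as 0. Address handles are not a constrained resource on this hardware, hence INT_MAX, consistent with the max_map_per_fmr line above.
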
@@ -669,6 +672,40 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                         1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
         }
 
+        if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+                        uhw->outlen)) {
+                resp.mlx5_ib_support_multi_pkt_send_wqes =
+                        MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+                resp.response_length +=
+                        sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+        }
+
+        if (field_avail(typeof(resp), reserved, uhw->outlen))
+                resp.response_length += sizeof(resp.reserved);
+
+        if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+                resp.cqe_comp_caps.max_num =
+                        MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
+                        MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
+                resp.cqe_comp_caps.supported_format =
+                        MLX5_IB_CQE_RES_FORMAT_HASH |
+                        MLX5_IB_CQE_RES_FORMAT_CSUM;
+                resp.response_length += sizeof(resp.cqe_comp_caps);
+        }
+
+        if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+                if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+                    MLX5_CAP_GEN(mdev, qos)) {
+                        resp.packet_pacing_caps.qp_rate_limit_max =
+                                MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+                        resp.packet_pacing_caps.qp_rate_limit_min =
+                                MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+                        resp.packet_pacing_caps.supported_qpts |=
+                                1 << IB_QPT_RAW_PACKET;
+                }
+                resp.response_length += sizeof(resp.packet_pacing_caps);
+        }
+
         if (uhw->outlen) {
                 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
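The field_avail() checks follow the usual extensible-ABI pattern: each capability is filled in, and response_length bumped, only when the caller's output buffer (uhw->outlen) already has room for that field, so older userspace keeps working. The macro is defined near the top of main.c, roughly:

        #define field_avail(typ, fld, sz) (offsetof(typ, fld) +        \
                sizeof(((typ *)0)->fld) <= (sz))

Because ib_copy_to_udata() is called with resp.response_length, userspace receives exactly the prefix of the response struct that both sides understand.
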
@@ -1093,7 +1130,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
         resp.response_length += sizeof(resp.cqe_version);
 
         if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
-                resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+                resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
+                                      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
                 resp.response_length += sizeof(resp.cmds_supp_uhw);
         }
 
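cmds_supp_uhw advertises which verbs carry driver-private ("user hardware") data; setting MLX5_USER_CMDS_SUPP_UHW_CREATE_AH is what entitles the provider library to expect vendor output from create_ah. A hypothetical caller-side check (the ah_uhw_supported flag is illustrative, not part of any real provider):

        /* userspace provider, after parsing the alloc_ucontext response */
        if (resp.cmds_supp_uhw & MLX5_USER_CMDS_SUPP_UHW_CREATE_AH)
                ctx->ah_uhw_supported = true;   /* hypothetical flag */
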
@@ -1502,6 +1540,22 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
         MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
 }
 
+static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+                           bool inner)
+{
+        if (inner) {
+                MLX5_SET(fte_match_set_misc,
+                         misc_c, inner_ipv6_flow_label, mask);
+                MLX5_SET(fte_match_set_misc,
+                         misc_v, inner_ipv6_flow_label, val);
+        } else {
+                MLX5_SET(fte_match_set_misc,
+                         misc_c, outer_ipv6_flow_label, mask);
+                MLX5_SET(fte_match_set_misc,
+                         misc_v, outer_ipv6_flow_label, val);
+        }
+}
+
 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 {
         MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
@@ -1515,6 +1569,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 #define LAST_IPV4_FIELD tos
 #define LAST_IPV6_FIELD traffic_class
 #define LAST_TCP_UDP_FIELD src_port
+#define LAST_TUNNEL_FIELD tunnel_id
 
 /* Field is the last supported field */
 #define FIELDS_NOT_SUPPORTED(filter, field)\
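
The hunk ends at the macro's first line; for context, FIELDS_NOT_SUPPORTED() rejects a spec whose mask sets any bit past the last field the driver can program, scanning the tail of the filter with memchr_inv(). From memory, roughly:

        #define FIELDS_NOT_SUPPORTED(filter, field)\
                memchr_inv((void *)&filter.field +\
                           sizeof(filter.field), 0,\
                           sizeof(filter) -\
                           offsetof(typeof(filter), field) -\
                           sizeof(filter.field))

Defining LAST_TUNNEL_FIELD as tunnel_id therefore lets the new VXLAN case further down refuse tunnel specs that carry anything beyond the VNI.
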
@@ -1527,155 +1582,164 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 static int parse_flow_attr(u32 *match_c, u32 *match_v,
                            const union ib_flow_spec *ib_spec)
 {
-        void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
-                                             outer_headers);
-        void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
-                                             outer_headers);
         void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
                                            misc_parameters);
         void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
                                            misc_parameters);
+        void *headers_c;
+        void *headers_v;
+
+        if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+                headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+                                         inner_headers);
+                headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+                                         inner_headers);
+        } else {
+                headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+                                         outer_headers);
+                headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+                                         outer_headers);
+        }
 
-        switch (ib_spec->type) {
+        switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
         case IB_FLOW_SPEC_ETH:
                 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
                         return -ENOTSUPP;
 
-                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                              dmac_47_16),
                                 ib_spec->eth.mask.dst_mac);
-                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                              dmac_47_16),
                                 ib_spec->eth.val.dst_mac);
 
-                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                              smac_47_16),
                                 ib_spec->eth.mask.src_mac);
-                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                              smac_47_16),
                                 ib_spec->eth.val.src_mac);
 
                 if (ib_spec->eth.mask.vlan_tag) {
-                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                  vlan_tag, 1);
-                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                  vlan_tag, 1);
 
-                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                  first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
-                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                  first_vid, ntohs(ib_spec->eth.val.vlan_tag));
 
-                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                  first_cfi,
                                  ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
-                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                  first_cfi,
                                  ntohs(ib_spec->eth.val.vlan_tag) >> 12);
 
-                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                  first_prio,
                                  ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
-                        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                  first_prio,
                                  ntohs(ib_spec->eth.val.vlan_tag) >> 13);
                 }
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                          ethertype, ntohs(ib_spec->eth.mask.ether_type));
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                          ethertype, ntohs(ib_spec->eth.val.ether_type));
                 break;
         case IB_FLOW_SPEC_IPV4:
                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
                         return -ENOTSUPP;
 
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                          ethertype, 0xffff);
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                          ethertype, ETH_P_IP);
 
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
                        &ib_spec->ipv4.mask.src_ip,
                        sizeof(ib_spec->ipv4.mask.src_ip));
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
                        &ib_spec->ipv4.val.src_ip,
                        sizeof(ib_spec->ipv4.val.src_ip));
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                        &ib_spec->ipv4.mask.dst_ip,
                        sizeof(ib_spec->ipv4.mask.dst_ip));
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                        &ib_spec->ipv4.val.dst_ip,
                        sizeof(ib_spec->ipv4.val.dst_ip));
 
-                set_tos(outer_headers_c, outer_headers_v,
+                set_tos(headers_c, headers_v,
                         ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
 
-                set_proto(outer_headers_c, outer_headers_v,
+                set_proto(headers_c, headers_v,
                           ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
                 break;
         case IB_FLOW_SPEC_IPV6:
                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
                         return -ENOTSUPP;
 
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                          ethertype, 0xffff);
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                          ethertype, ETH_P_IPV6);
 
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
                        &ib_spec->ipv6.mask.src_ip,
                        sizeof(ib_spec->ipv6.mask.src_ip));
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
                        &ib_spec->ipv6.val.src_ip,
                        sizeof(ib_spec->ipv6.val.src_ip));
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                        &ib_spec->ipv6.mask.dst_ip,
                        sizeof(ib_spec->ipv6.mask.dst_ip));
-                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                        &ib_spec->ipv6.val.dst_ip,
                        sizeof(ib_spec->ipv6.val.dst_ip));
 
-                set_tos(outer_headers_c, outer_headers_v,
+                set_tos(headers_c, headers_v,
                         ib_spec->ipv6.mask.traffic_class,
                         ib_spec->ipv6.val.traffic_class);
 
-                set_proto(outer_headers_c, outer_headers_v,
+                set_proto(headers_c, headers_v,
                           ib_spec->ipv6.mask.next_hdr,
                           ib_spec->ipv6.val.next_hdr);
 
-                MLX5_SET(fte_match_set_misc, misc_params_c,
-                         outer_ipv6_flow_label,
-                         ntohl(ib_spec->ipv6.mask.flow_label));
-                MLX5_SET(fte_match_set_misc, misc_params_v,
-                         outer_ipv6_flow_label,
-                         ntohl(ib_spec->ipv6.val.flow_label));
+                set_flow_label(misc_params_c, misc_params_v,
+                               ntohl(ib_spec->ipv6.mask.flow_label),
+                               ntohl(ib_spec->ipv6.val.flow_label),
+                               ib_spec->type & IB_FLOW_SPEC_INNER);
+
                 break;
         case IB_FLOW_SPEC_TCP:
                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
                                          LAST_TCP_UDP_FIELD))
                         return -ENOTSUPP;
 
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                          0xff);
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                          IPPROTO_TCP);
 
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
                          ntohs(ib_spec->tcp_udp.mask.src_port));
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
                          ntohs(ib_spec->tcp_udp.val.src_port));
 
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
                          ntohs(ib_spec->tcp_udp.mask.dst_port));
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
                          ntohs(ib_spec->tcp_udp.val.dst_port));
                 break;
         case IB_FLOW_SPEC_UDP:
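
IB_FLOW_SPEC_INNER is a modifier bit OR-ed into the spec type, not a spec type of its own: headers_c/headers_v are resolved once up front to either inner_headers or outer_headers of the FTE match param, and the switch masks the bit off. A hypothetical caller-side spec asking to match the inner (encapsulated) Ethernet header would look like:

        /* illustrative only; a real caller also fills val/mask */
        union ib_flow_spec spec = {
                .eth.type = IB_FLOW_SPEC_ETH | IB_FLOW_SPEC_INNER,
                .eth.size = sizeof(struct ib_flow_spec_eth),
        };
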
@@ -1683,21 +1747,31 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
                                          LAST_TCP_UDP_FIELD))
                         return -ENOTSUPP;
 
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                          0xff);
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                          IPPROTO_UDP);
 
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
                          ntohs(ib_spec->tcp_udp.mask.src_port));
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                          ntohs(ib_spec->tcp_udp.val.src_port));
 
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
                          ntohs(ib_spec->tcp_udp.mask.dst_port));
-                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                          ntohs(ib_spec->tcp_udp.val.dst_port));
                 break;
+        case IB_FLOW_SPEC_VXLAN_TUNNEL:
+                if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
+                                         LAST_TUNNEL_FIELD))
+                        return -ENOTSUPP;
+
+                MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
+                         ntohl(ib_spec->tunnel.mask.tunnel_id));
+                MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
+                         ntohl(ib_spec->tunnel.val.tunnel_id));
+                break;
         default:
                 return -EINVAL;
         }
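
tunnel_id arrives in network byte order, so after ntohl() the 24-bit VXLAN VNI sits in the low bits, matching the vxlan_vni match field. The tunnel spec is always programmed into misc_params_c/v: the VNI describes the encapsulation itself, so the INNER flag does not apply here.
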
@@ -2718,6 +2792,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
                                struct ib_port_immutable *immutable)
 {
         struct ib_port_attr attr;
+        struct mlx5_ib_dev *dev = to_mdev(ibdev);
+        enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
         int err;
 
         err = mlx5_ib_query_port(ibdev, port_num, &attr);
@@ -2727,7 +2803,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
         immutable->pkey_tbl_len = attr.pkey_tbl_len;
         immutable->gid_tbl_len = attr.gid_tbl_len;
         immutable->core_cap_flags = get_core_cap_flags(ibdev);
-        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+        if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
+                immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 
         return 0;
 }
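
With RoCE-less Ethernet devices now allowed to register (see the mlx5_ib_add() hunk below), max_mad_size is advertised only where MAD handling exists: always on IB links, and on Ethernet links only when the RoCE capability is set; otherwise it presumably stays 0 from the zero-initialized immutable data.
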
@@ -2741,7 +2818,7 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
                  fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
-static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
 {
         struct mlx5_core_dev *mdev = dev->mdev;
         struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
@@ -2770,7 +2847,7 @@ err_destroy_vport_lag:
         return err;
 }
 
-static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
 {
         struct mlx5_core_dev *mdev = dev->mdev;
 
@@ -2782,7 +2859,21 @@ static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
         }
 }
 
-static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
+{
+        int err;
+
+        dev->roce.nb.notifier_call = mlx5_netdev_event;
+        err = register_netdevice_notifier(&dev->roce.nb);
+        if (err) {
+                dev->roce.nb.notifier_call = NULL;
+                return err;
+        }
+
+        return 0;
+}
+
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
 {
         if (dev->roce.nb.notifier_call) {
                 unregister_netdevice_notifier(&dev->roce.nb);
@@ -2790,39 +2881,40 @@ static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
         }
 }
 
-static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 {
         int err;
 
-        dev->roce.nb.notifier_call = mlx5_netdev_event;
-        err = register_netdevice_notifier(&dev->roce.nb);
-        if (err) {
-                dev->roce.nb.notifier_call = NULL;
+        err = mlx5_add_netdev_notifier(dev);
+        if (err)
                 return err;
-        }
 
-        err = mlx5_nic_vport_enable_roce(dev->mdev);
-        if (err)
-                goto err_unregister_netdevice_notifier;
+        if (MLX5_CAP_GEN(dev->mdev, roce)) {
+                err = mlx5_nic_vport_enable_roce(dev->mdev);
+                if (err)
+                        goto err_unregister_netdevice_notifier;
+        }
 
-        err = mlx5_roce_lag_init(dev);
+        err = mlx5_eth_lag_init(dev);
         if (err)
                 goto err_disable_roce;
 
         return 0;
 
 err_disable_roce:
-        mlx5_nic_vport_disable_roce(dev->mdev);
+        if (MLX5_CAP_GEN(dev->mdev, roce))
+                mlx5_nic_vport_disable_roce(dev->mdev);
 
 err_unregister_netdevice_notifier:
-        mlx5_remove_roce_notifier(dev);
+        mlx5_remove_netdev_notifier(dev);
         return err;
 }
 
-static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
+static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 {
-        mlx5_roce_lag_cleanup(dev);
-        mlx5_nic_vport_disable_roce(dev->mdev);
+        mlx5_eth_lag_cleanup(dev);
+        if (MLX5_CAP_GEN(dev->mdev, roce))
+                mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
 static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
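
The enable path now degrades gracefully: the netdev notifier is useful for any Ethernet port (LAG and port-state tracking), so it is registered unconditionally, while the vport RoCE enable runs only when MLX5_CAP_GEN(roce) is set; the error unwind mirrors the same asymmetry.
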
@@ -2944,9 +3036,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
-        if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
-                return NULL;
-
         printk_once(KERN_INFO "%s", mlx5_version);
 
         dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
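
This deletion is the behavioral core of the series: instead of refusing to probe an Ethernet device that lacks the RoCE capability, the driver now registers the ib_device anyway and simply skips the RoCE-specific setup, presumably so raw packet QP users keep working when RoCE is disabled.
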
@@ -2992,6 +3081,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+                (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+                (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
                 (1ull << IB_USER_VERBS_CMD_REG_MR) |
                 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
                 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
@@ -3014,7 +3105,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
         dev->ib_dev.uverbs_ex_cmd_mask =
                 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
-                (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
+                (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
+                (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
 
         dev->ib_dev.query_device = mlx5_ib_query_device;
         dev->ib_dev.query_port = mlx5_ib_query_port;
@@ -3125,14 +3217,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
         spin_lock_init(&dev->reset_flow_resource_lock);
 
         if (ll == IB_LINK_LAYER_ETHERNET) {
-                err = mlx5_enable_roce(dev);
+                err = mlx5_enable_eth(dev);
                 if (err)
                         goto err_dealloc;
         }
 
         err = create_dev_resources(&dev->devr);
         if (err)
-                goto err_disable_roce;
+                goto err_disable_eth;
 
         err = mlx5_ib_odp_init_one(dev);
         if (err)
@@ -3176,10 +3268,10 @@ err_odp:
 err_rsrc:
         destroy_dev_resources(&dev->devr);
 
-err_disable_roce:
+err_disable_eth:
         if (ll == IB_LINK_LAYER_ETHERNET) {
-                mlx5_disable_roce(dev);
-                mlx5_remove_roce_notifier(dev);
+                mlx5_disable_eth(dev);
+                mlx5_remove_netdev_notifier(dev);
         }
 
 err_free_port:
@@ -3196,14 +3288,14 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
         struct mlx5_ib_dev *dev = context;
         enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
-        mlx5_remove_roce_notifier(dev);
+        mlx5_remove_netdev_notifier(dev);
         ib_unregister_device(&dev->ib_dev);
         mlx5_ib_dealloc_q_counters(dev);
         destroy_umrc_res(dev);
         mlx5_ib_odp_remove_one(dev);
         destroy_dev_resources(&dev->devr);
         if (ll == IB_LINK_LAYER_ETHERNET)
-                mlx5_disable_roce(dev);
+                mlx5_disable_eth(dev);
         kfree(dev->port);
         ib_dealloc_device(&dev->ib_dev);
 }