@@ -32,6 +32,7 @@
 
 #include <linux/module.h>
 #include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
 #include "mlx5_ib.h"
 #include "user.h"
 
@@ -1364,17 +1365,12 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                          struct mlx5_qp_path *path, u8 port, int attr_mask,
                          u32 path_flags, const struct ib_qp_attr *attr)
 {
+       enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
        int err;
 
-       path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
-       path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
-
        if (attr_mask & IB_QP_PKEY_INDEX)
                path->pkey_index = attr->pkey_index;
 
-       path->grh_mlid = ah->src_path_bits & 0x7f;
-       path->rlid     = cpu_to_be16(ah->dlid);
-
        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >=
                    dev->mdev->port_caps[port - 1].gid_table_len) {
@@ -1383,7 +1379,27 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                               dev->mdev->port_caps[port - 1].gid_table_len);
                        return -EINVAL;
                }
-               path->grh_mlid |= 1 << 7;
+       }
+
+       if (ll == IB_LINK_LAYER_ETHERNET) {
+               if (!(ah->ah_flags & IB_AH_GRH))
+                       return -EINVAL;
+               memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
+               path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
+                                                          ah->grh.sgid_index);
+               path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
+       } else {
+               path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+               path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
+                                                                       0;
+               path->rlid = cpu_to_be16(ah->dlid);
+               path->grh_mlid = ah->src_path_bits & 0x7f;
+               if (ah->ah_flags & IB_AH_GRH)
+                       path->grh_mlid |= 1 << 7;
+               path->dci_cfi_prio_sl = ah->sl & 0xf;
+       }
+
+       if (ah->ah_flags & IB_AH_GRH) {
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->tclass_flowlabel =
@@ -1401,8 +1417,6 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
        if (attr_mask & IB_QP_TIMEOUT)
                path->ackto_lt = attr->timeout << 3;
 
-       path->sl = ah->sl & 0xf;
-
        return 0;
 }
 
@@ -1765,15 +1779,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        enum ib_qp_state cur_state, new_state;
        int err = -EINVAL;
        int port;
+       enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
 
        mutex_lock(&qp->mutex);
 
        cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
+       if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
+               port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+               ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
+       }
+
        if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
            !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
-                               IB_LINK_LAYER_UNSPECIFIED))
+                               ll))
                goto out;
 
        if ((attr_mask & IB_QP_PORT) &&
@@ -3003,7 +3023,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
            ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
                return;
 
-       ib_ah_attr->sl = path->sl & 0xf;
+       ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf;
 
        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
        ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;